Pandaaaa906

transformer

Jul 2nd, 2024
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten, MultiHeadAttention, LayerNormalization, Dropout, Add, GaussianNoise
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt

# Disable the GPU so the script runs on CPU only
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
tf.config.set_visible_devices([], 'GPU')

# Load the local dataset and inspect the first few rows
data = pd.read_csv('/home/xjy/pythonProject/US_Accidents_March23.csv')
print("Columns in the DataFrame:", data.columns)
print(data.head())

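# Note (not in the original script): the US_Accidents CSV is large, so for a
# quick sanity check it may help to load only a subset first, e.g.:
#   data = pd.read_csv('/home/xjy/pythonProject/US_Accidents_March23.csv', nrows=100_000)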
# Assume the actual label column is 'Severity'
actual_label_column = 'Severity'

# Preprocessing: drop rows with missing values, keep only numeric columns,
# and exclude the label so it does not leak into the feature matrix
data = data.dropna()
features = data.select_dtypes(include=[np.number]).drop(columns=[actual_label_column])
labels = data[actual_label_column] - 1  # Severity starts at 1; shift labels to start at 0

# Split into training, validation, and test sets with a 7:1:2 ratio
X_train, X_temp, y_train, y_temp = train_test_split(features, labels, test_size=0.3, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.6667, random_state=42)

# Fit the scaler on the training set only, then apply the same transform to the
# validation and test sets so their statistics do not leak into training
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)

# Report the split sizes
print(f"Training set size: {X_train.shape[0]}")
print(f"Validation set size: {X_val.shape[0]}")
print(f"Test set size: {X_test.shape[0]}")

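# Note (assumption, not in the original script): Severity classes in this
# dataset are typically imbalanced, so a stratified split keeps class
# proportions consistent across the three sets, e.g.:
#   X_train, X_temp, y_train, y_temp = train_test_split(
#       features, labels, test_size=0.3, random_state=42, stratify=labels)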
# Build the Transformer model
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
    # Multi-head self-attention layer (query = key = value = inputs)
    attention = MultiHeadAttention(key_dim=head_size, num_heads=num_heads, dropout=dropout)(inputs, inputs)
    attention = Add()([attention, inputs])
    attention = LayerNormalization(epsilon=1e-6)(attention)

    # Position-wise feed-forward network
    ff = Dense(ff_dim, activation='relu', kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(attention)
    ff = Dense(inputs.shape[-1])(ff)
    ff = Add()([ff, attention])
    ff = LayerNormalization(epsilon=1e-6)(ff)
    return ff

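# Note: this block follows the post-norm arrangement of the original
# Transformer encoder (residual add first, then LayerNormalization). It has
# no positional encoding: each "token" here is a single scaled feature, so
# attention over the feature axis is permutation-invariant.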
# The inputs are already shaped (n_features, 1), so each scaled feature is
# treated as one token of the sequence; no extra Reshape is needed
input_layer = Input(shape=(X_train.shape[1], 1))

# Stack several Transformer encoder layers for extra depth and capacity
transformer_block = transformer_encoder(input_layer, head_size=64, num_heads=4, ff_dim=64, dropout=0.2)
transformer_block = transformer_encoder(transformer_block, head_size=64, num_heads=4, ff_dim=64, dropout=0.2)
transformer_block = transformer_encoder(transformer_block, head_size=64, num_heads=4, ff_dim=64, dropout=0.2)

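# Each encoder block maps (batch, n_features, 1) back to the same shape
# (MultiHeadAttention projects to the query's last dimension, and the second
# Dense restores inputs.shape[-1]), which is what allows direct stacking.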
flatten = Flatten()(transformer_block)
noise = GaussianNoise(0.1)(flatten)
dense = Dense(64, activation='relu', kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(noise)
dropout = Dropout(0.6)(dense)
output_layer = Dense(4, activation='softmax')(dropout)

model = Model(inputs=input_layer, outputs=output_layer)
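# Note on regularization: the head stacks GaussianNoise, fairly strong L1/L2
# penalties, and a 0.6 dropout rate. This is aggressive; if the model
# underfits, relaxing one of these is a reasonable first adjustment.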

# Compile the model
model.compile(optimizer=Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])

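# sparse_categorical_crossentropy expects integer class labels, which is why
# Severity was shifted from 1..4 to 0..3 above and the output layer has 4 units.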
# Train the model: add a channel dimension so each sample is (n_features, 1)
X_train = np.expand_dims(X_train, axis=-1)
X_val = np.expand_dims(X_val, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)

# Set up the EarlyStopping callback
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_val, y_val), callbacks=[early_stopping])

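# Note: with restore_best_weights=True the model is rolled back to its best
# epoch, so the final history entries read below describe the last epoch run,
# not necessarily the restored weights; model.evaluate does use the restored weights.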
# Evaluate the model
train_acc = history.history['accuracy'][-1]
val_acc = history.history['val_accuracy'][-1]
test_loss, test_acc = model.evaluate(X_test, y_test)

print(f'Training Accuracy: {train_acc}')
print(f'Validation Accuracy: {val_acc}')
print(f'Test Accuracy: {test_acc}')

# Plot the loss and accuracy curves
train_loss = history.history['loss']
val_loss = history.history['val_loss']
train_accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']

plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(train_loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(train_accuracy, label='Training Accuracy')
plt.plot(val_accuracy, label='Validation Accuracy')
plt.title('Training and Validation Accuracy Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()

# Heuristic overfitting check
if train_loss[-1] < val_loss[-1] and train_accuracy[-1] > val_accuracy[-1]:
    print("Possible overfitting: training loss is clearly below validation loss and training accuracy exceeds validation accuracy.")
else:
    print("Training and validation loss are close, and so are the accuracies, so overfitting is unlikely.")

# Save the predictions, shifting the classes back to the original 1-based Severity labels
y_pred = model.predict(X_test)
y_pred_classes = np.argmax(y_pred, axis=1)
y_pred_classes_adjusted = y_pred_classes + 1
predictions_adjusted = pd.DataFrame({'True': y_test + 1, 'Predicted': y_pred_classes_adjusted})
predictions_adjusted.to_csv('/home/xjy/pythonProject/predictions_solo.csv', index=False)

# Print the evaluation metrics
print(classification_report(y_test + 1, y_pred_classes_adjusted))
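
# Optional (not in the original script): a confusion matrix complements the
# per-class report, e.g.:
#   from sklearn.metrics import confusion_matrix
#   print(confusion_matrix(y_test + 1, y_pred_classes_adjusted))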