
CW_task4_Decision_trees

Dec 20th, 2023 (edited)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, make_scorer, f1_score
# Load the dataset
data = np.load('C:/Users/print15207/MATLAB Drive/Print HVDC/Smartgrid CW/train_dataset.npy', allow_pickle=True)
# Extract features and labels from the saved dictionary
x = data.item()['feature']
y = data.item()['label']
# Keep the first 4800 samples: classify only class 0 (normal measurement) vs class 1 (FDI attack measurement)
x1 = x[:4800]
y1 = y[:4800]
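# Added sanity check (not in the original script): confirm the 0/1 class
# balance of the 4800-sample subset before splitting.
print("Class counts:", dict(zip(*np.unique(y1, return_counts=True))))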
# Split the data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(x1, y1, test_size=0.325, random_state=42)
# Print the shapes of the training and testing sets
print("Training set size:", x_train.shape)
print("Testing set size:", x_test.shape)
# Pre-prune the tree by setting a maximum depth and a minimum number of samples per leaf
pruned_model = DecisionTreeClassifier(max_depth=10, min_samples_leaf=4, random_state=42)
pruned_model.fit(x_train, y_train)
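# Added sketch (not in the original task): scikit-learn also supports
# post-pruning via minimal cost-complexity pruning. cost_complexity_pruning_path
# returns the effective alphas of the pruned subtrees; the mid-path alpha used
# here is an arbitrary illustration and would normally be chosen by
# cross-validation.
path = pruned_model.cost_complexity_pruning_path(x_train, y_train)
mid_alpha = path.ccp_alphas[len(path.ccp_alphas) // 2]
ccp_model = DecisionTreeClassifier(random_state=42, ccp_alpha=mid_alpha).fit(x_train, y_train)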
# Make predictions on the test set
test_predict = pruned_model.predict(x_test)
# Evaluate the model: y_test holds the true labels, test_predict the predictions
accuracy = accuracy_score(y_test, test_predict)
print("Accuracy on test set:", accuracy)
# Evaluate the model using the confusion matrix, TPR and FPR
conf_matrix = confusion_matrix(y_test, test_predict)
print("Confusion Matrix:")
print(conf_matrix)
TN, FP, FN, TP = conf_matrix.ravel()
# Calculate the true positive rate and false positive rate
TPR = TP / (TP + FN)
FPR = FP / (FP + TN)
# Print the metrics
print("True Positive Rate (TPR):", TPR)
print("False Positive Rate (FPR):", FPR)
# Define F1 score as the evaluation metric for hyperparameter tuning (the
# scorer is only exercised by the added grid-search sketch below)
scorer = make_scorer(f1_score)
test_f1_score = f1_score(y_test, test_predict)
print("F1 score on test set:", test_f1_score)
# Result of the original run (elapsed time: 2 seconds):
# Training set size: (3240, 34)
# Testing set size: (1560, 34)
# Accuracy on test set: 0.9846153846153847
# Confusion Matrix:
# [[789  12]
#  [ 12 747]]
# True Positive Rate (TPR): 0.9841897233201581
# False Positive Rate (FPR): 0.0149812734082397
# F1 score on test set: 0.9841897233201581
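# Optional visual check (an added sketch; matplotlib is an extra dependency the
# original script does not use): plot the top levels of the pruned tree to see
# which measurement features drive the normal-vs-FDI split.
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
plt.figure(figsize=(12, 6))
plot_tree(pruned_model, max_depth=2, filled=True, class_names=["normal", "FDI"])
plt.show()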