ZergRushArt-ai-7
Dec 16th, 2022
import numpy as np

# Activation functions.
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def tanh(x):
    return np.tanh(x)

def relu(x):
    # np.maximum is element-wise, so this also works for array inputs,
    # unlike the built-in max(0, x).
    return np.maximum(0, x)

# A single neuron: a weighted sum of the inputs plus a bias, passed through
# an activation function.
class Neuron:
    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias

    def feedforward(self, inputs, actfunc=sigmoid):
        total = np.dot(self.weights, inputs) + self.bias
        return actfunc(total)

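# Added sanity check (not in the original paste): with weights [0., 1.], bias 4
# and inputs [2., 3.], the weighted sum is 0*2 + 1*3 + 4 = 7, so the neuron
# should return sigmoid(7), roughly 0.999.
check_neuron = Neuron(np.array([0., 1.]), 4)
print(check_neuron.feedforward(np.array([2., 3.])))  # expected: ~0.999
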
# Network 1: three inputs, a hidden layer of three neurons and one output
# neuron; every neuron shares the same weights [0.5, 0.5, 0.5] and bias 0.
class NeuronNetwork1:
    def __init__(self):
        weights = np.array([0.5, 0.5, 0.5])
        bias = 0
        self.h1 = Neuron(weights, bias)
        self.h2 = Neuron(weights, bias)
        self.h3 = Neuron(weights, bias)
        self.o1 = Neuron(weights, bias)

    def feedforward(self, x, actfunc=sigmoid):
        out_h1 = self.h1.feedforward(x, actfunc)
        out_h2 = self.h2.feedforward(x, actfunc)
        out_h3 = self.h3.feedforward(x, actfunc)
        out_o1 = self.o1.feedforward(np.array([out_h1, out_h2, out_h3]), actfunc)
        return out_o1

# Network 2: two inputs, a hidden layer of two neurons and two output neurons;
# every neuron shares the same weights [1., 0.] and bias 1.
class NeuronNetwork2:
    def __init__(self):
        weights = np.array([1., 0.])
        bias = 1
        self.h1 = Neuron(weights, bias)
        self.h2 = Neuron(weights, bias)
        self.o1 = Neuron(weights, bias)
        self.o2 = Neuron(weights, bias)

    def feedforward(self, x, actfunc=sigmoid):
        out_h1 = self.h1.feedforward(x, actfunc)
        out_h2 = self.h2.feedforward(x, actfunc)
        out_o1 = self.o1.feedforward(np.array([out_h1, out_h2]), actfunc)
        out_o2 = self.o2.feedforward(np.array([out_h1, out_h2]), actfunc)
        return out_o1, out_o2

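# Added sanity check (not in the original paste): with weights [1., 0.] and bias 1,
# input [1., 2.] gives each hidden neuron relu(1*1 + 0*2 + 1) = 2 and each output
# neuron relu(1*2 + 0*2 + 1) = 3, so both outputs should equal 3.
check_n2 = NeuronNetwork2()
print(check_n2.feedforward(np.array([1., 2.]), relu))  # expected: (3.0, 3.0)
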
# Run both hand-built networks with each activation function.
n1 = NeuronNetwork1()
x1 = np.array([3., 4., 5.])
print(n1.feedforward(x1))
print(n1.feedforward(x1, tanh))
print(n1.feedforward(x1, relu))
print()

n2 = NeuronNetwork2()
x2 = np.array([1., 2.])
print(n2.feedforward(x2))
print(n2.feedforward(x2, tanh))
print(n2.feedforward(x2, relu))


# Second part: scikit-learn MLP classifier and regressor experiments.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import sklearn

from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(Y_test, Y_preds):
    # Draw the confusion matrix as an annotated heatmap.
    conf_mat = confusion_matrix(Y_test, Y_preds)
    plt.figure(figsize=(6, 5))
    sn.heatmap(conf_mat, annot=True, cmap="Blues")
    plt.show()

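# Alternative sketch (not in the original paste): scikit-learn 1.0+ can draw the
# same plot without seaborn, assuming ConfusionMatrixDisplay is available in the
# installed version:
# from sklearn.metrics import ConfusionMatrixDisplay
# ConfusionMatrixDisplay.from_predictions(Y_test, Y_preds)
# plt.show()
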
url_class = "https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv"
url_reg = "https://raw.githubusercontent.com/AnnaShestova/salary-years-simple-linear-regression/master/Salary_Data.csv"

# Iris for classification: every column except the last is a feature,
# the last column is the class label.
dfclass = pd.read_csv(url_class)
X_iris, Y_iris = dfclass.iloc[:, :-1], dfclass.iloc[:, -1]
print(f"iris dataset shapes: {X_iris.shape} {Y_iris.shape}")

# Salary vs. years of experience for regression.
dfreg = pd.read_csv(url_reg)
X_salary, Y_salary = dfreg.iloc[:, :-1], dfreg.iloc[:, -1]
print(f"salary dataset shapes: {X_salary.shape} {Y_salary.shape}")

# 80/20 train/test split; the iris split is stratified so that class proportions
# are preserved in both subsets.
X_iris_train, X_iris_test, Y_iris_train, Y_iris_test = train_test_split(
    X_iris, Y_iris, train_size=0.8, test_size=0.2, stratify=Y_iris, random_state=4)
X_salary_train, X_salary_test, Y_salary_train, Y_salary_test = train_test_split(
    X_salary, Y_salary, train_size=0.8, random_state=4)

# Multi-layer perceptron classifier on the iris data.
mlp_classifier = MLPClassifier(max_iter=600, random_state=4)
mlp_classifier.fit(X_iris_train, Y_iris_train)
Y_iris_preds = mlp_classifier.predict(X_iris_test)
print(f"test accuracy: {mlp_classifier.score(X_iris_test, Y_iris_test)}")
print(f"training accuracy: {mlp_classifier.score(X_iris_train, Y_iris_train)}")
plot_confusion_matrix(Y_iris_test, Y_iris_preds)
print(f"loss: {mlp_classifier.loss_}")
print(f"number of coefs: {len(mlp_classifier.coefs_)}")
print(f"number of intercepts: {len(mlp_classifier.intercepts_)}")
print(f"number of iters for which estimator ran: {mlp_classifier.n_iter_}")
print(f"output layer activation function: {mlp_classifier.out_activation_}")

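# Sketch (not in the original paste): MLPClassifier defaults to one hidden layer
# of 100 neurons, relu activation and the adam solver; the architecture can be
# set explicitly if needed (mlp_custom is an illustrative name):
# mlp_custom = MLPClassifier(hidden_layer_sizes=(10, 10), activation='tanh',
#                            max_iter=600, random_state=4)
# mlp_custom.fit(X_iris_train, Y_iris_train)
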
print()

# Multi-layer perceptron regressor on the salary data. Note that
# MLPRegressor.score returns the R^2 coefficient of determination, not accuracy.
mlp_regressor = MLPRegressor(max_iter=100000, random_state=4)
mlp_regressor.fit(X_salary_train, Y_salary_train)
Y_salary_preds = mlp_regressor.predict(X_salary_test)
print(f"test R^2 score: {mlp_regressor.score(X_salary_test, Y_salary_test)}")
print(f"training R^2 score: {mlp_regressor.score(X_salary_train, Y_salary_train)}")
print(f"loss: {mlp_regressor.loss_}")
print(f"number of coefs: {len(mlp_regressor.coefs_)}")
print(f"number of iters for which estimator ran: {mlp_regressor.n_iter_}")
print(f"output layer activation function: {mlp_regressor.out_activation_}")
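# Sketch (not in the original paste): MLPs are sensitive to feature scale, so
# standardizing the inputs often helps the regressor converge. A minimal variant
# using a scikit-learn pipeline (scaled_mlp is an illustrative name):
# from sklearn.preprocessing import StandardScaler
# from sklearn.pipeline import make_pipeline
# scaled_mlp = make_pipeline(StandardScaler(), MLPRegressor(max_iter=100000, random_state=4))
# scaled_mlp.fit(X_salary_train, Y_salary_train)
# print(f"scaled test R^2 score: {scaled_mlp.score(X_salary_test, Y_salary_test)}")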