import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import log_loss, accuracy_score, classification_report, mean_squared_error

class MultiNeuralNetwork:
    """A small fully connected network trained sample-by-sample (SGD) with a
    regression ('reg'), binary ('binary'), or multi-class ('multi') output head."""

    def __init__(self, type='reg', learning_rate=0.01, epochs=100000, hidden_layers=[10], C=1):
        self.type = type                    # 'reg', 'binary', or 'multi'
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.hidden_layers = hidden_layers  # sizes of the hidden layers
        self.C = C                          # slope of the linear output head
        self.weights = []
        self.biases = []
        self.loss_ = []                     # loss recorded after each epoch
        self.epochs_ = []                   # epoch numbers matching loss_

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, z):
        # sigma'(z) = sigma(z) * (1 - sigma(z))
        s = self.sigmoid(z)
        return s * (1 - s)

    def linear(self, z, C=1):
        # identity output head scaled by C, used for regression
        return z * C

    def softmax(self, z):
        exp_values = np.exp(z - np.max(z))
        return exp_values / np.sum(exp_values, axis=0)
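
    # Added note: subtracting np.max(z) before exponentiating leaves the result
    # unchanged (the common factor exp(max(z)) cancels in the ratio) but avoids
    # overflow. For example, softmax([1000., 1001.]) would overflow naively,
    # while the shifted form computes exp([-1., 0.]) / sum(exp([-1., 0.]))
    # ~= [0.269, 0.731].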

    def fit(self, x, y):
        def initialize_parameters(input_size, output_size):
            # scale weights by 1/sqrt(fan_in) to keep initial activations in range
            layers = [input_size] + self.hidden_layers + [output_size]
            self.weights = [np.random.randn(layers[i], layers[i + 1]) * np.sqrt(1 / layers[i]) for i in range(len(layers) - 1)]
            self.biases = [np.random.randn(layers[i + 1]) * 0.01 for i in range(len(layers) - 1)]

        best_err, count = np.inf, 0
        try:
            input_size = x.shape[1]
        except IndexError:
            x = x.reshape(-1, 1)  # promote a 1-D feature vector to a column
            input_size = x.shape[1]
        output_size = len(np.unique(y)) if self.type == 'multi' else 1
        initialize_parameters(input_size=input_size, output_size=output_size)

        for epoch in range(self.epochs):
            y_pred = []
            for i, x_sample in enumerate(x):
                # forward pass: keep every layer's input and pre-activation
                inputs, activations, outputs = [x_sample], [], []
                layer_input = x_sample
                len_ = len(self.weights)
                d_weights, d_biases = [None] * len_, [None] * len_
                for idx, (w, b) in enumerate(zip(self.weights, self.biases)):
                    activation = np.dot(layer_input, w) + b
                    activations.append(activation)
                    if w.shape[1] == 1 and self.type == 'reg':
                        output = self.linear(activation, C=self.C)
                        outputs.append(output)
                    elif w.shape[1] == 1 and self.type == 'binary':
                        output = self.sigmoid(activation)
                        outputs.append(output)
                    elif idx == (len_ - 1) and self.type == 'multi':
                        output = self.softmax(activation)
                        outputs.append(output)
                    else:
                        layer_input = self.sigmoid(activation)
                        outputs.append(layer_input)
                        inputs.append(layer_input)
                y_pred.append(output)

                # backward pass: delta of the output layer ...
                if self.type == 'multi':
                    delta = output - np.eye(output_size)[y[i]]  # one-hot encode the label
                else:
                    delta = output - y[i]
                d_biases[len_ - 1] = delta * self.C
                d_weights[len_ - 1] = np.dot(inputs[len_ - 1].reshape(-1, 1), d_biases[len_ - 1].reshape(-1, 1).T)
                # ... then propagate it back through the hidden layers
                for idx in reversed(range(len_ - 1)):
                    d_biases[idx] = np.dot(d_biases[idx + 1], self.weights[idx + 1].T) * self.sigmoid_derivative(activations[idx])
                    d_weights[idx] = np.dot(inputs[idx].reshape(-1, 1), d_biases[idx].reshape(-1, 1).T)
                # gradient step on every layer
                for idx in range(len_):
                    self.weights[idx] -= self.learning_rate * d_weights[idx]
                    self.biases[idx] -= self.learning_rate * d_biases[idx]

            # epoch-level loss and patience-based early stopping
            if self.type == 'reg':
                err = mean_squared_error(y_true=y, y_pred=y_pred)
                print(f'Epoch={epoch}, MSE={err}')
            elif self.type in ('binary', 'multi'):
                err = log_loss(y, y_pred)
                print(f'Epoch={epoch}, logloss={err}')
            if err < best_err:
                count = 0
                best_err = err
            else:
                count += 1
            self.epochs_.append(epoch + 1)
            self.loss_.append(err)
            if count >= 250:  # no improvement for 250 consecutive epochs
                print(f"Training finished at epoch {epoch} with error {err}")
                break

    def predict(self, x):
        try:
            _ = x.shape[1]
        except IndexError:
            x = x.reshape(-1, 1)  # promote a 1-D feature vector to a column
        y_pred = []
        len_ = len(self.weights)
        for x_sample in x:
            layer_input = x_sample
            for idx, (w, b) in enumerate(zip(self.weights, self.biases)):
                activation = np.dot(layer_input, w) + b
                if w.shape[1] == 1 and self.type == 'reg':
                    output = self.linear(activation, C=self.C)
                elif w.shape[1] == 1 and self.type == 'binary':
                    output = (self.sigmoid(activation) > 0.5).astype(int)  # threshold at 0.5
                elif idx == (len_ - 1) and self.type == 'multi':
                    output = np.argmax(self.softmax(activation))  # most probable class
                else:
                    layer_input = self.sigmoid(activation)
            y_pred.append(output)
        return np.array(y_pred)
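
# --- Added usage sketch (not part of the original script): the same class in
# 'binary' mode on a hypothetical toy threshold problem. It is wrapped in a
# function so that the regression demo below still runs unchanged; call
# binary_demo() to try it. The names binary_demo, x_bin, and y_bin are
# illustrative, not from the original.
def binary_demo():
    rng = np.random.default_rng(0)
    x_bin = rng.uniform(-1, 1, size=200)  # one feature; fit() reshapes 1-D input itself
    y_bin = (x_bin > 0).astype(int)       # label 1 exactly when the feature is positive
    clf = MultiNeuralNetwork(type='binary', learning_rate=0.1, epochs=1000, hidden_layers=[5])
    clf.fit(x_bin, y_bin)
    print('accuracy:', accuracy_score(y_bin, clf.predict(x_bin).ravel()))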

def f(x):
    return np.sin(x)

N = 100
a, b = 0, 10
x = np.linspace(a, b, N)
y = f(x)
x_test = x + 0.05  # test grid shifted slightly off the training points
y_test = f(x_test)

df = pd.DataFrame(x, columns=['X'])
df['target'] = y
print(df)
sns.pairplot(df)
plt.show()
model = MultiNeuralNetwork(type='reg', learning_rate=0.01, epochs=25000, hidden_layers=[10], C=1)
model.fit(x, y)

plt.figure(figsize=(10, 5))
plt.title('Loss versus training epoch')
plt.plot(model.epochs_, model.loss_, c='red', label=f'lr={model.learning_rate}')
plt.ylabel('MSE')
plt.xlabel('Epoch')
plt.grid()
plt.legend()
plt.show()
y_pred = model.predict(x)
plt.figure(figsize=(10, 5))
plt.title('Training data')
plt.scatter(x, y, label='True values')
plt.plot(x, y_pred, label=f'Prediction (MSE={round(mean_squared_error(y_true=y, y_pred=y_pred), 6)})', color='red')
plt.grid()
plt.legend()
plt.show()

y_pred_test = model.predict(x_test)
plt.figure(figsize=(10, 5))
plt.title('Test data (x_test = x_train + 0.05)')
plt.scatter(x_test, y_test, label='Test data')
plt.plot(x_test, y_pred_test, label=f'Prediction (MSE={round(mean_squared_error(y_test, y_pred_test), 6)})', color='red')
plt.grid()
plt.legend()
plt.show()
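
# --- Added usage sketch (not part of the original script): the 'multi' head on
# sklearn's iris dataset, whose integer labels 0..2 are exactly what the one-hot
# lookup np.eye(output_size)[y[i]] in fit() expects. The epoch count is kept
# small only so the sketch finishes quickly; treat it as a starting point, not
# a tuned configuration.
from sklearn.datasets import load_iris

iris = load_iris()
x_iris, y_iris = iris.data, iris.target

clf_multi = MultiNeuralNetwork(type='multi', learning_rate=0.01, epochs=300, hidden_layers=[10])
clf_multi.fit(x_iris, y_iris)
print(classification_report(y_iris, clf_multi.predict(x_iris)))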