import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
# Target function the network will learn to approximate
def f(x):
    return np.sin(x)
class MultiNeuralNetwork:
    def __init__(self, learning_rate=0.01, epochs=100000, hidden_layers=[10], epsilon=0.001, C=1):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.hidden_layers = hidden_layers
        self.epsilon = epsilon        # early-stopping threshold on the training MSE
        self.C = C                    # slope of the linear output activation (was hard-coded to 1)
        self.weights = []
        self.biases = []
        self.mse_err = []             # training MSE recorded each epoch
        self.train_epochs = []        # epoch numbers matching mse_err
    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, z):
        s = self.sigmoid(z)
        return s * (1 - s)

    # Linear activation for the output layer (identity scaled by C)
    def linear(self, z, C=1):
        return z * C

    def mean_squared_error(self, y_true, y_pred):
        return np.mean((y_true - y_pred) ** 2)
    # Standard-normal random initialization: one weight matrix and one bias
    # vector per layer transition
    def initialize_parameters(self, input_size, output_size):
        layers = [input_size] + self.hidden_layers + [output_size]
        self.weights = [np.random.randn(layers[i], layers[i + 1]) for i in range(len(layers) - 1)]
        self.biases = [np.random.randn(layers[i + 1]) for i in range(len(layers) - 1)]
    def forward_propagation(self, x):
        activations = [x[:, np.newaxis]]   # inputs as a column vector
        z_values = []
        for w, b in zip(self.weights, self.biases):
            z = np.dot(activations[-1], w) + b
            z_values.append(z)
            # A width-1 layer is treated as the linear output layer;
            # all other layers use the sigmoid
            if w.shape[1] == 1:
                a = self.linear(z, C=self.C)
            else:
                a = self.sigmoid(z)
            activations.append(a)
        return activations, z_values
    def backward_propagation(self, activations, z_values, y):
        d_weights = [None] * len(self.weights)
        d_biases = [None] * len(self.biases)
        # Gradient of the MSE at the linear output layer
        # (assumes C == 1, matching the default)
        delta = 2 * (activations[-1] - y[:, np.newaxis]) / len(y)
        d_weights[-1] = np.dot(activations[-2].T, delta)
        d_biases[-1] = np.sum(delta, axis=0)
        # Propagate the error back through the sigmoid hidden layers
        for i in reversed(range(len(d_weights) - 1)):
            delta = np.dot(delta, self.weights[i + 1].T) * self.sigmoid_derivative(z_values[i])
            d_weights[i] = np.dot(activations[i].T, delta)
            d_biases[i] = np.sum(delta, axis=0)
        return d_weights, d_biases
    def fit(self, x, y):
        self.initialize_parameters(input_size=1, output_size=1)
        for epoch in range(self.epochs):
            activations, z_values = self.forward_propagation(x)
            y_pred = activations[-1].flatten()
            error = self.mean_squared_error(y, y_pred)
            self.train_epochs.append(epoch + 1)
            self.mse_err.append(error)
            # Early stopping once the training error drops below epsilon
            if error < self.epsilon:
                print(f"Training stopped at epoch {epoch} with error {error}")
                break
            d_weights, d_biases = self.backward_propagation(activations, z_values, y)
            # Full-batch gradient descent step
            for i in range(len(self.weights)):
                self.weights[i] -= self.learning_rate * d_weights[i]
                self.biases[i] -= self.learning_rate * d_biases[i]
    def predict(self, x):
        activations, _ = self.forward_propagation(x)
        return activations[-1].flatten()
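
# Sanity-check sketch (added; not part of the original paste): compare the
# analytic gradient from backward_propagation against a central-difference
# numerical gradient on a tiny probe network. The probe size, seed, and step
# h below are illustrative assumptions.
def _numerical_grad_check(h=1e-6):
    np.random.seed(0)                      # reproducible random init
    net = MultiNeuralNetwork(hidden_layers=[3])
    net.initialize_parameters(input_size=1, output_size=1)
    x_probe = np.linspace(0, 10, 5)
    y_probe = f(x_probe)
    activations, z_values = net.forward_propagation(x_probe)
    d_weights, _ = net.backward_propagation(activations, z_values, y_probe)
    # Perturb a single weight and recompute the loss on both sides
    original = net.weights[0][0, 0]
    net.weights[0][0, 0] = original + h
    loss_plus = net.mean_squared_error(y_probe, net.forward_propagation(x_probe)[0][-1].flatten())
    net.weights[0][0, 0] = original - h
    loss_minus = net.mean_squared_error(y_probe, net.forward_propagation(x_probe)[0][-1].flatten())
    net.weights[0][0, 0] = original        # restore the weight
    numeric = (loss_plus - loss_minus) / (2 * h)
    print(f"analytic={d_weights[0][0, 0]:.8f}  numeric={numeric:.8f}")

_numerical_grad_check()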
# Training data: N samples of sin(x) on [0, 10]
N = 100
a, b = 0, 10
x = np.linspace(a, b, N)
y = f(x)

# Test data: the training grid shifted by 0.05
x_test = x + 0.05
y_test = f(x_test)

df = pd.DataFrame(x, columns=['X'])
df['target'] = y
print(df)
sns.pairplot(df)
plt.show()
model = MultiNeuralNetwork(hidden_layers=[10], learning_rate=0.01, epochs=1000000, epsilon=0.001)
model.fit(x, y)
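
# Quick report (added convenience, not in the original paste): mse_err stores
# the per-epoch training MSE, so its last entry is the error at stopping time.
print(f"Stopped after {model.train_epochs[-1]} epochs, final training MSE = {model.mse_err[-1]:.6f}")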
plt.figure(figsize=(10, 5))
plt.title('Training error vs. epoch')
plt.plot(model.train_epochs, model.mse_err, label=f'lr={model.learning_rate}, epsilon={model.epsilon}')
plt.ylabel('MSE')
plt.xlabel('Epoch')
plt.grid()
plt.legend()
plt.show()
y_pred = model.predict(x)
plt.figure(figsize=(10, 5))
plt.title('Training data')
plt.scatter(x, y, label='Ground truth')
plt.plot(x, y_pred, label='Network prediction', color='red')
plt.grid()
plt.legend()
plt.show()
# Evaluate on the shifted test grid
y_pred_test = model.predict(x_test)
plt.figure(figsize=(10, 5))
plt.title('Test data (x_test = x_train + 0.05)')
plt.scatter(x_test, y_test, label='Test data')
plt.plot(x_test, y_pred_test, label=f'Prediction (MSE={round(mean_squared_error(y_test, y_pred_test), 4)})', color='red')
plt.grid()
plt.legend()
plt.show()
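
# Optional cross-check sketch (an assumption, not in the original paste):
# train scikit-learn's MLPRegressor with a comparable architecture and compare
# its test MSE with the hand-rolled network. The lbfgs solver, max_iter=2000,
# and random_state=0 are illustrative choices.
from sklearn.neural_network import MLPRegressor

ref = MLPRegressor(hidden_layer_sizes=(10,), activation='logistic',
                   solver='lbfgs', max_iter=2000, random_state=0)
ref.fit(x.reshape(-1, 1), y)
ref_pred = ref.predict(x_test.reshape(-1, 1))
print(f"sklearn reference test MSE = {mean_squared_error(y_test, ref_pred):.4f}")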