import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import log_loss, accuracy_score, classification_report, mean_squared_error
from imblearn.over_sampling import SMOTE
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import plotly.express as px
import re
from PIL import Image
from scipy.ndimage import center_of_mass
from scipy.ndimage import shift
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable
class Weights_pipeline:
    """Saves and restores the weights/biases of a model as an .npz archive."""
    def __init__(self, model):
        self.model = model

    def save_to_file(self, fname):
        try:
            params = {}
            for i, w in enumerate(self.model.weights):
                params[f'weight_{i}'] = w
            for i, b in enumerate(self.model.biases):
                params[f'bias_{i}'] = b
            np.savez(fname, **params)
            print(f"Weights and biases saved to {fname}")
        except Exception as e:
            print(f"Error saving weights: {e}")

    def load_from_file(self, fname):
        try:
            loaded_params = np.load(fname)
            weights = []
            biases = []
            # Keys come back in the order they were saved: weight_0..N, then bias_0..N.
            for key in loaded_params.files:
                if 'weight' in key:
                    weights.append(loaded_params[key])
                elif 'bias' in key:
                    biases.append(loaded_params[key])
            self.model.weights = weights
            self.model.biases = biases
            print(f"Weights and biases loaded from {fname}")
            return self.model
        except Exception as e:
            print(f"Error loading weights: {e}")
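# --- Added usage sketch (not part of the original paste) ----------------------
# Round-trips a model's parameters through an .npz file. `model` can be any
# object exposing `weights` and `biases` lists of numpy arrays; the default
# filename is illustrative only. Defined but never called by this script.
def _demo_weights_pipeline(model, fname='demo_params.npz'):
    Weights_pipeline(model).save_to_file(fname)            # writes weight_i / bias_i arrays
    return Weights_pipeline(model).load_from_file(fname)   # returns the model with restored arrays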
class MultiNeuralNetwork:
    """A from-scratch fully connected network for regression ('reg'),
    binary ('binary') or multiclass ('multi') problems, trained one sample at a time."""
    def __init__(self, type='reg', learning_rate=0.01, epochs=100000, hidden_layers=[10], C=1, early_stopping_rounds=np.inf):
        self.type = type  # 'reg', 'binary', 'multi'
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.hidden_layers = hidden_layers
        self.C = C  # slope of the linear output used in regression mode
        self.early_stopping_rounds = early_stopping_rounds
        self.weights = []
        self.biases = []
        self.loss_ = []
        self.epochs_ = []

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, z):
        s = self.sigmoid(z)
        return s * (1 - s)

    def linear(self, z, C=1):
        return z * C

    def softmax(self, z):
        exp_values = np.exp(z - np.max(z))  # subtract max for numerical stability
        return exp_values / np.sum(exp_values, axis=0)
    def fit(self, x, y):
        def initialize_parameters(input_size, output_size):
            layers = [input_size] + self.hidden_layers + [output_size]
            self.weights = [np.random.randn(layers[i], layers[i + 1]) * np.sqrt(1 / layers[i]) for i in range(len(layers) - 1)]
            self.biases = [np.random.randn(layers[i + 1]) * 0.01 for i in range(len(layers) - 1)]

        best_err, count = np.inf, 0
        try:
            input_size = x.shape[1]
        except IndexError:
            x = x.reshape(-1, 1)
            input_size = x.shape[1]
        output_size = len(np.unique(y)) if self.type == 'multi' else 1
        initialize_parameters(input_size=input_size, output_size=output_size)

        for epoch in range(self.epochs):
            y_pred = []
            for i, x_sample in enumerate(x):
                # Forward pass: keep the input of every layer and its pre-activation.
                inputs, activations, outputs = [x_sample], [], []
                input = x_sample
                len_ = len(self.weights)
                d_weights, d_biases = [None] * len_, [None] * len_
                for idx, (w, b) in enumerate(zip(self.weights, self.biases)):
                    activation = np.dot(input, w) + b
                    activations.append(activation)
                    if w.shape[1] == 1 and self.type == 'reg':
                        output = self.linear(activation, C=self.C)
                        outputs.append(output)
                    elif w.shape[1] == 1 and self.type == 'binary':
                        output = self.sigmoid(activation)
                        outputs.append(output)
                    elif idx == (len_ - 1) and self.type == 'multi':
                        output = self.softmax(activation)
                        outputs.append(output)
                    else:
                        input = self.sigmoid(activation)
                        outputs.append(input)
                        inputs.append(input)
                y_pred.append(output)

                # Backward pass: error at the output layer ...
                if self.type == 'multi':
                    delta = output - np.eye(output_size)[y[i]]
                else:
                    delta = output - y[i]
                d_biases[len_ - 1] = delta * self.C
                d_weights[len_ - 1] = np.dot(inputs[len_ - 1].reshape(-1, 1), d_biases[len_ - 1].reshape(-1, 1).T)
                # ... propagated back through the hidden sigmoid layers.
                for idx in reversed(range(len_ - 1)):
                    d_biases[idx] = np.dot(d_biases[idx + 1], self.weights[idx + 1].T * self.sigmoid_derivative(activations[idx]))
                    d_weights[idx] = np.dot(inputs[idx].reshape(-1, 1), d_biases[idx].reshape(-1, 1).T)
                # Stochastic gradient descent update after every sample.
                for idx in range(len_):
                    self.weights[idx] -= self.learning_rate * d_weights[idx]
                    self.biases[idx] -= self.learning_rate * d_biases[idx]

            # Epoch-level loss and early stopping.
            if self.type == 'reg':
                err = mean_squared_error(y_true=y, y_pred=y_pred)
                print(f'Epoch={epoch}, MSE={err}')
            elif self.type == 'binary' or self.type == 'multi':
                err = log_loss(y, y_pred)
                print(f'Epoch={epoch}, logloss={err}')
            if err < best_err:
                count = 0
                best_err = err
            else:
                count += 1
            self.epochs_.append(epoch + 1)
            self.loss_.append(err)
            if count >= self.early_stopping_rounds:
                print(f"Training stopped at epoch {epoch} with error {err}")
                break
    def predict(self, x):
        try:
            _ = x.shape[1]
        except IndexError:
            x = x.reshape(-1, 1)
        y_pred = []
        len_ = len(self.weights)
        for x_sample in x:
            input = x_sample
            for idx, (w, b) in enumerate(zip(self.weights, self.biases)):
                activation = np.dot(input, w) + b
                if w.shape[1] == 1 and self.type == 'reg':
                    output = self.linear(activation, C=self.C)
                elif w.shape[1] == 1 and self.type == 'binary':
                    output = (self.sigmoid(activation) > 0.5).astype(int)  # threshold at 0.5
                elif idx == (len_ - 1) and self.type == 'multi':
                    output = np.argmax(self.softmax(activation))  # predicted class index
                else:
                    input = self.sigmoid(activation)
            y_pred.append(output)
        return np.array(y_pred)
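# --- Added usage sketch (not part of the original paste) ----------------------
# A tiny smoke test of MultiNeuralNetwork on a synthetic 3-class problem.
# Hyperparameters are illustrative guesses, not the settings used for MNIST below.
# Defined but never called, so it does not affect the script.
def _demo_multi_neural_network():
    rng = np.random.default_rng(0)
    x_toy = rng.normal(size=(90, 2))
    # Class = number of positive coordinates, giving labels 0, 1 or 2.
    y_toy = (x_toy[:, 0] > 0).astype(int) + (x_toy[:, 1] > 0).astype(int)
    net = MultiNeuralNetwork(type='multi', learning_rate=0.05, epochs=30,
                             hidden_layers=[8], early_stopping_rounds=10)
    net.fit(x_toy, y_toy)
    print('toy accuracy:', accuracy_score(y_toy, net.predict(x_toy)))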
def plot_classification_report(report):
    # Parse the per-class rows of sklearn's text classification report.
    pattern = r'\s*(\d+)\s+(\d+\.\d{2})\s+(\d+\.\d{2})\s+(\d+\.\d{2})\s+(\d+)\n?'
    matches = re.findall(pattern, report)
    matches = pd.DataFrame(matches, columns=['Class', 'Precision', 'Recall', 'F1-score', 'Support'])
    for col in matches.columns:
        matches[col] = matches[col].astype(float)
    for col in ['Precision', 'Recall', 'F1-score']:
        plt.figure(figsize=(10, 4))
        cmap = plt.get_cmap('coolwarm')
        norm = Normalize(0, 1)
        # Draw one horizontal gradient bar per class, from 0 up to the metric value.
        for index, value in enumerate(matches[col]):
            gradient = np.linspace(0, value, 100)
            gradient = gradient.reshape(1, -1)
            plt.imshow(gradient, aspect='auto', cmap=cmap, norm=norm, extent=[0, value, index - 0.4, index + 0.4])
            plt.text(value - 0.05, index, f'{value:.2f}', va='center')
        plt.grid(True, axis='x')
        plt.title(f'{col} of each class')
        plt.ylabel('Class')
        plt.xlabel(col)
        plt.yticks(ticks=np.arange(len(matches)), labels=matches['Class'].astype(int))
        plt.xlim(0, 1)
        plt.ylim(-0.5, len(matches) - 0.5)
        plt.show()
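# Note (added): the regex above matches only the per-class rows of sklearn's
# default text report, e.g. "          3       0.95      0.92      0.93      1010".
# The accuracy / macro avg / weighted avg summary rows are skipped, and a
# non-default `digits` argument (other than 2) would break the match.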
def center_image_before_resize(img):
    # Shift the digit so its centre of mass lands in the middle of the image.
    cy, cx = center_of_mass(img)
    shift_y = img.shape[0] // 2 - cy
    shift_x = img.shape[1] // 2 - cx
    centered_img = shift(img, shift=[shift_y, shift_x], mode='constant', cval=0)
    return centered_img
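# --- Added usage sketch (not part of the original paste) ----------------------
# Centring a single bright pixel that sits near the top-left corner of a small
# synthetic image; after the shift it should end up near the middle, around [4, 4].
def _demo_center_image():
    img = np.zeros((9, 9))
    img[1, 1] = 1.0
    centered = center_image_before_resize(img)
    print(np.argwhere(centered > 0.5))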
# Load MNIST and flatten each 28x28 image into a 784-dimensional vector in [0, 1].
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
x_train = X_train.reshape(X_train.shape[0], -1) / 255.0
x_test = X_test.reshape(X_test.shape[0], -1) / 255.0

# Restore a previously trained multiclass network and evaluate it on the MNIST test set.
loaded_model = Weights_pipeline(model=MultiNeuralNetwork(type='multi')).load_from_file('model_wthout_res_params.npz')
y_pred = loaded_model.predict(x_test)
text = classification_report(y_test, y_pred)
plot_classification_report(text)
print(text)

# Evaluate on 100 hand-drawn digits (10 samples per class 0-9): invert, centre and resize to 28x28.
arr = []
y_true = []
for i in range(10):
    for k in range(1, 11):
        img = 255 - np.array(Image.open(f'D:/SUMDU/4 курс/Моделювання нейронних мереж/pr7(Numbers)/{i}_{k}.png').convert('L'))
        centered_img = center_image_before_resize(img)
        centered_img = Image.fromarray(centered_img).resize((28, 28))
        centered_img = np.array(centered_img)
        arr.append(centered_img)
        y_true.append(i)
img_array, y_true = np.array(arr), np.array(y_true)
my_x_test = img_array.reshape(img_array.shape[0], -1) / 255.0
y_pred = loaded_model.predict(my_x_test)

# Show each custom digit with its true label and a colour-coded prediction.
plt.figure(figsize=(10, 50))
for i in range(100):
    plt.subplot(20, 5, i + 1)
    plt.imshow(img_array[i], cmap='gray_r')
    plt.title(f"Real: {y_true[i]} | Pred: ", color='black')
    color = 'green' if y_true[i] == y_pred[i] else 'red'
    plt.text(0.85, 1.045, f"{y_pred[i]}", color=color, fontsize=12, ha='center', transform=plt.gca().transAxes)
    plt.axis("off")
plt.tight_layout()
plt.show()

text = classification_report(y_true, y_pred)
plot_classification_report(text)
print(text)
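# --- Added sketch (not part of the original paste) ----------------------------
# A guess at how 'model_wthout_res_params.npz' could have been produced with the
# classes above: train on the flattened MNIST training split and save the
# parameters. Hyperparameters are illustrative only, and the per-sample Python
# loop makes this very slow on the full 60,000-image set. Defined but never called.
def _demo_train_and_save():
    net = MultiNeuralNetwork(type='multi', learning_rate=0.01, epochs=20,
                             hidden_layers=[64], early_stopping_rounds=5)
    net.fit(x_train, y_train)
    Weights_pipeline(net).save_to_file('model_wthout_res_params.npz')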