using System;
using System.Linq;
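
// NOTE: this listing assumes a dense `Matrix` type and a `MatrixFunctions`
// helper (ReLU, Sigmoid, Tanh, LeakyReLU, Softplus, Sqrt, Sign, Mean,
// Variance, element-wise `/`, matrix `*`, PointwiseMultiply, and
// matrix-plus-scalar broadcasting). Neither type is included in this paste;
// any linear-algebra library exposing equivalent operations should work.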
public enum ActivationFunction
{
    ReLU,
    Sigmoid,
    Tanh,
    LeakyReLU,
    Swish,
    Mish,
    GELU
}

public enum Regularizer
{
    None,
    L1,
    L2
}
public class GithubNeuralNetwork
{
    private int[] _layers;
    private Matrix[] _weights;
    private Matrix[] _biases;
    private Func<Matrix, Matrix>[] _activationFunctions;
    private double _learningRate;
    private double _epsilon;
    private Matrix[] _gamma;                 // layer-norm scale
    private Matrix[] _beta;                  // layer-norm shift
    private double _initialLearningRate;
    private double _decayRate;
    private string _optimizer;
    private Matrix[] _movingMeans;           // running layer-norm statistics
    private Matrix[] _movingVariances;
    private Matrix[] _mWeights;              // Adam-style first/second moments
    private Matrix[] _vWeights;
    private Matrix[] _mBiases;
    private Matrix[] _vBiases;
    private Matrix[] _mGamma;
    private Matrix[] _vGamma;
    private Matrix[] _mBeta;
    private Matrix[] _vBeta;
    private Matrix[] _slowWeights;           // Lookahead slow copies
    private Matrix[] _slowBiases;
    private double _lookaheadAlpha;
    private double _lookaheadBeta;
    private int _t;                          // optimizer time step
    private double _dropoutRate;
    private Matrix[] _dropoutMasks;
    private ActivationFunction[] _activationOptions;
    private Regularizer _regularizer;
    private double _lambda;                  // regularization strength
    // C# requires `params int[] layers` to come last, so the optional
    // Lookahead hyperparameters are declared before it; pass them by name
    // or positionally ahead of the layer sizes.
    public GithubNeuralNetwork(double learningRate, double epsilon, string optimizer,
        double decayRate, double dropoutRate, Regularizer regularizer, double lambda,
        double lookaheadAlpha = 0.5, double lookaheadBeta = 0.9, params int[] layers)
    {
        _layers = layers;
        int n = layers.Length - 1;           // number of weighted layers
        _weights = new Matrix[n];
        _biases = new Matrix[n];
        _activationFunctions = new Func<Matrix, Matrix>[n];
        _learningRate = learningRate;
        _epsilon = epsilon;
        _gamma = new Matrix[n];
        _beta = new Matrix[n];
        _initialLearningRate = learningRate;
        _decayRate = decayRate;
        _optimizer = optimizer;
        _movingMeans = new Matrix[n];
        _movingVariances = new Matrix[n];
        _mWeights = new Matrix[n];
        _vWeights = new Matrix[n];
        _mBiases = new Matrix[n];
        _vBiases = new Matrix[n];
        _mGamma = new Matrix[n];
        _vGamma = new Matrix[n];
        _mBeta = new Matrix[n];
        _vBeta = new Matrix[n];
        _slowWeights = new Matrix[n];
        _slowBiases = new Matrix[n];
        _lookaheadAlpha = lookaheadAlpha;
        _lookaheadBeta = lookaheadBeta;
        _t = 0;                              // incremented once per optimizer step
        _dropoutRate = dropoutRate;
        _dropoutMasks = new Matrix[n];
        _activationOptions = new ActivationFunction[n];
        _regularizer = regularizer;
        _lambda = lambda;

        InitializeWeightsAndBiases();
        SetActivationFunctions();
        InitializeSlowWeightsAndBiases();
    }
    private void InitializeSlowWeightsAndBiases()
    {
        for (int i = 0; i < _weights.Length; i++)
        {
            _slowWeights[i] = _weights[i].Copy();
            _slowBiases[i] = _biases[i].Copy();
        }
    }
    private Matrix ResidualBlock(Matrix input, int layerIndex)
    {
        // A two-layer block with an identity shortcut. The same weight matrix
        // is applied on both passes, so this block is only shape-compatible
        // when layer `layerIndex` maps a dimension onto itself (square weights).
        Matrix residual = input;
        Matrix outputs = input;
        const int numLayersInBlock = 2;
        for (int i = 0; i < numLayersInBlock; i++)
        {
            Matrix layerOutput = _weights[layerIndex] * outputs + _biases[layerIndex];
            outputs = _activationFunctions[layerIndex](layerOutput);
        }
        // Add the shortcut (residual) only when the shapes still match.
        if (outputs.RowCount == residual.RowCount && outputs.ColumnCount == residual.ColumnCount)
        {
            outputs += residual;
        }
        return outputs;
    }
    private void LookaheadOptimizer(Matrix[] gradientsWeights, Matrix[] gradientsBiases)
    {
        for (int i = 0; i < _weights.Length; i++)
        {
            // Move the slow weights toward the fast weights, then take a
            // gradient step blended with the slow weights.
            _slowWeights[i] = (_lookaheadAlpha * _slowWeights[i]) + ((1 - _lookaheadAlpha) * _weights[i]);
            _slowBiases[i] = (_lookaheadAlpha * _slowBiases[i]) + ((1 - _lookaheadAlpha) * _biases[i]);
            _weights[i] -= _learningRate * ((_lookaheadBeta * gradientsWeights[i]) + ((1 - _lookaheadBeta) * _slowWeights[i]));
            _biases[i] -= _learningRate * ((_lookaheadBeta * gradientsBiases[i]) + ((1 - _lookaheadBeta) * _slowBiases[i]));
        }
    }
    private void InitializeWeightsAndBiases()
    {
        Random rand = new Random();
        for (int i = 0; i < _weights.Length; i++)
        {
            // Weights are (out x in); biases, gamma/beta, and the running
            // statistics are (out x 1) column vectors.
            _weights[i] = XavierInitialization(_layers[i + 1], _layers[i], rand);
            _biases[i] = Matrix.Zeros(_layers[i + 1], 1);
            _gamma[i] = Matrix.Ones(_layers[i + 1], 1);
            _beta[i] = Matrix.Zeros(_layers[i + 1], 1);
            _movingMeans[i] = Matrix.Zeros(_layers[i + 1], 1);
            _movingVariances[i] = Matrix.Ones(_layers[i + 1], 1);

            // Optimizer moment buffers start at zero.
            _mWeights[i] = Matrix.Zeros(_weights[i].RowCount, _weights[i].ColumnCount);
            _vWeights[i] = Matrix.Zeros(_weights[i].RowCount, _weights[i].ColumnCount);
            _mBiases[i] = Matrix.Zeros(_biases[i].RowCount, _biases[i].ColumnCount);
            _vBiases[i] = Matrix.Zeros(_biases[i].RowCount, _biases[i].ColumnCount);
            _mGamma[i] = Matrix.Zeros(_gamma[i].RowCount, _gamma[i].ColumnCount);
            _vGamma[i] = Matrix.Zeros(_gamma[i].RowCount, _gamma[i].ColumnCount);
            _mBeta[i] = Matrix.Zeros(_beta[i].RowCount, _beta[i].ColumnCount);
            _vBeta[i] = Matrix.Zeros(_beta[i].RowCount, _beta[i].ColumnCount);
        }
    }
    private Matrix Swish(Matrix x)
    {
        // Swish: x * sigmoid(x), element-wise.
        return x.PointwiseMultiply(MatrixFunctions.Sigmoid(x));
    }

    private Matrix Mish(Matrix x)
    {
        // Mish: x * tanh(softplus(x)), element-wise.
        return x.PointwiseMultiply(MatrixFunctions.Tanh(MatrixFunctions.Softplus(x)));
    }

    private Matrix GELU(Matrix x)
    {
        // Tanh approximation of GELU, computed per element
        // (Math.Pow cannot be applied to a whole Matrix).
        return x.Map(v => 0.5 * v * (1 + Math.Tanh(Math.Sqrt(2.0 / Math.PI) * (v + 0.044715 * v * v * v))));
    }
    private void SetActivationFunctions()
    {
        Random rand = new Random();
        for (int i = 0; i < _activationOptions.Length; i++)
        {
            // Randomly choose one of the seven activation functions for each
            // layer and cache the matching delegate so the forward and
            // backward passes apply the same nonlinearity.
            _activationOptions[i] = (ActivationFunction)rand.Next(7);
            _activationFunctions[i] = GetActivation(_activationOptions[i]);
        }
    }
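
    // Added helper (a minimal sketch): maps an ActivationFunction choice to
    // its delegate. It assumes the MatrixFunctions entries already referenced
    // in this paste (ReLU, Sigmoid, Tanh, LeakyReLU) plus the Swish/Mish/GELU
    // methods above, and defaults to ReLU for any unrecognized value.
    private Func<Matrix, Matrix> GetActivation(ActivationFunction f)
    {
        switch (f)
        {
            case ActivationFunction.Sigmoid: return MatrixFunctions.Sigmoid;
            case ActivationFunction.Tanh: return MatrixFunctions.Tanh;
            case ActivationFunction.LeakyReLU: return MatrixFunctions.LeakyReLU;
            case ActivationFunction.Swish: return Swish;
            case ActivationFunction.Mish: return Mish;
            case ActivationFunction.GELU: return GELU;
            default: return MatrixFunctions.ReLU;
        }
    }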
    private Matrix XavierInitialization(int rows, int cols, Random rand)
    {
        // Xavier/Glorot scaling keeps activation variance roughly constant
        // across layers.
        double scale = Math.Sqrt(2.0 / (rows + cols));
        return Matrix.RandomMatrix(rows, cols, rand) * scale;
    }

    private Matrix LayerNormalization(Matrix x, Matrix gamma, Matrix beta, int layerIndex)
    {
        // Normalize across the feature axis, track running statistics with an
        // exponential moving average, then apply the learned scale and shift.
        Matrix mean = MatrixFunctions.Mean(x, axis: 1);
        Matrix variance = MatrixFunctions.Variance(x, axis: 1);
        _movingMeans[layerIndex] = (_movingMeans[layerIndex] * 0.9) + (mean * 0.1);
        _movingVariances[layerIndex] = (_movingVariances[layerIndex] * 0.9) + (variance * 0.1);
        Matrix normalized = (x - mean) / MatrixFunctions.Sqrt(variance + _epsilon);
        return gamma.PointwiseMultiply(normalized) + beta;
    }
    private Matrix FeedForward(Matrix input, bool training)
    {
        Matrix outputs = input;   // column vector, shape (features x 1)
        for (int i = 0; i < _weights.Length; i++)
        {
            if (training && _dropoutRate > 0.0)
            {
                // Inverted dropout: zero units with probability _dropoutRate,
                // then rescale survivors so no scaling is needed at inference.
                _dropoutMasks[i] = Matrix.RandomMatrix(outputs.RowCount, outputs.ColumnCount);
                _dropoutMasks[i] = _dropoutMasks[i].Map(x => x < _dropoutRate ? 0 : 1);
                outputs = outputs.PointwiseMultiply(_dropoutMasks[i]);
                outputs *= 1.0 / (1.0 - _dropoutRate);
            }
            // Weights are (out x in) and multiply the column vector from the left.
            outputs = _weights[i] * outputs + _biases[i];
            // Apply the per-layer activation chosen in SetActivationFunctions.
            outputs = _activationFunctions[i](outputs);
        }
        return outputs;
    }
    private void Backpropagation(Matrix input, Matrix target)
    {
        // Forward pass, caching each layer's activations.
        Matrix[] outputs = new Matrix[_weights.Length + 1];
        outputs[0] = input;
        for (int i = 0; i < _weights.Length; i++)
        {
            outputs[i + 1] = _weights[i] * outputs[i] + _biases[i];
            outputs[i + 1] = _activationFunctions[i](outputs[i + 1]);
        }

        // Backward pass: output error, then propagate through the transposed
        // weights. MapDerivative is assumed to multiply element-wise by the
        // derivative of the given activation function.
        Matrix[] errors = new Matrix[_weights.Length];
        errors[_weights.Length - 1] = outputs[^1] - target;
        for (int i = _weights.Length - 2; i >= 0; i--)
        {
            errors[i] = (_weights[i + 1].Transpose() * errors[i + 1]).MapDerivative(_activationFunctions[i]);
        }

        Matrix[] gradientsWeights = new Matrix[_weights.Length];
        Matrix[] gradientsBiases = new Matrix[_weights.Length];
        Matrix[] gradientsGamma = new Matrix[_weights.Length];
        Matrix[] gradientsBeta = new Matrix[_weights.Length];
        for (int i = 0; i < _weights.Length; i++)
        {
            gradientsWeights[i] = errors[i] * outputs[i].Transpose();   // (out x 1)(1 x in) = (out x in)
            gradientsBiases[i] = errors[i];
            // Rough gamma/beta gradients based on the running statistics;
            // PointwiseMultiply keeps the (out x 1) shapes conformable.
            gradientsGamma[i] = errors[i].PointwiseMultiply(_movingMeans[i]);
            gradientsBeta[i] = errors[i].PointwiseMultiply(_movingVariances[i]);
        }

        Optimizer(gradientsWeights, gradientsBiases, gradientsGamma, gradientsBeta);

        // Regularization: weight decay applied directly after the optimizer step.
        if (_regularizer != Regularizer.None)
        {
            for (int i = 0; i < _weights.Length; i++)
            {
                if (_regularizer == Regularizer.L1)
                {
                    _weights[i] -= _lambda * MatrixFunctions.Sign(_weights[i]);
                }
                else if (_regularizer == Regularizer.L2)
                {
                    _weights[i] -= _lambda * _weights[i];
                }
            }
        }
    }
    public void Train(Matrix[] inputs, Matrix[] targets, int epochs, int batchSize)
    {
        for (int epoch = 0; epoch < epochs; epoch++)
        {
            for (int i = 0; i < inputs.Length; i += batchSize)
            {
                Matrix[] batchInputs = inputs.Skip(i).Take(batchSize).ToArray();
                Matrix[] batchTargets = targets.Skip(i).Take(batchSize).ToArray();
                // Iterate over the actual batch length so a final partial
                // batch does not index past the end of the arrays.
                for (int j = 0; j < batchInputs.Length; j++)
                {
                    // Backpropagation performs its own forward pass.
                    Backpropagation(batchInputs[j], batchTargets[j]);
                }
            }
            LearningRateScheduler(epoch);
        }
    }
    public Matrix Predict(Matrix input)
    {
        // Inference pass: dropout disabled.
        return FeedForward(input, false);
    }

    private void LearningRateScheduler(int epoch)
    {
        // Inverse-time decay of the learning rate.
        _learningRate = _initialLearningRate / (1 + _decayRate * epoch);
    }
    private void Optimizer(Matrix[] gradientsWeights, Matrix[] gradientsBiases, Matrix[] gradientsGamma, Matrix[] gradientsBeta)
    {
        double beta1 = 0.9;     // first-moment (momentum) decay
        double beta2 = 0.999;   // second-moment (RMS) decay
        double epsilon = 1e-8;  // small constant to prevent division by zero
        _t++;                   // one global time step per optimizer call

        if (_optimizer == "Lookahead")
        {
            // LookaheadOptimizer iterates over every layer itself.
            LookaheadOptimizer(gradientsWeights, gradientsBiases);
            return;
        }

        for (int i = 0; i < _weights.Length; i++)
        {
            if (_optimizer == "Adam")
            {
                // Exponential moving averages of the gradients and their squares.
                _mWeights[i] = (beta1 * _mWeights[i]) + ((1 - beta1) * gradientsWeights[i]);
                _vWeights[i] = (beta2 * _vWeights[i]) + ((1 - beta2) * gradientsWeights[i].PointwiseMultiply(gradientsWeights[i]));
                _mBiases[i] = (beta1 * _mBiases[i]) + ((1 - beta1) * gradientsBiases[i]);
                _vBiases[i] = (beta2 * _vBiases[i]) + ((1 - beta2) * gradientsBiases[i].PointwiseMultiply(gradientsBiases[i]));
                _mGamma[i] = (beta1 * _mGamma[i]) + ((1 - beta1) * gradientsGamma[i]);
                _vGamma[i] = (beta2 * _vGamma[i]) + ((1 - beta2) * gradientsGamma[i].PointwiseMultiply(gradientsGamma[i]));
                _mBeta[i] = (beta1 * _mBeta[i]) + ((1 - beta1) * gradientsBeta[i]);
                _vBeta[i] = (beta2 * _vBeta[i]) + ((1 - beta2) * gradientsBeta[i].PointwiseMultiply(gradientsBeta[i]));

                // Bias-corrected moment estimates.
                Matrix mHatWeights = _mWeights[i] / (1 - Math.Pow(beta1, _t));
                Matrix vHatWeights = _vWeights[i] / (1 - Math.Pow(beta2, _t));
                Matrix mHatBiases = _mBiases[i] / (1 - Math.Pow(beta1, _t));
                Matrix vHatBiases = _vBiases[i] / (1 - Math.Pow(beta2, _t));
                Matrix mHatGamma = _mGamma[i] / (1 - Math.Pow(beta1, _t));
                Matrix vHatGamma = _vGamma[i] / (1 - Math.Pow(beta2, _t));
                Matrix mHatBeta = _mBeta[i] / (1 - Math.Pow(beta1, _t));
                Matrix vHatBeta = _vBeta[i] / (1 - Math.Pow(beta2, _t));

                _weights[i] -= (_learningRate * mHatWeights) / (MatrixFunctions.Sqrt(vHatWeights) + epsilon);
                _biases[i] -= (_learningRate * mHatBiases) / (MatrixFunctions.Sqrt(vHatBiases) + epsilon);
                _gamma[i] -= (_learningRate * mHatGamma) / (MatrixFunctions.Sqrt(vHatGamma) + epsilon);
                _beta[i] -= (_learningRate * mHatBeta) / (MatrixFunctions.Sqrt(vHatBeta) + epsilon);
            }
            else if (_optimizer == "AdaGrad")
            {
                // Accumulate squared gradients; the effective step size
                // shrinks monotonically for frequently-updated parameters.
                _vWeights[i] += gradientsWeights[i].PointwiseMultiply(gradientsWeights[i]);
                _vBiases[i] += gradientsBiases[i].PointwiseMultiply(gradientsBiases[i]);
                _vGamma[i] += gradientsGamma[i].PointwiseMultiply(gradientsGamma[i]);
                _vBeta[i] += gradientsBeta[i].PointwiseMultiply(gradientsBeta[i]);
                _weights[i] -= (_learningRate * gradientsWeights[i]) / (MatrixFunctions.Sqrt(_vWeights[i]) + epsilon);
                _biases[i] -= (_learningRate * gradientsBiases[i]) / (MatrixFunctions.Sqrt(_vBiases[i]) + epsilon);
                _gamma[i] -= (_learningRate * gradientsGamma[i]) / (MatrixFunctions.Sqrt(_vGamma[i]) + epsilon);
                _beta[i] -= (_learningRate * gradientsBeta[i]) / (MatrixFunctions.Sqrt(_vBeta[i]) + epsilon);
            }
            else if (_optimizer == "RMSProp")
            {
                // Exponential moving average of squared gradients (decay 0.9).
                _vWeights[i] = (beta1 * _vWeights[i]) + ((1 - beta1) * gradientsWeights[i].PointwiseMultiply(gradientsWeights[i]));
                _vBiases[i] = (beta1 * _vBiases[i]) + ((1 - beta1) * gradientsBiases[i].PointwiseMultiply(gradientsBiases[i]));
                _vGamma[i] = (beta1 * _vGamma[i]) + ((1 - beta1) * gradientsGamma[i].PointwiseMultiply(gradientsGamma[i]));
                _vBeta[i] = (beta1 * _vBeta[i]) + ((1 - beta1) * gradientsBeta[i].PointwiseMultiply(gradientsBeta[i]));
                _weights[i] -= (_learningRate * gradientsWeights[i]) / (MatrixFunctions.Sqrt(_vWeights[i]) + epsilon);
                _biases[i] -= (_learningRate * gradientsBiases[i]) / (MatrixFunctions.Sqrt(_vBiases[i]) + epsilon);
                _gamma[i] -= (_learningRate * gradientsGamma[i]) / (MatrixFunctions.Sqrt(_vGamma[i]) + epsilon);
                _beta[i] -= (_learningRate * gradientsBeta[i]) / (MatrixFunctions.Sqrt(_vBeta[i]) + epsilon);
            }
            else
            {
                // Plain stochastic gradient descent.
                _weights[i] -= _learningRate * gradientsWeights[i];
                _biases[i] -= _learningRate * gradientsBiases[i];
                _gamma[i] -= _learningRate * gradientsGamma[i];
                _beta[i] -= _learningRate * gradientsBeta[i];
            }
        }
    }
}
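
// Example usage: a minimal sketch, not part of the original paste. The data
// is random and the hyperparameters are illustrative, not tuned; the layer
// sizes (4 -> 8 -> 2) and the Matrix.RandomMatrix overloads match the API
// assumed throughout this listing.
public static class Example
{
    public static void Main()
    {
        var nn = new GithubNeuralNetwork(
            learningRate: 0.01, epsilon: 1e-8, optimizer: "Adam",
            decayRate: 0.001, dropoutRate: 0.2,
            regularizer: Regularizer.L2, lambda: 1e-4,
            lookaheadAlpha: 0.5, lookaheadBeta: 0.9,
            4, 8, 2);

        var rand = new Random(42);
        // Inputs and targets are column vectors, as FeedForward expects.
        Matrix[] inputs = Enumerable.Range(0, 32)
            .Select(_ => Matrix.RandomMatrix(4, 1, rand)).ToArray();
        Matrix[] targets = Enumerable.Range(0, 32)
            .Select(_ => Matrix.RandomMatrix(2, 1, rand)).ToArray();

        nn.Train(inputs, targets, epochs: 10, batchSize: 8);
        Matrix prediction = nn.Predict(inputs[0]);
    }
}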