Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #include <iostream>
- #include <vector>
- #include <cmath>
- #include <iomanip>
- #include <algorithm>
- #include <random>
- #include <memory.h>
- using namespace std;
- using ld = long long;
- using ll = long long;
// Logistic sigmoid activation: maps any real x into the open interval (0, 1).
double f(double x) {
    return 1.0 / (1.0 + exp(-x));
}
// Derivative of the logistic sigmoid with respect to its PRE-ACTIVATION
// input x: f'(x) = f(x) * (1 - f(x)).
//
// BUG FIX: the old body returned (1 - x) * x, which is only correct when x
// is already the sigmoid OUTPUT. Every call site in this file passes the
// pre-activation in[layer][i], so the backprop error terms were wrong.
double df_dx(double x) {
    const double s = 1.0 / (1.0 + exp(-x));
    return s * (1.0 - s);
}
// Network topology caps: LAYERS layers, at most NEURONS_IN_LAYER neurons each.
// The actual per-layer neuron counts live in neuronsInLayers inside main().
const int LAYERS = 3;
const int NEURONS_IN_LAYER = 2;
// Per-neuron pre-activation (weighted input sum) for the current forward pass.
double in[LAYERS][NEURONS_IN_LAYER];
// Per-neuron activation f(in); layer 0 holds the raw network input.
double out[LAYERS][NEURONS_IN_LAYER];
// Backpropagated error terms (dLoss/d_preactivation) per neuron.
double deltas[LAYERS][NEURONS_IN_LAYER];
// Batch-accumulated dLoss/dw for each connection. // [layer][from][to];
double gradient[LAYERS][NEURONS_IN_LAYER][NEURONS_IN_LAYER]; // [layer][from][to];
// Connection weights; w[l][from][to] links layer l to layer l+1. // [layer][from][to];
double w[LAYERS][NEURONS_IN_LAYER][NEURONS_IN_LAYER]; // [layer][from][to];
- int main() {
- vector<int> neuronsInLayers = {2, 2, 1};
- for(int i = 0; i < LAYERS - 1; i++) {
- int numberOfNeuronsInCurrentLayer = neuronsInLayers[i];
- int numberOfNeuronInNextLayer = neuronsInLayers[i + 1];
- for(int from = 0; from < numberOfNeuronsInCurrentLayer; from++) {
- for(int to = 0; to < numberOfNeuronInNextLayer; to++) {
- w[i][from][to] = 1.0 * rand() / RAND_MAX;
- }
- }
- }
- const vector<pair<vector<double>, double>> data = {
- {{0, 0}, 0},
- {{0, 1}, 1},
- {{1, 0}, 1},
- {{1, 1}, 0}
- };
- const double learningRate = 0.001;
- for(int epoch = 0; epoch < 200; epoch++) {
- memset(in, 0, sizeof in);
- memset(out, 0, sizeof out);
- memset(gradient, 0, sizeof gradient);
- memset(deltas, 0, sizeof deltas);
- for(const auto&[input, output] : data) {
- for(int i = 0; i < size(input); i++) {
- in[0][i] = out[0][i] = input[i];
- }
- for(int layer = 1; layer < LAYERS; layer++) {
- for(int i = 0; i < neuronsInLayers[layer]; i++) {
- for(int j = 0; j < neuronsInLayers[layer - 1]; j++) {
- in[layer][i] += w[layer - 1][j][i] * out[layer - 1][j];
- }
- out[layer][i] = f(in[layer][i]);
- }
- }
- deltas[LAYERS - 1][0] = (out[LAYERS - 1][0] - output) * df_dx(in[LAYERS - 1][0]);
- for(int layer = LAYERS - 2; layer >= 0; layer--) {
- for(int j = 0; j < neuronsInLayers[layer]; j++) {
- double sum = 0;
- for(int i = 0; i < neuronsInLayers[layer + 1]; i++) {
- sum += w[layer][j][i] * deltas[layer + 1][i];
- }
- deltas[layer][j] = sum * df_dx(in[layer][j]);
- }
- }
- for(int layer = 0; layer < LAYERS - 1; layer++) {
- for(int j = 0; j < neuronsInLayers[layer]; j++) {
- for(int i = 0; i < neuronsInLayers[layer + 1]; i++) {
- gradient[layer][j][i] += deltas[layer + 1][i] * out[layer][j] / size(data);
- }
- }
- }
- }
- for(int layer = 0; layer < LAYERS - 1; layer++) {
- for(int j = 0; j < neuronsInLayers[layer]; j++) {
- for(int i = 0; i < neuronsInLayers[layer + 1]; i++) {
- w[layer][j][i] += gradient[layer][j][i] * learningRate;
- }
- }
- }
- }
- auto get = [&](vector<double> input) {
- memset(in, 0, sizeof in);
- memset(out, 0, sizeof out);
- memset(gradient, 0, sizeof gradient);
- memset(deltas, 0, sizeof deltas);
- for(int i = 0; i < size(input); i++) {
- in[0][i] = out[0][i] = input[i];
- }
- for(int layer = 1; layer < LAYERS; layer++) {
- for(int i = 0; i < neuronsInLayers[layer]; i++) {
- for(int j = 0; j < neuronsInLayers[layer - 1]; j++) {
- in[layer][i] += w[layer - 1][j][i] * out[layer - 1][j];
- }
- out[layer][i] = f(in[layer][i]);
- }
- }
- return out[LAYERS - 1][0];
- };
- for (int i = 0; i < 2; i++) {
- for (int j = 0; j < 2; j++) {
- vector<double> v = {i * 1.0, j * 1.0};
- cout << fixed << setprecision(5) << i << " ^ " << j << " = " << get(v) << endl;
- }
- }
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement