Advertisement
riabcis

Untitled

Mar 24th, 2018
534
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 11.13 KB | None | 0 0
  1. // Ann.cpp : Defines the entry point for the console application.
  2. //
  3.  
  4. #include "stdafx.h"
  5. # include <cmath>
  6. #include <math.h>
  7. #include <vector>
  8. #include <iostream>
  9. #include <iomanip>
  10. #include <fstream>
  11. #include <string>
  12. using namespace std;
// Generates a random bit (0 or 1), uniform distribution, used to build XOR
// training samples. Can be removed later if sample data comes from files.
int randint();
  15. struct Sample
  16. {
  17. double input[2];
  18. double output[2];
  19. string ToString() {
  20. string str;
  21. str = "input: " + to_string(input[0]) + " " + to_string(input[1]) + " output: " + to_string(output[0]) + " " + to_string(output[1])+"\n";
  22. return str;
  23. }
  24. };
  25.  
  26. class Data
  27. {
  28. public:
  29. int getNumberOfInputs() { return inputs; }
  30. int getNumberOfOutputs() { return outputs; }
  31.  
  32. double *getInput(int index)
  33. {
  34. double *input = data[index].input;
  35. return input;
  36. }
  37.  
  38. double *getOutput(int index)
  39. {
  40. double *output = data[index].output;
  41. return output;
  42. }
  43.  
  44. int getNumberOfSamples() { return samples; }
  45.  
  46. void addSample(Sample sample)
  47. {
  48. data.push_back(sample);
  49. samples++;
  50. //cout << sample.ToString();
  51. }
  52.  
  53. void setSizes(int input_size, int output_size)
  54. {
  55. inputs = input_size;
  56. outputs = output_size;
  57. }
  58.  
  59. protected:
  60. std::vector<Sample> data;
  61. int inputs;
  62. int outputs;
  63. int samples = 0;
  64. };
  65.  
  66. class XOR : public Data
  67. {
  68. public:
  69. void generate(int n)
  70. {
  71. for (int i = 0; i < n; i++)
  72. {
  73. double input1 = randint();
  74. double input2 = randint();
  75. double output1 = input1 == input2;
  76. double output2 = input1 != input2;
  77. addSample({ { input1, input2 },{ output1, output2 } });
  78. }
  79. }
  80.  
  81. XOR()
  82. {
  83. inputs = 2;
  84. outputs = 2;
  85. samples = 0;
  86. }
  87. void printInputs(int index)
  88. {
  89. cout << index << " index inputs: " << endl;
  90. for (int i = 0; i < inputs; i++)
  91. {
  92. cout << getInput(index)[i] << " ";
  93. }
  94. cout << endl;
  95. }
  96.  
  97. void printOutputs(int index)
  98. {
  99. cout << index << " index outputs: " << endl;
  100. for (int i = 0; i < outputs; i++)
  101. {
  102. cout << fixed << setprecision(2) << data[index].output[i] << " ";
  103. }
  104. cout << endl;
  105. }
  106. };
  107.  
// Network topology description, filled in by main().
struct Topology
{
    int L;//number of layers
    std::vector<int> l;//neuron count per layer (without bias neurons)
} topolygy;  // global instance (sic: "topolygy")
  113.  
int kiek = 0;//for statistics - ignore (counts multiply-accumulates in train(); printed in main)
double f(double x);        // activation function (logistic sigmoid)
double f_deriv(double x);  // derivative of the activation function
double gL(double a, double z, double t);  // output-layer error term
// Recursive hidden-layer error term (definitions below main()).
double gl(int layer_id, int w_i, int w_j, double *a_arr, double *z_arr, double *t_arr, double *w_arr, int *s,int *sw, int L, int *l);
// Gradient of the error wrt the weight from neuron w_i to next-layer neuron w_j.
double w_gradient(int layer_id, int w_i, int w_j, double *a_arr, double *z_arr, double *t_arr, double *w_arr,int *s, int *sw, int L, int *l);
double delta_w(double grad, double dw);  // momentum-based weight step

const double ETA = 0.1;    // learning rate
const double ALPHA = 0.5;  // momentum coefficient
  124.  
  125. class AnnBase {
  126. public:
  127. virtual void prepare(Topology top) = 0;
  128. virtual void init(Topology top, double w_arr_1[]) = 0;
  129. virtual void train(double *a, double *b) = 0;
  130. virtual void feedForward(double *a, double *b) = 0;//a and b temp vars for now.
  131. virtual void destroy() = 0;
  132. };
  133.  
// Serial (single-threaded) double-precision implementation of AnnBase.
// All network state lives in flat arrays indexed via the s/sw offset tables.
class AnnSerialDBL : public AnnBase {
public:
    void prepare(Topology top);
    void init(Topology top, double w_arr_1[]);
    void train(double *a, double *b);
    void feedForward(double *a,double *b);
    void destroy();
public:
    int sum1;//temp var to keep the length of z, so z could be reset for calcs. Can adapt code later, so this could be removed
    int L;            // number of layers
    int * l;          // neurons per layer INCLUDING the bias neuron, size L
    int * s;          // start index of each layer inside a_arr / z_arr
    double * a_arr;   // activations; each layer's last slot is the bias (fixed to 1)
    double * z_arr;   // weighted sums (pre-activation values)
    int * W;          // weight count per layer transition
    int * sw;         // start index of each transition's weights inside w_arr
    double * w_arr;   // weights, laid out sw[i] + j*(l[i+1]-1) + k (see train())
    double * dw_arr;  // previous weight deltas, used as momentum terms
    double * t_arr;   // target output vector for the current sample
};
  154. int main()
  155. {
  156. topolygy.L = 5;
  157. topolygy.l.push_back(2);
  158. topolygy.l.push_back(5);
  159. topolygy.l.push_back(6);
  160. topolygy.l.push_back(7);
  161. topolygy.l.push_back(2);
  162. AnnSerialDBL labas;
  163. labas.prepare(topolygy);
  164. labas.init(topolygy, NULL);
  165.  
  166.  
  167. //****temp
  168. int sum = 0;
  169. for (int i = 0; i < labas.L; i++) {
  170. sum += topolygy.l.at(i) + 1;
  171. }
  172. //temp***
  173.  
  174.  
  175. XOR xo;
  176. xo.generate(10000);
  177. for (int i = 0; i < xo.getNumberOfSamples(); i++) {
  178. labas.train(xo.getInput(i), xo.getOutput(i));
  179. }
  180.  
  181.  
  182. //****temp
  183. for (int i = 0; i < sum; i++) {
  184. // cout << labas.a_arr[i] << endl;
  185. }
  186. //******temp
  187.  
  188.  
  189. //Checking results(all combinations 0 and 1)
  190. for (double i = 0; i < 2;i++){
  191. for (double j = 0; j < 2; j++) {
  192. double input[] = {i ,j };
  193. double output[] = { 0,0 };
  194. labas.feedForward(input, output);
  195.  
  196. cout << endl << "input : " << input[0] << " " << input[1] << endl;
  197. cout << endl << "output: " << output[0] << " " << output[1] << endl<<endl;
  198. cout << "--------------------------------------------------------------------" << endl;
  199. }
  200. }
  201.  
  202. //Checking results(all combinations 0 and 1)
  203. for (double i = 0; i < 100; i++) {
  204. double input[] = { randint()*1.0, randint()*1.0 };
  205. double output[] = { 0,0 };
  206. labas.feedForward(input, output);
  207.  
  208. cout << endl << "input : " << input[0] << " " << input[1] << endl;
  209. cout << endl << "output: " << output[0] << " " << output[1] << endl << endl;
  210. cout << "--------------------------------------------------------------------" << endl;
  211. }
  212.  
  213. labas.destroy();
  214.  
  215. int a;
  216. cout << kiek;
  217. cin >> a;
  218.  
  219. return 0;
  220. }
  221.  
  222. //returns random int, either 0 or 1
  223. int randint() {
  224. double r = ((double)rand() / (RAND_MAX));
  225. int a = 0;
  226. if (r > 0.5) {
  227. a = 1;
  228. }
  229. else
  230. {
  231. a = 0;
  232. }
  233. return a;
  234. }
  235.  
  236. void AnnSerialDBL::prepare(Topology top)
  237. {
  238. l = new int[top.L];
  239. s = new int[top.L];
  240.  
  241. int sum = 0;
  242. int mult = 0;
  243. for (int i = 0; i < top.L; i++) {
  244. sum += top.l.at(i) + 1;
  245. }
  246. sum1 = sum;
  247. for (int i = 0; i < top.L - 1; i++) {
  248. mult += (top.l.at(i) + 1)*top.l.at(i+1);
  249. }
  250. a_arr = new double[sum];
  251. z_arr = new double[sum];
  252.  
  253. W = new int[top.L];
  254. sw = new int[top.L];
  255.  
  256. w_arr = new double[mult];
  257. dw_arr = new double[mult];
  258.  
  259. t_arr = new double[top.l.at(top.L - 1)];
  260. }
  261.  
  262. void AnnSerialDBL::init(Topology top, double w_arr_1[] = NULL)
  263. {
  264. L = top.L;
  265. //Neuronu kiekiai sluoksnyje
  266. for (int i = 0; i < top.L; i++) {
  267. l[i] = top.l.at(i) + 1;
  268. }
  269.  
  270. //Sluoksniu pradzios indeksai
  271. for (int i = 0; i < top.L; i++) {
  272. s[i] = 0;
  273. for (int j = i; j > 0; j--) {
  274. s[i] += l[j - 1];
  275. }
  276. }
  277.  
  278. //Bias neuronai
  279. for (int i = 0; i < top.L - 1; i++) {
  280. a_arr[s[i + 1] - 1] = 1;
  281. }
  282.  
  283. //Svoriu kiekiai l-ame sluoksnyje
  284. for (int i = 0; i < top.L - 1; i++) {
  285. W[i] = l[i] * (l[i + 1] - 1);
  286. //cout << "Svoriu sk: " << W[i] << " Pradzios index: ";
  287. sw[i] = 0;
  288. if (i != 0) {
  289. for (int j = 0; j < i; j++) {
  290. sw[i] += W[j];
  291. }
  292. }
  293. if (w_arr_1 == NULL) {
  294. for (int j = 0; j < W[i]; j++) {
  295. w_arr[sw[i] + j] = (double)rand() / double(RAND_MAX);
  296. //cout << w_arr[sw[i] + j]<< endl;
  297. dw_arr[sw[i] + j] = 0;
  298. }
  299. }
  300. else {
  301. w_arr = w_arr_1; //ar reikia pokycius issisaugoti irgi?
  302. }
  303.  
  304. //cout << sw[i] << " " << endl;
  305. }
  306. }
  307.  
// One online-training step: forward pass on input a, then backpropagation
// toward target b, updating every weight with a momentum term (delta_w).
// NOTE(review): input and target copies are hard-coded to 2 values each,
// matching the XOR setup — generalize before reusing with other topologies.
void AnnSerialDBL::train(double *a,double *b)
{
    a_arr[0] = a[0];
    a_arr[1] = a[1];

    // Reset all weighted sums left over from the previous sample.
    for (int j = 0; j < sum1; j++) {
        z_arr[j] = 0;
    }

    //FFEEED FORWARD
    for (int i = 0; i < L - 1; i++) {//iterate over layers
        for (int j = 0; j < l[i]; j++) { //over neurons of layer i (incl. bias)
            for (int k = 0; k < l[i + 1] - 1; k++) {//over next layer's z (bias excluded)
                // Weight from neuron j (layer i) to neuron k (layer i+1) lives
                // at sw[i] + j*(l[i+1]-1) + k.
                z_arr[s[i + 1] + k] += w_arr[sw[i] + k + j*(l[i + 1] - 1)] * a_arr[s[i] + j];
                kiek++;//temp: global multiply-accumulate counter, printed in main()
            }
        }
        for (int k = 0; k < l[i + 1] - 1; k++) {//activate next layer's z values
            a_arr[s[i + 1] + k] = f(z_arr[s[i + 1] + k]);
        }
    }

    // Target vector for this sample (2 outputs, hard-coded as above).
    t_arr[0] = b[0];
    t_arr[1] = b[1];

    //back propagation: walk the layer transitions from last to first
    for (int i = L-2; i >=0; i--) {//over layers
        for (int j = 0; j < l[i]; j++) {//over neurons
            for (int k = 0; k < l[i + 1] - 1; k++) {//over next layer's neurons
                // Momentum update: new delta from the gradient and the previous delta.
                dw_arr[sw[i] + k+j*(l[i + 1] - 1)] = delta_w(w_gradient(i, j, k, a_arr, z_arr, t_arr, w_arr, s,sw, L, l), dw_arr[sw[i] + k+ j*(l[i + 1] - 1)]);
                w_arr[sw[i] + k+ j*(l[i + 1] - 1)] += dw_arr[sw[i] + k+ j*(l[i + 1] - 1)];
            }
        }
    }

}
  351.  
// Inference only: propagates input a through the network and writes a one-hot
// decision into b — whichever of the two output activations is larger wins.
// NOTE(review): like train(), hard-coded to 2 inputs / 2 outputs.
void AnnSerialDBL::feedForward(double *a, double *b)
{
    a_arr[0] = a[0];
    a_arr[1] = a[1];

    // Clear weighted sums from any previous call.
    for (int j = 0; j < sum1; j++) {
        z_arr[j] = 0;
    }
    //FFEEED FORWARD (same loop structure as in train())
    for (int i = 0; i < L - 1; i++) {//iterate over layers
        for (int j = 0; j < l[i]; j++) { //over neurons of layer i (incl. bias)
            for (int k = 0; k < l[i + 1] - 1; k++) {//over next layer's z (bias excluded)
                z_arr[s[i + 1] + k] += w_arr[sw[i] + k + j*(l[i + 1] - 1)] * a_arr[s[i] + j];
            }
        }
        for (int k = 0; k < l[i + 1] - 1; k++) {//activate next layer's z values
            a_arr[s[i + 1] + k] = f(z_arr[s[i + 1] + k]);
        }
    }

    // Binarize the two output activations into a one-hot answer.
    if (a_arr[s[L - 1]] > a_arr[s[L - 1] + 1]) {
        b[0] = 1;
        b[1] = 0;
    }
    else {
        b[0] = 0;
        b[1] = 1;
    }
}
  388.  
  389. void AnnSerialDBL::destroy()
  390. {
  391. delete l;
  392. l = NULL;
  393. delete s;
  394. s = NULL;
  395.  
  396. delete a_arr;
  397. a_arr = NULL;
  398. delete z_arr;
  399. z_arr = NULL;
  400.  
  401. delete W;
  402. W = NULL;
  403. delete sw;
  404. sw = NULL;
  405.  
  406. delete w_arr;
  407. w_arr = NULL;
  408. delete dw_arr;
  409. dw_arr = NULL;
  410.  
  411. delete t_arr;
  412. t_arr = NULL;
  413. }
  414.  
  415. double f(double x) {
  416. double y = 1+exp(-x);
  417. //temp*********************
  418. if (y == 0) {
  419. cout << "Error 1";
  420. }
  421. if ((y-1) == 0) {
  422. //cout << "Error 2";
  423. }
  424. //temp**********************
  425. return 1 / y;
  426. }
  427.  
  428.  
  429. double f_deriv(double x) {
  430. //Temp**********
  431. double y = pow((1 + exp(-x)), 2);
  432. double z = exp(-x);
  433. if (y == 0) {
  434. cout << "Error 3";
  435. }
  436. if (z == 0) {
  437. cout << "Error 4";
  438. }
  439. //temp**********************
  440. return exp(-x) / pow((1 + exp(-x)), 2);
  441. }
  442.  
  443. double gL(double a, double z, double t) {
  444. double w = f_deriv(z) * (a - t);
  445. //cout << "z: " << z << " a: " << a << " t: " << t << endl;
  446. return w;
  447. }
  448.  
// Recursive backprop error term for neuron w_j in hidden layer layer_id:
// f'(z) times the weighted sum of the next layer's error terms. Recursion
// bottoms out when the NEXT layer is the output layer (layer_id + 2 == L),
// where gL() supplies the error directly.
double gl(int layer_id, int w_i, int w_j, double *a_arr, double *z_arr, double *t_arr, double *w_arr, int *s,int *sw, int L, int *l) {
    double w = f_deriv(z_arr[s[layer_id] + w_j]);
    double sum = 0;
    for (int i = 0; i < l[layer_id + 1] - 1; i++) {
        if (layer_id + 2 == L) {
            // NOTE(review): this index ignores w_j. Given the layout used in
            // train() (sw[i] + k + j*(l[i+1]-1)), the weight from neuron w_j
            // to next-layer neuron i would be
            // w_arr[sw[layer_id] + i + w_j*(l[layer_id+1]-1)] — verify intent.
            sum += w_arr[sw[layer_id] + i] * gL(a_arr[s[layer_id + 1] + i], z_arr[s[layer_id + 1]+ i], t_arr[i]);
        }
        else {
            // NOTE(review): likewise suspicious — here the index ignores i,
            // so every iteration reuses the same weight. Confirm intended.
            sum += w_arr[sw[layer_id] + w_j] * gl(layer_id + 1, w_i, i, a_arr, z_arr, t_arr,w_arr, s, sw, L, l);
        }
    }
    return w*sum;
}
  462.  
  463. double w_gradient(int layer_id, int w_i, int w_j, double *a_arr, double *z_arr, double *t_arr, double *w_arr,int *s, int *sw, int L, int *l) {
  464. double w = a_arr[s[layer_id] + w_i];
  465. if (layer_id + 2 == L) {
  466. w *= gL(a_arr[s[layer_id + 1] + w_j], z_arr[s[layer_id + 1] + w_j], t_arr[w_j]);
  467. }
  468. else {
  469. w *= gl(layer_id + 1, w_i, w_j, a_arr, z_arr, t_arr, w_arr, s, sw, L, l);
  470. }
  471. //cout << L << endl;
  472. //cout << w << " layer id:"<< layer_id <<endl;
  473. return w;
  474. }
  475.  
  476. double delta_w(double grad, double dw) {
  477. return (-ETA)*grad + ALPHA*dw;
  478. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement