max2201111 — problemy s normalizaci, jinak OK (problems with normalization, otherwise OK)
May 28th, 2024 · Python
#Usage guide, Mgr. Hynek Mlčoušek, Brno, 2 May 2024
#Save data of the following shape to a local file on your PC. Every row must end with 0 or 1 (this is supervised learning: 1 = ill, 0 = survived/healthy; in the output, green marks 0 and red marks 1). Leave out the # characters and watch the ","

# [ [23.657800719276743,18.859916797201468,0],
# [22.573729142097473,17.96922325097786,0],
# [32.55342396968757,29.463651408558803,0],
# [6.718035041529263,25.704665468161718,1],
# [14.401918566243225,16.770856492924658,0],
# [17.457907312962234,21.76521470574044,0],
# [20.02796946568093,73.45445954770891,1],
# [30.295138369778076,62.901112886193246,1],
# [15.128977804449633,32.40267702110393,0],
# [30.179457395820013,58.982492125646104,1],
# [28.01649701854089,63.92781357637711,1],
# [16.791838457871147,42.33482314089884,0],
# [10.583694293380976,19.61926728942497,0],
# [26.634447074406467,91.96624817360987,1],
# [26.217868623367643,36.400293587062976,0],
# [17.689396788624936,60.79797114006423,1],
# [33.17193822527976,66.75277364959176,1],
# [23.793952755709153,22.57501437360518,0]]

#Click the black button with the triangle at the top left.
#Below the code an option to open a dialog window appears; click it.
#Select and upload the file you prepared in the step above.
#Then find the following string in this code:
###INSERT DATA FROM NEW PATIENTS HERE

#Into the array
# new_persons_results = []
#insert data for a few new patients, without the trailing 0 or 1 but with the same number of columns as the file on your local disk; for the example above that number would be 2
#Right after training (whose progress you can watch on the blue progress bar), the code renders, for each row of new_persons_results, white-grey-black squares obtained by normalizing the supplied data, plus a final square in red or green
#Along with that it prints a real number between 0 and 1 indicating how healthy (close to 0) or ill (close to 1) the patient is
#Numbers in the middle indicate the orange state of the traffic light.
#It is up to physicians to set the thresholds (i.e. probabilities: numbers between 0 and 1) that decide whether a patient is red, orange, or green; a minimal sketch of such a mapping follows this comment block.

#Please send comments and results on real data; ideally the matrix should have hundreds of rows (patients) and tens of columns
#Possible uses: oncological diagnosis vs. a healthy control group, diabetes (present/absent), testing a new drug against a placebo, etc.

#The code also outputs a confusion matrix, i.e. the True Negatives and False Positives plus the correctly classified values, together with accuracy, F1 score, recall, etc.
#Note on the code: this is an experimental version that, besides the code actually needed, contains debugging output, various duplicates, redundant statements, etc.
#At the start of the run the supplied matrix and its normalized version are printed for checking; scroll down with the slider on the right to the images and further outputs

#Thanks to Professor Petr Dostál for the idea for this work and for the data provided, though real data are still needed

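# Added sketch (not the author's code): one possible mapping from the model's
# probability output to the red/orange/green verdict described above. The
# cut-offs 0.3 and 0.7 are placeholders for the physicians to tune.
def triage(probability, low=0.3, high=0.7):
    if probability < low:
        return "green"   # likely healthy (close to 0)
    if probability < high:
        return "orange"  # uncertain band, needs review
    return "red"         # likely ill (close to 1)

# e.g. triage(0.12) -> 'green', triage(0.55) -> 'orange', triage(0.91) -> 'red'
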
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm


from IPython.display import display
from IPython.display import Javascript
display(Javascript('IPython.OutputArea.auto_scroll_threshold = 9999;'))

label_colors = {0: [0, 128, 0], 1: [255, 0, 0]}
label_colors_testing = {0: [0, 128, 0], 1: [255, 0, 0]}


%matplotlib inline


  65. def create_image(data, predictions):
  66.     num_rows, num_columns = len(data), len(data[0])
  67.     image = np.zeros((num_rows, num_columns + 1, 3), dtype=np.uint8)
  68.  
  69.     for i in range(num_rows):
  70.         for j in range(num_columns):
  71.             pixel_value = int(np.interp(data[i][j], [np.min(data), np.max(data)], [0, 255]))
  72.             image[i, j] = np.array([pixel_value] * 3)
  73.  
  74.         # Create a gradient based on the normalized values
  75.         gradient_value = int(np.interp(predictions[i], [0, 1], [0, 255]))
  76.         image[i, -1] = np.array([gradient_value] * 3)
  77.  
  78.     return image
  79.  
  80. def create_image(data, predictions):
  81.     num_rows, num_columns = len(data), len(data[0])
  82.     image = np.zeros((num_rows, num_columns + 1, 3), dtype=np.uint8)
  83.  
  84.     for i in range(num_rows):
  85.         for j in range(num_columns):
  86.             pixel_value = int(np.interp(data[i][j], [np.min(data), np.max(data)], [0, 255]))
  87.             image[i, j] = np.array([pixel_value] * 3)
  88.  
  89.         # Use red for class 0 and green for class 1
  90.         if predictions[i] == 0:
  91.             image[i, -1] = np.array([255, 0, 0])  # Red
  92.         elif predictions[i] == 1:
  93.             image[i, -1] = np.array([0, 128, 0])  # Green
  94.  
  95.     return image
  96.  
  97. def create_image(data, predictions, label_colors):
  98.     num_rows, num_columns = len(data), len(data[0])
  99.     image = np.zeros((num_rows, num_columns + 1, 3), dtype=np.uint8)
  100.  
  101.     for i in range(num_rows):
  102.         for j in range(num_columns):
  103.             pixel_value = int(np.interp(data[i][j], [np.min(data), np.max(data)], [0, 255]))
  104.             image[i, j] = np.array([pixel_value] * 3)
  105.  
  106.         # Use the specified color for the last column based on the label
  107.         image[i, -1] = label_colors[predictions[i]]
  108.  
  109.     return image
  110.  
def create_imageN(data, predictions, label_colors=None):
    num_training_rows = len(data)  # Number of rows based on the data
    num_columns = len(data[0])

    image_training = np.zeros((num_training_rows, num_columns + 1, 3), dtype=np.uint8)

    # Note: the pixel scale is taken from the global X_train_normalized, so this
    # function must be called after the training data have been normalized.
    min_pixel_value = np.min(X_train_normalized)
    max_pixel_value = np.max(X_train_normalized)

    for i in range(num_training_rows):
        # Map each feature value onto the 0-255 grayscale range
        for j in range(num_columns):
            pixel_value = int(np.interp(data[i][j], [min_pixel_value, max_pixel_value], [0, 255]))
            image_training[i, j] = np.array([pixel_value] * 3)

        # By default fill the last column with the grayscale of the last feature
        pixel_value_last = int(np.interp(data[i][-1], [min_pixel_value, max_pixel_value], [0, 255]))
        image_training[i, -1] = np.array([pixel_value_last] * 3)

        # If label colors are provided, overwrite the last column with them
        if label_colors is not None:
            image_training[i, -1] = label_colors[predictions[i]]

    return image_training


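# Example usage (a sketch; X_train_normalized, y_train, and label_colors are
# all defined further below in this script):
#   img = create_imageN(X_train_normalized, y_train, label_colors)
#   plt.imshow(img); plt.axis("off"); plt.show()
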
# Load data from a file
#file_path = 'C:/Users/Hynek/Desktop/example4.txt'
from google.colab import files
uploaded = files.upload()

# This code opens a dialog window for selecting a file from your computer.
import io
import pandas as pd

# We assume a CSV file was uploaded
for fn in uploaded.keys():
  print('User uploaded file "{name}" with length {length} bytes'.format(
      name=fn, length=len(uploaded[fn])))
  path = io.BytesIO(uploaded[fn])  # For files that need to be read as binary objects
  df = pd.read_csv(path)
  print(df.head())  # Prints the first five rows of the DataFrame


all_results = []
#with open(file_path, 'r') as file:
#    file_content = file.read()

# Execute the content as Python code
##exec(file_content)

import os
import shutil
import ast

for filename in uploaded.keys():
    original_path = f"/content/{filename}"
    destination_path = "/content/DATA2"
    shutil.move(original_path, destination_path)
    print(f"File {filename} was moved to {destination_path}")

file_path = '/content/DATA2'  # Path to the file
with open(file_path, 'r') as file:
    code = file.read()

A_list = ast.literal_eval(code)


# Convert to a NumPy array
A = np.array(A_list)
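
# Added sanity check (not in the original script): the instructions above
# require every row to end with a 0/1 label, so verify the uploaded matrix here.
assert A.ndim == 2, "Expected a 2-D matrix with one row per patient"
assert set(np.unique(A[:, -1])).issubset({0, 1}), "Last column must contain only 0/1 labels"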

#exec(code)

# Now, all_results will contain lists corresponding to each row of the file
##print(all_results)

# Assign values to variables dynamically based on the rows of matrix A
for i, row in enumerate(A, start=1):
    globals()[f"person{i}_results"] = list(row)

# Print the assigned variables
for i in range(1, len(A) + 1):
  #  print(f"person{i}_results {globals()[f'person{i}_results']}")
    all_results.append(f"person{i}_results")
##print(all_results)



result_variables = []

# Loop through the variable names and get the corresponding variables using globals()
for var_name in all_results:
    result_variables.append(globals()[var_name])

# Now, result_variables contains the variables with names specified in all_results
#print(result_variables)

all_results = result_variables
new_persons_results = result_variables


# # Define the blood test results for sixteen persons
# person1_results = [80, 90, 100, 125, 120, 0]
# person2_results = [95, 105, 115, 110, 135, 1]
# person3_results = [110, 120, 130, 140, 150, 0]
# person4_results = [100, 110, 120, 130, 140, 1]
# person5_results = [105, 115, 100, 105, 110, 0]
# person6_results = [90, 110, 115, 95, 120, 1]
# person7_results = [116, 99, 106, 105, 119, 0]
# person8_results = [111, 93, 118, 118, 107, 1]
# person9_results = [107, 97, 105, 119, 98, 0]
# person10_results = [92, 108, 90, 117, 111, 1]
# person11_results = [118, 105, 103, 118, 99, 0]
# person12_results = [97, 115, 101, 101, 113, 1]
# person13_results = [95, 111, 93, 112, 120, 0]
# person14_results = [100, 112, 118, 109, 103, 1]
# person15_results = [113, 91, 94, 93, 99, 0]
# person16_results = [103, 92, 95, 110, 98, 1]

# # Combine the results into a list
# all_results = [person1_results, person2_results, person3_results, person4_results,
#                person5_results, person6_results, person7_results, person8_results,
#                person9_results, person10_results, person11_results, person12_results,
#                person13_results, person14_results, person15_results, person16_results]


# #all_results = [person1_results, person2_results]

labels = [results[-1] for results in all_results]

# Remove the last column (the label) from the dataset
data = [results[:-1] for results in all_results]

# Number of rows for training and testing
num_training_rows = 100
num_testing_rows = 100

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = data[:num_training_rows], data[:num_testing_rows], labels[:num_training_rows], labels[:num_testing_rows]

# Convert to NumPy arrays
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

# Normalize the data, treating -1 as a missing-value sentinel.
# Note: the original X_train[X_train != -1] flattens the array, so np.min/np.max
# with axis=0 returned a single global value instead of per-column values;
# masking with NaN keeps the per-column semantics.
X_train_masked = np.where(X_train == -1, np.nan, X_train)
min_values = np.nanmin(X_train_masked, axis=0)
max_values = np.nanmax(X_train_masked, axis=0)
X_train_normalized = (X_train - min_values) / (max_values - min_values)
X_train_normalized[X_train == -1] = 0  # Set -1 entries to 0 after normalization

X_test_normalized = (X_test - min_values) / (max_values - min_values)
X_test_normalized[X_test == -1] = 0  # Set -1 entries to 0 after normalization
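
# Added sketch (an alternative using scikit-learn, not the author's code):
# MinMaxScaler fits per-column minima/maxima on the training data and reuses
# them for the test data. It does NOT reproduce the -1 sentinel handling above;
# it is shown only as a cross-check for the column-wise normalization itself.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_normalized_check = scaler.fit_transform(X_train)
X_test_normalized_check = scaler.transform(X_test)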

# Improved model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(len(X_train[0]),)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Adam optimizer with a learning-rate scheduler
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3,
    decay_steps=10000,
    decay_rate=0.9
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

# Compile the model
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

# Lists to store accuracy values
accuracy_history = []


# Create images for the training data
image_training = np.zeros((num_training_rows, len(X_train[0]) + 1, 3), dtype=np.uint8)

min_pixel_value = np.min(X_train_normalized)
max_pixel_value = np.max(X_train_normalized)

# Populate image_training with consistent gray and red/green colors based on the labels in the last column
for i, label in enumerate(y_train):
    for j in range(len(X_train[0])):
        pixel_value = int(np.interp(X_train_normalized[i][j], [min_pixel_value, max_pixel_value], [0, 255]))
        image_training[i, j] = np.array([pixel_value] * 3)
    image_training[i, -1] = np.array([128, 128, 128])
    if label == 0:
        image_training[i, -1] = np.array([0, 128, 0])
    elif label == 1:
        image_training[i, -1] = np.array([255, 0, 0])


from tqdm.notebook import tqdm_notebook


###INSERT DATA FROM NEW PATIENTS HERE


# Number of training epochs (the progress bar below tracks them)
epochs = 1390
# Assuming 'new_persons_results' is a list of new persons, where each person is represented as a list of features
new_persons_results = [
   # [101, 112],
   # [0.54422416, 0.02778176],
   # [22.57372914, 17.96922325],
#    [22.57372914, 17.96922325]
    # Add more new persons as needed
#          [23.65780072, 18.8599168 ],
#          [22.57372914, 17.96922325],
#          [32.55342397, 29.46365141],
#          [ 6.71803504, 25.70466547],
#          [ 6.71803504, 25.70466547],
#          [14.40191857, 16.77085649],
#          [17.45790731, 21.76521471],
#          [2110.02796947, 73.45445955],
#          [30.29513837, 62.90111289],
#          [15.1289778,  32.40267702],

 [23.65780072, 18.8599168 ],
 [22.57372914, 17.96922325],
 [32.55342397, 29.46365141],
 [ 6.71803504, 25.70466547],
 [14.40191857, 16.77085649],
 [17.45790731, 21.76521471],
 [20.02796947, 73.45445955],
 [26.2042, 10.6782],
 [35.7258, 12.8027],
 [21.2, 7.8],

# [23.657800719276743,18.859916797201468,0],
# [22.573729142097473,17.96922325097786,0],
# [32.55342396968757,29.463651408558803,0],
# [6.718035041529263,25.704665468161718,2],
# [14.401918566243225,16.770856492924658,0],
# [17.457907312962234,21.76521470574044,0],
# [20.02796946568093,73.45445954770891,2],

]

import sys

for epoch in tqdm_notebook(range(epochs)):
    history = model.fit(X_train_normalized, np.array(y_train), epochs=1, verbose=0, shuffle=False)
    accuracy_history.append(history.history['accuracy'][0])

    if epoch == 1:
        # Normalize the testing data
        X_test_normalized = (X_test - min_values) / (max_values - min_values)
        y_pred_after_2nd_epoch = model.predict(X_test_normalized)
        y_pred_binary_after_2nd_epoch = [1 if pred >= 0.5 else 0 for pred in y_pred_after_2nd_epoch]
        image_testing_before_2nd_epoch = create_image(X_test_normalized, y_pred_binary_after_2nd_epoch, label_colors_testing)

    if epoch >= epochs-1:
        print(f"HERE HERE Epoch: {epoch}, Epochs: {epochs}\n")
        sys.stdout.flush()

        # Iterate through new persons
        for idx, personNEW_results in enumerate(new_persons_results, start=0):
            # Ensure that personNEW_results has the same number of features as the model expects
            assert len(personNEW_results) == len(X_train[0]), "Mismatch in the number of features."

            personNEW_results_normalized = (np.array(personNEW_results) - min_values) / (max_values - min_values)

            personNEW_prediction = model.predict(np.array([personNEW_results_normalized]))
            personNEW_label = 1 if personNEW_prediction >= 0.5 else 0
            y_pred_after_50_epochs = model.predict(X_test_normalized)
            y_pred_binary_after_50_epochs = [1 if pred >= 0.5 else 0 for pred in y_pred_after_50_epochs]
            image_testing_after_50_epochs = create_image(X_test_normalized, y_pred_binary_after_50_epochs, label_colors_testing)

            # Create an image for the new person
            image_personNEW = create_imageN([personNEW_results_normalized], [personNEW_label], label_colors)

            # Display the images
            plt.figure(figsize=(5, 5))
            plt.imshow(image_personNEW)
            plt.title(f"New Person {idx}\nLabel: {personNEW_label}, Prediction: {personNEW_prediction},personNEW_results: {personNEW_results}")
            plt.axis("off")
            plt.show()


# Display the images
plt.figure(figsize=(25, 15))
plt.subplot(2, 2, 1)
plt.imshow(image_training)
plt.title("Training Data")
plt.axis("off")

plt.subplot(2, 2, 2)
plt.imshow(image_testing_before_2nd_epoch)
plt.title("Testing Data (2nd Epoch)")
plt.axis("off")

plt.subplot(2, 2, 3)
plt.imshow(image_testing_after_50_epochs)
plt.title(f"Testing Data ({epochs} Epochs)")
plt.axis("off")

plt.subplot(2, 2, 4)
plt.imshow(image_personNEW)
plt.title(f"New Person\nLabel: {personNEW_label},[{personNEW_prediction}]")
plt.axis("off")

# Plot accuracy history
plt.figure(figsize=(12, 5))
plt.plot(range(1, epochs + 1), accuracy_history, marker='o')
plt.title('Accuracy Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid()

# Print normalized data
print("Normalized PersonNEW Data:")
print(personNEW_results_normalized)

  838. print("ZZ")
  839. personNEW_results_array = np.array(personNEW_results)
  840.  
  841. # Výpočet min a max hodnot z trénovacích dat (ignorování hodnot -1)
  842. min_values = np.min(X_train[X_train != -1], axis=0)
  843. max_values = np.max(X_train[X_train != -1], axis=0)
  844.  
  845. # Výpis pro kontrolu
  846. print("Min values:", min_values)
  847. print("Max values:", max_values)
  848. print("Person NEW results (before normalization):", personNEW_results_array)
  849.  
  850. # Normalizace nových hodnot (s ohledem na rozsah trénovacích dat)
  851. personNEW_results_normalized = (personNEW_results_array - min_values) / (max_values - min_values)
  852.  
  853. # Výpis pro kontrolu
  854. print("Person NEW results (after normalization):", personNEW_results_normalized)
  855.  
  856. print("VV")


plt.show()

print("X_train before normalization:")
print(X_train)
print("X_test before normalization:")
print(X_test)

import seaborn as sns


print("KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK")
print(X_test)
print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
print(X_train)
print("LLLLLLLLLLLLLLLLLLLLLLLLLLLLL")


# y_pred_binary = [1 if pred >= 0.5 else 0 for pred in model.predict(X_test_normalized)]

# # Create confusion matrix
# conf_matrix = confusion_matrix(y_train, y_pred_binary)
# print(conf_matrix)


from sklearn.metrics import confusion_matrix
from tensorflow.keras.utils import to_categorical

np.set_printoptions(threshold=np.inf, precision=4, suppress=True)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = data[:num_training_rows], data[:num_testing_rows], labels[:num_training_rows], labels[:num_testing_rows]

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
import tensorflow as tf
import seaborn as sns

# Assuming data splitting and model definition have been done correctly

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
print("Training Start")
for epoch in tqdm_notebook(range(1000), desc="Training Progress"):
    model.fit(np.array(X_train_normalized), np.array(y_train), epochs=1, verbose=0)
print("Training Complete")

# Generate predictions from the model
predictions = (model.predict(X_test_normalized) > 0.5).astype(int)

# Convert y_test to a numpy array and then to binary labels
y_test_array = np.array(y_test)  # Convert y_test to a numpy array
y_test_binary = (y_test_array > 0.5).astype(int)  # Convert to binary

# Compute the confusion matrix
conf_matrix = confusion_matrix(y_test_binary, predictions)

# Evaluate the model's performance
accuracy = accuracy_score(y_test_binary, predictions)
precision = precision_score(y_test_binary, predictions)
recall = recall_score(y_test_binary, predictions)
f1 = f1_score(y_test_binary, predictions)

# Display the confusion matrix
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix')
plt.show()

print(f"Accuracy: {accuracy:.4f}")
print(f"Precision: {precision:.4f}")
print(f"Recall: {recall:.4f}")
print(f"F1 Score: {f1:.4f}")

print(f"Confusion Matrix:\n{conf_matrix}")

import random

def find_best_pair(min_val, max_val, num_features, model, min_values, max_values):
    best_pair = None
    best_prediction = 1
    for _ in range(1000):  # Number of iterations to find the best pair
        new_data = np.random.uniform(min_val, max_val, num_features)
        new_data_normalized = (new_data - min_values) / (max_values - min_values)

        # Suppress model output
        tf.get_logger().setLevel('ERROR')
        with tf.device('/CPU:0'):  # Ensure to run on CPU to minimize unwanted logs
            prediction = model.predict(np.array([new_data_normalized]), verbose=0)[0][0]
        tf.get_logger().setLevel('INFO')

        if prediction < best_prediction:
            best_prediction = prediction
            best_pair = new_data
    return best_pair, best_prediction


best_pair, best_prediction = find_best_pair(min_values, max_values, len(X_train[0]), model, min_values, max_values)


def find_worst_pair(min_val, max_val, num_features, model, min_values, max_values):
    worst_pair = None
    worst_prediction = 0
    for _ in range(1000):  # Number of iterations to find the worst pair
        new_data = np.random.uniform(min_val, max_val, num_features)
        new_data_normalized = (new_data - min_values) / (max_values - min_values)

        # Suppress model output
        tf.get_logger().setLevel('ERROR')
        with tf.device('/CPU:0'):  # Ensure to run on CPU to minimize unwanted logs
            prediction = model.predict(np.array([new_data_normalized]), verbose=0)[0][0]
        tf.get_logger().setLevel('INFO')

        if prediction > worst_prediction:
            worst_prediction = prediction
            worst_pair = new_data
    return worst_pair, worst_prediction


worst_pair, worst_prediction = find_worst_pair(min_values, max_values, len(X_train[0]), model, min_values, max_values)


print(f"Best Pair: {best_pair}, Best Prediction: {best_prediction}")
print(f"Worst Pair: {worst_pair}, Worst Prediction: {worst_prediction}")

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import tensorflow as tf
from tqdm.notebook import tqdm_notebook

# Your data
# A = [
#     [23.657800719276743,18.859916797201468,0,0],
#     [22.573729142097473,17.96922325097786,0,0],
#     [32.55342396968757,29.463651408558803,0,0],
#     [6.718035041529263,25.704665468161718,2,1],
#     [14.401918566243225,16.770856492924658,0,0],
#     [17.457907312962234,21.76521470574044,0,0],
#     [20.02796946568093,73.45445954770891,2,1],
#     [30.295138369778076,62.901112886193246,2,1],
#     [15.128977804449633,32.40267702110393,0,0],
#     [30.179457395820013,58.982492125646104,2,1],
#     [28.01649701854089,63.92781357637711,2,1],
#     [16.791838457871147,42.33482314089884,0,0],
#     [10.583694293380976,19.61926728942497,0,0],
#     [26.634447074406467,91.96624817360987,2,1],
#     [26.217868623367643,36.400293587062976,0,0],
#     [17.689396788624936,60.79797114006423,2,1],
#     [33.17193822527976,66.75277364959176,2,1],
#     [23.793952755709153,22.57501437360518,0,0],
#     [37.844484133572124,36.320623921263156,2,1],
#     [35.16135413357336,33.16395078484642,2,1],
#     [29.380894071974286,25.28297332192533,0,0],
#     [31.65893504663792,73.13603413708854,2,1],
# ]

# # Convert to a NumPy array
# A = np.array(A)

# Extract the data and the labels
X = A[:, :-1]  # All columns except the last are inputs
y = A[:, -1]  # The last column is the label

# Normalize the data
min_values = np.min(X, axis=0)
max_values = np.max(X, axis=0)
X_normalized = (X - min_values) / (max_values - min_values)

# Split the data into training and testing sets
# (the whole set is used for training here, so the test slice is empty)
num_training_rows = len(X)
X_train_normalized = X_normalized[:num_training_rows]
y_train = y[:num_training_rows]
X_test_normalized = X_normalized[num_training_rows:]
y_test = y[num_training_rows:]

# Define and compile the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(len(X_train_normalized[0]),)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
epochs = 138
accuracy_history = []

for epoch in tqdm_notebook(range(epochs)):
    history = model.fit(X_train_normalized, np.array(y_train), epochs=1, verbose=0, shuffle=False)
    accuracy_history.append(history.history['accuracy'][0])

# Apply PCA
pca = PCA(n_components=2)  # Reduce to 2 components
X_pca = pca.fit_transform(X_normalized)

# Visualize the results
plt.figure()
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y)
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.title('PCA on your data')
plt.show()
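
# Added: print how much variance the two principal components capture
# (explained_variance_ratio_ is standard scikit-learn PCA API).
print("Explained variance ratio:", pca.explained_variance_ratio_)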

##################### LDA

X = A[:, :-1]  # All columns except the last are inputs
y = A[:, -1]  # The last column is the label

# Normalize the data
min_values = np.min(X, axis=0)
max_values = np.max(X, axis=0)
X_normalized = (X - min_values) / (max_values - min_values)

# Split the data into training and testing sets (again the whole set is used for training)
num_training_rows = A.shape[0]

X_train_normalized = X_normalized[:num_training_rows]
y_train = y[:num_training_rows]
X_test_normalized = X_normalized[num_training_rows:]
y_test = y[num_training_rows:]

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA

# Train the model
epochs = 138
accuracy_history = []

for epoch in tqdm_notebook(range(epochs)):
    history = model.fit(X_train_normalized, np.array(y_train), epochs=1, verbose=0, shuffle=False)
    accuracy_history.append(history.history['accuracy'][0])

# Apply LDA; with two classes at most one discriminant component exists
lda = LDA(n_components=1)
X_lda = lda.fit_transform(X_train_normalized, y_train)

# Visualize the LDA results
plt.figure()
plt.scatter(X_lda[:, 0], np.zeros_like(X_lda), c=y_train)
plt.xlabel('First discriminant component')
plt.title('Supervised LDA')
plt.show()
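
# Added sketch: a new (already normalized) feature vector can be projected onto
# the same discriminant axis with the fitted LDA. Here the first training row
# stands in as a hypothetical new patient.
example_point = X_train_normalized[0:1]  # shape (1, n_features)
print("LDA projection of example point:", lda.transform(example_point))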

###################################################################################################################


import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import recall_score, confusion_matrix, accuracy_score, precision_score, f1_score
import seaborn as sns

# Your data: the same commented-out sample matrix A as in the PCA section above

# Split into input data (X) and target variable (y)
X = A[:, :-1]
y = A[:, -1]

# Split into training and testing sets (for simplicity, the whole set is used as both here)
X_train, y_train = X, y
X_test, y_test = X, y

# Normalize the data
min_values = np.min(X_train, axis=0)
max_values = np.max(X_train, axis=0)
X_train_normalized = (X_train - min_values) / (max_values - min_values)
X_test_normalized = (X_test - min_values) / (max_values - min_values)

# Define the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(X_train_normalized.shape[1],)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Adam optimizer with a learning-rate scheduler
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3,
    decay_steps=10000,
    decay_rate=0.9
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

# Compile the model
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', tf.keras.metrics.Recall()])

# Train the model
history = model.fit(X_train_normalized, y_train, epochs=50, verbose=0, shuffle=False)

# Predictions
y_pred_prob = model.predict(X_test_normalized)
y_pred = (y_pred_prob > 0.5).astype(int)

# Compute the metrics
recall = recall_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)

# Evaluate the model's performance
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

# Print the metrics
print(f"Recall: {recall:.4f}")
print(f"Accuracy: {accuracy:.4f}")
print(f"Precision: {precision:.4f}")
print(f"F1 Score: {f1:.4f}")
print(f"Confusion Matrix:\n{conf_matrix}")

# Display the confusion matrix
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix')
plt.show()

mu = np.mean(X_train, axis=0)
sigma = np.std(X_train, axis=0)

# Standardize each column separately
X_train_standardized = (X_train - mu) / sigma

# Apply the same standardization to the test data:
X_test_standardized = (X_test - mu) / sigma

# Printout for checking
print("Means (mu):", mu)
print("Standard deviations (sigma):", sigma)