# imgclsfcn | joelnazarene | Apr 26th, 2021 | Python
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
import numpy as np

# load the raw dataset (28x28 grayscale images, integer labels)
(trainX, trainy), (testX, testy) = fashion_mnist.load_data()

# alternative loader: adds a channel dimension and one-hot encodes the labels
def load_dataset():
    # load dataset
    (trainX, trainY), (testX, testY) = fashion_mnist.load_data()
    # reshape dataset to have a single channel
    trainX = trainX.reshape((trainX.shape[0], 28, 28, 1))
    testX = testX.reshape((testX.shape[0], 28, 28, 1))
    # one hot encode target values
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    return trainX, trainY, testX, testY
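
# Added usage sketch (not part of the original flow): load_dataset() above is an
# alternative loader that returns CNN-ready arrays; the SVM pipeline below works on
# the raw (28, 28) images and integer labels instead, so the call stays commented out.
# trainX_cnn, trainY_cnn, testX_cnn, testY_cnn = load_dataset()
# print(trainX_cnn.shape, trainY_cnn.shape)  # expected: (60000, 28, 28, 1) (60000, 10)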

seed = 9

from sklearn.model_selection import StratifiedShuffleSplit

# take a stratified 8% subset of the training data (a single split is enough)
data_split = StratifiedShuffleSplit(n_splits=1, test_size=0.08, random_state=seed)
for train_index, test_index in data_split.split(trainX, trainy):
    split_data_92, split_data_8 = trainX[train_index], trainX[test_index]
    split_label_92, split_label_8 = trainy[train_index], trainy[test_index]

# split the 8% subset again: 70% for training, 30% for testing
train_test_split = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=seed)
for train_index, test_index in train_test_split.split(split_data_8, split_label_8):
    train_data_70, test_data_30 = split_data_8[train_index], split_data_8[test_index]
    train_label_70, test_label_30 = split_label_8[train_index], split_label_8[test_index]

train_data = train_data_70      # final training images
train_labels = train_label_70   # final training labels
test_data = test_data_30        # final test images
test_labels = test_label_30     # final test labels
print('train_data : ', train_data.shape)
print('train_labels : ', train_labels.shape)
print('test_data : ', test_data.shape)
print('test_labels : ', test_labels.shape)
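
# Added sanity check (not in the original script): verify that the stratified splits
# kept the ten classes roughly balanced.
print('train class counts:', np.unique(train_labels, return_counts=True)[1])
print('test class counts: ', np.unique(test_labels, return_counts=True)[1])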

# definition of normalization function: subtract the global mean and divide by the
# global standard deviation of the data
def normalize(data, eps=1e-8):
    data -= data.mean(axis=(0, 1, 2), keepdims=True)
    std = np.sqrt(data.var(axis=(0, 1, 2), ddof=1, keepdims=True))
    std[std < eps] = 1.
    data /= std
    return data

# cast to float64 so the in-place normalization does not truncate to integers
train_data = train_data.astype('float64')
test_data = test_data.astype('float64')
# calling the function
train_data = normalize(train_data)
test_data = normalize(test_data)

# prints the shape of train data and test data
print('train_data: ', train_data.shape)
print('test_data: ', test_data.shape)
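
# Added sanity check (not in the original script): after normalization the global
# mean should be close to 0 and the standard deviation close to 1.
print('train mean/std:', train_data.mean(), train_data.std())
print('test mean/std: ', test_data.mean(), test_data.std())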
  77.  
  78. # Computing whitening matrix
  79.  
  80. train_data_flat = train_data.reshape(train_data.shape[0], -1).T
  81.  
  82. test_data_flat = test_data.reshape(test_data.shape[0], -1).T
  83.  
  84. print('train_data_flat: ',train_data_flat.shape)
  85.  
  86. print('test_data_flat: ',test_data_flat.shape)
  87.  
  88.  
  89.  
  90. train_data_flat_t = train_data_flat.T
  91.  
  92. test_data_flat_t = test_data_flat.T
  93.  
  94.  
  95.  

from sklearn.decomposition import PCA

# n_components specifies the number of components to keep; note that the arrays are
# passed with pixels as rows here, so PCA treats each pixel position as a sample
train_data_pca = PCA(n_components=784).fit_transform(train_data_flat)
test_data_pca = PCA(n_components=784).fit_transform(test_data_flat)

print(train_data_pca.shape)
print(test_data_pca.shape)

train_data_pca = train_data_pca.T
test_data_pca = test_data_pca.T
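
# Added sketch (an assumption, using the usual samples-as-rows orientation): to see
# how much variance a smaller number of components would retain, PCA can instead be
# fit on train_data_flat_t, where each row is one image.
pca_check = PCA(n_components=50).fit(train_data_flat_t)
print('variance retained by 50 components:', pca_check.explained_variance_ratio_.sum())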

# extract SVD features: the 28 singular values of each image
def svdFeatures(input_data):
    svdArray_input_data = []
    size = input_data.shape[0]
    for i in range(0, size):
        # the images are already single-channel grayscale, so no colour conversion is needed
        img = input_data[i]
        U, s, V = np.linalg.svd(img, full_matrices=False)
        S = [s[j] for j in range(28)]
        svdArray_input_data.append(S)
    # build the feature matrix once, after the loop
    svdMatrix_input_data = np.array(svdArray_input_data)
    return svdMatrix_input_data

# apply SVD feature extraction to train and test data (computed here, but the SVM
# below is trained on the flattened pixels, not on these features)
train_data_svd = svdFeatures(train_data)
test_data_svd = svdFeatures(test_data)
print(train_data_svd.shape)
print(test_data_svd.shape)
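
# Added sketch (not part of the original pipeline): as an illustration, a second SVM
# can be trained on the 28 singular values per image to compare against the
# raw-pixel model further below.
from sklearn import svm
clf_svd = svm.SVC(gamma=.001)
clf_svd.fit(np.asarray(train_data_svd), train_labels)
print('SVD-feature score:', clf_svd.score(np.asarray(test_data_svd), test_labels))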

from sklearn import svm

# create an SVM classifier and train it on the flattened pixel data
clf = svm.SVC(gamma=.001, probability=True)
clf.fit(train_data_flat_t, train_labels)

# predict on the held-out test images and report the accuracy
predicted = clf.predict(test_data_flat_t)
score = clf.score(test_data_flat_t, test_labels)
print("score", score)

with open('output.txt', 'w') as file:
    file.write(str(score))
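
# Added sketch (not in the original script): a fuller breakdown of per-class
# precision and recall for the raw-pixel SVM using scikit-learn's classification_report.
from sklearn.metrics import classification_report
print(classification_report(test_labels, predicted))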