## CHARACTER LEVEL LANGUAGE MODEL (RNN)


import numpy as np
from utils import *
import random

data = open('dinos.txt', 'r').read()
data = data.lower()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))

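# The rest of the paste references char_to_ix and ix_to_char, which are never
# defined in the snippet. A minimal sketch of the usual mappings is added here,
# assuming (as is common for this kind of character model) that the vocabulary
# is the sorted set of characters, with '\n' acting as an end-of-name token:
char_to_ix = {ch: i for i, ch in enumerate(sorted(chars))}
ix_to_char = {i: ch for i, ch in enumerate(sorted(chars))}
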
### GRADED FUNCTION: clip

def clip(gradients, maxValue):
    '''
    Clips the gradients' values between minimum and maximum.

    Arguments:
    gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
    maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue

    Returns:
    gradients -- a dictionary with the clipped gradients.
    '''

    dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']

    ### START CODE HERE ###
    # Clip to mitigate exploding gradients; loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)
    for gradient in [dWax, dWaa, dWya, db, dby]:
        np.clip(gradient, -maxValue, maxValue, out=gradient)
    ### END CODE HERE ###

    gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}

    return gradients

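# Quick sanity check for clip (an illustrative example, not part of the
# original paste): after clipping random gradients to [-10, 10], every entry
# of every array should lie within that range.
np.random.seed(3)
dWax, dWaa, dWya = np.random.randn(5, 3) * 10, np.random.randn(5, 5) * 10, np.random.randn(2, 5) * 10
db, dby = np.random.randn(5, 1) * 10, np.random.randn(2, 1) * 10
test_gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
test_gradients = clip(test_gradients, 10)
print("max absolute value after clipping:", max(np.abs(g).max() for g in test_gradients.values()))
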
# GRADED FUNCTION: sample

def sample(parameters, char_to_ix, seed):
    """
    Sample a sequence of characters according to the sequence of probability distributions output by the RNN.

    Arguments:
    parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
    char_to_ix -- python dictionary mapping each character to an index.
    seed -- used for grading purposes. Do not worry about it.

    Returns:
    indices -- a list of length n containing the indices of the sampled characters.
    """

    # Retrieve parameters and relevant shapes from "parameters" dictionary
    Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
    vocab_size = by.shape[0]
    n_a = Waa.shape[1]

    ### START CODE HERE ###
    # Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line)
    x = np.zeros((vocab_size, 1))
    # Step 1': Initialize a_prev as zeros (≈1 line)
    a_prev = np.zeros((n_a, 1))

    # Create an empty list of indices; this list will contain the indices of the characters to generate (≈1 line)
    indices = []

    # idx is used to detect a newline character; we initialize it to -1
    idx = -1

    # Loop over time-steps t. At each time-step, sample a character from a probability distribution and append
    # its index to "indices". We'll stop if we reach 50 characters (which should be very unlikely with a well
    # trained model), which helps debugging and prevents entering an infinite loop.
    counter = 0
    newline_character = char_to_ix['\n']

    while (idx != newline_character and counter != 50):

        # Step 2: Forward propagate x using the equations (1), (2) and (3)
        a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
        z = np.dot(Wya, a) + by
        y = softmax(z)

        # for grading purposes
        np.random.seed(counter + seed)

        # Step 3: Sample the index of a character within the vocabulary from the probability distribution y
        idx = np.random.choice(range(vocab_size), p=y.ravel())

        # Append the index to "indices"
        indices.append(idx)

        # Step 4: Overwrite the input character with the one corresponding to the sampled index.
        x = np.zeros((vocab_size, 1))
        x[idx] = 1

        # Update "a_prev" to be "a"
        a_prev = a

        # for grading purposes
        seed += 1
        counter += 1

    ### END CODE HERE ###

    if (counter == 50):
        indices.append(char_to_ix['\n'])

    return indices

## Function Test
np.random.seed(2)
_, n_a = 20, 100
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}


indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:", indices)
print("list of sampled characters:", [ix_to_char[i] for i in indices])

# The following helpers are assumed to be provided by utils:

# def rnn_forward(X, Y, a_prev, parameters):
#     """ Performs the forward propagation through the RNN and computes the cross-entropy loss.
#     It returns the loss' value as well as a "cache" storing values to be used in the backpropagation."""
#     return loss, cache

# def rnn_backward(X, Y, parameters, cache):
#     """ Performs the backward propagation through time to compute the gradients of the loss with respect
#     to the parameters. It also returns all the hidden states."""
#     return gradients, a

# def update_parameters(parameters, gradients, learning_rate):
#     """ Updates parameters using the Gradient Descent Update Rule."""
#     return parameters

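# For reference, a minimal sketch of what an update_parameters helper with the
# interface above might look like (plain gradient descent). This is an
# illustrative assumption, not the actual implementation shipped in utils,
# so it is left commented out:
# def update_parameters(parameters, gradients, learning_rate):
#     parameters['Wax'] -= learning_rate * gradients['dWax']
#     parameters['Waa'] -= learning_rate * gradients['dWaa']
#     parameters['Wya'] -= learning_rate * gradients['dWya']
#     parameters['b']   -= learning_rate * gradients['db']
#     parameters['by']  -= learning_rate * gradients['dby']
#     return parameters
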
# GRADED FUNCTION: optimize

def optimize(X, Y, a_prev, parameters, learning_rate=0.01):
    """
    Execute one step of the optimization to train the model.

    Arguments:
    X -- list of integers, where each integer is a number that maps to a character in the vocabulary.
    Y -- list of integers, exactly the same as X but shifted one index to the left.
    a_prev -- previous hidden state.
    parameters -- python dictionary containing:
                    Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
                    Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
                    Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
                    b -- Bias, numpy array of shape (n_a, 1)
                    by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
    learning_rate -- learning rate for the model.

    Returns:
    loss -- value of the loss function (cross-entropy)
    gradients -- python dictionary containing:
                    dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
                    dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
                    dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
                    db -- Gradients of bias vector, of shape (n_a, 1)
                    dby -- Gradients of output bias vector, of shape (n_y, 1)
    a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
    """

    ### START CODE HERE ###

    # Forward propagate through time (≈1 line)
    loss, cache = rnn_forward(X, Y, a_prev, parameters)

    # Backpropagate through time (≈1 line)
    gradients, a = rnn_backward(X, Y, parameters, cache)

    # Clip your gradients between -5 (min) and 5 (max) (≈1 line)
    gradients = clip(gradients, 5)

    # Update parameters (≈1 line)
    parameters = update_parameters(parameters, gradients, learning_rate)

    ### END CODE HERE ###

    return loss, gradients, a[len(X)-1]

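# Illustrative quick check for optimize (not part of the original paste). It
# assumes rnn_forward, rnn_backward and update_parameters are imported from
# utils with the interfaces sketched in the commented stubs above; the data
# below is random and only meant to exercise one optimization step.
np.random.seed(1)
n_a = 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
X, Y = [12, 3, 5, 11, 22, 3], [4, 14, 11, 22, 25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate=0.01)
print("Loss =", loss)
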
## GRADED FUNCTION: model

def model(data, ix_to_char, char_to_ix, num_iterations=35000, n_a=50, dino_names=7, vocab_size=27):
    """
    Trains the model and generates dinosaur names.

    Arguments:
    data -- text corpus
    ix_to_char -- dictionary that maps the index to a character
    char_to_ix -- dictionary that maps a character to an index
    num_iterations -- number of iterations to train the model for
    n_a -- number of units of the RNN cell
    dino_names -- number of dinosaur names you want to sample at each iteration.
    vocab_size -- number of unique characters found in the text, size of the vocabulary

    Returns:
    parameters -- learned parameters
    """

    # Retrieve n_x and n_y from vocab_size
    n_x, n_y = vocab_size, vocab_size

    # Initialize parameters
    parameters = initialize_parameters(n_a, n_x, n_y)

    # Initialize loss (this is required because we want to smooth our loss; don't worry about it)
    loss = get_initial_loss(vocab_size, dino_names)

    # Build list of all dinosaur names (training examples).
    with open("dinos.txt") as f:
        examples = f.readlines()
    examples = [x.lower().strip() for x in examples]

    # Shuffle list of all dinosaur names
    np.random.seed(0)
    np.random.shuffle(examples)

    # Initialize the hidden state of your RNN
    a_prev = np.zeros((n_a, 1))

    # Optimization loop
    for j in range(num_iterations):

        ### START CODE HERE ###

        # Define one training example (X, Y): X is the current name prefixed with None
        # (interpreted as a zero input vector), and Y is X shifted left with '\n' appended. (≈ 2 lines)
        index = j % len(examples)
        X = [None] + [char_to_ix[ch] for ch in examples[index]]
        Y = X[1:] + [char_to_ix["\n"]]

        # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
        # Choose a learning rate of 0.01
        curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate=0.01)

        ### END CODE HERE ###

        # Keep the loss smooth with a running average (a "latency trick" that makes training easier to monitor).
        loss = smooth(loss, curr_loss)

        # Every 2000 iterations, generate dino_names sample names with sample() to check that the model is learning properly
        if j % 2000 == 0:

            print('Iteration: %d, Loss: %f' % (j, loss) + '\n')

            # The number of dinosaur names to print
            seed = 0
            for name in range(dino_names):

                # Sample indices and print them
                sampled_indices = sample(parameters, char_to_ix, seed)
                print_sample(sampled_indices, ix_to_char)

                seed += 1  # To get the same result for grading purposes, increment the seed by one.

            print('\n')

    return parameters

## RUNNING THE MODEL
parameters = model(data, ix_to_char, char_to_ix)
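
# After training (an illustrative addition, not part of the original paste):
# the learned parameters can be reused with sample() and print_sample() to
# generate a few more names; the seed values here are arbitrary.
print("A few names sampled from the trained model:")
for seed in range(5):
    print_sample(sample(parameters, char_to_ix, seed), ix_to_char)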