1 # based on https://www.kdnuggets.com/2019/08/numpy-neural-networks-computational-graphs.html
4 # use a constant seed to keep things reproducible
rg = np.random.default_rng(1)  # shared module-level RNG; fixed seed keeps weight init reproducible (assumes numpy imported as np above — TODO confirm)
10 ini_type: initialization type for weight parameters: plain, xavier, or he
def __init__(self, input_shape, n_out, ini_type="plain"):
    """Set up the parameters of a fully connected (linear) layer.

    Args:
        input_shape: (n_in, m) tuple — features per example and number of examples.
        n_out: number of neurons (rows of W) in this layer.
        ini_type: weight initialization scheme: 'plain', 'xavier', or 'he'.

    Raises:
        ValueError: if ini_type is not one of the supported schemes.
    """
    n_in = input_shape[0]  # number of input features feeding this layer
    if ini_type == 'plain':
        self.W = rg.standard_normal(size=(n_out, n_in)) * 0.01  # set weights 'W' to small random gaussian
    elif ini_type == 'xavier':
        self.W = rg.standard_normal(size=(n_out, n_in)) / (np.sqrt(n_in))  # set variance of W to 1/n
    elif ini_type == 'he':
        # Good when ReLU used in hidden layers
        # Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
        # Kaiming He et al. (https://arxiv.org/abs/1502.01852)
        # http://cs231n.github.io/neural-networks-2/#init
        self.W = rg.standard_normal(size=(n_out, n_in)) * np.sqrt(2 / n_in)  # set variance of W to 2/n
    else:
        # previously an unknown ini_type silently left self.W unset, causing a
        # confusing AttributeError later; fail fast instead
        raise ValueError(f"unknown ini_type {ini_type!r}; expected 'plain', 'xavier', or 'he'")
    self.b = np.zeros((n_out, 1))  # biases start at zero (one per output neuron)
    self.shape = (self.W.shape[0], input_shape[1])  # (n_out, m) — output shape of this layer
def forward(self, A_prev):
    """Affine forward pass: return W @ A_prev + b.

    Caches A_prev on the instance because backward() needs it to compute dW.
    (Previously the parameter was never stored, so self.A_prev was stale/unset.)
    """
    self.A_prev = A_prev
    return self.W @ self.A_prev + self.b
def backward(self, upstream_grad, learning_rate=0.1):
    """Backprop through the linear layer and apply one SGD update.

    Args:
        upstream_grad: gradient of the cost w.r.t. this layer's output.
        learning_rate: SGD step size.

    Returns:
        Gradient of the cost w.r.t. this layer's input (A_prev), computed
        with the pre-update weights, so the caller can keep chaining backward.
    """
    # derivative of Cost w.r.t W
    dW = upstream_grad @ self.A_prev.T
    # derivative of Cost w.r.t b, sum across rows
    db = np.sum(upstream_grad, axis=1, keepdims=True)
    # derivative of Cost w.r.t A_prev — must be computed BEFORE W is updated
    dA_prev = self.W.T @ upstream_grad
    # gradient-descent parameter update
    self.W -= learning_rate * dW
    self.b -= learning_rate * db
    # previously dA_prev was computed but never returned, breaking the
    # backprop chain in train() which does `cur = layer.backward(cur, ...)`
    return dA_prev
def __init__(self, shape):
    """Sigmoid activation layer.

    Args:
        shape: expected (n, m) shape of the pre-activation input Z
               (checked by the assert in forward()).
    """
    self.shape = shape

def forward(self, Z):
    """Apply the element-wise logistic sigmoid 1 / (1 + exp(-Z)).

    Caches the activations on self.A because backward() needs them
    for the local gradient A * (1 - A).
    """
    assert Z.shape == self.shape
    self.A = 1 / (1 + np.exp(-Z))  # compute activations
    return self.A
def backward(self, upstream_grad, learning_rate=0.1):
    """Backprop through the sigmoid.

    Multiplies the upstream gradient by the local derivative
    dA/dZ = A * (1 - A) (using the activations cached in forward);
    the result is passed back to the preceding Linear layer.
    The learning_rate argument is accepted for interface symmetry
    with Linear.backward but is not used (no parameters here).
    """
    local_grad = self.A * (1 - self.A)
    return upstream_grad * local_grad
def label_vectors(labels, n):
    """Convert integer class labels into one-hot column vectors.

    Args:
        labels: 1-D array of integer labels in range [0, n).
        n: number of classes (rows of the result).

    Returns:
        (n, labels.size) array where column i is the one-hot encoding
        of labels[i]. (Previously the hot-bit assignment and the return
        were missing, so the function produced None.)
    """
    y = np.zeros((n, labels.size))
    for i, l in enumerate(labels):
        y[l, i] = 1
    return y
def forward(layers, X):
    """Run a full forward pass, feeding X through each layer in order.

    Args:
        layers: ordered list of layer objects exposing forward(activations).
        X: input batch; X.shape[1] (number of examples) must match the
           batch dimension stored in the first layer's shape.

    Returns:
        Activations of the final layer.
        (Previously `cur` was never initialized, the loop header was
        missing, and nothing was returned.)
    """
    assert X.shape[1] == layers[0].shape[1], f'input length {X.shape[1]} does not match first layer width {layers[0].shape[1]}'
    cur = X
    for layer in layers:
        cur = layer.forward(cur)
    return cur
def classify(y):
    """Turn raw network outputs into predictions.

    Args:
        y: (n_classes, m) output activations, one column per example.

    Returns:
        Tuple of (predicted class index per column, highest activation
        per column). (The `def` line was missing, leaving the comment
        and return statement orphaned; the name/contract is grounded in
        accuracy()'s call `classify(output)[0]`.)
    """
    # the recognized digit is the index of the highest-valued output neuron
    return np.argmax(y, axis=0), np.max(y, axis=0)
def accuracy(layers, X, labels):
    '''Count percentage of test inputs which are being recognized correctly'''
    assert X.shape[1] == layers[0].shape[1], f'input length {X.shape[1]} does not match first layer width {layers[0].shape[1]}'
    assert layers[0].shape[1] == labels.size, f'first layer width {layers[0].shape[1]} does not match number of labels {labels.size}'
    # run the network, pick the winning class per example, compare to ground truth
    predictions = classify(forward(layers, X))[0]
    hits = np.sum(predictions == labels)
    return 100 * (hits / predictions.size)
def cost_sqe(Y, output):
    '''
    This function computes and returns the Cost and its derivative.
    This function uses the Squared Error Cost function -> (1/2m)*sum(Y - output)^2

    Args:
        Y: label vectors of data, shape (n_classes, m)
        output: Predictions (activations) from the last layer, the output layer

    Returns:
        cost: The Squared Error Cost result (scalar)
        dOutput: gradient of Cost w.r.t the output
    '''
    # (previously the docstring text was unquoted prose, m was undefined,
    # and the function had no return statement even though train() unpacks
    # two values from it)
    m = Y.shape[1]  # number of examples in the batch
    cost = (1 / (2 * m)) * np.sum(np.square(Y - output))
    cost = np.squeeze(cost)  # remove extraneous dimensions to give just a scalar
    dOutput = -1 / m * (Y - output)  # derivative of the squared error cost function
    return cost, dOutput
def train(layers, X, Y, learning_rate=0.1, cost_fn=cost_sqe):
    """One training step: forward pass, cost, then backprop through all layers.

    Args:
        layers: ordered list of layer objects with forward()/backward().
        X: input batch, one column per example.
        Y: one-hot label vectors matching the network output shape.
        learning_rate: SGD step size passed to each layer's backward().
        cost_fn: callable (Y, output) -> (cost, dOutput); defaults to cost_sqe.
    """
    output = forward(layers, X)
    cost, dOutput = cost_fn(Y, output)
    # seed backprop with the cost gradient w.r.t. the network output
    # (previously `cur` was used in the loop without ever being assigned)
    cur = dOutput
    for layer in reversed(layers):
        cur = layer.backward(cur, learning_rate)