@@ -13,31 +13,14 @@
 
 
 class Node:
-    """
-    A node in a computational graph contains the pointer to all its parents.
-    :param val: value of current node
-    :param parents: a container of all parents of current node
-    """
-
-    def __init__(self, val=None, parents=None):
-        if parents is None:
-            parents = []
-        self.val = val
-        self.parents = parents
-
-    def __repr__(self):
-        return "<Node {}>".format(self.val)
-
-
-class NNUnit(Node):
     """
     A single unit of a layer in a neural network
     :param weights: weights between parent nodes and current node
    :param value: value of current node
    """
 
     def __init__(self, weights=None, value=None):
-        super().__init__(value)
+        self.value = value
         self.weights = weights or []
 
 
@@ -47,8 +30,8 @@ class Layer:
     :param size: number of units in the current layer
     """
 
-    def __init__(self, size=3):
-        self.nodes = [NNUnit() for _ in range(size)]
+    def __init__(self, size):
+        self.nodes = [Node() for _ in range(size)]
 
     def forward(self, inputs):
         """Define the operation to get the output of this layer"""
@@ -65,7 +48,7 @@ def forward(self, inputs):
         """Take each value of the inputs to each unit in the layer."""
         assert len(self.nodes) == len(inputs)
         for node, inp in zip(self.nodes, inputs):
-            node.val = inp
+            node.value = inp
         return inputs
 
 
@@ -79,7 +62,7 @@ def forward(self, inputs):
         assert len(self.nodes) == len(inputs)
         res = softmax1D(inputs)
         for node, val in zip(self.nodes, res):
-            node.val = val
+            node.value = val
         return res
 
 
@@ -91,11 +74,11 @@ class DenseLayer(Layer):
     :param activation: (Activation object) activation function
     """
 
-    def __init__(self, in_size=3, out_size=3, activation=None):
+    def __init__(self, in_size=3, out_size=3, activation=Sigmoid):
         super().__init__(out_size)
         self.out_size = out_size
         self.inputs = None
-        self.activation = Sigmoid() if not activation else activation
+        self.activation = activation()
         # initialize weights
         for node in self.nodes:
             node.weights = random_weights(-0.5, 0.5, in_size)
@@ -105,8 +88,8 @@ def forward(self, inputs):
         res = []
         # get the output value of each unit
         for unit in self.nodes:
-            val = self.activation.f(dot_product(unit.weights, inputs))
-            unit.val = val
+            val = self.activation.function(dot_product(unit.weights, inputs))
+            unit.value = val
             res.append(val)
         return res
 
@@ -131,7 +114,7 @@ def forward(self, features):
         for node, feature in zip(self.nodes, features):
             out = conv1D(feature, node.weights)
             res.append(out)
-            node.val = out
+            node.value = out
         return res
 
 
@@ -157,7 +140,7 @@ def forward(self, features):
             out = [max(feature[i:i + self.kernel_size])
                    for i in range(len(feature) - self.kernel_size + 1)]
             res.append(out)
-            self.nodes[i].val = out
+            self.nodes[i].value = out
         return res
 
 
@@ -181,7 +164,7 @@ def init_examples(examples, idx_i, idx_t, o_units):
     return inputs, targets
 
 
-def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=None):
+def stochastic_gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=None):
     """
     Gradient descent algorithm to update the learnable parameters of a network.
     :return: the updated network
@@ -200,6 +183,7 @@ def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1,
             # update weights with gradient descent
             weights = vector_add(weights, scalar_vector_product(-l_rate, gs))
             total_loss += batch_loss
+
             # update the weights of network each batch
             for i in range(len(net)):
                 if weights[i]:
@@ -310,7 +294,7 @@ def BackPropagation(inputs, targets, theta, net, loss):
     # backward pass
     for i in range(h_layers, 0, -1):
         layer = net[i]
-        derivative = [layer.activation.derivative(node.val) for node in layer.nodes]
+        derivative = [layer.activation.derivative(node.value) for node in layer.nodes]
         delta[i] = element_wise_product(previous, derivative)
         # pass to layer i-1 in the next iteration
         previous = matrix_multiplication([delta[i]], theta[i])[0]
@@ -344,7 +328,7 @@ def forward(self, inputs):
         for i in range(len(self.nodes)):
             val = [(inputs[i] - mu) * self.weights[0] / np.sqrt(self.eps + stderr ** 2) + self.weights[1]]
             res.append(val)
-            self.nodes[i].val = val
+            self.nodes[i].value = val
         return res
 
 
@@ -354,15 +338,12 @@ def get_batch(examples, batch_size=1):
         yield examples[i: i + batch_size]
 
 
-def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epochs=100,
-                     optimizer=gradient_descent, batch_size=1, verbose=None):
+def NeuralNetLearner(dataset, hidden_layer_sizes, l_rate=0.01, epochs=1000, batch_size=1,
+                     optimizer=stochastic_gradient_descent, verbose=None):
     """
     Simple dense multilayer neural network.
     :param hidden_layer_sizes: size of hidden layers in the form of a list
     """
-
-    if hidden_layer_sizes is None:
-        hidden_layer_sizes = [4]
     input_size = len(dataset.inputs)
     output_size = len(dataset.values[dataset.target])
 
@@ -376,7 +357,7 @@ def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epoch
     raw_net.append(DenseLayer(hidden_input_size, output_size))
 
     # update parameters of the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=learning_rate,
+    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
                             batch_size=batch_size, verbose=verbose)
 
     def predict(example):
@@ -395,7 +376,8 @@ def predict(example):
     return predict
 
 
-def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, optimizer=gradient_descent, batch_size=1, verbose=None):
+def PerceptronLearner(dataset, l_rate=0.01, epochs=1000, batch_size=1,
+                      optimizer=stochastic_gradient_descent, verbose=None):
     """
     Simple perceptron neural network.
     """
@@ -406,7 +388,7 @@ def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, optimizer=gradien
     raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
 
     # update the network
-    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=learning_rate,
+    learned_net = optimizer(dataset, raw_net, mean_squared_error_loss, epochs, l_rate=l_rate,
                             batch_size=batch_size, verbose=verbose)
 
     def predict(example):
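
A minimal sketch (not part of the patch) of what the layer-level changes mean in practice; it assumes the rest of this module (Sigmoid, dot_product, random_weights) is available, as in the surrounding code base:

# Illustrative only. NNUnit is gone: Layer now builds plain Node objects,
# DenseLayer's activation defaults to the Sigmoid class (instantiated in
# __init__), and unit outputs are stored in .value rather than .val.
layer = DenseLayer(in_size=2, out_size=1)
out = layer.forward([0.5, -0.3])
assert layer.nodes[0].value == out[0]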
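And a hedged sketch of the renamed training entry points, where `dataset` stands in for a hypothetical DataSet instance from the surrounding code base; note that hidden_layer_sizes is now a required argument with no [4] default:

# Illustrative only: gradient_descent was renamed to stochastic_gradient_descent
# (it updates weights once per mini-batch), and learning_rate became l_rate.
nn = NeuralNetLearner(dataset, hidden_layer_sizes=[4], l_rate=0.01, epochs=1000,
                      batch_size=1, optimizer=stochastic_gradient_descent)
p = PerceptronLearner(dataset, l_rate=0.01, epochs=1000)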