Commit a35f06c

Working on network3.py - convolutional NN examples added
1 parent 664aee3 commit a35f06c

File tree

1 file changed: +54 -7 lines changed


test.py

@@ -97,11 +97,16 @@
 Tutorial:
 http://deeplearning.net/software/theano/install_ubuntu.html#install-ubuntu

+The following command will update only Theano:
+    sudo pip install --upgrade --no-deps theano
+
+The following command will update Theano and NumPy/SciPy (see the warning below):
+    sudo pip install --upgrade theano

 """

 """
-Testing function to check whether your computations have been made on CPU or GPU.
+Below is a testing function to check whether your computations ran on the CPU or on the GPU.
 If the result is 'Used the cpu' and you want it to run on the GPU, do the following:
 1) install theano:
     sudo python3.5 -m pip install Theano
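
After upgrading, it is worth confirming which versions the interpreter actually picks up. A minimal sanity check, assuming only that Theano, NumPy and SciPy are importable (this snippet is illustrative, not part of the commit):

    # illustrative check, not part of test.py
    import numpy
    import scipy
    import theano

    # Versions as seen by this interpreter, plus the device Theano is
    # configured to use ('cpu' unless overridden via THEANO_FLAGS or .theanorc).
    print('theano:', theano.__version__)
    print('numpy:', numpy.__version__)
    print('scipy:', scipy.__version__)
    print('device:', theano.config.device)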
@@ -119,7 +124,6 @@


 """
-
 def testTheano():
     from theano import function, config, shared, sandbox
     import theano.tensor as T
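
To exercise both branches of this check, the Theano tutorial linked above sets THEANO_FLAGS on the command line; since the file here is test.py, a CPU-versus-GPU comparison would look roughly like:

    THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python3.5 test.py
    THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python3.5 test.py

testTheano() then reports 'Used the cpu' or 'Used the gpu' depending on where the compiled function actually ran.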
@@ -143,21 +147,64 @@ def testTheano():
         print('Used the cpu')
     else:
         print('Used the gpu')
-
 # Perform check:
-testTheano()
+#testTheano()


 # ----------------------
 # - network3.py example:
 import network3
+from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer  # softmax plus log-likelihood cost is more common in modern image classification networks

-'''
-from network3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
+# read data:
 training_data, validation_data, test_data = network3.load_data_shared()
+# mini-batch size:
 mini_batch_size = 10
-net = network3.Network([
+
+# chapter 6 - shallow architecture using just a single hidden layer, containing 100 hidden neurons
+'''
+net = Network([
     FullyConnectedLayer(n_in=784, n_out=100),
     SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
 net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
 '''
+
+# chapter 6 - 5x5 local receptive fields, 20 feature maps, 2x2 max-pooling layer
+'''
+net = Network([
+    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
+                  filter_shape=(20, 1, 5, 5),
+                  poolsize=(2, 2)),
+    FullyConnectedLayer(n_in=20*12*12, n_out=100),
+    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
+net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
+'''
+
+# chapter 6 - inserting a second convolutional-pooling layer into the previous example => better accuracy
+'''
+net = Network([
+    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
+                  filter_shape=(20, 1, 5, 5),
+                  poolsize=(2, 2)),
+    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
+                  filter_shape=(40, 20, 5, 5),
+                  poolsize=(2, 2)),
+    FullyConnectedLayer(n_in=40*4*4, n_out=100),
+    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
+net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
+'''
+
+# chapter 6 - rectified linear units and some L2 regularization (lmbda=0.1) => even better accuracy
+from network3 import ReLU
+net = Network([
+    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
+                  filter_shape=(20, 1, 5, 5),
+                  poolsize=(2, 2),
+                  activation_fn=ReLU),
+    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
+                  filter_shape=(40, 20, 5, 5),
+                  poolsize=(2, 2),
+                  activation_fn=ReLU),
+    FullyConnectedLayer(n_in=40*4*4, n_out=100, activation_fn=ReLU),
+    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
+net.SGD(training_data, 60, mini_batch_size, 0.03, validation_data, test_data, lmbda=0.1)
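
A note on the n_in arguments above: with a 'valid' convolution, a 5x5 filter turns a 28x28 image into 24x24 feature maps, and 2x2 max-pooling halves that to 12x12, giving n_in=20*12*12; a second conv-pool stage turns 12x12 into 8x8 and then 4x4, giving n_in=40*4*4. A minimal sketch of that arithmetic (the helper is illustrative, not part of network3):

    # side length of a feature map after a valid convolution plus max-pooling
    def conv_pool_side(side, filter_side=5, pool=2):
        return (side - filter_side + 1) // pool

    s1 = conv_pool_side(28)   # 28 -> 24 -> 12
    s2 = conv_pool_side(s1)   # 12 -> 8 -> 4
    print(20 * s1 * s1)       # 2880 = 20*12*12, n_in after one conv-pool layer
    print(40 * s2 * s2)       # 640 = 40*4*4, n_in after two conv-pool layers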
