Commit f06fda8

Added layers

Parent: 1271fad

7 files changed: +218 -30 lines

lab-11-1-mnist_cnn.py (+5 -5)

@@ -43,7 +43,7 @@
 L2 = tf.nn.relu(L2)
 L2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1],
                     strides=[1, 2, 2, 1], padding='SAME')
-L2 = tf.reshape(L2, [-1, 7 * 7 * 64])
+L2_flat = tf.reshape(L2, [-1, 7 * 7 * 64])
 '''
 Tensor("Conv2D_1:0", shape=(?, 14, 14, 64), dtype=float32)
 Tensor("Relu_1:0", shape=(?, 14, 14, 64), dtype=float32)

@@ -55,11 +55,11 @@
 W3 = tf.get_variable("W3", shape=[7 * 7 * 64, 10],
                      initializer=tf.contrib.layers.xavier_initializer())
 b = tf.Variable(tf.random_normal([10]))
-hypothesis = tf.matmul(L2, W3) + b
+logits = tf.matmul(L2_flat, W3) + b

 # define cost/loss & optimizer
 cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
-    logits=hypothesis, labels=Y))
+    logits=logits, labels=Y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # initialize

@@ -83,7 +83,7 @@
 print('Learning Finished!')

 # Test model and check accuracy
-correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
+correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 print('Accuracy:', sess.run(accuracy, feed_dict={
       X: mnist.test.images, Y: mnist.test.labels}))

@@ -92,7 +92,7 @@
 r = random.randint(0, mnist.test.num_examples - 1)
 print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
 print("Prediction: ", sess.run(
-    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
+    tf.argmax(logits, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

 # plt.imshow(mnist.test.images[r:r + 1].
 #            reshape(28, 28), cmap='Greys', interpolation='nearest')
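The hypothesis -> logits rename above is clarifying rather than behavioral: tf.nn.softmax_cross_entropy_with_logits applies softmax internally, so the tensor it receives must be the raw pre-softmax scores, and because softmax is monotonic, argmax over those scores picks the same class as argmax over the probabilities. A minimal sketch of that equivalence (TF 1.x, illustrative values):

    import numpy as np
    import tensorflow as tf

    scores = tf.constant([[2.0, 1.0, 0.1]])  # raw class scores ("logits")
    probs = tf.nn.softmax(scores)            # normalized probabilities
    with tf.Session() as sess:
        s, p = sess.run([scores, probs])
        assert np.argmax(s) == np.argmax(p)  # same prediction either way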

lab-11-2-mnist_deep_cnn.py (+6 -6)

@@ -66,7 +66,7 @@
 L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
                     1, 2, 2, 1], padding='SAME')
 L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
-L3 = tf.reshape(L3, [-1, 128 * 4 * 4])
+L3_flat = tf.reshape(L3, [-1, 128 * 4 * 4])
 '''
 Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
 Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)

@@ -79,7 +79,7 @@
 W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625],
                      initializer=tf.contrib.layers.xavier_initializer())
 b4 = tf.Variable(tf.random_normal([625]))
-L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
+L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
 L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
 '''
 Tensor("Relu_3:0", shape=(?, 625), dtype=float32)

@@ -90,14 +90,14 @@
 W5 = tf.get_variable("W5", shape=[625, 10],
                      initializer=tf.contrib.layers.xavier_initializer())
 b5 = tf.Variable(tf.random_normal([10]))
-hypothesis = tf.matmul(L4, W5) + b5
+logits = tf.matmul(L4, W5) + b5
 '''
 Tensor("add_1:0", shape=(?, 10), dtype=float32)
 '''

 # define cost/loss & optimizer
 cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
-    logits=hypothesis, labels=Y))
+    logits=logits, labels=Y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # initialize

@@ -121,7 +121,7 @@
 print('Learning Finished!')

 # Test model and check accuracy
-correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
+correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 print('Accuracy:', sess.run(accuracy, feed_dict={
       X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

@@ -130,7 +130,7 @@
 r = random.randint(0, mnist.test.num_examples - 1)
 print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
 print("Prediction: ", sess.run(
-    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))
+    tf.argmax(logits, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))

 # plt.imshow(mnist.test.images[r:r + 1].
 #            reshape(28, 28), cmap='Greys', interpolation='nearest')
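This file keeps the tf.nn.dropout style, where keep_prob is a placeholder fed with ~0.7 during training and 1 during evaluation, as the test-section hunks above show. A minimal sketch of that feed pattern (TF 1.x, illustrative names):

    import tensorflow as tf

    keep_prob = tf.placeholder(tf.float32)
    h = tf.ones([1, 4])
    h_drop = tf.nn.dropout(h, keep_prob=keep_prob)  # keeps each unit with prob keep_prob

    with tf.Session() as sess:
        print(sess.run(h_drop, feed_dict={keep_prob: 0.7}))  # training: random units zeroed
        print(sess.run(h_drop, feed_dict={keep_prob: 1.0}))  # testing: identity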

lab-11-3-mnist_cnn_class.py (+7 -6)

@@ -78,7 +78,8 @@ def _build_net(self):
             L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
                                 1, 2, 2, 1], padding='SAME')
             L3 = tf.nn.dropout(L3, keep_prob=self.keep_prob)
-            L3 = tf.reshape(L3, [-1, 128 * 4 * 4])
+
+            L3_flat = tf.reshape(L3, [-1, 128 * 4 * 4])
             '''
             Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
             Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)

@@ -91,7 +92,7 @@ def _build_net(self):
             W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625],
                                  initializer=tf.contrib.layers.xavier_initializer())
             b4 = tf.Variable(tf.random_normal([625]))
-            L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
+            L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
             L4 = tf.nn.dropout(L4, keep_prob=self.keep_prob)
             '''
             Tensor("Relu_3:0", shape=(?, 625), dtype=float32)

@@ -102,23 +103,23 @@ def _build_net(self):
             W5 = tf.get_variable("W5", shape=[625, 10],
                                  initializer=tf.contrib.layers.xavier_initializer())
             b5 = tf.Variable(tf.random_normal([10]))
-            self.logit = tf.matmul(L4, W5) + b5
+            self.logits = tf.matmul(L4, W5) + b5
             '''
             Tensor("add_1:0", shape=(?, 10), dtype=float32)
             '''

         # define cost/loss & optimizer
         self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
-            logits=self.logit, labels=self.Y))
+            logits=self.logits, labels=self.Y))
         self.optimizer = tf.train.AdamOptimizer(
             learning_rate=learning_rate).minimize(self.cost)

         correct_prediction = tf.equal(
-            tf.argmax(self.logit, 1), tf.argmax(self.Y, 1))
+            tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
         self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

     def predict(self, x_test, keep_prop=1.0):
-        return self.sess.run(self.logit, feed_dict={self.X: x_test, self.keep_prob: keep_prop})
+        return self.sess.run(self.logits, feed_dict={self.X: x_test, self.keep_prob: keep_prop})

     def get_accuracy(self, x_test, y_test, keep_prop=1.0):
         return self.sess.run(self.accuracy, feed_dict={self.X: x_test, self.Y: y_test, self.keep_prob: keep_prop})
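The class files also fix a naming slip: self.logit becomes self.logits, matching both the plural tensor it holds and the keyword argument it feeds. A usage sketch of the resulting class API (assuming the Model class above, a batch batch_xs/batch_ys, and mnist loaded as in the other labs):

    sess = tf.Session()
    m1 = Model(sess, "m1")
    sess.run(tf.global_variables_initializer())

    cost, _ = m1.train(batch_xs, batch_ys)   # one optimization step, default keep_prop=0.7
    scores = m1.predict(mnist.test.images)   # raw logits, keep_prop=1.0
    acc = m1.get_accuracy(mnist.test.images, mnist.test.labels)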

lab-11-4-mnist_cnn_ensemble.py (+9 -6)

@@ -77,7 +77,8 @@ def _build_net(self):
             L3 = tf.nn.max_pool(L3, ksize=[1, 2, 2, 1], strides=[
                                 1, 2, 2, 1], padding='SAME')
             L3 = tf.nn.dropout(L3, keep_prob=self.keep_prob)
-            L3 = tf.reshape(L3, [-1, 128 * 4 * 4])
+
+            L3_flat = tf.reshape(L3, [-1, 128 * 4 * 4])
             '''
             Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
             Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)

@@ -90,7 +91,7 @@ def _build_net(self):
             W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625],
                                  initializer=tf.contrib.layers.xavier_initializer())
             b4 = tf.Variable(tf.random_normal([625]))
-            L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
+            L4 = tf.nn.relu(tf.matmul(L3_flat, W4) + b4)
             L4 = tf.nn.dropout(L4, keep_prob=self.keep_prob)
             '''
             Tensor("Relu_3:0", shape=(?, 625), dtype=float32)

@@ -101,23 +102,23 @@ def _build_net(self):
             W5 = tf.get_variable("W5", shape=[625, 10],
                                  initializer=tf.contrib.layers.xavier_initializer())
             b5 = tf.Variable(tf.random_normal([10]))
-            self.logit = tf.matmul(L4, W5) + b5
+            self.logits = tf.matmul(L4, W5) + b5
             '''
             Tensor("add_1:0", shape=(?, 10), dtype=float32)
             '''

         # define cost/loss & optimizer
         self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
-            logits=self.logit, labels=self.Y))
+            logits=self.logits, labels=self.Y))
         self.optimizer = tf.train.AdamOptimizer(
             learning_rate=learning_rate).minimize(self.cost)

         correct_prediction = tf.equal(
-            tf.argmax(self.logit, 1), tf.argmax(self.Y, 1))
+            tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
         self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

     def predict(self, x_test, keep_prop=1.0):
-        return self.sess.run(self.logit, feed_dict={self.X: x_test, self.keep_prob: keep_prop})
+        return self.sess.run(self.logits, feed_dict={self.X: x_test, self.keep_prob: keep_prop})

     def get_accuracy(self, x_test, y_test, keep_prop=1.0):
         return self.sess.run(self.accuracy, feed_dict={self.X: x_test, self.Y: y_test, self.keep_prob: keep_prop})

@@ -136,6 +137,8 @@ def train(self, x_data, y_data, keep_prop=0.7):

 sess.run(tf.global_variables_initializer())

+print('Learning Started!')
+
 # train my model
 for epoch in range(training_epochs):
     avg_cost_list = np.zeros(len(models))
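The ensemble's final prediction in this file sums each model's class scores and takes the argmax of the sum. A toy illustration of that vote (made-up numbers):

    import numpy as np

    p1 = np.array([[0.1, 0.7, 0.2]])  # model 0's scores for one image
    p2 = np.array([[0.4, 0.3, 0.3]])  # model 1's scores
    predictions = p1 + p2             # summed scores: [[0.5, 1.0, 0.5]]
    print(np.argmax(predictions, 1))  # -> [1]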

lab-11-5-mnist_cnn_ensemble_layers.py (new file, +178)

# Lab 11 MNIST and Deep learning CNN
# https://www.tensorflow.org/tutorials/layers
import tensorflow as tf
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# hyper parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 100


class Model:

    def __init__(self, sess, name):
        self.sess = sess
        self.name = name
        self._build_net()

    def _build_net(self):
        with tf.variable_scope(self.name):
            # dropout is active only while self.training is fed as True;
            # note tf.layers.dropout's rate is the fraction of units
            # *dropped*, not tf.nn.dropout's keep_prob
            self.training = tf.placeholder(tf.bool)

            # input place holders
            self.X = tf.placeholder(tf.float32, [None, 784])

            # img 28x28x1 (black/white), Input Layer
            X_img = tf.reshape(self.X, [-1, 28, 28, 1])
            self.Y = tf.placeholder(tf.float32, [None, 10])

            # L1 ImgIn shape=(?, 28, 28, 1)
            # W1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
            # Conv -> (?, 28, 28, 32)
            # Pool -> (?, 14, 14, 32)
            # L1 = tf.nn.conv2d(X_img, W1, strides=[1, 1, 1, 1], padding='SAME')
            # L1 = tf.nn.relu(L1)

            # Convolutional Layer #1
            conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)

            # L1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1],
            #                     strides=[1, 2, 2, 1], padding='SAME')

            # Pooling Layer #1
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
                                            padding="SAME", strides=2)

            # L1 = tf.nn.dropout(L1, keep_prob=self.keep_prob)
            # rate=0.3 drops 30% of units, i.e. the old keep_prob=0.7
            dropout1 = tf.layers.dropout(inputs=pool1,
                                         rate=0.3, training=self.training)

            # Convolutional Layer #2 and Pooling Layer #2
            conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout2 = tf.layers.dropout(inputs=pool2,
                                         rate=0.3, training=self.training)

            # Convolutional Layer #3 and Pooling Layer #3
            conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout3 = tf.layers.dropout(inputs=pool3,
                                         rate=0.3, training=self.training)

            flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
            '''
            Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
            Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)
            Tensor("MaxPool_2:0", shape=(?, 4, 4, 128), dtype=float32)
            Tensor("dropout_2/mul:0", shape=(?, 4, 4, 128), dtype=float32)
            Tensor("Reshape_1:0", shape=(?, 2048), dtype=float32)
            '''

            # Dense Layer: 4x4x128 inputs -> 625 outputs
            # W4 = tf.get_variable("W4", shape=[128 * 4 * 4, 625],
            #                      initializer=tf.contrib.layers.xavier_initializer())
            # b4 = tf.Variable(tf.random_normal([625]))
            # L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
            # L4 = tf.nn.dropout(L4, keep_prob=self.keep_prob)

            dense4 = tf.layers.dense(inputs=flat,
                                     units=625, activation=tf.nn.relu)
            dropout4 = tf.layers.dropout(inputs=dense4,
                                         rate=0.5, training=self.training)

            # Logits Layer: L5 Final FC 625 inputs -> 10 outputs
            self.logits = tf.layers.dense(inputs=dropout4, units=10)

        # define cost/loss & optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(self.cost)

        correct_prediction = tf.equal(
            tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def predict(self, x_test, training=False):
        return self.sess.run(self.logits,
                             feed_dict={self.X: x_test, self.training: training})

    def get_accuracy(self, x_test, y_test, training=False):
        return self.sess.run(self.accuracy,
                             feed_dict={self.X: x_test,
                                        self.Y: y_test, self.training: training})

    def train(self, x_data, y_data, training=True):
        return self.sess.run([self.cost, self.optimizer], feed_dict={
            self.X: x_data, self.Y: y_data, self.training: training})


# initialize
sess = tf.Session()

models = []
num_models = 2
for m in range(num_models):
    models.append(Model(sess, "model" + str(m)))

sess.run(tf.global_variables_initializer())

print('Learning Started!')

# train my model
for epoch in range(training_epochs):
    avg_cost_list = np.zeros(len(models))
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)

        # train each model
        for m_idx, m in enumerate(models):
            c, _ = m.train(batch_xs, batch_ys)
            avg_cost_list[m_idx] += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', avg_cost_list)

print('Learning Finished!')

# Test model and check accuracy
test_size = len(mnist.test.labels)
predictions = np.zeros(test_size * 10).reshape(test_size, 10)
for m_idx, m in enumerate(models):
    print(m_idx, 'Accuracy:', m.get_accuracy(
        mnist.test.images, mnist.test.labels))
    p = m.predict(mnist.test.images)
    predictions += p

ensemble_correct_prediction = tf.equal(
    tf.argmax(predictions, 1), tf.argmax(mnist.test.labels, 1))
ensemble_accuracy = tf.reduce_mean(
    tf.cast(ensemble_correct_prediction, tf.float32))
print('Ensemble accuracy:', sess.run(ensemble_accuracy))

'''
Sample output (apparently from a run with num_models = 7):

0 Accuracy: 0.9933
1 Accuracy: 0.9946
2 Accuracy: 0.9934
3 Accuracy: 0.9935
4 Accuracy: 0.9935
5 Accuracy: 0.9949
6 Accuracy: 0.9941

Ensemble accuracy: 0.9952
'''
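The main API change in this new file is that tf.layers replaces the hand-rolled variables: dropout is controlled by a boolean training placeholder instead of a keep_prob feed, and its rate argument is the fraction of units dropped. A minimal sketch of that switch (TF 1.x, illustrative names):

    import tensorflow as tf

    x = tf.ones([1, 4])
    training = tf.placeholder(tf.bool)
    # drops 30% of units when training=True; identity when training=False
    y = tf.layers.dropout(x, rate=0.3, training=training)

    with tf.Session() as sess:
        print(sess.run(y, feed_dict={training: True}))   # random zeros, survivors scaled by 1/0.7
        print(sess.run(y, feed_dict={training: False}))  # [[1. 1. 1. 1.]]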
