Commit df32fc5

update
1 parent 9e35e19 commit df32fc5

1 file changed: +121 -0 lines changed

Diff for: cnn_class/keras_example.py

+121
@@ -0,0 +1,121 @@
# https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision

from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future

from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization, Input

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

from datetime import datetime
from scipy.io import loadmat
from sklearn.utils import shuffle

from benchmark import get_data, error_rate


# helper
# def y2indicator(Y):
#   N = len(Y)
#   K = len(set(Y))
#   I = np.zeros((N, K))
#   I[np.arange(N), Y] = 1
#   return I

def rearrange(X):
  # input is (32, 32, 3, N)
  # output is (N, 32, 32, 3)
  # N = X.shape[-1]
  # out = np.zeros((N, 32, 32, 3), dtype=np.float32)
  # for i in xrange(N):
  #   for j in xrange(3):
  #     out[i, :, :, j] = X[:, :, j, i]
  # return out / 255
  return (X.transpose(3, 0, 1, 2) / 255.).astype(np.float32)
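
# Quick shape check (illustrative sketch, not part of the original commit):
# a dummy (32, 32, 3, N) batch should come back as (N, 32, 32, 3) with
# values scaled into [0, 1].
_demo = np.random.randint(0, 256, size=(32, 32, 3, 4))
assert rearrange(_demo).shape == (4, 32, 32, 3)
assert rearrange(_demo).max() <= 1.0
del _demo
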
# get the data
train, test = get_data()

# Need to scale! don't leave as 0..255
# Y is an N x 1 matrix with values 1..10 (MATLAB indexes by 1)
# So flatten it and make it 0..9
# Also need indicator matrix for cost calculation
Xtrain = rearrange(train['X'])
Ytrain = train['y'].flatten() - 1
del train

Xtest = rearrange(test['X'])
Ytest = test['y'].flatten() - 1
del test
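
# Illustrative check (not in the original commit): with SVHN-style labels
# from benchmark.get_data, the -1 shift should leave exactly the classes 0..9.
print("Ytrain classes:", np.unique(Ytrain))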


# get shapes
K = len(set(Ytrain))


# make the CNN
i = Input(shape=Xtrain.shape[1:])
x = Conv2D(filters=20, kernel_size=(5, 5))(i)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)

x = Conv2D(filters=50, kernel_size=(5, 5))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)

x = Flatten()(x)
x = Dense(units=500)(x)
x = Activation('relu')(x)
x = Dropout(0.3)(x)
x = Dense(units=K)(x)
x = Activation('softmax')(x)

model = Model(inputs=i, outputs=x)
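
# Added for illustration (not in the original commit): model.summary() is
# standard Keras API and prints the layer-by-layer output shapes, handy for
# verifying how the two conv/pool stages shrink the 32x32 input before Flatten.
model.summary()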


# list of losses: https://keras.io/losses/
# list of optimizers: https://keras.io/optimizers/
# list of metrics: https://keras.io/metrics/
model.compile(
  loss='sparse_categorical_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)
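
# Note (added, not in the original commit): 'sparse_categorical_crossentropy'
# consumes integer labels directly, which is why the y2indicator helper above
# stays commented out. The one-hot alternative would pair
# keras.utils.to_categorical(Ytrain, K) with loss='categorical_crossentropy'.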

# note: multiple ways to choose a backend
# either theano, tensorflow, or cntk
# https://keras.io/backend/


# gives us back a <keras.callbacks.History object at 0x112e61a90>
r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=10, batch_size=32)
print("Returned:", r)

# print the available keys
# should see: dict_keys(['val_loss', 'acc', 'loss', 'val_acc'])
print(r.history.keys())

# plot some data
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()

# accuracies
plt.plot(r.history['acc'], label='acc')
plt.plot(r.history['val_acc'], label='val_acc')
plt.legend()
plt.show()
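
# Note (added, not in the original commit): newer Keras/TF versions record
# accuracy under 'accuracy'/'val_accuracy' rather than 'acc'/'val_acc'. A
# version-robust lookup would be:
# acc_key = 'acc' if 'acc' in r.history else 'accuracy'
# plt.plot(r.history[acc_key], label=acc_key)
# plt.plot(r.history['val_' + acc_key], label='val_' + acc_key)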
