
Commit f5e8f6f

HanadS authored and committed
Facial Recognition System using CNNs
0 parents  commit f5e8f6f

6 files changed (+489, -0 lines changed)

Diff for: COMP4107ProjectReport.pdf

87.5 KB
Binary file not shown.

Diff for: eigenfaces.py

+122
@@ -0,0 +1,122 @@
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import fetch_lfw_people

########################################################################
# Load the data as numpy arrays.                                       #

faces = fetch_lfw_people(color=False, min_faces_per_person=10)

########################################################################
# Split into a training set and a testing set.                         #

X = faces.data
y = faces.target
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)

numTrain = X_train.shape[0]
numTest = X_test.shape[0]

# Data dimensions: each LFW image is 62x47 pixels.
h, w = faces.images.shape[1], faces.images.shape[2]

########################################################################
# Calculate the mean face.                                             #

meanFace = np.zeros(X_train[0].shape)
for i in range(numTrain):
    meanFace = meanFace + X_train[i]  # accumulate every training face
meanFace = meanFace / numTrain

########################################################################
# Normalize the training set by subtracting the mean face.             #

X_train_normalized = X_train.copy()  # copy so X_train stays untouched
for i in range(numTrain):
    X_train_normalized[i] = X_train_normalized[i] - meanFace

########################################################################
# Calculate the covariance (the numTrain x numTrain surrogate of the   #
# full pixel-space covariance matrix).                                 #

X_train_normalized_T = np.transpose(X_train_normalized)
covariance = np.dot(X_train_normalized, X_train_normalized_T)

########################################################################
# Compute a PCA and produce eigenfaces.                                #

numComponents = 150

pca = PCA(n_components=numComponents, svd_solver='randomized',
          whiten=True).fit(X_train_normalized)

eigenfaces = pca.components_.reshape((numComponents, h * w))

########################################################################
# Calculate weights for the training set for each eigenvector.         #

trainWeights = np.zeros((numTrain, numComponents))

for i in range(numTrain):
    trainFace = X_train_normalized[i]
    weights = np.zeros((numComponents,))
    for j in range(numComponents):
        weights[j] = np.dot(trainFace, eigenfaces[j])
    trainWeights[i] = weights

########################################################################
# Recognition.                                                         #

numberOfSuccesses = 0
numberOfFailures = 0
threshold = 490  # empirically driven threshold choice.

testNumber = 50
for i in range(testNumber):
    newFace = X_test[i] - meanFace
    newFaceWeights = np.zeros((numComponents,))

    for j in range(numComponents):
        newFaceWeights[j] = np.dot(newFace, eigenfaces[j])

    # Collect every training identity whose weight vector lies within
    # the threshold distance of the new face's weights.
    guesses = set()
    for k in range(numTrain):
        dist = np.linalg.norm(newFaceWeights - trainWeights[k])
        if dist < threshold:
            guesses.add(y_train[k])

    # Is the correct answer among our candidate faces?
    if y_test[i] in guesses:
        numberOfSuccesses = numberOfSuccesses + 1
    else:
        numberOfFailures = numberOfFailures + 1

    print("Test Image %d.\n" % (i + 1))

print("Successes: %d. Failures: %d. Accuracy: %.2f%%."
      % (numberOfSuccesses, numberOfFailures,
         (numberOfSuccesses / testNumber) * 100))
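As an aside, the per-face projection loops in eigenfaces.py can be collapsed into single matrix products. A minimal vectorized sketch, assuming the X_train_normalized, X_test, meanFace, and eigenfaces arrays defined above (illustrative only, not part of the commit):

# Sketch: vectorized equivalent of the weight-projection loops.
trainWeights = X_train_normalized @ eigenfaces.T               # (numTrain, 150)
testWeights = (X_test - meanFace) @ eigenfaces.T               # (numTest, 150)

# Pairwise Euclidean distances between every test and train weight vector.
dists = np.linalg.norm(testWeights[:, None, :] - trainWeights[None, :, :],
                       axis=2)                                 # (numTest, numTrain)

Each row of dists can then be thresholded to build the candidate set exactly as the inner loop above does.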

Diff for: project_cnnL.py

+121
@@ -0,0 +1,121 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_lfw_people

batch_size = 100
test_size = 256

def init_weights(shape, name):
    return tf.get_variable(name, shape=shape,
                           initializer=tf.contrib.layers.xavier_initializer())

def loadData():
    faces = fetch_lfw_people(color=False, min_faces_per_person=10)
    X = faces.data
    y = faces.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25)
    return X_train, y_train, X_test, y_test

def _encode_labels(y, k):
    onehot = np.zeros((y.shape[0], k))
    for idx, val in enumerate(y):
        onehot[idx, val] = 1.0
    return onehot

def model(X, w, w2, w3, w4, w_fc, w_o, p_keep_conv, p_keep_hidden):
    # 62x47 -> 60x45 (3x3 VALID conv)
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,
                                  strides=[1, 1, 1, 1], padding='VALID'))
    # 60x45 -> 58x43 (3x3 VALID conv)
    l2a = tf.nn.relu(tf.nn.conv2d(l1a, w2,
                                  strides=[1, 1, 1, 1], padding='VALID'))
    # Stride-1 SAME pooling keeps the 58x43 shape.
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],
                        strides=[1, 1, 1, 1], padding='SAME')

    # 58x43 -> 56x41 (3x3 VALID conv)
    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,
                                  strides=[1, 1, 1, 1], padding='VALID'))
    # 56x41 -> 28x21 (3x3 pool, stride 2, SAME)
    l3 = tf.nn.max_pool(l3a, ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1], padding='SAME')

    # 28x21 -> 26x19 (3x3 VALID conv)
    l4a = tf.nn.relu(tf.nn.conv2d(l3, w4,
                                  strides=[1, 1, 1, 1], padding='VALID'))
    l4 = tf.nn.max_pool(l4a, ksize=[1, 2, 2, 1],
                        strides=[1, 1, 1, 1], padding='SAME')
    l4 = tf.nn.dropout(l4, p_keep_conv)  # dropout on the conv features

    l5 = tf.reshape(l4, [-1, w_fc.get_shape().as_list()[0]])  # flatten to (?, 48*26*19)
    l6 = tf.nn.relu(tf.matmul(l5, w_fc))
    l6 = tf.nn.dropout(l6, p_keep_hidden)  # dropout on the hidden layer
    pyx = tf.matmul(l6, w_o)
    return pyx

trX, trY, teX, teY = loadData()

k = np.amax(trY) + 1   # number of classes
k2 = np.amax(teY) + 1  # number of classes

trX = trX.reshape(-1, 62, 47, 1)
teX = teX.reshape(-1, 62, 47, 1)
trY = _encode_labels(trY, k)
teY = _encode_labels(teY, k2)

X = tf.placeholder("float", [None, 62, 47, 1])
Y = tf.placeholder("float", [None, k])

w = init_weights([3, 3, 1, 16], "w")
w2 = init_weights([3, 3, 16, 16], "w2")
w3 = init_weights([3, 3, 16, 32], "w3")           # 3x3x16 conv, 32 outputs
w4 = init_weights([3, 3, 32, 48], "w4")
w_fc = init_weights([48 * 26 * 19, 160], "w_fc")  # FC: 48*26*19 inputs, 160 outputs
w_o = init_weights([160, k], "w_o")               # FC: 160 inputs, k outputs (labels)

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_fc, w_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session.
with tf.Session() as sess:
    # Initialize all variables.
    tf.global_variables_initializer().run()

    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX) + 1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # sample a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))
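The 48 * 26 * 19 input size of w_fc is fixed by the conv/pool shape arithmetic. A small standalone sketch (a hypothetical helper, not part of the commit) that traces the 62x47 input through the layers of project_cnnL.py:

import math

def out_size(n, k, s, padding):
    # TensorFlow's shape rules: VALID -> ceil((n - k + 1) / s), SAME -> ceil(n / s).
    if padding == 'VALID':
        return math.ceil((n - k + 1) / s)
    return math.ceil(n / s)

h, w = 62, 47
for kk, ss, pad in [(3, 1, 'VALID'),  # conv w:  62x47 -> 60x45
                    (3, 1, 'VALID'),  # conv w2: 60x45 -> 58x43
                    (2, 1, 'SAME'),   # pool:    58x43 -> 58x43
                    (3, 1, 'VALID'),  # conv w3: 58x43 -> 56x41
                    (3, 2, 'SAME'),   # pool:    56x41 -> 28x21
                    (3, 1, 'VALID'),  # conv w4: 28x21 -> 26x19
                    (2, 1, 'SAME')]:  # pool:    26x19 -> 26x19
    h, w = out_size(h, kk, ss, pad), out_size(w, kk, ss, pad)

print(h, w)  # 26 19 -> flatten size 48 * 26 * 19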

Diff for: project_cnnM.py

+116
@@ -0,0 +1,116 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_lfw_people

batch_size = 100
test_size = 256

def init_weights(shape, name):
    return tf.get_variable(name, shape=shape,
                           initializer=tf.contrib.layers.xavier_initializer())

def loadData():
    faces = fetch_lfw_people(color=False, min_faces_per_person=10)
    X = faces.data
    y = faces.target
    names = faces.target_names
    targets = faces.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25)
    return X_train, y_train, X_test, y_test, names, targets

def _encode_labels(y, k):
    onehot = np.zeros((y.shape[0], k))
    for idx, val in enumerate(y):
        onehot[idx, val] = 1.0
    return onehot

def model(X, w, w2, w3, w_fc, w_o, p_keep_conv, p_keep_hidden):
    # SAME padding with stride 1 keeps the 62x47 shape.
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],
                        strides=[1, 1, 1, 1], padding='SAME')

    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2,
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],
                        strides=[1, 1, 1, 1], padding='SAME')

    # Stride-2 conv halves the map: 62x47 -> 31x24.
    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,
                                  strides=[1, 2, 2, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],
                        strides=[1, 1, 1, 1], padding='SAME')
    l3 = tf.nn.dropout(l3, p_keep_conv)  # dropout on the conv features

    l4 = tf.reshape(l3, [-1, w_fc.get_shape().as_list()[0]])  # flatten to (?, 48*31*24)
    l5 = tf.nn.relu(tf.matmul(l4, w_fc))
    l5 = tf.nn.dropout(l5, p_keep_hidden)  # dropout on the hidden layer
    pyx = tf.matmul(l5, w_o)
    return pyx

trX_raw, trY_raw, teX_raw, teY_raw, names, targets = loadData()

k = np.amax(trY_raw) + 1   # number of classes
k2 = np.amax(teY_raw) + 1

trX = trX_raw.reshape(-1, 62, 47, 1)  # 62x47x1 input images
teX = teX_raw.reshape(-1, 62, 47, 1)
trY = _encode_labels(trY_raw, k)
teY = _encode_labels(teY_raw, k2)

X = tf.placeholder("float", [None, 62, 47, 1])
Y = tf.placeholder("float", [None, k])

w = init_weights([5, 5, 1, 16], "w")
w2 = init_weights([4, 4, 16, 32], "w2")
w3 = init_weights([3, 3, 32, 48], "w3")
w_fc = init_weights([48 * 31 * 24, 160], "w_fc")
w_o = init_weights([160, k], "w_o")

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w_fc, w_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session.
with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX) + 1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # sample a test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))
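loadData() in project_cnnM.py returns names (faces.target_names) without using them. A minimal sketch, assuming the session and tensors defined above, of how they could decode predicted label indices back into identities; the snippet is illustrative only:

# Inside the tf.Session() block, after training: decode a few predictions.
preds = sess.run(predict_op, feed_dict={X: teX[:5],
                                        p_keep_conv: 1.0,
                                        p_keep_hidden: 1.0})
for idx, p in enumerate(preds):
    print("predicted: %s, actual: %s"
          % (names[p], names[np.argmax(teY[idx])]))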
