# faceRecognizer.py
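"""Privacy-preserving face recognition from a webcam feed.

Faces are detected with a Haar cascade, aligned and embedded with the
pre-trained OpenFace nn4.small2 model, and classified by a single dense
layer whose weights exist only as two secret shares; the `mpc` package
evaluates that layer on secret-shared data and reveals just the class
scores. (The two-party secret-sharing semantics of `mpc.nn` and
`mpc.tensor` are inferred here from how they are used in this script.)
"""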
import warnings
warnings.filterwarnings("ignore")

import cv2
import numpy as np
from sklearn.preprocessing import LabelEncoder

import config
from mpc.nn import Dense, Reveal, Softmax, Sequential
from mpc.tensor import PrivateEncodedTensor
from openface.align import AlignDlib
from openface.openface_model import create_model
def predict(classifier, wrapper, x):
    """Classify an embedding with the encrypted model; return (label, prob)."""
    likelihoods = classifier.predict(wrapper(x), batch_size=128).unwrap()
    prob = np.max(likelihoods)
    if prob < 0.5:
        # Confidence below threshold: treat the face as unrecognized.
        return 'Unknown', prob
    return np.argmax(likelihoods), prob
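# Illustrative call, assuming `emb` is a 128-d embedding of shape (1, 128):
#   label, prob = predict(classifier, PrivateEncodedTensor, emb)
# `wrapper` secret-shares the plaintext input before it reaches the model;
# 0.5 is an ad-hoc confidence threshold below which a face is rejected.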
# Load the pre-trained OpenFace embedding model.
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')

# Initialize the OpenFace face alignment utility.
alignment = AlignDlib('models/landmarks.dat')

def align_image(img):
    """Crop and warp the largest detected face to the pose the model expects."""
    return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
def img_to_embedding(image):
    """Return the 128-d embedding for a face crop, or None if alignment fails."""
    img = image[..., ::-1]  # OpenCV delivers BGR; the model expects RGB.
    img = align_image(img)
    if img is None:
        print("The image is not clear enough to extract an embedding")
        return None
    # Scale RGB values to the interval [0, 1].
    img = (img / 255.).astype(np.float32)
    # Obtain the embedding vector for the image.
    return nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
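# nn4.small2 takes a 96x96 aligned RGB face and emits a 128-d embedding,
# which is why align_image() warps the detected face to 96 pixels.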
face_cascade = cv2.CascadeClassifier('models/haarcascade_frontalface_default.xml')

# Load the custom dataset: image metadata and precomputed embeddings.
metadata = np.load(config.faceData + '/metadata.npy', allow_pickle=True)
embedded = np.load(config.faceData + '/embedded.npy', allow_pickle=True)

# Numerical encoding of identities.
targets = np.array([m.name for m in metadata])
encoder = LabelEncoder()
encoder.fit(targets)
labels = encoder.transform(targets)
num_labels = len(np.unique(labels))
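# LabelEncoder assigns each identity string an integer class index, e.g.
# after fit(['alice', 'bob']), transform(['bob']) -> [1]; the display loop
# below uses inverse_transform to turn a predicted index back into a name.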
classifier = Sequential([
    Dense(num_labels, 128),  # 128-d embedding -> one score per identity
    Reveal(),                # decrypt the scores before the softmax
    Softmax()
])
# Restore the dense layer's secret-shared weights and bias from their two shares.
classifier.layers[0].weights = PrivateEncodedTensor.from_shares(
    np.load(config.weights + '/encrypted_layer0_weights_0.npy', allow_pickle=True),
    np.load(config.weights + '/encrypted_layer0_weights_1.npy', allow_pickle=True))
classifier.layers[0].bias = PrivateEncodedTensor.from_shares(
    np.load(config.weights + '/encrypted_layer0_bias_0.npy', allow_pickle=True),
    np.load(config.weights + '/encrypted_layer0_bias_1.npy', allow_pickle=True))
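# Neither share file alone reveals the plaintext parameters; from_shares()
# recombines them inside the MPC engine. A minimal sketch of additive
# sharing (illustrative only -- the real fixed-point encoding and modulus
# Q are defined by mpc.tensor):
#   share0 = np.random.randint(0, Q, size=w_encoded.shape)  # random mask
#   share1 = (w_encoded - share0) % Q   # w_encoded == share0 + share1 (mod Q)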
video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    frame = cv2.flip(frame, 1)
    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
        roi = frame[y:y + h, x:x + w]
        embedding = img_to_embedding(roi)
        if embedding is None:
            continue
        example_identity, prob = predict(classifier, PrivateEncodedTensor,
                                         embedding.reshape(1, -1))
        if example_identity == 'Unknown':
            example_identity_ = 'Unknown person'
        else:
            example_identity_ = encoder.inverse_transform(
                np.atleast_1d(example_identity))[0]
        cv2.putText(frame, "Face : " + str(example_identity_), (x, y - 50),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
        cv2.putText(frame, "prob : " + str(prob), (x, y - 20),
                    cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
    cv2.imshow('Face Recognition System', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
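# Files expected on disk (paths taken from the code above):
#   models/nn4.small2.v1.h5, models/landmarks.dat,
#   models/haarcascade_frontalface_default.xml,
#   <config.faceData>/metadata.npy, <config.faceData>/embedded.npy,
#   <config.weights>/encrypted_layer0_{weights,bias}_{0,1}.npy
# Press 'q' in the video window to quit.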