
Commit fd2da74

Add SFace visualization demo and example outputs (#231)
* initial commit
* add example output
1 parent 80f7c6a commit fd2da74

File tree: 5 files changed, +102 −15 lines


README.md

+4

```diff
@@ -61,6 +61,10 @@ Some examples are listed below. You can find more in the directory of each model
 
 ![largest selfie](./models/face_detection_yunet/example_outputs/largest_selfie.jpg)
 
+### Face Recognition with [SFace](./models/face_recognition_sface/)
+
+![sface demo](./models/face_recognition_sface/example_outputs/demo.jpg)
+
 ### Facial Expression Recognition with [Progressive Teacher](./models/facial_expression_recognition/)
 
 ![fer demo](./models/facial_expression_recognition/example_outputs/selfie.jpg)
```

models/face_recognition_sface/README.md

+7 −1

````diff
@@ -26,12 +26,18 @@ Run the following command to try the demo:
 
 ```shell
 # recognize on images
-python demo.py --input1 /path/to/image1 --input2 /path/to/image2
+python demo.py --target /path/to/image1 --query /path/to/image2
 
 # get help regarding various parameters
 python demo.py --help
 ```
 
+### Example outputs
+
+![sface demo](./example_outputs/demo.jpg)
+
+Note: The left half of the image is the target identity and the right half is the query. Green boxes mark faces with the same identity as the target; red boxes mark different identities.
+
 ## License
 
 All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
````
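The green/red convention in the note corresponds to the two BGR constants used by the `visualize()` helper added to demo.py (next file in this commit). As a tiny illustrative sketch only, with a hypothetical `box_color` helper that is not part of the commit:

```python
# Illustrative only: the color rule described in the note above.
MATCHED_BOX_COLOR = (0, 255, 0)     # BGR green: same identity as the target
MISMATCHED_BOX_COLOR = (0, 0, 255)  # BGR red: different identity

def box_color(is_match):
    return MATCHED_BOX_COLOR if is_match else MISMATCHED_BOX_COLOR
```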

models/face_recognition_sface/demo.py

+86 −12

```diff
@@ -30,10 +30,10 @@
 
 parser = argparse.ArgumentParser(
     description="SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition (https://ieeexplore.ieee.org/document/9318547)")
-parser.add_argument('--input1', '-i1', type=str,
-                    help='Usage: Set path to the input image 1 (original face).')
-parser.add_argument('--input2', '-i2', type=str,
-                    help='Usage: Set path to the input image 2 (comparison face).')
+parser.add_argument('--target', '-t', type=str,
+                    help='Usage: Set path to the input image 1 (target face).')
+parser.add_argument('--query', '-q', type=str,
+                    help='Usage: Set path to the input image 2 (query).')
 parser.add_argument('--model', '-m', type=str, default='face_recognition_sface_2021dec.onnx',
                     help='Usage: Set model path, defaults to face_recognition_sface_2021dec.onnx.')
 parser.add_argument('--backend_target', '-bt', type=int, default=0,
@@ -46,8 +46,64 @@
                     '''.format(*[x for x in range(len(backend_target_pairs))]))
 parser.add_argument('--dis_type', type=int, choices=[0, 1], default=0,
                     help='Usage: Distance type. \'0\': cosine, \'1\': norm_l1. Defaults to \'0\'')
+parser.add_argument('--save', '-s', action='store_true',
+                    help='Usage: Specify to save file with results (i.e. bounding box, confidence level). Invalid in case of camera input.')
+parser.add_argument('--vis', '-v', action='store_true',
+                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
 args = parser.parse_args()
 
+def visualize(img1, faces1, img2, faces2, matches, scores, target_size=[512, 512]): # target_size: (h, w)
+    out1 = img1.copy()
+    out2 = img2.copy()
+    matched_box_color = (0, 255, 0)    # BGR
+    mismatched_box_color = (0, 0, 255) # BGR
+
+    # Resize the target image to fit target_size while keeping its aspect ratio, then pad
+    padded_out1 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
+    h1, w1, _ = out1.shape
+    ratio1 = min(target_size[0] / out1.shape[0], target_size[1] / out1.shape[1])
+    new_h1 = int(h1 * ratio1)
+    new_w1 = int(w1 * ratio1)
+    resized_out1 = cv.resize(out1, (new_w1, new_h1), interpolation=cv.INTER_LINEAR).astype(np.float32)
+    top = max(0, target_size[0] - new_h1) // 2
+    bottom = top + new_h1
+    left = max(0, target_size[1] - new_w1) // 2
+    right = left + new_w1
+    padded_out1[top:bottom, left:right] = resized_out1
+
+    # Draw bbox of the target face
+    bbox1 = faces1[0][:4] * ratio1
+    x, y, w, h = bbox1.astype(np.int32)
+    cv.rectangle(padded_out1, (x + left, y + top), (x + left + w, y + top + h), matched_box_color, 2)
+
+    # Resize the query image to fit target_size while keeping its aspect ratio, then pad
+    padded_out2 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
+    h2, w2, _ = out2.shape
+    ratio2 = min(target_size[0] / out2.shape[0], target_size[1] / out2.shape[1])
+    new_h2 = int(h2 * ratio2)
+    new_w2 = int(w2 * ratio2)
+    resized_out2 = cv.resize(out2, (new_w2, new_h2), interpolation=cv.INTER_LINEAR).astype(np.float32)
+    top = max(0, target_size[0] - new_h2) // 2
+    bottom = top + new_h2
+    left = max(0, target_size[1] - new_w2) // 2
+    right = left + new_w2
+    padded_out2[top:bottom, left:right] = resized_out2
+
+    # Draw bbox and score for each query face
+    assert faces2.shape[0] == len(matches), "number of faces2 needs to match matches"
+    assert len(matches) == len(scores), "number of matches needs to match number of scores"
+    for index, match in enumerate(matches):
+        bbox2 = faces2[index][:4] * ratio2
+        x, y, w, h = bbox2.astype(np.int32)
+        box_color = matched_box_color if match else mismatched_box_color
+        cv.rectangle(padded_out2, (x + left, y + top), (x + left + w, y + top + h), box_color, 2)
+
+        score = scores[index]
+        text_color = matched_box_color if match else mismatched_box_color
+        cv.putText(padded_out2, "{:.2f}".format(score), (x + left, y + top - 5), cv.FONT_HERSHEY_DUPLEX, 0.4, text_color)
+
+    return np.concatenate([padded_out1, padded_out2], axis=1)
+
 if __name__ == '__main__':
     backend_id = backend_target_pairs[args.backend_target][0]
     target_id = backend_target_pairs[args.backend_target][1]
@@ -65,17 +121,35 @@
                        backendId=backend_id,
                        targetId=target_id)
 
-    img1 = cv.imread(args.input1)
-    img2 = cv.imread(args.input2)
+    img1 = cv.imread(args.target)
+    img2 = cv.imread(args.query)
 
     # Detect faces
     detector.setInputSize([img1.shape[1], img1.shape[0]])
-    face1 = detector.infer(img1)
-    assert face1.shape[0] > 0, 'Cannot find a face in {}'.format(args.input1)
+    faces1 = detector.infer(img1)
+    assert faces1.shape[0] > 0, 'Cannot find a face in {}'.format(args.target)
     detector.setInputSize([img2.shape[1], img2.shape[0]])
-    face2 = detector.infer(img2)
-    assert face2.shape[0] > 0, 'Cannot find a face in {}'.format(args.input2)
+    faces2 = detector.infer(img2)
+    assert faces2.shape[0] > 0, 'Cannot find a face in {}'.format(args.query)
 
     # Match
-    result = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1])
-    print('Result: {}.'.format('same identity' if result else 'different identities'))
+    scores = []
+    matches = []
+    for face in faces2:
+        result = recognizer.match(img1, faces1[0][:-1], img2, face[:-1])
+        scores.append(result[0])
+        matches.append(result[1])
+
+    # Draw results
+    image = visualize(img1, faces1, img2, faces2, matches, scores)
+
+    # Save results if save is true
+    if args.save:
+        print('Results saved to result.jpg\n')
+        cv.imwrite('result.jpg', image)
+
+    # Visualize results in a new window
+    if args.vis:
+        cv.namedWindow("SFace Demo", cv.WINDOW_AUTOSIZE)
+        cv.imshow("SFace Demo", image)
+        cv.waitKey(0)
```
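A side note on the `visualize()` helper added above: the target and query images go through the same resize-and-pad (letterbox) steps, just with different variables. Purely as an illustration of that shared logic, and not part of this commit (the helper name `letterbox` is made up here), the repeated block could be factored out along these lines:

```python
import cv2 as cv
import numpy as np

def letterbox(img, target_size=(512, 512)):
    # Hypothetical helper, not in this commit: scale `img` to fit inside
    # target_size (h, w) while keeping its aspect ratio, pad the rest of the
    # canvas with black, and return the canvas plus the scale factor and the
    # (left, top) offset needed to map detection boxes onto it.
    h, w = img.shape[:2]
    ratio = min(target_size[0] / h, target_size[1] / w)
    new_h, new_w = int(h * ratio), int(w * ratio)
    resized = cv.resize(img, (new_w, new_h), interpolation=cv.INTER_LINEAR)
    padded = np.zeros((target_size[0], target_size[1], 3), dtype=np.uint8)
    top = (target_size[0] - new_h) // 2
    left = (target_size[1] - new_w) // 2
    padded[top:top + new_h, left:left + new_w] = resized
    return padded, ratio, (left, top)
```

Boxes from the detector would then be scaled by `ratio` and shifted by `(left, top)` before drawing, which is exactly what the inline code in `visualize()` does with `ratio1`/`ratio2` and the per-image offsets.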

models/face_recognition_sface/sface.py

+2 −2

```diff
@@ -57,7 +57,7 @@ def match(self, image1, face1, image2, face2):
 
         if self._disType == 0: # COSINE
             cosine_score = self._model.match(feature1, feature2, self._disType)
-            return 1 if cosine_score >= self._threshold_cosine else 0
+            return cosine_score, 1 if cosine_score >= self._threshold_cosine else 0
         else: # NORM_L2
             norml2_distance = self._model.match(feature1, feature2, self._disType)
-            return 1 if norml2_distance <= self._threshold_norml2 else 0
+            return norml2_distance, 1 if norml2_distance <= self._threshold_norml2 else 0
```
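The change above turns `SFace.match()` from returning a bare 0/1 flag into returning a `(score, flag)` pair, which is what lets demo.py print the score next to each box. A minimal sketch of the new calling convention, reusing the variable names from demo.py (the recognizer, images, and detections are assumed to be set up as in that script):

```python
# Sketch only: `recognizer`, `img1`/`img2` and `faces1`/`faces2` are assumed
# to be prepared exactly as in demo.py above.
score, is_match = recognizer.match(img1, faces1[0][:-1], img2, faces2[0][:-1])
print('score: {:.4f}, same identity: {}'.format(score, bool(is_match)))
```

Note that the meaning of `score` depends on `--dis_type`: with cosine distance (type 0) a higher score means more similar faces, while with the norm-L2 distance (type 1) a lower value means more similar faces, as the two thresholds in the diff show.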
