# Import Libraries
import sys

import cv2
import numpy as np


# The gender model architecture
# https://drive.google.com/open?id=1W_moLzMlGiELyPxWiYQJ9KFaXroQ_NFQ
GENDER_PROTO = 'weights/deploy_gender.prototxt'
# The gender model pre-trained weights
# https://drive.google.com/open?id=1AW3WduLk1haTVAxHOkVS_BEzel1WXQHP
GENDER_MODEL = 'weights/gender_net.caffemodel'
# Each Caffe model imposes the shape of its input image, and preprocessing
# such as mean subtraction is required to reduce the effect of illumination changes
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# The gender classes, in the order the model outputs them
GENDER_LIST = ['Male', 'Female']
# https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
FACE_PROTO = "weights/deploy.prototxt.txt"
# https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
FACE_MODEL = "weights/res10_300x300_ssd_iter_140000_fp16.caffemodel"

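
# Optional sanity check (an addition, not part of the original script): verify
# that the four model files referenced above were actually downloaded into the
# local "weights/" folder before the networks are loaded.
def check_weights_exist():
    """Raise a descriptive error if any of the model files is missing."""
    import os
    for path in (GENDER_PROTO, GENDER_MODEL, FACE_PROTO, FACE_MODEL):
        if not os.path.exists(path):
            raise FileNotFoundError(
                "Missing model file '{}'; download it from the link above.".format(path))
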
# Load the face detection Caffe model
face_net = cv2.dnn.readNetFromCaffe(FACE_PROTO, FACE_MODEL)
# Load the gender prediction model
gender_net = cv2.dnn.readNetFromCaffe(GENDER_PROTO, GENDER_MODEL)


def get_faces(frame, confidence_threshold=0.5):
    """Detect faces in a frame and return their bounding boxes."""
    # convert the frame into a blob to be ready for NN input
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))
    # set the image as input to the NN
    face_net.setInput(blob)
    # perform inference; squeeze the (1, 1, N, 7) output to (N, 7), one row per
    # detection: [image_id, label, confidence, left, top, right, bottom]
    output = np.squeeze(face_net.forward())
    # initialize the result list
    faces = []
    # Loop over the faces detected
    for i in range(output.shape[0]):
        confidence = output[i, 2]
        if confidence > confidence_threshold:
            # the coordinates are normalized to [0, 1]; scale them back to pixels
            box = output[i, 3:7] * np.array([frame.shape[1], frame.shape[0],
                                             frame.shape[1], frame.shape[0]])
            # convert to integers (np.int was removed in NumPy 1.24+, so use int)
            start_x, start_y, end_x, end_y = box.astype(int)
            # widen the box a little
            start_x, start_y, end_x, end_y = start_x - 10, start_y - 10, end_x + 10, end_y + 10
            # clamp the box to the frame boundaries
            start_x = max(start_x, 0)
            start_y = max(start_y, 0)
            end_x = min(end_x, frame.shape[1])
            end_y = min(end_y, frame.shape[0])
            # append to our list
            faces.append((start_x, start_y, end_x, end_y))
    return faces


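# A minimal usage sketch for get_faces(); "photo.jpg" is a placeholder path,
# substitute any image that contains faces.
def demo_get_faces(image_path="photo.jpg"):
    img = cv2.imread(image_path)
    for start_x, start_y, end_x, end_y in get_faces(img):
        print("Face at ({}, {}) -> ({}, {})".format(start_x, start_y, end_x, end_y))

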
def display_img(title, img):
    """Display an image on screen and keep it up until the user presses a key"""
    # Display image on screen
    cv2.imshow(title, img)
    # Maintain output until user presses a key
    cv2.waitKey(0)
    # Destroy windows when user presses a key
    cv2.destroyAllWindows()


def get_optimal_font_scale(text, width):
    """Determine the largest font scale whose rendered text still fits the given width"""
    # try scales from 5.9 down to 0.0 in steps of 0.1
    for scale in reversed(range(0, 60, 1)):
        text_size = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=scale / 10, thickness=1)
        new_width = text_size[0][0]
        if new_width <= width:
            return scale / 10
    return 1


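# Illustrative only: the returned scale shrinks as the target width narrows.
# The label text and widths below are arbitrary example values.
def demo_font_scale():
    label = "Male-99.99%"
    for width in (300, 150, 75):
        print(width, get_optimal_font_scale(label, width))

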
def predict_gender(input_path: str):
    """Predict the gender of the faces showing in the image"""
    # Initialize frame size
    # frame_width = 1280
    # frame_height = 720
    # Read the input image
    img = cv2.imread(input_path)
    # uncomment if you want to resize the image
    # img = cv2.resize(img, (frame_width, frame_height))
    # Take a copy of the initial image so the original stays untouched
    frame = img.copy()
    # detect the faces
    faces = get_faces(frame)
    # Loop over the faces detected
    for start_x, start_y, end_x, end_y in faces:
        # crop the face region; boxes and labels are drawn on `frame`, not on the crop
        face_img = frame[start_y: end_y, start_x: end_x]
        # image --> input image to preprocess before passing it through the dnn for classification
        # scalefactor = factor applied after mean subtraction (1.0 -> no scaling)
        # size = the spatial size the CNN expects; common choices are 224x224,
        #        227x227 or 299x299, and this gender model expects 227x227
        # mean = mean values to be subtracted from every channel of the image
        # swapRB = whether to swap the R and B channels; the mean values above are
        #          in BGR order (OpenCV's default), so it stays False
        blob = cv2.dnn.blobFromImage(image=face_img, scalefactor=1.0, size=(
            227, 227), mean=MODEL_MEAN_VALUES, swapRB=False, crop=False)
        # Predict gender
        gender_net.setInput(blob)
        gender_preds = gender_net.forward()
        i = gender_preds[0].argmax()
        gender = GENDER_LIST[i]
        gender_confidence_score = gender_preds[0][i]
        # Draw the box
        label = "{}-{:.2f}%".format(gender, gender_confidence_score * 100)
        print(label)
        # keep the label inside the frame
        yPos = max(start_y - 15, 15)
        # get the font scale for this box width
        optimal_font_scale = get_optimal_font_scale(label, (end_x - start_x) + 25)
        box_color = (255, 0, 0) if gender == "Male" else (147, 20, 255)
        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), box_color, 2)
        # Label processed image
        cv2.putText(frame, label, (start_x, yPos),
                    cv2.FONT_HERSHEY_SIMPLEX, optimal_font_scale, box_color, 2)

    # Display processed image
    display_img("Gender Estimator", frame)
    # Save the processed image
    cv2.imwrite("output.jpg", frame)
    # Cleanup
    cv2.destroyAllWindows()


if __name__ == '__main__':
    # Parse the image path passed on the command line (sys is imported at the top)
    predict_gender(sys.argv[1])
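
# Example invocation, assuming this script is saved as predict_gender.py
# (the image name is just a placeholder):
#   python predict_gender.py family_photo.jpg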