Commit 337916d

Merge pull request #896 from vaibhavpnimkar/vaibhavpnimkar-patch-3
Adding face-recognition project via face-api-js
2 parents 0b5b6b0 + cfbd87e

5 files changed: +105 -0 lines changed

+5
@@ -0,0 +1,5 @@
# face-recognition-javascript-webcam-faceapi

Face recognition on webcam with JavaScript!

New binary file (287 KB)

+15
@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Face detection in the browser using JavaScript!</title>
  <script defer src="face-api.min.js"></script>
  <script defer src="script.js"></script>
  <link rel="stylesheet" href="style.css">
</head>
<body>
  <video id="video" width="600" height="450" autoplay></video>
</body>
</html>

+72
@@ -0,0 +1,72 @@
const video = document.getElementById("video");

// Load the face detector, landmark, and recognition models, then start the webcam.
Promise.all([
  faceapi.nets.ssdMobilenetv1.loadFromUri("/models"),
  faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
  faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
]).then(startWebcam);

function startWebcam() {
  navigator.mediaDevices
    .getUserMedia({
      video: true,
      audio: false,
    })
    .then((stream) => {
      video.srcObject = stream;
    })
    .catch((error) => {
      console.error(error);
    });
}

// Build one LabeledFaceDescriptors per known person from ./labels/<Name>/1.png and 2.png.
function getLabeledFaceDescriptions() {
  const labels = ["Felipe", "Messi", "Data"];
  return Promise.all(
    labels.map(async (label) => {
      const descriptions = [];
      for (let i = 1; i <= 2; i++) {
        const img = await faceapi.fetchImage(`./labels/${label}/${i}.png`);
        const detections = await faceapi
          .detectSingleFace(img)
          .withFaceLandmarks()
          .withFaceDescriptor();
        descriptions.push(detections.descriptor);
      }
      return new faceapi.LabeledFaceDescriptors(label, descriptions);
    })
  );
}

video.addEventListener("play", async () => {
  const labeledFaceDescriptors = await getLabeledFaceDescriptions();
  const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors);

  // Overlay a canvas on the video for drawing detection boxes and labels.
  const canvas = faceapi.createCanvasFromMedia(video);
  document.body.append(canvas);

  const displaySize = { width: video.width, height: video.height };
  faceapi.matchDimensions(canvas, displaySize);

  // Detect, match, and redraw every 100 ms.
  setInterval(async () => {
    const detections = await faceapi
      .detectAllFaces(video)
      .withFaceLandmarks()
      .withFaceDescriptors();

    const resizedDetections = faceapi.resizeResults(detections, displaySize);

    canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);

    const results = resizedDetections.map((d) => {
      return faceMatcher.findBestMatch(d.descriptor);
    });
    results.forEach((result, i) => {
      const box = resizedDetections[i].detection.box;
      const drawBox = new faceapi.draw.DrawBox(box, {
        label: result.toString(),
      });
      drawBox.draw(canvas);
    });
  }, 100);
});
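
The descriptor-building loop above assumes that every reference image under ./labels/<Name>/ contains a detectable face; if detectSingleFace() finds nothing, reading detections.descriptor throws. A more defensive variant is sketched below. This is an illustrative sketch only, not part of the commit: it assumes the same face-api.js globals loaded by index.html and the same ./labels/<Name>/<i>.png layout, and the name getLabeledFaceDescriptionsSafe is made up for the example.

// Illustrative sketch (not in the commit): like getLabeledFaceDescriptions(),
// but skips reference images in which no face is detected instead of throwing.
// Assumes face-api.min.js is loaded and ./labels/<Name>/<i>.png files exist.
async function getLabeledFaceDescriptionsSafe(
  labels = ["Felipe", "Messi", "Data"],
  imagesPerLabel = 2
) {
  const labeledDescriptors = [];
  for (const label of labels) {
    const descriptions = [];
    for (let i = 1; i <= imagesPerLabel; i++) {
      const img = await faceapi.fetchImage(`./labels/${label}/${i}.png`);
      const detection = await faceapi
        .detectSingleFace(img)
        .withFaceLandmarks()
        .withFaceDescriptor();
      if (detection) {
        descriptions.push(detection.descriptor);
      } else {
        console.warn(`No face found in ./labels/${label}/${i}.png, skipping`);
      }
    }
    if (descriptions.length > 0) {
      labeledDescriptors.push(new faceapi.LabeledFaceDescriptors(label, descriptions));
    }
  }
  return labeledDescriptors;
}

It could replace getLabeledFaceDescriptions() in the "play" handler above without any other changes.
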
+13
@@ -0,0 +1,13 @@
body {
  padding: 0;
  margin: 0;
  width: 100vw;
  height: 100vh;
  display: flex;
  align-items: center;
  justify-content: center;
}

canvas {
  position: absolute;
}
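
Because script.js loads the model weights from /models and fetches the label images with relative URLs, the page has to be served over HTTP rather than opened from the filesystem (navigator.mediaDevices.getUserMedia also requires a secure context, which http://localhost satisfies). Any static server works, for example running npx http-server from the project root. As a minimal sketch, assuming Node.js is installed and that index.html, script.js, style.css, face-api.min.js, models/ and labels/ all sit in one directory (the commit itself does not show that layout), a hypothetical serve.js could look like:

// serve.js - illustrative, development-only static server (not part of the commit).
const http = require("http");
const fs = require("fs");
const path = require("path");

// Content types for the files this project actually requests; the model
// weight shards fall through to the octet-stream default.
const MIME = {
  ".html": "text/html",
  ".js": "text/javascript",
  ".css": "text/css",
  ".png": "image/png",
  ".json": "application/json",
};

http
  .createServer((req, res) => {
    // Map "/" to index.html; everything else resolves relative to this
    // directory. No path sanitising - local development use only.
    const urlPath = req.url.split("?")[0];
    const file = path.join(__dirname, urlPath === "/" ? "index.html" : urlPath);
    fs.readFile(file, (err, data) => {
      if (err) {
        res.writeHead(404);
        res.end("Not found");
        return;
      }
      res.writeHead(200, {
        "Content-Type": MIME[path.extname(file)] || "application/octet-stream",
      });
      res.end(data);
    });
  })
  .listen(8080, () => console.log("Serving on http://localhost:8080"));

Running node serve.js and opening http://localhost:8080 then lets the browser ask for camera permission and load the models.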
