Integrating Face Recognition Authentication Into a Social App With the Face API

This article walks through building a face recognition multi-factor authentication system for a social app built with React and Node.js, using Face-API.js. It covers frontend camera integration, face descriptor extraction, and backend verification logic.

Social applications have evolved considerably over the years, and with that evolution comes a pressing need for secure ways to verify user identity. Integrating multi-factor authentication into an app is essential for strengthening its integrity. In a social app, such authentication mechanisms prevent unwanted access to either party's personal information.

Goals

This article guides you through building a multi-factor authentication system for a Stream.io-based chat application, using face ID verification to ensure that only authorized users gain access. I will illustrate each step with relevant code samples.

Prerequisites

  • Intermediate knowledge of Node.js/Express (for the backend)
  • Working knowledge of React (for the frontend)
  • A Stream.io API key

Introduction to the Face API Tool

Face-API.js is a facial recognition package designed to integrate with JavaScript-powered applications. It is built on top of TensorFlow.js and offers a broad range of face detection and recognition capabilities backed by pre-trained machine learning models.
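
Before diving into the app, here is a minimal sketch of what working with face-api.js looks like: load the pre-trained models once, then detect a face and compute its descriptor. The /models path and the input element are placeholders; the API calls themselves are the same ones used in the components later in this article.

import * as faceapi from 'face-api.js';

// Load the detector, landmark, and recognition models once at startup.
// The weight files are assumed to be served from /models.
const describeFace = async (input) => {
  await faceapi.nets.tinyFaceDetector.loadFromUri('/models');
  await faceapi.nets.faceLandmark68Net.loadFromUri('/models');
  await faceapi.nets.faceRecognitionNet.loadFromUri('/models');

  // Detect one face and compute its 128-dimensional descriptor,
  // the vector this article stores and compares for authentication.
  return faceapi
    .detectSingleFace(input, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks()
    .withFaceDescriptor();
};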

Project Setup

Frontend

Use Vite to scaffold the frontend:

npm create vite@latest

After creating the React app, install face-api.js:

npm i face-api.js

Then install Stream's chat SDKs:

npm i stream-chat stream-chat-react
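
Building the chat UI itself is outside the scope of this article's snippets, but for orientation, connecting an authenticated user to Stream looks roughly like the sketch below. The API key is a placeholder, and the user token must be generated on your backend with your Stream API secret.

import { StreamChat } from 'stream-chat';

// getInstance returns a singleton client for the given API key.
const client = StreamChat.getInstance('YOUR_STREAM_API_KEY'); // placeholder

// userToken is minted server-side with your Stream API secret.
const connectToChat = async (userId, userToken) => {
  await client.connectUser({ id: userId }, userToken);
};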

Backend

The backend stores user details and enforces authentication before a user can access the chat application. MongoDB is the database of choice, and we will use Express.js as the backend API framework.
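
The article does not show the full backend, so here is a hedged sketch of what the Mongoose user model might look like. The field names match what the registration form below posts; the schema itself is an assumption.

const mongoose = require('mongoose');

const userSchema = new mongoose.Schema({
  username: { type: String, required: true, unique: true },
  email: { type: String, required: true, unique: true },
  FullName: String,
  password: String, // store a bcrypt hash in practice, never plaintext
  faceDescriptor: [Number], // 128-dimensional face-api.js descriptor
});

module.exports = mongoose.model('User', userSchema);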

Demo Project: Integrating Face Recognition and Authentication

In this section, we set up the frontend authentication pages step by step. On the registration page, users sign up with their details: username, email, full name, and password. They also take a snapshot of their face, and the Face API is called to detect the face in the image.

After that, the face descriptor step runs on the captured image, generating a unique descriptor vector for the user's face from the underlying machine learning model. Upon successful registration, these values are stored securely in a MongoDB database through the Express.js backend.

The app couples password-based authentication with a face authentication mechanism to form a multi-factor authentication system.
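
One way to enforce both factors, assuming the express-session setup used by the backend handler later in this article, is a small gate middleware on the chat routes. This is a hypothetical sketch; the route path and error messages are illustrative.

// Require a password login (Passport session) and a completed face check.
const requireMfa = (req, res, next) => {
  if (!req.session?.passport?.user) {
    return res.status(401).json({ err: 'Password login required' });
  }
  if (!req.session.mfa) {
    return res.status(401).json({ err: 'Face authentication required' });
  }
  next();
};

app.use('/v1/chat', requireMfa); // hypothetical chat route prefix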

Registration Page Code

import React, { useState, useRef, useEffect } from 'react';
import * as faceapi from 'face-api.js';
import { useNavigate } from 'react-router-dom';
import axios from 'axios';

const Register = () => {
  const navigate = useNavigate();

  const userRef = useRef();
  const passwordRef = useRef();
  const emailRef = useRef();
  const FullRef = useRef();
  const snapshotRef = useRef(null);
  const videoRef = useRef(null);
  const canvasRef = useRef(null);

  const [modelIsLoaded, setModelIsLoaded] = useState(false);
  const [detections, setDetections] = useState([]);
  const [error, setError] = useState(false);
  const [snapshot, setSnapshot] = useState(null);
  const [cameraActive, setCameraActive] = useState(true);
  const [submitDisabled, setSubmitDisabled] = useState(true);
  const [descriptionValue, setDescriptionValue] = useState(null);
  const [faceDetected, setFaceDetected] = useState(false);

  useEffect(() => {
    const loadModels = async () => {
      await faceapi.nets.tinyFaceDetector.loadFromUri('/models');
      await faceapi.nets.faceLandmark68Net.loadFromUri('/models');
      await faceapi.nets.faceRecognitionNet.loadFromUri('/models');
      await faceapi.nets.faceExpressionNet.loadFromUri('/models');
      setModelIsLoaded(true);
      startVideo();
    };

    loadModels();
  }, []);

  const RegSubmit = async (e) => {
    e.preventDefault();
    console.log("hello");

    try {
      const res = await axios.post('http://localhost:5000/v1/users', {
        username: userRef.current.value,
        email: emailRef.current.value,
        FullName: FullRef.current.value,
        password: passwordRef.current.value,
        faceDescriptor: descriptionValue
      });

      console.log(res.data);
      setError(false);
      navigate("/login");
      console.log("help");
    } catch (err) {
      console.log(err);
      setError(true);
    }
  };

  const startVideo = () => {
    navigator.mediaDevices
      .getUserMedia({ video: true })
      .then((stream) => {
        videoRef.current.srcObject = stream;
      })
      .catch((err) => console.error("Error accessing webcam: ", err));
  };

  const stopVid = () => {
    const stream = videoRef?.current?.srcObject;
    if (stream) {
      stream.getTracks().forEach((track) => track.stop());
      videoRef.current.srcObject = null;
      setCameraActive(false);
    }
  };

  const captureSnapshot = async () => {
    const canvas = snapshotRef.current;
    const context = canvas.getContext('2d');
    context.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height);
    const dataUrl = canvas.toDataURL('image/jpeg');
    setSnapshot(dataUrl);

    const detection = await faceapi
      .detectSingleFace(canvas, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptor();

    if (detection) {
      const newDescriptor = detection.descriptor;
      setDescriptionValue(newDescriptor);
      console.log(newDescriptor);
      setSubmitDisabled(false);
      stopVid();

      // Matching against a stored descriptor happens server-side during
      // face authentication; registration only needs to capture the descriptor.
    } else {
      console.error("No face detected in snapshot");
    }
  };

  const handleVideoPlay = async () => {
    const video = videoRef.current;
    const canvas = canvasRef.current;
    const displaySize = { width: video.width, height: video.height };
    faceapi.matchDimensions(canvas, displaySize);

    setInterval(async () => {
      if (!cameraActive) return;

      const detections = await faceapi.detectAllFaces(
        video,
        new faceapi.TinyFaceDetectorOptions()
      );

      const resizedDetections = faceapi.resizeResults(detections, displaySize);
      canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
      faceapi.draw.drawDetections(canvas, resizedDetections);

      const detected = detections.length > 0;
      if (detected && !faceDetected) {
        captureSnapshot();
      }

      setFaceDetected(detected);
    }, 100);
  };

  return (
    <div className="flex flex-col w-full h-screen justify-center">
      <div className="flex flex-col">
        <form className="flex flex-col mb-2 w-full" onSubmit={RegSubmit}>
          <h3 className="flex flex-col mx-auto mb-5">Registration Page</h3>

          <div className="flex flex-col mb-2 w-[50%] mx-auto items-center">
            <input
              type="text"
              placeholder="Email"
              className="w-full rounded-2xl h-[50px] border-2 p-2 mb-2 border-gray-900"
              required
              ref={emailRef}
            />
            <input
              type="text"
              placeholder="Username"
              className="w-full rounded-2xl h-[50px] border-2 p-2 mb-2 border-gray-900"
              required
              ref={userRef}
            />
            <input
              type="text"
              placeholder="Full Name"
              className="w-full rounded-2xl h-[50px] border-2 p-2 mb-2 border-gray-900"
              required
              ref={FullRef}
            />
            <input
              type="password"
              placeholder="Password"
              className="w-full rounded-2xl h-[50px] border-2 p-2 mb-2 border-gray-900"
              required
              ref={passwordRef}
            />

            <div>
              {!modelIsLoaded && cameraActive && !descriptionValue ? (
                <p>Loading</p>
              ) : (
                <>
                  {!descriptionValue && (
                    <>
                      <video
                        ref={videoRef}
                        width="200"
                        height="160"
                        onPlay={handleVideoPlay}
                        autoPlay
                        muted
                      />
                      <canvas
                        ref={canvasRef}
                        width="200"
                        height="160"
                        style={{ position: 'absolute', top: 0, left: 0 }}
                      />
                      <p>
                        {faceDetected ? (
                          <span style={{ color: 'green' }}>Face Detected</span>
                        ) : (
                          <span style={{ color: 'red' }}>No Face Detected</span>
                        )}
                      </p>
                      <canvas
                        ref={snapshotRef}
                        width="480"
                        height="360"
                        style={{ display: 'none' }}
                      />
                    </>
                  )}
                </>
              )}

              {snapshot && (
                <div style={{ marginTop: '20px' }}>
                  <h4>Face Snapshot:</h4>
                  <img
                    src={snapshot}
                    alt="Face Snapshot"
                    width="200"
                    height="160"
                  />
                </div>
              )}
            </div>

            <div className="mt-2">
              <button type="button" onClick={stopVid}>
                Stop Video
              </button>
            </div>

            <button
              disabled={submitDisabled}
              className="mx-auto mt-4 rounded-2xl cursor-pointer text-white bg-primary w-[80%] lg:w-[50%] h-[40px] text-center items-center justify-center"
              type="submit"
            >
              Register
            </button>
          </div>

          <div className="flex flex-col mt-1 w-full">
            <p className="flex justify-center">
              Registered previously?&nbsp;
              <a href="/login" className="text-blue-600 underline">
                Login
              </a>
            </p>
          </div>

          {error && (
            <p className="text-red-600 text-center mt-2">
              Error while registering, try again
            </p>
          )}
        </form>
      </div>
    </div>
  );
};

export default Register;
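
The form above posts to a /v1/users endpoint whose handler the article does not show. A sketch of what it might look like follows; the bcrypt hashing and model path are assumptions.

const bcrypt = require('bcrypt');
const User = require('../models/User'); // hypothetical path

const createUser = async (req, res) => {
  try {
    const { username, email, FullName, password, faceDescriptor } = req.body;

    // Never store the raw password; hash it first.
    const hashed = await bcrypt.hash(password, 10);

    // axios serializes the Float32Array descriptor as an index-keyed
    // object, so flatten it back into a plain number array for storage.
    const user = await User.create({
      username,
      email,
      FullName,
      password: hashed,
      faceDescriptor: Object.values(faceDescriptor || {}),
    });

    res.status(201).json({ id: user._id, username: user.username });
  } catch (err) {
    res.status(500).json({ err: 'Registration failed' });
  }
};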

Face Authentication Page

import axios from 'axios';
import React, { useRef, useEffect, useState } from 'react';
import * as faceapi from 'face-api.js';
import { useNavigate } from 'react-router-dom';

const FaceAuth = () => {
  const navigate = useNavigate();

  const videoRef = useRef(null);
  const canvasRef = useRef(null);
  const snapshotRef = useRef(null);

  const [cameraActive, setCameraActive] = useState(true);
  const [snapshot, setSnapshot] = useState(null);
  const [descriptionValue, setDescriptionValue] = useState(null);
  const [faceDetected, setFaceDetected] = useState(false);

  useEffect(() => {
    const loadModels = async () => {
      await faceapi.nets.tinyFaceDetector.loadFromUri('/models');
      await faceapi.nets.faceLandmark68Net.loadFromUri('/models');
      await faceapi.nets.faceRecognitionNet.loadFromUri('/models');
      await faceapi.nets.faceExpressionNet.loadFromUri('/models');
    };

    loadModels();
  }, []);

  const handleVideoPlay = async () => {
    const video = videoRef.current;
    const canvas = canvasRef.current;

    const displaySize = { width: video.width, height: video.height };
    faceapi.matchDimensions(canvas, displaySize);

    setInterval(async () => {
      if (!cameraActive) return;

      const detections = await faceapi.detectAllFaces(
        video,
        new faceapi.TinyFaceDetectorOptions()
      );

      const resizedDetections = faceapi.resizeResults(detections, displaySize);
      canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
      faceapi.draw.drawDetections(canvas, resizedDetections);

      const detected = detections.length > 0;
      if (detected && !faceDetected) {
        captureSnapshot();
      }

      setFaceDetected(detected);
    }, 100);
  };

  const startVideo = () => {
    navigator.mediaDevices
      .getUserMedia({ video: true })
      .then((stream) => {
        videoRef.current.srcObject = stream;
      })
      .catch((err) => console.error("Error accessing webcam: ", err));
  };

  const stopVid = () => {
    const stream = videoRef.current.srcObject;
    if (stream) {
      stream.getTracks().forEach((track) => track.stop());
      videoRef.current.srcObject = null;
      setCameraActive(false);
    }
  };

  const deleteImage = () => {
    setSnapshot(null);
    setDescriptionValue(null);
    setFaceDetected(false);
    setCameraActive(true);
    startVideo();
  };

  const captureSnapshot = async () => {
    const canvas = snapshotRef.current;
    const context = canvas.getContext('2d');
    context.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height);

    const dataUrl = canvas.toDataURL('image/jpeg');
    setSnapshot(dataUrl);
    stopVid();

    const detection = await faceapi
      .detectSingleFace(canvas, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceDescriptor();

    if (detection) {
      const newDescriptor = detection.descriptor;
      setDescriptionValue(newDescriptor);
      console.log(newDescriptor);
    }
  };

  const FaceAuthenticate = async (e) => {
    e.preventDefault();

    try {
      const res = await axios.post(
        'http://localhost:5000/v1/auth/face-auth',
        { faceDescriptor: descriptionValue },
        { withCredentials: true }
      );

      console.log(res?.data);
      navigate('/chat');
    } catch (err) {
      console.log(err);
    }
  };

  return (
    <>
      <div className="flex w-full h-screen flex-col justify-center">
        <p className="flex flex-col mx-auto items-center text-lg font-semibold mb-3">
          Take a snapshot to confirm your identity
        </p>
        <p className="text-center mb-4">Ensure that the picture is taken in a bright area</p>

        <button
          onClick={startVideo}
          className="flex w-[30%] mx-auto text-center items-center justify-center mb-5 h-[40px] bg-blue-600 rounded-md text-white"
        >
          Turn on Webcam
        </button>

        {!snapshot ? (
          <>
            <video
              className="flex mx-auto items-center rounded-md"
              ref={videoRef}
              width="240"
              height="180"
              onPlay={handleVideoPlay}
              autoPlay
              muted
            />
            <canvas
              ref={canvasRef}
              width="240"
              height="180"
              style={{ position: 'absolute', top: 0, left: 0 }}
            />
            <canvas
              ref={snapshotRef}
              width="240"
              height="180"
              style={{ display: 'none' }}
            />
            <button onClick={captureSnapshot} className="mt-4 mx-auto block text-sm text-blue-600 underline">
              Take a snapshot
            </button>
          </>
        ) : (
          <div className="flex w-full justify-center">
            <img
              src={snapshot}
              className="rounded-lg"
              width="240"
              height="180"
              alt="Face Snapshot"
            />
          </div>
        )}

        <div className="flex flex-row w-full justify-evenly mt-5">
          <button
            onClick={deleteImage}
            className="bg-purple-500 text-white p-2 h-[35px] rounded-lg"
          >
            Delete Image
          </button>
          <button
            onClick={FaceAuthenticate}
            className="bg-purple-500 text-white p-2 h-[35px] rounded-lg"
          >
            Upload Image
          </button>
        </div>
      </div>
    </>
  );
};

export default FaceAuth;
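
One subtlety in FaceAuthenticate above: axios serializes the Float32Array descriptor as an object keyed by index, which is why the backend handler below converts it back with Object.values. If you prefer to send a plain number array explicitly, a small change to the request body does it:

// Optional: convert the Float32Array to a plain array before posting,
// so the JSON payload is an ordinary number array on the server.
await axios.post(
  'http://localhost:5000/v1/auth/face-auth',
  { faceDescriptor: Array.from(descriptionValue) },
  { withCredentials: true }
);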

Backend Face Authentication Function

const faceapi = require('face-api.js'); // provides euclideanDistance
const User = require('../models/User'); // adjust to your user model path

const faceAuth = async (req, res) => {
  try {
    // The user id was placed on the session by the password-login step.
    const id = req.session.passport?.user;

    const { faceDescriptor } = req.body;
    const user = await User.findById(id);

    if (user == null) {
      return res.status(400).json({ err: "User not found" });
    }

    const isMatchingFace = (descriptor1, descriptor2, threshold = 0.6) => {
      // Convert the stored descriptor (object) to a Float32Array
      if (!(descriptor1 instanceof Float32Array)) {
        descriptor1 = new Float32Array(Object.values(descriptor1));
      }

      if (!(descriptor2 instanceof Float32Array)) {
        descriptor2 = new Float32Array(Object.values(descriptor2));
      }

      const distance = faceapi.euclideanDistance(descriptor1, descriptor2);
      console.log("Euclidean Distance:", distance);

      return distance < threshold;
    };

    if (isMatchingFace(faceDescriptor, user.faceDescriptor)) {
      console.log("Face match successful");
      req.session.mfa = true;

      return res.status(200).json({
        msg: "User authentication was successful. Proceed to the chat app.",
      });
    } else {
      return res.status(401).json({ msg: "Face does not match. Access denied." });
    }
  } catch (err) {
    console.log(err);
    res.status(500).json({
      err: "User face couldn't be authenticated. Please try again later",
    });
  }
};
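
For completeness, here is a sketch of how this handler might be wired up in the Express app. The route path matches what the frontend calls; the CORS origin, session secret, and middleware choices are assumptions.

const express = require('express');
const session = require('express-session');
const cors = require('cors');

const app = express();

// The frontend sends withCredentials, so CORS must allow credentials
// from the dev origin (Vite defaults to port 5173).
app.use(cors({ origin: 'http://localhost:5173', credentials: true }));
app.use(express.json());
app.use(
  session({
    secret: process.env.SESSION_SECRET,
    resave: false,
    saveUninitialized: false,
  })
);

app.post('/v1/auth/face-auth', faceAuth);
app.listen(5000);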

Additional Notes and Tips

The overarching goal of this effort is a more scalable and secure approach to verifying users. The matching threshold can easily be modified and tuned to improve the app's accuracy. Alternatively, AWS Rekognition can effectively replace the Face API tool, providing an efficient cloud-hosted model.
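
For instance, rather than hard-coding 0.6, the threshold used by faceAuth could be read from configuration so it can be tuned without a redeploy. The environment variable name here is an assumption.

// Lower values are stricter: fewer false accepts, more false rejects.
const FACE_MATCH_THRESHOLD = parseFloat(process.env.FACE_MATCH_THRESHOLD || '0.6');

if (isMatchingFace(faceDescriptor, user.faceDescriptor, FACE_MATCH_THRESHOLD)) {
  // proceed with the session flag and success response as above
}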

Conclusion

By now, we have walked through building an effective multi-factor face authentication flow that keeps intruders out of our chat application, ensuring and prioritizing the highest level of user privacy.
