Some time ago, I learned the basics of canvas and wrote a clock and an image filter. While writing the filter, I wondered whether I could play video through canvas so that others could not get at the video source, so I implemented video playback that way. Later I accessed the camera, drew its stream onto a canvas, and applied the filter there. Since applying a filter means reading every pixel, I asked myself: could I add face recognition too?

The focus of this article is face recognition. Two libraries are used, face-api.js and tracking.js; face-api.js is the one mainly introduced here.

In the code

There are important comments in the code. How the camera is output to the canvas is not the focus of this article; the next article will cover real-time face detection on the camera video.

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <style>
        video {
            width: 500px;
            margin: auto;
            background-color: aquamarine;
            display: block;
        }

        .imgcontainer {
            display: inline-block;
            position: relative;
        }

        #rect {}

        /* Rectangle overlay used to frame a detected face on the snapshot. */
        .rectkuangkuang {
            position: absolute;
            border: 1px solid lightblue;
            width: 0;
            height: 0;
        }
    </style>
    <script src="https://cdn.bootcss.com/jquery/3.4.1/jquery.min.js"></script>
    <script src="https://cdn.bootcss.com/tracking.js/1.1.3/tracking-min.js"></script>
    <!-- NOTE(review): original said "./face-pi.js" — assumed typo for the face-api.js bundle; confirm the local filename. -->
    <script src="./face-api.js"></script>
</head>

<body>

    <!-- Live camera preview; the stream is also drawn onto #canvas continuously. -->
    <video id="video"></video>

    <canvas width="500" height="500" id="canvas"></canvas>
    <canvas width="500" height="500" id="canvas1"></canvas>
    <div class="imgcontainer" style="display: none;">
        <!-- Snapshot target: the captured frame is loaded here for detection. -->
        <img style="width:500px; height:500px" id="image" src="" />
        <div id="rect" class="rectkuangkuang"></div>
    </div>

    <!-- Button labels were lost in the original extraction; restored here. -->
    <button class="photo">Take photo</button>
    <button class="identify">Identify</button>
    <img src="./timg111.jpg" alt="" id="testimg" style="width: 500px; display: none;">

    <script>

        $(function () {
            var video = document.getElementById('video');

            const canvas = document.getElementById('canvas');
            const canvas1 = document.getElementById('canvas1');
            const img = document.getElementById('image');
            const rectKuang = document.getElementById('rect');

            const testimg = document.getElementById('testimg')

            const context = canvas.getContext('2d');
            const context1 = canvas1.getContext('2d');

            if(the navigator. MediaDevices. GetUserMedia) {/ / the latest standard API navigator. MediaDevices. GetUserMedia ({video: {width: 1000, height: 1000 } }).then(success).catch(error); }else if(the navigator. WebkitGetUserMedia) {/ / its core the navigator browser. WebkitGetUserMedia ({video: {width: 1000, height: 1000 } }, success, error) }else if(the navigator. MozGetUserMedia) {/ / firfox navigator browser. MozGetUserMedia ({video: {width: 1000, height: 1000 } }, success, error); }else if (navigator.getUserMedia) {
                //旧版API
                navigator.getUserMedia({ video: { width: 1000, height: 1000 } }, success, error);
            }

            functionSuccess (stream) {// Compatible with WebKit core browser //letCompatibleURL = window.URL || window.webkitURL; // Set the video stream as the source of the video element // console.log(stream); //video.src = CompatibleURL.createObjectURL(stream); video.srcObject = stream; video.play(); timer =setInterval(function() { context.drawImage(video, 0, (400 - 288) / 2, video.offsetWidth, video.offsetHeight); }, 0); }functionError (error) {console.log(' Failed to access user media device${error.name}.${error.message}`); } // take a photo $('.photo').click(function () {
                // context1.drawImage(testimg, 0, 0, testimg.offsetWidth, testimg.offsetHeight)
                context1.drawImage(video, 0, (400 - 288) / 2, video.offsetWidth, video.offsetHeight)
                var dateUrl = canvas.toDataURL('image/png');
                img.src = dateUrl;
            })


            async function run() {// face load module is async, so use await, all call complete // use MTCNN to detect face, relatively fast, loadMtcnnModel()'./weights') the console. The log (11111) / / loading of facial recognition mark await faceapi. LoadFaceLandmarkModel ('./weights') the console. The log (22222) / / loaded face recognition module await faceapi. LoadFaceRecognitionModel ('./weights') the console. The log (33333) / / loading face detection The distance detection / / await faceapi. LoadTinyFaceDetectorModel ('./weights'). / / the console log (4444) / / identify facial algorithm module await faceapi. LoadSsdMobilenetv1Model ('./weights') the console. The log (5555) / / identify expression modules / / await faceapi. LoadFaceExpressionModel ('./weights') // console.log(6666) } run(); // const displaySize = {width: testimg. Width, height: testimg. Height} const displaySize = {width: testimg. 500, height: (500) } asyncfunction recognize() {/ / for contour map and facial expressions const detectionsWithExpressions = await faceapi / / testing all face in the photo. The detectAllFaces (img) / / for facial identification tags .withFacelandMarks () // Automatic recognition of facial expressions //. WithFaceExpressions () // Automatic recognition of size const resizedResults = Faceapi. ResizeResults (detectionsWithExpressions displaySize) / / contour faceapi. The draw. DrawDetections (canvas1, ResizedResults) / / draw the facial markers in 68 faceapi. The draw. DrawFaceLandmarks (canvas1, ResizedResults) / / const minProbability = 0.05 / / painting expression / / faceapi. The draw. DrawFaceExpressions (canvas1, resizedResults. minProbability) } $('.identify').click(function () {
                recognize();
            })
        })

    </script>
</body>

</html>
Copy the code