Preface

Face recognition is becoming more and more common, so today we'll walk through building a face recognition component of our own.

Resources

  • element UI
  • Vue.js
  • tracking-min.js
  • face-min.js

Source code

Since some computers have cameras and some don't, we need to package this component to handle both scenarios.

If a camera is present, we show the live capture view (which relies on the portrait recognition component below); without a camera, we fall back to a plain image upload. To detect whether a camera exists, we can use this method:

// Recommended: run this check in created()
    var deviceList = [];
    navigator.mediaDevices
      .enumerateDevices()
      .then(devices => {
        devices.forEach(device => {
          deviceList.push(device.kind);
        });
        if (deviceList.indexOf("videoinput") === -1) {
          console.info("No camera.");
          return false;
        } else {
          console.info("Camera found.");
          this.videoinput = true; // A piece of state I defined; the initial value is false
        }
      })
      .catch(function(err) {
        alert(err.name + ": " + err.message);
      });
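If your target browsers support async/await, the same check condenses into a small helper. Here is a minimal sketch (hasCamera is a name I made up for illustration); it also guards against browsers such as Internet Explorer, where navigator.mediaDevices is undefined:

// A minimal sketch, assuming async/await support; hasCamera is a hypothetical helper
async function hasCamera() {
  // navigator.mediaDevices is undefined in IE and in insecure (non-HTTPS) contexts
  if (!navigator.mediaDevices || !navigator.mediaDevices.enumerateDevices) {
    return false;
  }
  const devices = await navigator.mediaDevices.enumerateDevices();
  return devices.some(device => device.kind === "videoinput");
}

In created() you could then write this.videoinput = await hasCamera(); (with the hook declared async).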


Complete code:

index.vue

<template>
  <!-- Face recognition -->
  <div>
    <el-dialog
      :visible.sync="openFaceView"
      width="581px"
      :show-close="false"
      v-loading="faceloading"
      element-loading-text="Face recognition in progress."
    >
      <div class="ovf" style="padding:20px;">
        <el-upload
          v-if=! "" videoinput"
          class="upload-demo"
          action
          multiple
          :limit="1"
          :file-list="fileList"
          :on-change="handleChange"
          :on-exceed="handleExceed"
          :before-remove="beforeRemove"
          :auto-upload="false"
        >
          <el-button size="small" type="primary">Click upload portrait picture</el-button>
        </el-upload>
        <div v-if="videoinput">
          <el-button size="small" type="primary" @click="checkFace">Click for face recognition</el-button>
          <div slot="tip" class="el-upload__tip">This function can be performed only on a non-Internet Explorer browser</div>
        </div>
        <div class="dialog-footer">
          <el-button @click="openFaceView = false">Take away</el-button>
          <el-button type="primary" @click="postFace()">determine</el-button>
        </div>
      </div>
    </el-dialog>
    <el-dialog :visible.sync="checkFaceView" width="581px" :show-close="false">
      <Face :faceView="checkFaceView" @canvasToImage="getImgFile"></Face>
    </el-dialog>
  </div>
</template>
<script>
import { verifyFace } from ".. /.. /request/api"; // Introduce face recognition interface
import Face from "./Face"; // Introduce face recognition component
export default {
  name: "MyClassRoom".data() {
    return {
    	openFaceView:true.faceloading: false.videoinput: false.fileList: [].face: "",}},components: {
    Face
  },

  methods: {
    // Open the face capture dialog
    checkFace() {
      this.checkFaceView = true;
    },
    // Restrict the upload of photos
    handleExceed() {
      this.$message.warning({
        message: "Do not re-upload!".offset: 380.duration: 1000
      });
    },
    // Remove the portrait image
    beforeRemove(file) {
      return this.$confirm(`Confirm removing ${file.name}?`);
    },
    // Upload the file
    handleChange(file) {
      this.face = file.raw;
    },
    // Get the captured image
    getImgFile(d) {
      this.face = d;
      this.checkFaceView = false;
    },
    // Face recognition completed
    postFace() {
      this.faceloading = true;
      this.checkFaceView = false;
      let formData = new FormData();
      formData.append("face".this.face);
      /* Face recognition interface, upload the photos to the background, I use the wrapper axios here. Headers = {' content-type ':'multipart/form-data'} */
      verifyFace(formData, { isUpload: true }) 
        .then(res= > {
          console.log(res);
          if (res.code == 0) {
            this.faceloading = false;
            this.$message.success({
              message: "Face recognition success!".offset: 380.duration: 1000
            });
          } else {
            this.$message.error({
              message: "Face recognition failure!".offset: 380.duration: 1000
            });
            this.faceloading = false;
          }
        })
        .catch(err= > {
          console.log(err); }); }},created() {
    // Check whether a camera exists
    var deviceList = [];
    navigator.mediaDevices
      .enumerateDevices()
      .then(devices => {
        devices.forEach(device => {
          deviceList.push(device.kind);
        });
        if (deviceList.indexOf("videoinput") === -1) {
          console.info("No camera.");
          return false;
        } else {
          console.info("Camera found.");
          this.videoinput = true;
        }
      })
      .catch(function(err) {
        alert(err.name + ": " + err.message);
      });
  }
};
</script>
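For context, verifyFace in request/api is a wrapped axios call that switches the request to multipart/form-data when isUpload is set, as the comment in postFace notes. A minimal sketch under that assumption (the endpoint URL /api/face/verify is hypothetical; your wrapper will differ):

// request/api.js - a sketch, not the author's actual wrapper
import axios from "axios";

export function verifyFace(formData, { isUpload } = {}) {
  return axios
    .post("/api/face/verify", formData, {
      // multipart/form-data so the File lands on the backend as a form field
      headers: isUpload ? { "Content-Type": "multipart/form-data" } : {}
    })
    .then(res => res.data); // the component checks res.code on this payload
}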

Face.vue

<!-- Face recognition -->
<template>
  <div class="face">
    <div class="container">
      <video id="video" preload autoplay loop muted></video>
      <canvas id="canvas" width="581" height="436"></canvas>
      <canvas id="canvas1" width="581" height="436"></canvas>
    </div>
    <div class="btns">
      <el-button type="primary" @click="start">Turn on the camera</el-button>
      <el-button type="primary" @click="screenshot">Manual screenshots</el-button>
      <el-button type="primary" @click="keepImg">Save the picture</el-button>
      <p class="tips">1, first open the camera; 2. Put the portrait in the box for automatic interception, or click manual interception. Captured images will appear in the unsaved images bar below; 3, finally click Save, below you can preview the saved picture.</p>
    </div>
    <div class="imgs" v-show="imgView">
      <p>Unsaved picture</p>
      <canvas id="shortCut" width="140" height="140"></canvas>
      <p>Saved picture</p>
      <div id="img"></div>
    </div>
  </div>
</template>
<script>
import ".. /.. /assets/js/tracking-min.js"; // Need to import (download link at end of article)
import ".. /.. /assets/js/face-min.js"; // // needs to be introduced (download link at end of article)
export default {
  name: "testTracking".props: ["faceView"].data() {
    return {
      saveArray: {},
      imgView: false
    };
  },
  methods: {
    // Open the camera
    start() {
      var saveArray = {};
      var canvas = document.getElementById("canvas");
      var context = canvas.getContext("2d");
      // eslint-disable-next-line no-undef
      var tracker = new window.tracking.ObjectTracker("face");
      tracker.setInitialScale(4);
      tracker.setStepSize(2);
      tracker.setEdgesDensity(0.1);
      // eslint-disable-next-line no-undef
      this.trackerTask = window.tracking.track("#video", tracker, {
        camera: true
      });
      tracker.on("track".function(event) {
        context.clearRect(0.0, canvas.width, canvas.height);
        event.data.forEach(function(rect) {
          context.strokeStyle = "#fff";
          context.strokeRect(rect.x, rect.y, rect.width, rect.height);
          context.fillStyle = "#fff";
          saveArray.x = rect.x;
          saveArray.y = rect.y;
          saveArray.width = rect.width;
          saveArray.height = rect.height;
        });
      });
      var canvas1 = document.getElementById("canvas1");
      var context1 = canvas1.getContext("2d");
      context1.strokeStyle = "#69fff1";
      context1.moveTo(190, 118);
      context1.lineTo(390, 118);
      context1.lineTo(390, 318);
      context1.lineTo(190, 318);
      context1.lineTo(190, 118);
      context1.stroke();
      // Every 2 seconds: if the tracked face sits inside the guide box, auto-capture it
      setInterval(() => {
        if (
          saveArray.x > 200 &&
          saveArray.x + saveArray.width < 400 &&
          saveArray.y > 120 &&
          saveArray.y + saveArray.height < 320 &&
          saveArray.width < 180 &&
          saveArray.height < 180
        ) {
          console.log(saveArray);
          this.getPhoto();
          for (var key in saveArray) {
            delete saveArray[key];
          }
        }
      }, 2000);
    },
    // Get the portrait photo
    getPhoto() {
      var video = document.getElementById("video");
      var can = document.getElementById("shortCut");
      var context2 = can.getContext("2d");
      context2.drawImage(video, 210, 130, 210, 210, 0, 0, 140, 140);
      this.imgView = true;
    },
    // Manual screenshot
    screenshot() {
      this.getPhoto();
    },
    // Convert canvas to image
    convertCanvasToImage(canvas) {
      var image = new Image();
      image.src = canvas.toDataURL("image/png");
      return image;
    },
    // Convert a base64 data URL to a File; dataurl is the base64 string, filename must carry an extension (e.g. .jpg, .png)
    dataURLtoFile(dataurl, filename) {
      var arr = dataurl.split(","),
        mime = arr[0].match(/:(.*?);/)[1],
        bstr = atob(arr[1]),
        n = bstr.length,
        u8arr = new Uint8Array(n);
      while (n--) {
        u8arr[n] = bstr.charCodeAt(n);
      }
      return new File([u8arr], filename, { type: mime });
    },
    // Save the image
    keepImg() {
      var can = document.getElementById("shortCut");
      var img = document.getElementById("img");
      var photoImg = document.createElement("img");
      photoImg.src = this.convertCanvasToImage(can).src;
      img.appendChild(photoImg);
      // Convert the canvas to a base64 data URL, then to a File, and emit it to the parent
      this.$emit(
        "canvasToImage",
        this.dataURLtoFile(this.convertCanvasToImage(can).src, "person.jpg")
      );
      console.log(
        this.dataURLtoFile(this.convertCanvasToImage(can).src, "person.jpg")
      );
    },
    clearCanvas() {
      var c = document.getElementById("canvas");
      var c1 = document.getElementById("canvas1");
      var cxt = c.getContext("2d");
      var cxt1 = c1.getContext("2d");
      cxt.clearRect(0, 0, 581, 436);
      cxt1.clearRect(0, 0, 581, 436);
    },
    closeFace() {
      console.log("Close the face recognition window.");
      this.imgView = false;
      this.clearCanvas();
      // Stop detection
      this.trackerTask.stop();
      console.log(this.trackerTask);
      // Turn off the camera
      var video = document.getElementById("video");
      video.srcObject.getTracks()[0].stop();
    }
  },
  watch: {
    faceView(v) {
      if (v == false) {
        this.closeFace();
      }
    },
    imgView(v) {
      if (v == true) {
        this.$message.success({
          message: "Captured! Click save to keep the image",
          offset: 380,
          duration: 1000
        });
      }
    }
  },
  destroyed() {}
};
</script>
<style scoped lang="scss">
.face {
  .container {
    background: # 000;
    position: relative;
    width: 581px;
    height: 436px;
    #canvas1 {
      position: absolute;
    }
    video.#canvas.#canvas1 {
      position: absolute;
      width: 581px;
      height: 436px; }}.btns {
    padding: 10px;
    .tips {
      font-size: 14px;
      color: # 666;
      margin: 10px 0;
      line-height: 24px; }}.imgs {
    padding: 10px;
    p {
      font-size: 16px; }}}</style>

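One caveat on cleanup: closeFace stops only the first track of the stream (getTracks()[0]). That is fine for a video-only stream, but if you ever request audio as well, a more defensive teardown stops every track. A small sketch:

// Sketch: stop every track on the stream, not just the first one
var video = document.getElementById("video");
if (video.srcObject) {
  video.srcObject.getTracks().forEach(function(track) {
    track.stop(); // releases the camera (the indicator light turns off)
  });
  video.srcObject = null;
}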

Conclusion

And with that, a simple and practical face recognition component is complete.

face-min.js download: pan.baidu.com/s/1gB0Yd178… (extraction code: 9Q7Q)

tracking-min.js download: pan.baidu.com/s/1LP7pZIbA… (extraction code: QX75)

Thanks for reading! If you found this helpful, please like, bookmark, and follow.

WeChat official account: front end experience robbery road — welcome to follow.