1. 程式人生 > 程式設計 >Vue+tracking.js 實現前端人臉檢測功能

Vue+tracking.js 實現前端人臉檢測功能

專案中需要實現人臉登入功能,實現思路為在前端檢測人臉,把人臉照片傳送到後端識別,識別成功後返回使用者 token,完成登入。

前端呼叫攝像頭使用tracking.js檢測視訊流中的人臉,檢測到人臉後拍照上傳後端。

後端使用 face_recognition 人臉識別庫,並使用 Flask 提供 RESTful API 供前端呼叫

實現效果如下圖:

登入介面:

Vue+tracking.js 實現前端人臉檢測功能

攝像頭檢測人臉介面:

Vue+tracking.js 實現前端人臉檢測功能

前端程式碼如下:

<template>
 <!-- Face-login screen: live camera preview, detection overlay and captured-image gallery -->
 <div id="facelogin">
 <!-- Status message (detection hint / success text) bound via the FaceisDetected computed -->
 <h1 class="title is-1">{{FaceisDetected}}</h1>
 <!-- <p>{{FaceisDetected}}</p> -->
 <div class="content-cam">
 <div class="camera-wrp sec">
 <!-- Live camera stream; tracking.js reads frames from #video_cam -->
 <video width="320" height="320" ref="videoDom" id="video_cam" preload autoplay loop muted></video>
 <!-- Overlay canvas where detected-face rectangles are drawn -->
 <canvas width="320" height="320" ref="canvasDOM" id="face_detect"></canvas>
 <div class="control-btn"></div>
 </div>
 <div class="images-wrp sec">
 <!-- <p class="title is-5">Image taken</p> -->
 <!-- Gallery: each captured snapshot (data URL) rendered as a background image -->
 <div
  :class="`img-item img-item-${index}`"
  v-for="(image,index) in images"
  :key="`img-wrp-${index}`"
  :style="`background-image: url('${image}')`"
 ></div>
 </div>
 </div>
 </div>
</template>

// Face-login view component.
//
// Streams the user's camera into the <video> element and runs tracking.js
// face detection on the live feed. Once a face persists for more than 10
// consecutive detection events, the status message switches to "logging in"
// (the actual login navigation is still stubbed out below).
//
// External names (tracking, debugHelper, msgbus, isEmpty, setTitle, ...) are
// provided by the surrounding project; they are not defined in this file.
export default {
  name: 'facelogin',
  data() {
    return {
      // Consecutive detection events containing a face; decays when the face is lost.
      count: 0,
      // Status message shown in the title.
      isdetected: '請您保持臉部在畫面中央',
      videoEl: {},
      canvasEL: {},
      // Captured snapshots as data URLs.
      images: [],
      trackCcv: false,
      trackTracking: false,
      autoCaptureTrackTraking: false,
      // Constraints for getUserMedia. `ideal` is what the browser should aim
      // for; min/max bound the capability negotiation.
      userMediaConstraints: {
        audio: false,
        video: {
          width: { min: 320, ideal: 1280, max: 1920 },
          height: { min: 240, ideal: 720, max: 1080 },
          // Lower frame rates may be preferable on constrained bandwidth.
          frameRate: { min: 15, ideal: 30, max: 60 },
          // Front-facing camera.
          facingMode: 'user'
        }
      }
    }
  },
  computed: {
    // Exposes the current status message to the template.
    FaceisDetected() {
      return this.isdetected
    }
  },
  created() {
    this.changeView()
  },
  mounted() {
    // getUserMedia needs vendor prefixes on some older browsers,
    // so cover all the known variants here.
    navigator.getMedia =
      navigator.getUserMedia ||
      navigator.webkitGetUserMedia ||
      navigator.mozGetUserMedia ||
      navigator.msGetUserMedia
    this.init()
  },
  methods: {
    // Grab DOM refs, open the camera stream, then start tracking.
    async init() {
      this.videoEl = this.$refs.videoDom
      this.canvasEL = this.$refs.canvasDOM
      await navigator.mediaDevices
        .getUserMedia(this.userMediaConstraints)
        .then(this.getMediaStreamSuccess)
        .catch(this.getMediaStreamError)
      await this.onPlay()
    },
    async onPlay() {
      debugHelper.log('onPlay')
      this.onTrackTracking()
    },
    // Configure the surrounding app chrome (title, back button, hide nav bars).
    changeView() {
      this.setTitle('刷臉登陸')
      this.setBackDisabled(false)
      this.setBackIcon('arrow_back')
      msgbus.vm.setBottomNavVisible(false)
      msgbus.vm.setBottomBtnVisible(false)
      msgbus.vm.setMsgInputVisible({ value: false })
    },
    // Run tracking.js face detection on the live video, draw a rectangle
    // around every detected face, and flip the status message once a face
    // has been present for more than 10 consecutive track events.
    onTrackTracking() {
      const context = this
      const video = this.videoEl
      const canvas = this.canvasEL
      const canvasContext = canvas.getContext('2d')
      const tracker = new tracking.ObjectTracker('face')

      video.pause()
      video.src = ''
      tracker.setInitialScale(4)
      tracker.setStepSize(2)
      tracker.setEdgesDensity(0.1)
      tracking.track('#video_cam', tracker, { camera: true })
      tracker.on('track', function(event) {
        // FIX: clearRect takes (x, y, w, h); the original dropped the y argument.
        canvasContext.clearRect(0, 0, canvas.width, canvas.height)
        event.data.forEach(function({ x, y, width, height }) {
          canvasContext.strokeStyle = '#a64ceb'
          // FIX: strokeRect takes the full rectangle; the original passed only (x, height).
          canvasContext.strokeRect(x, y, width, height)
          canvasContext.font = '11px Helvetica'
          canvasContext.fillStyle = '#fff'
        })

        if (!isEmpty(event.data) && context.count <= 10) {
          if (context.count < 0) context.count = 0
          context.count += 1
          if (context.count > 10) {
            context.isdetected = '已檢測到人臉,正在登入'
            //context.$router.push({ name: 'pwdlogin' })
          }
        } else {
          // No face in this event: decay the counter; once it drops below
          // zero, reset the hint message.
          context.count -= 1
          if (context.count < 0) context.isdetected = '請您保持臉部在畫面中央'
        }
      })
    },
    // Trigger a browser download of a captured image (data URL).
    onDownloadFile(item) {
      const link = document.createElement('a')
      link.href = item
      link.download = `cahyo-${new Date().toISOString()}.png`
      link.click()
      link.remove()
    },
    // Capture a center-cropped square snapshot of the current video frame
    // onto a 300x300 canvas and store it as a data URL in `images`.
    onTakeCam() {
      const canvas = document.createElement('canvas')
      const video = this.$el.querySelector('#video_cam')
      const canvasContext = canvas.getContext('2d')

      if (video.videoWidth && video.videoHeight) {
        const isBiggerW = video.videoWidth > video.videoHeight
        // Side length of the largest centered square that fits the frame.
        const fixVidSize = isBiggerW ? video.videoHeight : video.videoWidth
        let offsetLeft = 0
        let offsetTop = 0

        // Center-crop the larger dimension.
        if (isBiggerW) offsetLeft = (video.videoWidth - fixVidSize) / 2
        else offsetTop = (video.videoHeight - fixVidSize) / 2

        // make canvas size 300px
        canvas.width = canvas.height = 300
        const { width, height } = canvas

        // FIX: use the 9-argument drawImage form (source rect -> dest rect);
        // the original dropped the source-size and destination arguments.
        canvasContext.drawImage(
          video,
          offsetLeft, offsetTop, fixVidSize, fixVidSize,
          0, 0, width, height
        )
        const image = canvas.toDataURL('image/png')
        this.images.push(image)
      }
    },
    // Run face detection on a captured still image and overlay a yellow
    // rectangle on each detected face inside its gallery item.
    onDetectFace(param, index) {
      const image = new Image()
      image.src = param

      const tracker = new tracking.ObjectTracker('face')
      tracker.setStepSize(1.7)
      tracking.track(image, tracker)

      tracker.on('track', function(event) {
        event.data.forEach(function(rect) {
          window.plot(rect.x, rect.y, rect.width, rect.height)
        })
      })

      // FIX: the original signature omitted `y`, leaving it undefined when
      // positioning the rectangle below.
      window.plot = function(x, y, w, h) {
        const rect = document.createElement('div')
        document.querySelector(`.img-item-${index}`).appendChild(rect)
        rect.classList.add('rect')
        rect.style.width = w + 'px'
        rect.style.height = h + 'px'
        rect.style.left = x + 'px'
        rect.style.top = y + 'px'
        rect.style.border = '2px solid yellow'
        rect.style.position = 'absolute'
      }
    },
    // Success callback for getUserMedia: wire the stream into the <video>.
    getMediaStreamSuccess(stream) {
      window.stream = stream // make stream available to browser console
      this.videoEl.srcObject = stream
      debugHelper.log('getMediaStreamSuccess1')
      //this.$store.commit('setVideoCanvasObject',this.videoEl)
      debugHelper.log('getMediaStreamSuccess2')
    },
    // Failure callback for getUserMedia.
    getMediaStreamError(error) {
      alert('視訊媒體流獲取錯誤' + error)
    },
    // Stop all media tracks and detach the stream from the video element.
    stopMediaStreamTrack() {
      clearInterval(this.timeInterval)
      if (typeof window.stream === 'object') {
        this.videoEl.srcObject = null
        //this.$store.commit('setVideoCanvasObject','')
        window.stream.getTracks().forEach(track => track.stop())
      }
    }
  }
}

總結

到此這篇關於 Vue+tracking.js 實現前端人臉檢測功能的文章就介紹到這了。更多相關 vue tracking.js 人臉檢測內容,請搜尋我們以前的文章或繼續瀏覽下面的相關文章,希望大家以後多多支援我們!