opencv入門基礎(九)基於dlib進行人臉、特定物體追蹤
阿新 • • 發佈:2020-12-28
技術標籤:opencv基礎學習pythonopencv視訊處理深度學習機器學習
opencv入門基礎(九)基於dlib進行人臉、特定物體追蹤
一.人臉追蹤
# 1 加入庫
import cv2
import dlib
# 2 主函式
def main():
    """Track the first detected face from the default webcam.

    A face is located once with dlib's HOG frontal-face detector, then
    followed frame-to-frame by a dlib correlation tracker.  Press 'q' to quit.
    """
    # Open the default camera (device index 0).
    capture = cv2.VideoCapture(0)
    # dlib's HOG-based frontal face detector.
    detector = dlib.get_frontal_face_detector()
    # Correlation tracker; started once a face has been detected.
    tracker = dlib.correlation_tracker()
    # True after the tracker has been handed an initial face rectangle.
    tracking_state = False

    # Read and process one frame per loop iteration.
    while True:
        ret, frame = capture.read()
        if not ret:
            # Camera unplugged or stream ended -- avoid crashing on a None frame.
            break
        # Not tracking yet: run detection and, if a face is found, start the tracker.
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            dets = detector(gray, 1)  # detected face rectangles; len(dets) = face count
            if len(dets) > 0:
                tracker.start_track(frame, dets[0])  # lock onto the first face
                tracking_state = True
        # Tracking: update with the new frame and draw the current face box.
        if tracking_state is True:
            tracker.update(frame)
            position = tracker.get_position()
            cv2.rectangle(frame,
                          (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())),
                          (0, 255, 0), 3)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        cv2.imshow("face tracking", frame)

    capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
執行程式你會發現,電腦的攝像頭可以將你的人臉用方框圈住,但是當你用手擋住臉時,再將手拿開,那麼鎖定框將隨著你的手移動,可見基於dlib進行人臉追蹤的精度準確性並不是很高,如若想用高精度的演算法,那麼還需用到深度學習。
二.增加儲存視訊功能
# 1 加入庫
import cv2
import dlib
# 2 主函式
def main():
    """Track the first detected face from the webcam and record the session.

    Same detection/tracking pipeline as the basic example, plus every
    displayed frame is written to ``record.avi`` in the current directory.
    Press 'q' to quit.
    """
    capture = cv2.VideoCapture(0)
    # dlib's HOG-based frontal face detector.
    detector = dlib.get_frontal_face_detector()
    # Correlation tracker; started once a face has been detected.
    tracker = dlib.correlation_tracker()
    tracking_state = False

    # Recording: mirror the camera's geometry and frame rate in the output file.
    frame_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frame_fps = capture.get(cv2.CAP_PROP_FPS)
    # XVID codec inside an AVI container.
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    output = cv2.VideoWriter("record.avi", fourcc, int(frame_fps),
                             (int(frame_width), int(frame_height)), True)

    while True:
        ret, frame = capture.read()
        if not ret:
            # Camera unplugged or stream ended -- avoid crashing on a None frame.
            break
        # Not tracking yet: detect a face and start the tracker on it.
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            dets = detector(gray, 1)  # detected face rectangles
            if len(dets) > 0:
                tracker.start_track(frame, dets[0])
                tracking_state = True
        # Tracking: update with the new frame and draw the current face box.
        if tracking_state is True:
            tracker.update(frame)
            position = tracker.get_position()
            cv2.rectangle(frame,
                          (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())),
                          (0, 255, 0), 3)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        cv2.imshow("face tracking", frame)
        # Persist the annotated frame to record.avi.
        output.write(frame)

    # Release the writer too, so the AVI file is properly finalized.
    output.release()
    capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
當你按下q鍵退出時,會把剛才執行時的視訊以record.avi為名字,儲存在當前目錄下。
三.增加reset功能和資訊提示功能
# 1 加入庫
import cv2
import dlib
# 增加功能二:資訊提示
def show_info(frame, tracking_state):
    """Overlay the reset hint and the current tracking status on *frame*."""
    hint_pos = (20, 40)    # where the key-help line is drawn
    status_pos = (20, 80)  # where the status line is drawn
    cv2.putText(frame, "'1' : reset ", hint_pos,
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,255,255))
    # Status text and color depend on whether the tracker is active.
    status_text, status_color = (
        ("tracking now ...", (255, 0, 0)) if tracking_state is True
        else ("no tracking ...", (0, 255, 0))
    )
    cv2.putText(frame, status_text, status_pos,
                cv2.FONT_HERSHEY_COMPLEX, 0.5, status_color)
# 2 主函式
def main():
    """Face tracking with an on-screen status overlay and a reset key.

    Keys: '1' drops the current track and re-runs detection on the next
    frame; 'q' quits.
    """
    capture = cv2.VideoCapture(0)
    # dlib's HOG-based frontal face detector.
    detector = dlib.get_frontal_face_detector()
    # Correlation tracker; started once a face has been detected.
    tracker = dlib.correlation_tracker()
    tracking_state = False

    while True:
        ret, frame = capture.read()
        if not ret:
            # Camera unplugged or stream ended -- avoid crashing on a None frame.
            break
        # Draw the key-help and tracking-status overlay first.
        show_info(frame, tracking_state)
        # Not tracking yet: detect a face and start the tracker on it.
        if tracking_state is False:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            dets = detector(gray, 1)  # detected face rectangles
            if len(dets) > 0:
                tracker.start_track(frame, dets[0])
                tracking_state = True
        # Tracking: update with the new frame and draw the current face box.
        if tracking_state is True:
            tracker.update(frame)
            position = tracker.get_position()
            cv2.rectangle(frame,
                          (int(position.left()), int(position.top())),
                          (int(position.right()), int(position.bottom())),
                          (0, 255, 0), 3)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        if key == ord('1'):
            # Reset: forget the current track; detection restarts next frame.
            tracking_state = False
        cv2.imshow("face tracking", frame)

    capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
在左上角出現了提示資訊:
四.基於dlib進行目標跟蹤
這一部分實現的功能為,劃定一個特定的物體,讓攝像頭對其進行跟蹤。
import cv2
import dlib
# 定義方法:顯示資訊
def show_info():
    """Draw usage hints and the tracking status onto the current frame.

    NOTE(review): deliberately takes no arguments -- it reads the
    module-level ``frame`` and ``tracking_state`` set by the main loop.
    """
    hint1_pos, hint2_pos, status_pos = (10, 20), (10, 40), (10, 60)
    select_hint = "put left button,select an area,start tracking"
    keys_hint = "'1':start tracking,'2':stop tracking,'q':exit"
    cv2.putText(frame, select_hint, hint1_pos,
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    cv2.putText(frame, keys_hint, hint2_pos,
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    # Third line reflects the tracker state; both variants use the same color.
    status = "tracking now" if tracking_state is True else "stop tracking"
    cv2.putText(frame, status, status_pos,
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0))
# Corners of the mouse-drawn selection: [press point, release point].
points = []


def mouse_event_handler(event, x, y, flags, parms):
    """Record the two corners of a left-button drag into ``points``.

    ``flags`` and ``parms`` are unused, but OpenCV's callback signature
    requires them.
    """
    global points  # rebinds the module-level selection list
    if event == cv2.EVENT_LBUTTONDOWN:
        # Drag started: discard any old selection, keep only the press point.
        points = [(x, y)]
    elif event == cv2.EVENT_LBUTTONUP:
        # Drag finished: the release point completes the rectangle.
        points.append((x, y))
# Open the default camera (device index 0).
capture = cv2.VideoCapture(0)
# Named window so the mouse callback can be attached to it.
nameWindow = "Object Tracking"
cv2.namedWindow(nameWindow)
cv2.setMouseCallback(nameWindow, mouse_event_handler)
# Correlation tracker; started when the user presses '1' after selecting.
tracker = dlib.correlation_tracker()
tracking_state = False

# Main loop: show the feed, let the user select a region, track on demand.
while True:
    ret, frame = capture.read()
    if not ret:
        # Camera unplugged or stream ended -- avoid crashing on a None frame.
        break
    # Overlay usage hints and the current tracking status.
    show_info()
    # A completed selection (both corners) is previewed in green and
    # converted to the dlib rectangle the tracker will be started with.
    if len(points) == 2:
        cv2.rectangle(frame, points[0], points[1], (0, 255, 0), 3)
        dlib_rect = dlib.rectangle(points[0][0], points[0][1],
                                   points[1][0], points[1][1])
    # Actively tracking: update with the new frame and draw the box in blue.
    if tracking_state is True:
        tracker.update(frame)
        pos = tracker.get_position()
        cv2.rectangle(frame,
                      (int(pos.left()), int(pos.top())),
                      (int(pos.right()), int(pos.bottom())),
                      (255, 0, 0), 3)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('1'):
        # Start tracking, but only if a full selection exists.
        if len(points) == 2:
            tracker.start_track(frame, dlib_rect)
            tracking_state = True
            points = []  # a new track requires a fresh selection
    if key == ord('2'):
        # Reset: drop the selection and stop tracking.
        points = []
        tracking_state = False
    if key == ord('q'):
        break
    cv2.imshow(nameWindow, frame)

capture.release()
cv2.destroyAllWindows()
滑鼠框選後按1進行追蹤,按2退出跟蹤重新選定,按q則退出程式