【python/qt】Building a Simple Video Surveillance Interface with Python + Qt
DATE: 2018.12.9
1. Preface
This interface is a simple one I wrote back in graduate school while learning Qt; it mainly implements the face detection part, so it is fairly basic.
I started writing this video surveillance feature in March of this year and dragged it out until November. Now that job hunting is over, I can finally take a proper look at Python, Qt, and programming with the two combined.
That said, implementing the video surveillance interface was quite a struggle; mixing the two is clearly not easy.
2. Implementing the Simple Video Surveillance Interface
Platform: Python + Qt (the listing below targets Python 2 with PyQt4 and OpenCV 2.x)
The preliminary code is as follows:
# -*- coding:utf-8 -*-
#Created by SoaringLee at 2016/3/26
#Updated by SoaringLee at 2016/11/9
from PyQt4 import QtCore, QtGui
import sys
import cv2
import cv2.cv as cv
import numpy as np
cascade_fn = 'data/haarcascades/haarcascade_frontalface_alt2.xml'  # pre-trained Haar cascade XML file
save_video = False       # record the video stream to disk
snap_flag = False        # save the current frame as an image
open_face = True         # run face detection
preprocessing = True     # show the Laplace / Sobel / histogram-equalisation previews
def Face_detect(img, cascade):  # face detection helper
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=5, minSize=(20, 20),
                                     flags=cv.CV_HAAR_SCALE_IMAGE)
if len(rects) == 0:
return []
#print rects
    rects[:, 2:] += rects[:, :2]  # convert (x, y, w, h) boxes to (x1, y1, x2, y2) corner coordinates
print rects
return rects
def draw_rects(img, rects, color):  # draw rectangles on img
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
def laplaceTransform(img):  # Laplacian filtering (sharpening)
gray_laplace = cv2.Laplacian(img,cv2.CV_16S,ksize = 3)
dst = cv2.convertScaleAbs(gray_laplace)
return dst
def SobelFilter(img):  # Sobel filtering (edge extraction)
x = cv2.Sobel(img,cv2.CV_16S,1,0)
y = cv2.Sobel(img,cv2.CV_16S,0,1)
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
gray_sobel = cv2.addWeighted(absX,0.5,absY,0.5,0)
return gray_sobel
#########################################################################
## Video surveillance interface prototype -- feature summary:
## (1) Press ESC or 'q' to exit the video surveillance windows [implemented]
## (2) Press the space bar to save the current frame to disk (camera snapshot) [implemented]
## (3) Optionally run face detection and save the video to disk (local recording) [implemented]
## (4) Planned: multi-channel live monitoring, brightness/contrast adjustment (see the sketch
##     right after the listing), image-quality adjustment, network video monitoring, loop recording
## (5) GUI packaging: wrap the surveillance features into an interface that provides the basic
##     functions of a monitoring product, then consider network features
#########################################################################
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
        MainWindow.setObjectName(_fromUtf8("Video Surveillance Interface"))
MainWindow.resize(688, 427)
self.centralWidget = QtGui.QWidget(MainWindow)
self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
self.Btn_VideoWriter = QtGui.QPushButton(self.centralWidget)
self.Btn_VideoWriter.setGeometry(QtCore.QRect(20, 40, 91, 41))
self.Btn_VideoWriter.setObjectName(_fromUtf8("Btn_VideoWriter"))
self.Btn_VideoWriter.clicked.connect(self.Btn_VideoWriter_Clicked)
self.Btn_VideoWarning = QtGui.QPushButton(self.centralWidget)
self.Btn_VideoWarning.setGeometry(QtCore.QRect(20, 110, 91, 41))
self.Btn_VideoWarning.setObjectName(_fromUtf8("Btn_VideoWarning"))
self.Btn_VideoSnap = QtGui.QPushButton(self.centralWidget)
self.Btn_VideoSnap.setGeometry(QtCore.QRect(20, 180, 91, 41))
self.Btn_VideoSnap.setObjectName(_fromUtf8("Btn_VideoSnap"))
self.Btn_VideoSnap.clicked.connect(self.Btn_VideoSnap_Clicked)
self.Btn_FaceDetection = QtGui.QPushButton(self.centralWidget)
self.Btn_FaceDetection.setGeometry(QtCore.QRect(20, 250, 91, 41))
self.Btn_FaceDetection.setObjectName(_fromUtf8("Btn_FaceDetection"))
self.Btn_FaceDetection.clicked.connect(self.Btn_FaceDetection_Clicked)
self.Btn_Preprocessing = QtGui.QPushButton(self.centralWidget)
self.Btn_Preprocessing.setGeometry(QtCore.QRect(20, 320, 91, 41))
self.Btn_Preprocessing.setObjectName(_fromUtf8("Btn_Preprocessing"))
self.Btn_Preprocessing.clicked.connect(self.Btn_Preprocessing_Clicked)
self.label = QtGui.QLabel(self.centralWidget)
self.label.setObjectName(_fromUtf8("label"))
#self.label.setGeometry(QtCore.QRect(20, 320, 91, 41))
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 688, 23))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
MainWindow.setMenuBar(self.menuBar)
self.mainToolBar = QtGui.QToolBar(MainWindow)
self.mainToolBar.setObjectName(_fromUtf8("mainToolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtGui.QStatusBar(MainWindow)
self.statusBar.setObjectName(_fromUtf8("statusBar"))
MainWindow.setStatusBar(self.statusBar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def Btn_VideoWriter_Clicked(self):
        global save_video        # without 'global', the assignment would only create a local variable
        save_video = True
    def Btn_VideoSnap_Clicked(self):
        global snap_flag
        snap_flag = True
    def Btn_FaceDetection_Clicked(self):
        global open_face
        open_face = True
    def Btn_Preprocessing_Clicked(self):
        global preprocessing
        preprocessing = True
def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle("MainWindow")
        self.Btn_VideoWriter.setText(_fromUtf8("Recording"))
self.Btn_VideoWarning.setText(_translate("MainWindow","Video Warning",None))
self.Btn_VideoSnap.setText(_translate("MainWindow","Snap",None))
self.Btn_FaceDetection.setText(_translate("MainWindow","Face Detection",None))
self.Btn_Preprocessing.setText(_translate("MainWindow","Preprocessing",None))
self.label.setText("Image")
    def camera_cap(self, MainWindow):
        global snap_flag                  # so the snapshot flag can be reset after saving
        capture1 = cv2.VideoCapture(0)    # open the camera
        # the capture can be written to disk as a video file; CV_FOURCC selects the codec
        size = (int(capture1.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
                int(capture1.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
        # isOpened() reports whether the camera was opened successfully
        print capture1.isOpened(), "Camera 1 opened!"
        num = 0
        if save_video:
            flag = True
            video = cv2.VideoWriter("VideoTest.avi", cv2.cv.CV_FOURCC('I', '4', '2', '0'), 30, size)
        else:
            flag = None
        # keep reading frames in a loop
        # NOTE: this blocking loop runs before app.exec_() is reached, so the Qt buttons
        # cannot respond while it is running -- see the QTimer sketch at the end of the post
while True:
if capture1.isOpened():
ret1,img1=capture1.read()
                # write each frame to the video file
if flag:
video.write(img1)
                cv2.imshow('Surveillance Channel 1', img1)
cv2.waitKey(1)
#cv2.imwrite('%s.jpg'%(str(num)),img)
gray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
                cascade = cv2.CascadeClassifier(cascade_fn)  # load the classifier (note: reloaded every frame)
                if cascade.empty():                          # a truthiness check on the object would never fail
                    print >> sys.stderr, "ERROR: Could not load classifier cascade!"
                else:
                    pass
                rects1 = Face_detect(gray, cascade)          # run face detection
vis1= img1.copy()
if open_face:
draw_rects(vis1, rects1, (0, 255, 0))
                    cv2.imshow('Face Detection', vis1)
if preprocessing:
laplace=laplaceTransform(vis1)
                    cv2.imshow('Laplacian Sharpening', laplace)
                    sobel = SobelFilter(vis1)
                    cv2.imshow('Sobel Edge Extraction', sobel)
                    equalize = cv2.equalizeHist(gray)
                    cv2.imshow('Histogram Equalization', equalize)
                key = cv2.waitKey(2)  # delay in ms: returns the key code if a key is pressed within
                                      # the delay, otherwise -1; a delay of 0 waits indefinitely. Refresh time here is 2 ms
num = num+1
if key == ord('q'):
break
                if key == 27:  # 27 is the ASCII code for ESC
break
                if key == 32 or snap_flag:  # space bar or the Snap button saves the current frame
                    cv2.imwrite('channel1_snapshot_' + str(num) + '.jpg', img1)
                    snap_flag = False       # reset so only one frame is saved per request
        capture1.release()         # release the camera
        cv2.destroyAllWindows()    # close all OpenCV windows
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
ui.camera_cap(MainWindow)
sys.exit(app.exec_())
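As a starting point for planned feature (4) in the list above, brightness and contrast can be adjusted with a single OpenCV call. The helper below is only a minimal sketch of that idea; the function name and the default alpha/beta values are my own illustration and are not part of the program above.

def adjust_brightness_contrast(img, alpha=1.2, beta=20):
    # new_pixel = alpha * old_pixel + beta, saturated to the 0-255 range;
    # alpha > 1 raises contrast, beta > 0 raises brightness
    return cv2.convertScaleAbs(img, alpha=alpha, beta=beta)

Wiring alpha and beta to a pair of Qt sliders and applying the helper to img1 before cv2.imshow would give the basic picture-adjustment control the TODO list mentions.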
Run result of the program above:
The related features still need further refinement!
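Since camera_cap runs its blocking while-loop before app.exec_() is ever reached, the Qt buttons cannot actually respond while the video windows are open, which is part of why mixing the two frameworks felt awkward. A cleaner way to combine OpenCV capture with Qt is to drive the frame grabbing from a QTimer inside the Qt event loop. The snippet below is only a minimal sketch of that approach; the CameraViewer class, the 33 ms interval, and the label-based display are my own illustration, still assuming PyQt4 and an OpenCV 2.x build as in the code above.

# -*- coding:utf-8 -*-
# Minimal QTimer-driven capture sketch (illustration only, not the original program)
from PyQt4 import QtCore, QtGui
import sys
import cv2

class CameraViewer(QtGui.QLabel):
    def __init__(self, device=0, parent=None):
        super(CameraViewer, self).__init__(parent)
        self.capture = cv2.VideoCapture(device)    # open the default camera
        self.timer = QtCore.QTimer(self)           # fires inside the Qt event loop
        self.timer.timeout.connect(self.next_frame)
        self.timer.start(33)                       # roughly 30 fps

    def next_frame(self):
        ret, frame = self.capture.read()
        if not ret:
            return
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)    # Qt expects RGB byte order
        h, w, _ = rgb.shape
        image = QtGui.QImage(rgb.data, w, h, 3 * w, QtGui.QImage.Format_RGB888)
        self.setPixmap(QtGui.QPixmap.fromImage(image))  # show the frame in the label

    def closeEvent(self, event):
        self.capture.release()                     # free the camera when the window closes
        super(CameraViewer, self).closeEvent(event)

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    viewer = CameraViewer()
    viewer.show()
    sys.exit(app.exec_())

With this structure the buttons, face detection, and recording logic can stay inside slots and run once per frame without blocking the interface.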