驅動層-4 V4L2驅動之上層應用
V4L2分為兩層:驅動層和應用層。驅動層負責控制具體的視訊編碼硬體,並向應用層提供對應的操作api。應用層利用驅動層提供的api間接地去控制硬體,並獲取視訊資料。
一、V4L2的應用層開發框架
首先我們先熟悉一下如何通過V4L2的介面來採集視訊。操作上現在基本都是使用ioctl的命令進行操作(因為對裝置的控制底層驅動主要在ioctl裡實現),基本流程如下:
1. 開啟視訊裝置。fd = open(“/dev/video0”, O_RDWR);
2. 查詢裝置的能力。ioctl(fd, VIDIOC_QUERYCAP, struct v4l2_capability *cap).
3. 設定視訊的採集引數,包括格式,制式,幀率,視訊採集視窗的大小,旋轉。
設定視訊的制式:ioctl(fd, VIDIOC_S_STD, v4l2_std_id)
設定視訊的格式,如逐行還是隔行,是YUV422還是YUV420,影象大小多少:ioctl(fd, VIDIOC_S_FMT, struct v4l2_format *fmt)
設定視訊的幀率:ioctl(fd, VIDIOC_S_PARM, struct v4l2_streamparm *parm)
設定影象採集視窗的大小:ioctl(fd, VIDIOC_S_CROP, struct v4l2_crop *crop)
4. 向驅動申請視訊流資料的緩衝區
申請至少3個緩衝區:ioctl(fd, VIDIOC_REQBUFS, struct v4l2_requestbuffers *req)
查詢緩衝區在核心中的地址和長度: ioctl(fd, VIDIOC_QUERYBUF, struct v4l2_buffer *buf)
5. 用mmap將緩衝區對映到使用者空間,這樣使用者就可以直接存取緩衝區中的資料。
6. 將所有緩衝區全部存放到輸入佇列:ioctl(fd, VIDIOC_QBUF, buf)
7. 開始採集: ioctl(fd, VIDIOC_STREAMON, &type)
8. 從輸出佇列中取出已含有視訊資料的緩衝區: ioctl(fd, VIDIOC_DQBUF, buf);
9. 處理完緩衝區中的視訊後將該緩衝區還給輸入佇列:ioctl(fd, VIDIOC_QBUF, buf).
10. 重複8到9,直到停止採集資料。
11. 停止採集資料:ioctl(fd, VIDIOC_STREAMOFF, &type)
12. 用munmap釋放緩衝區,關閉fd。
視訊驅動程式定義了一個視訊輸入佇列和一個視訊輸出佇列。我們在申請到緩衝區後,要先用VIDIOC_QBUF將所有緩衝區放到輸入佇列中,再用VIDIOC_STREAMON啟動資料的採集。驅動程式會用採集到的資料填滿緩衝區,然後將其移到輸出佇列。這個時候應用程式用VIDIOC_DQBUF從輸出佇列中取出緩衝區後,記得用VIDIOC_QBUF將緩衝區還給輸入佇列,不然輸入佇列會因沒有緩衝區可用而餓死。我們可以用如下的圖來表示整個緩衝區的迴圈流動過程:
這個迴圈過程中,驅動程式將緩衝區分為四類:
V4L2_BUF_FLAG_UNMAPPED 0B0000
V4L2_BUF_FLAG_MAPPED 0B0001
V4L2_BUF_FLAG_ENQUEUED 0B0010
V4L2_BUF_FLAG_DONE 0B0100
這四類緩衝區的轉換可以用如下圖形表示:
我們用上面的思路來分析一下TI的dmai開發框架裡的例子。程式碼在dmai_xx_xx\packages\ti\sdo\dmai\linux目錄下的capture.c。我們看一下程式碼,主要在Capture_create裡面。
/*
 * Capture_create — excerpt quoted from TI DMAI's capture.c.
 *
 * Opens a V4L2 capture device, checks that it supports video capture and
 * streaming I/O, negotiates the pixel format (UYVY or NV12) and field
 * order, optionally halves the capture frame rate, optionally crops the
 * capture window, allocates or registers capture buffers, and finally
 * starts streaming with VIDIOC_STREAMON.
 *
 * Parameters:
 *   hBufTab - user-supplied buffer table, or NULL to let the driver
 *             allocate the buffers.
 *   attrs   - capture attributes (device path, crop rectangle, number of
 *             buffers, color space, ...).
 *
 * Returns the started capture handle, or NULL on any failure after
 * releasing resources via cleanup().
 *
 * NOTE(review): the "…" below marks code elided by the article (local
 * declarations such as cap, cropCap, fmt, crop, width, height, videoStd,
 * pixelFormat, halfRateCapture, type, and the allocation of hCapture).
 * The curly quotes inside several string literals are an artifact of the
 * article's formatting; this excerpt is not compilable as shown.
 */
Capture_Handle Capture_create(BufTab_Handle hBufTab, Capture_Attrs *attrs)
{
…
/* Open video capture device */
hCapture->fd =open(attrs->captureDevice, O_RDWR, 0);// open the device node read/write
/* Query for capture device capabilities */
if (ioctl(hCapture->fd, VIDIOC_QUERYCAP, &cap) == -1) {// query device capabilities with VIDIOC_QUERYCAP
cleanup(hCapture);
if (errno == EINVAL) {
// NOTE(review): cleanup() is called twice on this path (once just above
// and once here) — looks like a defect in the quoted code; confirm
// against the original DMAI source.
Dmai_err1(“%s is no V4L2device\n”, attrs->captureDevice);
cleanup(hCapture);
return NULL;
}
Dmai_err2(“FailedVIDIOC_QUERYCAP on %s (%s)\n”, attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {// verify the device can capture video
Dmai_err1(“%s is not a videocapture device\n”, attrs->captureDevice);
cleanup(hCapture);
return NULL;
}
if (!(cap.capabilities &V4L2_CAP_STREAMING)) {// verify the device supports streaming I/O
Dmai_err1("%s does notsupport streaming i/o\n", attrs->captureDevice);
cleanup(hCapture);
return NULL;
}
/* Query the cropping capabilities (bounds/default rectangle) */
cropCap.type =V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(hCapture->fd,VIDIOC_CROPCAP, &cropCap) == -1) {
Dmai_err2("VIDIOC_CROPCAP failed on %s (%s)\n",attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
fmt.type =V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(hCapture->fd,VIDIOC_G_FMT, &fmt) == -1) {// read the current capture format with VIDIOC_G_FMT
Dmai_err2("FailedVIDIOC_G_FMT on %s (%s)\n", attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
/* Override the current format with the requested size and pixel format */
fmt.fmt.pix.width = width;
fmt.fmt.pix.height = height;
switch(attrs->colorSpace) {
case ColorSpace_UYVY:
fmt.fmt.pix.pixelformat= V4L2_PIX_FMT_UYVY;
break;
case ColorSpace_YUV420PSEMI:
fmt.fmt.pix.pixelformat= V4L2_PIX_FMT_NV12;
break;
default:
Dmai_err1("Unsupported color format %g\n",attrs->colorSpace);
cleanup(hCapture);
return NULL;
};
/* Let the driver compute the line pitch and image size */
fmt.fmt.pix.bytesperline = 0;
fmt.fmt.pix.sizeimage = 0;
pixelFormat =fmt.fmt.pix.pixelformat;
/* SD standards capture interlaced; everything else progressive */
if ((videoStd == VideoStd_CIF)|| (videoStd == VideoStd_SIF_PAL) ||
(videoStd == VideoStd_SIF_NTSC)|| (videoStd == VideoStd_D1_PAL) ||
(videoStd ==VideoStd_D1_NTSC)) {
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
} else {
fmt.fmt.pix.field = V4L2_FIELD_NONE;
}
if (ioctl(hCapture->fd, VIDIOC_TRY_FMT, &fmt) == -1) {// probe with VIDIOC_TRY_FMT whether the requested format is achievable
Dmai_err2(“FailedVIDIOC_TRY_FMT on %s (%s)\n”, attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
if (ioctl(hCapture->fd, VIDIOC_S_FMT, &fmt) == -1) {// the format was accepted above; now actually apply it
Dmai_err2(“FailedVIDIOC_S_FMT on %s (%s)\n”, attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
Dmai_dbg3(“Video input connected size %dx%d pitch %d\n”,
fmt.fmt.pix.width,fmt.fmt.pix.height, fmt.fmt.pix.bytesperline);
if (ioctl(hCapture->fd, VIDIOC_G_FMT, &fmt) == -1) {// read the format back to verify what the driver really set
Dmai_err2(“FailedVIDIOC_G_FMT on %s (%s)\n”, attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
if (pixelFormat != fmt.fmt.pix.pixelformat) {// fail if the driver substituted a different pixel format
Dmai_err2(“Pixel format 0x%xnot supported. Received %x\n”,
pixelFormat,fmt.fmt.pix.pixelformat);
cleanup(hCapture);
return NULL;
}
/* NOTE(review): halfRateCapture is forced to FALSE right before the
 * test, so the half-rate branch below is dead code in this excerpt —
 * presumably a debug/experimental switch left in the quoted source. */
halfRateCapture = FALSE;
if (halfRateCapture == TRUE) {
struct v4l2_standard stdinfo;
struct v4l2_streamparmstreamparam, streamparam_s;
Bool found;
Dmai_dbg0(“Setting captureframe rate to half.\n”);
Dmai_clear(stdinfo);
stdinfo.index= 0;
found= 0;
while ( 0 ==ioctl(hCapture->fd, VIDIOC_ENUMSTD, &stdinfo)) {// enumerate the video standards the device supports
if(stdinfo.id == V4L2_STD_720P_60||stdinfo.id == V4L2_STD_PAL_D1) {
found= 1;
break;
}
stdinfo.index++;
}
if(!found) {
Dmai_err1("Couldnot find required 720-60 standard (%s)\n", strerror(errno));
cleanup(hCapture);
return NULL;
}
/* Set the streaming parameter to reduce the capture frequency to half
 * (halving the timeperframe denominator halves the frame rate). */
Dmai_clear(streamparam);
streamparam.type =V4L2_BUF_TYPE_VIDEO_CAPTURE;
streamparam.parm.capture.timeperframe.numerator=
stdinfo.frameperiod.numerator;
streamparam.parm.capture.timeperframe.denominator =
stdinfo.frameperiod.denominator/ 2;
streamparam_s = streamparam; // keep a copy of what we requested for comparison below
if (ioctl(hCapture->fd,VIDIOC_S_PARM , &streamparam) < 0 ) {// apply the new frame rate
Dmai_err1(“VIDIOC_S_PARMfailed (%s)\n”, strerror(errno));
cleanup(hCapture);
return NULL;
}
/* Read the params back and verify the driver accepted them */
if (ioctl(hCapture->fd, VIDIOC_G_PARM, &streamparam) < 0) {// verify the frame-rate setting took effect
Dmai_err1(“VIDIOC_G_PARMfailed (%s)\n”, strerror(errno));
cleanup(hCapture);
return NULL;
}
if((streamparam.parm.capture.timeperframe.numerator !=
streamparam_s.parm.capture.timeperframe.numerator)||
(streamparam.parm.capture.timeperframe.denominator !=
streamparam_s.parm.capture.timeperframe.denominator)) {
// NOTE(review): the message hard-codes "30fps" but the requested rate
// is half of whatever the enumerated standard reports — message and
// logic may disagree; confirm against the original source.
Dmai_err0(“Couldnot set capture driver for 30fps\n”);
cleanup(hCapture);
return NULL;
}
}
/* Optional capture-window cropping.
 * NOTE(review): the guard mixes "> 0" for width with ">= 0" for height,
 * and the parity check below tests cropX while the message says
 * "Crop width" — both look like defects in the quoted code. */
if (attrs->cropWidth > 0 && attrs->cropHeight >= 0) {
if (attrs->cropX &0x1) {
Dmai_err1(“Crop width(%ld) needs to be even\n”, attrs->cropX);
cleanup(hCapture);
return NULL;
}
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
crop.c.left = attrs->cropX;
crop.c.top = attrs->cropY;
// The "+ 4 + 2" pads the height when a top offset is in use —
// presumably to skip VBI/blanking lines; confirm against DMAI docs.
crop.c.height =hCapture->topOffset ? attrs->cropHeight + 4 + 2 :
attrs->cropHeight;
Dmai_dbg4(“Setting capturecropping at %dx%d size %dx%d\n”,
crop.c.left,crop.c.top, crop.c.width, crop.c.height);
/* Crop the image depending on the requested image size */
if (ioctl(hCapture->fd,VIDIOC_S_CROP, &crop) == -1) {// set the capture window
Dmai_err2("VIDIOC_S_CROP failed on %s (%s)\n",attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
// NOTE(review): crop.c.width is never assigned in this excerpt before
// VIDIOC_S_CROP — possibly set in the elided code; verify.
}
if (hBufTab == NULL) {
hCapture->userAlloc =FALSE;
/* The driver allocates the buffers */
if (_Dmai_v4l2DriverAlloc(hCapture->fd,
attrs->numBufs,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
&hCapture->bufDescs,
&hBufTab,
hCapture->topOffset,
attrs->colorSpace) < 0) {
Dmai_err1("Failedto allocate capture driver buffers on %s\n",
attrs->captureDevice);
cleanup(hCapture);
return NULL;
}
}
else {
/* Make the driver use the user-supplied buffers */
if (_Dmai_v4l2UserAlloc(hCapture->fd,
attrs->numBufs,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
&hCapture->bufDescs,
hBufTab,
0,attrs->colorSpace) < 0) {// per the article: this helper requests buffers with VIDIOC_REQBUFS/VIDIOC_QUERYBUF, maps them with mmap, then enqueues them on the input queue with VIDIOC_QBUF — not verifiable from this excerpt
Dmai_err1("Failedto intialize capture driver buffers on %s\n",
attrs->captureDevice);
cleanup(hCapture);
return NULL;
}
}
hCapture->hBufTab = hBufTab;
/* Start the video streaming */
type =V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(hCapture->fd,VIDIOC_STREAMON, &type) == -1) {// start video capture
Dmai_err2("VIDIOC_STREAMON failed on %s (%s)\n",attrs->captureDevice,
strerror(errno));
cleanup(hCapture);
return NULL;
}
hCapture->started = TRUE;
return hCapture;
}
二、V4L2應用層與驅動層的關係
上面我們看到,V4L2的應用層使用了大量的ioctl命令來完成視訊資料的採集。不同的平臺上的攝像頭介面以及使用的sensor 攝像頭都是不同。我們在驅動層必然要具體為每一款硬體實現具體的驅動。上面的每一個應用層的命令在驅動層都必須進行對應的實現。這樣當我們在應用層呼叫這些命令時才能控制到具體的硬體。下一節我們具體分析驅動層那些ioctl的命令,並結合ti davinci的dm365以及tvp5150的驅動來分析驅動層。