Camera安卓原始碼-高通mm_camera架構剖析
主要涉及三方面:
1. Camera open
2. Camera preview
3. Camera capture
1. Camera Open
mm_camera&mm_camera_open()
首先,mm_camera層用一個結構體去表示從底層獲取的camera物件,這個結構體叫做mm_camera_obj。
如下結構體所示,mm_camera物件包含了兩個執行緒:
1. mm_camera_poll_thread_t
2. mm_camera_cmd_thread_t
還包含了一個用於儲存自身所擁有的Channel的陣列:
mm_channel_t ch[MM_CAMERA_CHANNEL_MAX]
/* Per-camera object created on camera_open(); owns the event poll/cmd
 * threads and the table of channels for one camera session. */
typedef struct mm_camera_obj {
    uint32_t my_hdl;                   /* handle identifying this camera object */
    int ref_count;                     /* open reference count */
    int32_t ctrl_fd;                   /* control fd of the camera device node (-1 when closed) */
    int32_t ds_fd; /* domain socket fd */
    pthread_mutex_t cam_lock;          /* protects this camera object */
    pthread_mutex_t cb_lock; /* lock for evt cb */
    mm_channel_t ch[MM_CAMERA_CHANNEL_MAX];  /* channels owned by this camera */
    mm_camera_evt_obj_t evt;           /* registered app event callbacks */
    mm_camera_poll_thread_t evt_poll_thread; /* evt poll thread */
    mm_camera_cmd_thread_t evt_thread; /* thread for evt CB */
    mm_camera_vtbl_t vtbl;             /* ops table handed back to the HAL caller */
    pthread_mutex_t evt_lock;          /* presumably guards evt_cond/evt_rcvd below — confirm */
    pthread_cond_t evt_cond;           /* signalled when an awaited event arrives */
    mm_camera_event_t evt_rcvd;        /* last event received */
    pthread_mutex_t msg_lock; /* lock for sending msg through socket */
    uint32_t sessionid; /* Camera server session id */
} mm_camera_obj_t;
而在camera open的過程中,kernel層以上最終會呼叫到mm_camera_open(mm_camera_obj_t *my_obj)
方法。該方法是在mm_camera_interface
中被呼叫的。
int32_t camera_open(uint8_t camera_idx, mm_camera_vtbl_t **camera_vtbl)
{
......
mm_camera_obj_t *cam_obj = NULL;
cam_obj = (mm_camera_obj_t *)malloc(sizeof(mm_camera_obj_t));
if(NULL == cam_obj) {
pthread_mutex_unlock(&g_intf_lock);
LOGE("no mem");
return -EINVAL;
}
/* initialize camera obj */
memset(cam_obj, 0, sizeof(mm_camera_obj_t));
cam_obj->ctrl_fd = -1;
cam_obj->ds_fd = -1;
cam_obj->ref_count++;
cam_obj->my_hdl = mm_camera_util_generate_handler(camera_idx);
cam_obj->vtbl.camera_handle = cam_obj->my_hdl; /* set handler */
cam_obj->vtbl.ops = &mm_camera_ops;
pthread_mutex_init(&cam_obj->cam_lock, NULL);
/* unlock global interface lock, if not, in dual camera use case,
* current open will block operation of another opened camera obj*/
pthread_mutex_lock(&cam_obj->cam_lock);
pthread_mutex_unlock(&g_intf_lock);
rc = mm_camera_open(cam_obj);
......
}
也就是通過mm_camera_open
方法去填充這個mm_camera_obj的結構體,再供上層使用。
接下來進入mm_camera_open
方法體:
LOGD("Launch evt Thread in Cam Open");
snprintf(my_obj->evt_thread.threadName, THREAD_NAME_SIZE, "CAM_Dispatch");
mm_camera_cmd_thread_launch(&my_obj->evt_thread,
mm_camera_dispatch_app_event,
(void *)my_obj);
/* launch event poll thread
* we will add evt fd into event poll thread upon user first register for evt */
LOGD("Launch evt Poll Thread in Cam Open");
snprintf(my_obj->evt_poll_thread.threadName, THREAD_NAME_SIZE, "CAM_evntPoll");
mm_camera_poll_thread_launch(&my_obj->evt_poll_thread,
MM_CAMERA_POLL_TYPE_EVT);
mm_camera_evt_sub(my_obj, TRUE);
會開啟兩個執行緒。
1.1 執行緒1:mm_camera_cmd_thread
其中第一個mm_camera_cmd_thread_launch
方法在mm_camera_thread
中定義:
/* Launch the generic command thread.
 *
 * Stores the callback/user_data on the thread object, marks it active, and
 * spawns mm_camera_cmd_thread as the thread body.
 *
 * @cmd_thread : thread state to initialize and launch
 * @cb         : callback invoked for each dequeued command node
 * @user_data  : opaque pointer passed back to @cb
 *
 * Returns 0 on success, or the pthread_create() error code on failure.
 * (BUGFIX: the original declared int32_t but fell off the end without a
 * return statement — undefined behavior if the caller reads the value —
 * and silently ignored pthread_create() failure.) */
int32_t mm_camera_cmd_thread_launch(mm_camera_cmd_thread_t * cmd_thread,
                                    mm_camera_cmd_cb_t cb,
                                    void* user_data)
{
    int32_t rc;

    cmd_thread->cb = cb;
    cmd_thread->user_data = user_data;
    cmd_thread->is_active = TRUE;

    /* launch the thread */
    rc = pthread_create(&cmd_thread->cmd_pid,
                        NULL,
                        mm_camera_cmd_thread,
                        (void *)cmd_thread);
    if (rc != 0) {
        /* thread never started; do not leave the object marked active */
        cmd_thread->is_active = FALSE;
    }
    return rc;
}
/* State for one generic command thread (started via
 * mm_camera_cmd_thread_launch). Producers enqueue nodes on cmd_queue and
 * post cmd_sem to wake the consumer loop in mm_camera_cmd_thread. */
typedef struct {
    uint8_t is_active; /*indicates whether thread is active or not */
    cam_queue_t cmd_queue; /* cmd queue (queuing dataCB, asyncCB, or exitCMD) */
    pthread_t cmd_pid; /* cmd thread ID */
    cam_semaphore_t cmd_sem; /* semaphore for cmd thread */
    cam_semaphore_t sync_sem; /* semaphore for synchronization with cmd thread */
    mm_camera_cmd_cb_t cb; /* cb for cmd */
    void* user_data; /* user_data for cb */
    char threadName[THREAD_NAME_SIZE]; /* name shown in thread listings */
} mm_camera_cmd_thread_t;
也就是先給mm_camera_obj中的mm_camera_cmd_thread_t的cb賦值,然後啟動該執行緒的方法體mm_camera_cmd_thread
:
/* Generic command-thread body: block on cmd_sem, then drain cmd_queue,
 * invoking the cb installed at launch for each node, until an EXIT command
 * (or an unknown one) is dequeued. */
static void *mm_camera_cmd_thread(void *data)
{
    int running = 1;
    int ret;
    mm_camera_cmd_thread_t *cmd_thread =
        (mm_camera_cmd_thread_t *)data;
    mm_camera_cmdcb_t* node = NULL;

    mm_camera_cmd_thread_name(cmd_thread->threadName);
    do {
        /* wait for a producer to post cmd_sem; retry while cam_sem_wait
         * reports a transient (EINVAL-excluded) wakeup */
        do {
            ret = cam_sem_wait(&cmd_thread->cmd_sem);
            if (ret != 0 && errno != EINVAL) {
                LOGE("cam_sem_wait error (%s)",
                        strerror(errno));
                return NULL;
            }
        } while (ret != 0);

        /* we got notified about new cmd avail in cmd queue */
        node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);
        while (node != NULL) {
            switch (node->cmd_type) {
            case MM_CAMERA_CMD_TYPE_EVT_CB:
            case MM_CAMERA_CMD_TYPE_DATA_CB:
            case MM_CAMERA_CMD_TYPE_REQ_DATA_CB:
            case MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB:
            case MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY:
            case MM_CAMERA_CMD_TYPE_START_ZSL:
            case MM_CAMERA_CMD_TYPE_STOP_ZSL:
            case MM_CAMERA_CMD_TYPE_GENERAL:
            case MM_CAMERA_CMD_TYPE_FLUSH_QUEUE:
                /* all real work is delegated to the cb set at launch time */
                if (NULL != cmd_thread->cb) {
                    cmd_thread->cb(node, cmd_thread->user_data);
                }
                break;
            case MM_CAMERA_CMD_TYPE_EXIT:
            default:
                /* EXIT (or any unrecognized cmd) terminates the loop */
                running = 0;
                break;
            }
            /* nodes are heap-allocated by the producer; consumer frees them */
            free(node);
            node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);
        } /* (node != NULL) */
    } while (running);
    return NULL;
}
該執行緒體中會不斷迴圈的從執行緒佇列中取出cmd,並執行cb,這裡的cb是之前通過mm_camera_cmd_thread_launch傳進來的,在原始碼中是方法:
/* Fan an incoming camera event out to every registered app event callback.
 * Runs on the evt cmd thread; cb_lock serializes against callback
 * (un)registration. */
static void mm_camera_dispatch_app_event(mm_camera_cmdcb_t *cmd_cb,
                                         void* user_data)
{
    mm_camera_obj_t *cam = (mm_camera_obj_t *)user_data;
    mm_camera_event_t *evt = &cmd_cb->u.evt;
    int idx;

    if (NULL == cam) {
        return;
    }

    mm_camera_cmd_thread_name(cam->evt_thread.threadName);
    pthread_mutex_lock(&cam->cb_lock);
    for (idx = 0; idx < MM_CAMERA_EVT_ENTRY_MAX; idx++) {
        if (NULL == cam->evt.evt[idx].evt_cb) {
            continue;
        }
        cam->evt.evt[idx].evt_cb(cam->my_hdl,
                                 evt,
                                 cam->evt.evt[idx].user_data);
    }
    pthread_mutex_unlock(&cam->cb_lock);
}
而此處的my_obj->evt
是在camera_open的時候通過register_event_notify
傳入的
int QCamera3HardwareInterface::openCamera()
{
int rc = 0;
char value[PROPERTY_VALUE_MAX];
KPI_ATRACE_CALL();
if (mCameraHandle) {
LOGE("Failure: Camera already opened");
return ALREADY_EXISTS;
}
rc = QCameraFlash::getInstance().reserveFlashForCamera(mCameraId);
if (rc < 0) {
LOGE("Failed to reserve flash for camera id: %d",
mCameraId);
return UNKNOWN_ERROR;
}
rc = camera_open((uint8_t)mCameraId, &mCameraHandle);
if (rc) {
LOGE("camera_open failed. rc = %d, mCameraHandle = %p", rc, mCameraHandle);
return rc;
}
if (!mCameraHandle) {
LOGE("camera_open failed. mCameraHandle = %p", mCameraHandle);
return -ENODEV;
}
rc = mCameraHandle->ops->register_event_notify(mCameraHandle->camera_handle,
camEvtHandle, (void *)this);
用於處理mm_camera的一些事件。
1.2 執行緒2:mm_camera_poll_thread
呼叫方法mm_camera_poll_thread_launch
去建立mm_camera_obj中的poll_thread:
/* Launch a poll thread of the given type.
 *
 * Resets all poll fds/entries to -1, creates the wake-up pipe, initializes
 * the mutex/condvar, then spawns mm_camera_poll_thread and waits (on cond_v)
 * until the thread signals that it is up.
 *
 * @poll_cb   : poll-thread state to initialize and launch
 * @poll_type : MM_CAMERA_POLL_TYPE_EVT or MM_CAMERA_POLL_TYPE_DATA
 *
 * Returns 0 on success, -1 on failure. */
int32_t mm_camera_poll_thread_launch(mm_camera_poll_thread_t * poll_cb,
                                     mm_camera_poll_thread_type_t poll_type)
{
    int32_t rc = 0;
    size_t i = 0, cnt = 0;
    poll_cb->poll_type = poll_type;

    /* Initialize poll_fds */
    cnt = sizeof(poll_cb->poll_fds) / sizeof(poll_cb->poll_fds[0]);
    for (i = 0; i < cnt; i++) {
        poll_cb->poll_fds[i].fd = -1;
    }
    /* Initialize poll_entries */
    cnt = sizeof(poll_cb->poll_entries) / sizeof(poll_cb->poll_entries[0]);
    for (i = 0; i < cnt; i++) {
        poll_cb->poll_entries[i].fd = -1;
    }
    /* Initialize pipe fds; pfds[0] is the read end polled in slot 0,
     * pfds[1] is written to wake the thread */
    poll_cb->pfds[0] = -1;
    poll_cb->pfds[1] = -1;
    rc = pipe(poll_cb->pfds);
    if(rc < 0) {
        LOGE("pipe open rc=%d\n", rc);
        return -1;
    }

    poll_cb->timeoutms = -1; /* Infinite seconds */

    LOGD("poll_type = %d, read fd = %d, write fd = %d timeout = %d",
            poll_cb->poll_type,
            poll_cb->pfds[0], poll_cb->pfds[1],poll_cb->timeoutms);

    pthread_mutex_init(&poll_cb->mutex, NULL);
    pthread_cond_init(&poll_cb->cond_v, NULL);

    /* launch the thread */
    pthread_mutex_lock(&poll_cb->mutex);
    poll_cb->status = 0;
    rc = pthread_create(&poll_cb->pid, NULL, mm_camera_poll_thread, (void *)poll_cb);
    if (rc != 0) {
        /* BUGFIX: the original ignored pthread_create() failure and would
         * then block forever in pthread_cond_wait() waiting for a thread
         * that never started (and leaked the pipe fds). */
        pthread_mutex_unlock(&poll_cb->mutex);
        LOGE("pthread_create failed rc=%d\n", rc);
        pthread_cond_destroy(&poll_cb->cond_v);
        pthread_mutex_destroy(&poll_cb->mutex);
        close(poll_cb->pfds[0]);
        close(poll_cb->pfds[1]);
        poll_cb->pfds[0] = -1;
        poll_cb->pfds[1] = -1;
        return -1;
    }
    /* wait until the thread body signals readiness via cond_v */
    if(!poll_cb->status) {
        pthread_cond_wait(&poll_cb->cond_v, &poll_cb->mutex);
    }
    pthread_mutex_unlock(&poll_cb->mutex);
    LOGD("End");
    return rc;
}
插一句:在Linux系統中一切皆可以看成是檔案,檔案又可分為:普通檔案、目錄檔案、連結檔案和裝置檔案。檔案描述符(file descriptor)是核心為了高效管理已被開啟的檔案所建立的索引,其是一個非負整數(通常是小整數),用於指代被開啟的檔案,所有執行I/O操作的系統呼叫都通過檔案描述符。
此處會建立一個pipe
,int pipe(int filedes[2]);
, 需要往裡傳入一個二元的陣列,fd[0]
代表讀資料的一端;而fd[1]代表寫資料的一端。需要說明的是,「關閉另一端」是fork出父子程序做單向通訊時的慣例——只讀的一方通常關閉自己持有的寫入端close(filedes[1]),只寫的一方關閉讀取端close(filedes[0]);而像這裡poll thread用pipe喚醒自己這樣在同一程序內同時保留兩端,也是完全合法的。
開啟執行緒體:
/* Poll-thread body: poll() the wake-up pipe (slot 0) plus all registered
 * fds, processing pipe commands first and otherwise dispatching the
 * matching notify callbacks (POLLPRI for EVT-type threads, POLLIN|POLLRDNORM
 * for DATA-type threads).
 * NOTE(review): this excerpt omits the declarations of i and rc (and the
 * initial cond_v signalling) present in the full source — not compilable
 * as shown. */
static void *mm_camera_poll_fn(mm_camera_poll_thread_t *poll_cb)
{
    do {
        /* re-arm the interest mask on every iteration */
        for(i = 0; i < poll_cb->num_fds; i++) {
            poll_cb->poll_fds[i].events = POLLIN|POLLRDNORM|POLLPRI;
        }
        rc = poll(poll_cb->poll_fds, poll_cb->num_fds, poll_cb->timeoutms);
        if(rc > 0) {
            if ((poll_cb->poll_fds[0].revents & POLLIN) &&
                (poll_cb->poll_fds[0].revents & POLLRDNORM)) {
                /* if we have data on pipe, we only process pipe in this iteration */
                LOGD("cmd received on pipe\n");
                mm_camera_poll_proc_pipe(poll_cb);
            } else {
                for(i=1; i<poll_cb->num_fds; i++) {
                    /* Checking for ctrl events */
                    if ((poll_cb->poll_type == MM_CAMERA_POLL_TYPE_EVT) &&
                        (poll_cb->poll_fds[i].revents & POLLPRI)) {
                        LOGD("mm_camera_evt_notify\n");
                        /* poll_entries[i-1] pairs with poll_fds[i]; slot 0 is the pipe */
                        if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
                            poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
                        }
                    }
                    if ((MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) &&
                        (poll_cb->poll_fds[i].revents & POLLIN) &&
                        (poll_cb->poll_fds[i].revents & POLLRDNORM)) {
                        LOGD("mm_stream_data_notify\n");
                        if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
                            poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
                        }
                    }
                }
            }
        } else {
            /* in error case sleep 10 us and then continue. hard coded here */
            usleep(10);
            continue;
        }
    } while ((poll_cb != NULL) && (poll_cb->state == MM_CAMERA_POLL_TASK_STATE_POLL));
}
回撥到mm_camera.c中的mm_camera_enqueue_evt
,在這個方法中會呼叫:
/* Excerpt from mm_camera_enqueue_evt: allocate a cmd node, enqueue it on the
 * evt cmd thread's queue, and post the semaphore to wake the consumer. */
mm_camera_cmdcb_t *node = NULL;
node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
/* NOTE(review): the malloc result is used unchecked in this excerpt —
 * presumably the full source NULL-checks before filling/enqueueing; verify. */
/* enqueue to evt cmd thread */
cam_queue_enq(&(my_obj->evt_thread.cmd_queue), node);
/* wake up evt cmd thread */
cam_sem_post(&(my_obj->evt_thread.cmd_sem));
將node放入到mm_camera_obj的cmd_queue中去,也就是1.1中建立的mm_camera_cmd_thread_t
的cmd_queue中去供其出隊操作。
2. Camera Preview
Channel&stream
會涉及到四個內容:
- QCamera3Channel
- QCamera3Stream
- mm_channel
- mm_stream
最先,在QCamera3HWI的初始化時,就建立了一個mm_channel:
int QCamera3HardwareInterface::initialize(
const struct camera3_callback_ops *callback_ops)
{
......
mCallbackOps = callback_ops;
mChannelHandle = mCameraHandle->ops->add_channel(
mCameraHandle->camera_handle, NULL, NULL, this);
......
}
呼叫了mm_camera.c中的mm_camera_add_channel
方法,而後呼叫mm_camera_channel.c
中的mm_channel_init
方法:
/* Initialize a freshly added channel: record the bundle notify callback and
 * user data, copy the channel attributes when supplied, spin up this
 * channel's data poll thread, and leave the channel in the STOPPED state.
 * Always returns 0. */
int32_t mm_channel_init(mm_channel_t *my_obj,
                        mm_camera_channel_attr_t *attr,
                        mm_camera_buf_notify_t channel_cb,
                        void *userdata)
{
    int32_t ret = 0;

    my_obj->bundle.super_buf_notify_cb = channel_cb;
    my_obj->bundle.user_data = userdata;
    if (attr != NULL) {
        my_obj->bundle.superbuf_queue.attr = *attr;
    }

    LOGD("Launch data poll thread in channel open");
    snprintf(my_obj->poll_thread[0].threadName, THREAD_NAME_SIZE, "CAM_dataPoll");
    mm_camera_poll_thread_launch(&my_obj->poll_thread[0],
                                 MM_CAMERA_POLL_TYPE_DATA);

    /* change state to stopped state */
    my_obj->state = MM_CHANNEL_STATE_STOPPED;
    return ret;
}
這裡的cb是null,每個mm_channel_t中均保有一個執行緒陣列,在上述init方法中,開啟了此channel的一個poll_thread[0]。而該poll_thread的notify_cb是在mm_camera_stream.c中被賦值的,在mm_camera_stream.c中執行mm_camera_poll_thread_add_poll_fd
,將mm_stream_data_notify
作為notify_cb,在mm_stream_data_notify
中會執行mm_stream_dispatch_sync_data
,在mm_stream_dispatch_sync_data
中會執行回撥,此回撥其實就是QCamera3Stream的dataNotifyCB,至此就可以將mm層的資料回調回HAL層。
在開啟preview之前,需要先進行一些configure操作。在QCamera3HWI的configureStreamsPerfLocked
中new出了一系列的QCamera3Channel,比如metadatachannel,yuvchannel,supportchannel等。而在Channel建立的時候,會初始化QCamera3ProcessingChannel的成員變數QCamera3PostProcessor m_postprocessor; // post processor
, 執行m_postprocessor.init
,而在init操作中,會啟動postprocessor中的一個執行緒,執行緒體為dataProcessRoutine
,用於channel處理資料。 也就是說QCamera3Channel持有一個處理資料的執行緒。
之後當執行preview的請求時,即QCamera3HardwareInterface::processCaptureRequest
時,會執行以下三個步驟:
- init channel
- start channel
- request channel
2.1 Init Channel
首先,initialize Channel,此處的channel指的是QCamera3Channel:
int QCamera3HardwareInterface::processCaptureRequest()
{
......
//First initialize all streams
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
if ((((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) ||
((1U << CAM_STREAM_TYPE_PREVIEW) == channel->getStreamTypeMask())) &&
setEis)
rc = channel->initialize(is_type);
else {
rc = channel->initialize(IS_TYPE_NONE);
}
if (NO_ERROR != rc) {
LOGE("Channel initialization failed %d", rc);
pthread_mutex_unlock(&mMutex);
goto error_exit;
}
}
......
}
在Channel的init過程中,先為此channel新增QCamera3Stream:
/* Initialize the YUV channel: create its single stream (one stream per
 * channel in the v3 HAL), compute buffer plane/offset info for the chosen
 * format and dimensions, then delegate to
 * QCamera3ProcessingChannel::initialize() for offline meta setup.
 * Returns NO_ERROR on success, NO_INIT if the camera3 stream is missing, or
 * the failing helper's error code. */
int32_t QCamera3YUVChannel::initialize(cam_is_type_t isType)
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;
    cam_dimension_t streamDim;

    if (NULL == mCamera3Stream) {
        LOGE("Camera stream uninitialized");
        return NO_INIT;
    }
    if (1 <= m_numStreams) {
        // Only one stream per channel supported in v3 Hal
        return NO_ERROR;
    }

    mIsType = isType;
    mStreamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_CALLBACK,
            mCamera3Stream->width, mCamera3Stream->height);
    streamDim.width = mCamera3Stream->width;
    streamDim.height = mCamera3Stream->height;

    rc = QCamera3Channel::addStream(mStreamType,
            mStreamFormat,
            streamDim,
            ROTATE_0,
            mNumBufs,
            mPostProcMask,
            mIsType);
    if (rc < 0) {
        LOGE("addStream failed");
        return rc;
    }

    cam_stream_buf_plane_info_t buf_planes;
    cam_padding_info_t paddingInfo = mPaddingInfo;
    memset(&buf_planes, 0, sizeof(buf_planes));
    // to ensure a big enough buffer size set the height and width
    // padding to max(height padding, width padding)
    paddingInfo.width_padding = MAX(paddingInfo.width_padding, paddingInfo.height_padding);
    paddingInfo.height_padding = paddingInfo.width_padding;

    rc = mm_stream_calc_offset_snapshot(mStreamFormat, &streamDim, &paddingInfo,
            &buf_planes);
    if (rc < 0) {
        // BUGFIX: the message previously named mm_stream_calc_offset_preview,
        // but the call above is mm_stream_calc_offset_snapshot.
        LOGE("mm_stream_calc_offset_snapshot failed");
        return rc;
    }

    mFrameLen = buf_planes.plane_info.frame_len;

    // Defensive re-check kept from the original; rc is expected to be
    // NO_ERROR here given the rc < 0 check above.
    if (NO_ERROR != rc) {
        LOGE("Initialize failed, rc = %d", rc);
        return rc;
    }

    /* initialize offline meta memory for input reprocess */
    rc = QCamera3ProcessingChannel::initialize(isType);
    if (NO_ERROR != rc) {
        LOGE("Processing Channel initialize failed, rc = %d",
                rc);
    }
    return rc;
}
在addStream中,new QCamera3Stream,並init:
QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
m_handle,
m_camOps,
&mPaddingInfo,
this);
if (pStream == NULL) {
LOGE("No mem for Stream");
return NO_MEMORY;
}
LOGD("batch size is %d", batchSize);
rc = pStream->init(streamType, streamFormat, streamDim, streamRotation,
NULL, minStreamBufNum, postprocessMask, isType, batchSize,
streamCbRoutine, this);
QCamera3Stream的init會呼叫mm_camera.c中的mm_camera_add_stream
方法:
/* Add a stream to the channel identified by ch_id.
 * Takes ch_lock before dropping cam_lock so the channel cannot go away
 * between lookup and the FSM call (ch_lock is then managed by
 * mm_channel_fsm_fn). Returns the new stream handle, or 0 when the channel
 * lookup fails. */
uint32_t mm_camera_add_stream(mm_camera_obj_t *my_obj,
                              uint32_t ch_id)
{
    uint32_t stream_hdl = 0;
    mm_channel_t *channel =
        mm_camera_util_get_channel_by_handler(my_obj, ch_id);

    if (channel == NULL) {
        pthread_mutex_unlock(&my_obj->cam_lock);
        return stream_hdl;
    }

    pthread_mutex_lock(&channel->ch_lock);
    pthread_mutex_unlock(&my_obj->cam_lock);
    mm_channel_fsm_fn(channel,
                      MM_CHANNEL_EVT_ADD_STREAM,
                      NULL,
                      (void *)&stream_hdl);
    return stream_hdl;
}
首先會獲取之前建立的一個mm_camera_channel,將其作為引數傳入mm_channel_fsm_fn
, 而回顧之前建立的那個mm_camera_channel,在其建立之後,會將自身狀態改變為MM_CHANNEL_STATE_STOPPED
, 所以這裡進入mm_channel_fsm_fn
之後會執行mm_channel_fsm_fn_stopped,並且執行case MM_CHANNEL_EVT_ADD_STREAM:
:
int32_t mm_channel_fsm_fn_stopped(mm_channel_t *my_obj,
mm_channel_evt_type_t evt,
void * in_val,
void * out_val)
{
int32_t rc = 0;
LOGD("E evt = %d", evt);
switch (evt) {
case MM_CHANNEL_EVT_ADD_STREAM:
{
uint32_t s_hdl = 0;
s_hdl = mm_channel_add_stream(my_obj);
*((uint32_t*)out_val) = s_hdl;
rc = 0;
}
break;
case...
...
}
也就是執行mm_channel_add_stream(my_obj)
,根據mm_camera_stream的狀態機,最終會呼叫到:
mm_camera_stream.c
/* FSM handler for a stream in the INITED state. Only MM_STREAM_EVT_ACQUIRE
 * is accepted: resolve and open the owning camera's /dev node (non-blocking),
 * set the extended mode, and move the stream to MM_STREAM_STATE_ACQUIRED.
 * Returns 0 on success, -1 on any failure. */
int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
                             mm_stream_evt_type_t evt,
                             void * in_val,
                             void * out_val)
{
    int32_t rc = 0;
    char dev_name[MM_CAMERA_DEV_NAME_LEN];
    const char *dev_name_value = NULL;

    if (NULL == my_obj) {
        LOGE("NULL camera object\n");
        return -1;
    }

    LOGD("E, my_handle = 0x%x, fd = %d, state = %d",
            my_obj->my_hdl, my_obj->fd, my_obj->state);
    switch(evt) {
    case MM_STREAM_EVT_ACQUIRE:
        if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
            LOGE("NULL channel or camera obj\n");
            rc = -1;
            break;
        }

        /* resolve the device node name from the owning camera's handle */
        dev_name_value = mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl);
        if (NULL == dev_name_value) {
            LOGE("NULL device name\n");
            rc = -1;
            break;
        }
        snprintf(dev_name, sizeof(dev_name), "/dev/%s",
                dev_name_value);

        /* non-blocking open: stream I/O is driven by the poll thread */
        my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK);
        if (my_obj->fd < 0) {
            LOGE("open dev returned %d\n", my_obj->fd);
            rc = -1;
            break;
        }
        LOGD("open dev fd = %d\n", my_obj->fd);
        rc = mm_stream_set_ext_mode(my_obj);
        if (0 == rc) {
            my_obj->state = MM_STREAM_STATE_ACQUIRED;
        } else {
            /* failed setting ext_mode
             * close fd */
            close(my_obj->fd);
            my_obj->fd = -1;
            break;
        }
        break;
    default:
        LOGE("invalid state (%d) for evt (%d), in(%p), out(%p)",
                my_obj->state, evt, in_val, out_val);
        break;
    }
    return rc;
}
在上述程式碼中,我們可以看到,
stream執行了一個open操作:my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK);
,表明stream與某個video裝置建立了關聯,並可以去讀取某些資料。
至此,我們就從上到下的建立了QCamera3Channel -> QCamera3Stream -> mm_channel -> mm_stream 之間的關聯。
2.2 Start Channel
Channel init結束之後,QCamera3HWI會執行channel->start():
/* Start the (single) stream owned by this channel and mark the channel
 * active. Warns when more than one stream is present (bundles unsupported),
 * refuses an empty channel with NO_INIT, and is a warn-only no-op when the
 * channel is already active. */
int32_t QCamera3Channel::start()
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;

    if (m_numStreams > 1) {
        LOGW("bundle not supported");
    } else if (m_numStreams == 0) {
        return NO_INIT;
    }

    if (m_bIsActive) {
        LOGW("Attempt to start active channel");
        return rc;
    }

    for (uint32_t idx = 0; idx < m_numStreams; idx++) {
        QCamera3Stream *stream = mStreams[idx];
        if (stream != NULL) {
            stream->start();
        }
    }

    m_bIsActive = true;
    return rc;
}
也就是說一個QCamera3Channel只擁有一個QCamera3Stream,會執行QCamera3Stream的start方法:
/* Begin stream processing: reset the data and timeout-frame queues (plus the
 * free batch-buffer queue when batching is enabled), then launch the
 * processing thread with dataProcRoutine as its body. Returns the launch
 * result. */
int32_t QCamera3Stream::start()
{
    int32_t rc = 0;

    mDataQ.init();
    mTimeoutFrameQ.init();
    if (mBatchSize) {
        mFreeBatchBufQ.init();
    }

    rc = mProcTh.launch(dataProcRoutine, this);
    return rc;
}
這其中,會啟動QCamera3Stream所擁有的一個成員變數執行緒QCameraCmdThread mProcTh;
,執行緒體為:
/* Processing-thread body for a stream: wait on the cmd semaphore, then
 * handle TIMEOUT (cancel the timed-out buffer), DO_NEXT_JOB (dequeue a frame
 * and forward it to the data callback) and EXIT (flush queues and stop). */
void *QCamera3Stream::dataProcRoutine(void *data)
{
    int running = 1;
    int ret;
    QCamera3Stream *pme = (QCamera3Stream *)data;
    QCameraCmdThread *cmdThread = &pme->mProcTh;

    cmdThread->setName(mStreamNames[pme->mStreamInfo->stream_type]);
    LOGD("E");
    do {
        /* block until a producer posts cmd_sem; retry on transient wakeups */
        do {
            ret = cam_sem_wait(&cmdThread->cmd_sem);
            if (ret != 0 && errno != EINVAL) {
                LOGE("cam_sem_wait error (%s)",
                        strerror(errno));
                return NULL;
            }
        } while (ret != 0);

        // we got notified about new cmd avail in cmd queue
        camera_cmd_type_t cmd = cmdThread->getCmd();
        switch (cmd) {
        case CAMERA_CMD_TYPE_TIMEOUT:
            {
                /* a buffer timed out: cancel it by its queued index */
                int32_t bufIdx = (int32_t)(pme->mTimeoutFrameQ.dequeue());
                pme->cancelBuffer(bufIdx);
                break;
            }
        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
            {
                LOGD("Do next job");
                mm_camera_super_buf_t *frame =
                    (mm_camera_super_buf_t *)pme->mDataQ.dequeue();
                if (NULL != frame) {
                    if (UNLIKELY(frame->bufs[0]->buf_type ==
                            CAM_STREAM_BUF_TYPE_USERPTR)) {
                        /* user-pointer (batched) buffers take a separate path */
                        pme->handleBatchBuffer(frame);
                    } else if (pme->mDataCB != NULL) {
                        /* normal path: hand the frame up via the data cb
                         * installed at stream init */
                        pme->mDataCB(frame, pme, pme->mUserData);
                    } else {
                        // no data cb routine, return buf here
                        pme->bufDone(frame->bufs[0]->buf_idx);
                    }
                }
            }
            break;
        case CAMERA_CMD_TYPE_EXIT:
            LOGH("Exit");
            /* flush data buf queue */
            pme->mDataQ.flush();
            pme->mTimeoutFrameQ.flush();
            pme->flushFreeBatchBufQ();
            running = 0;
            break;
        default:
            break;
        }
    } while (running);
    LOGD("X");
    return NULL;
}
可以看到在case DO_NEXT_JOB中,會從stream的資料佇列中出隊一個mm_camera_super_buf_t
,然後通過mDataCB將該buffer往上層傳,而此處的mDataCB指的是在stream init的時候傳入的QCamera3Channel的streamCbRoutine
,也就是說會將資料通過回撥拋到Channel層。而在streamCbRoutine
中,會呼叫Channel自身持有的執行緒去做資料處理操作,也就是執行:m_postprocessor.processData(frame,ppInfo->output, resultFrameNumber);
在processData中,會將buffer入佇列,讓channel的執行緒去處理,執行緒體為dataProcessRoutine
。
至此,QCamera3Channel的start操作執行完畢。
接下來會執行:
rc = mCameraHandle->ops->start_channel(mCameraHandle->camera_handle, mChannelHandle);
執行mm_camera.c的:
/* Start channel ch_id: look up the channel, swap cam_lock for its ch_lock,
 * and drive the channel state machine with MM_CHANNEL_EVT_START (ch_lock is
 * released inside mm_channel_fsm_fn). Returns the FSM result, or -1 when the
 * channel handle is unknown. */
int32_t mm_camera_start_channel(mm_camera_obj_t *my_obj, uint32_t ch_id)
{
    int32_t rc = -1;
    mm_channel_t * ch_obj =
        mm_camera_util_get_channel_by_handler(my_obj, ch_id);

    if (NULL != ch_obj) {
        /* take ch_lock before dropping cam_lock so the channel stays valid */
        pthread_mutex_lock(&ch_obj->ch_lock);
        pthread_mutex_unlock(&my_obj->cam_lock);
        rc = mm_channel_fsm_fn(ch_obj,
                               MM_CHANNEL_EVT_START,
                               NULL,
                               NULL);
    } else {
        pthread_mutex_unlock(&my_obj->cam_lock);
    }
    return rc;
}
進入到mmchannel的state machine中,由於之前addstream沒有改變channel的狀態,所以這次的case依然為MM_CHANNEL_STATE_STOPPED:
/* Channel state-machine dispatcher: route evt to the handler for the
 * channel's current state. The caller must hold ch_lock; it is released
 * here before returning. Returns the handler's result, or -1 for an
 * invalid state. */
int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
                          mm_channel_evt_type_t evt,
                          void * in_val,
                          void * out_val)
{
    int32_t rc = -1;

    LOGD("E state = %d", my_obj->state);
    switch (my_obj->state) {
    case MM_CHANNEL_STATE_NOTUSED:
        rc = mm_channel_fsm_fn_notused(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_STOPPED:
        rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_ACTIVE:
        rc = mm_channel_fsm_fn_active(my_obj, evt, in_val, out_val);
        break;
    case MM_CHANNEL_STATE_PAUSED:
        rc = mm_channel_fsm_fn_paused(my_obj, evt, in_val, out_val);
        break;
    default:
        LOGD("Not a valid state (%d)", my_obj->state);
        break;
    }

    /* unlock ch_lock */
    pthread_mutex_unlock(&my_obj->ch_lock);
    LOGD("X rc = %d", rc);
    return rc;
}
進入mm_channel_fsm_fn_stopped,case為:
case MM_CHANNEL_EVT_START:
{
rc = mm_channel_start(my_obj);
/* first stream started in stopped state
* move to active state */
if (0 == rc) {
my_obj->state = MM_CHANNEL_STATE_ACTIVE;
}
}
break;
進入mm_channel_start:
/*===========================================================================
 * FUNCTION   : mm_channel_start
 *
 * DESCRIPTION: start a channel, which will start all streams in the channel
 *
 * PARAMETERS :
 *   @my_obj  : channel object
 *
 * RETURN     : int32_t type of status
 *              0  -- success
 *              -1 -- failure
 *==========================================================================*/
int32_t mm_channel_start(mm_channel_t *my_obj)
{
    int32_t rc = 0;
    int i = 0, j = 0;
    mm_stream_t *s_objs[MAX_STREAM_NUM_IN_BUNDLE] = {NULL};
    uint8_t num_streams_to_start = 0;
    uint8_t num_streams_in_bundle_queue = 0;
    mm_stream_t *s_obj = NULL;
    int meta_stream_idx = 0;
    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;

    /* collect this channel's valid stream objects */
    for (i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
        if (my_obj->streams[i].my_hdl > 0) {
            s_obj = mm_channel_util_get_stream_by_handler(my_obj,
                my_obj->streams[i].my_hdl);
            if (NULL != s_obj) {
                stream_type = s_obj->stream_info->stream_type;
                /* remember meta data stream index */
                if ((stream_type == CAM_STREAM_TYPE_METADATA) &&
                    (s_obj->ch_obj == my_obj)) {
                    meta_stream_idx = num_streams_to_start;
                }
                s_objs[num_streams_to_start++] = s_obj;
                if (!s_obj->stream_info->noFrameExpected) {
                    num_streams_in_bundle_queue++;
                }
            }
        }
    }

    if (meta_stream_idx > 0 ) {
        /* always start meta data stream first, so switch the stream object with the first one */
        s_obj = s_objs[0];
        s_objs[0] = s_objs[meta_stream_idx];
        s_objs[meta_stream_idx] = s_obj;
    }

    if (NULL != my_obj->bundle.super_buf_notify_cb) {
        /* need to send up cb, therefore launch thread */
        /* init superbuf queue */
        mm_channel_superbuf_queue_init(&my_obj->bundle.superbuf_queue);
        my_obj->bundle.superbuf_queue.num_streams = num_streams_in_bundle_queue;
        my_obj->bundle.superbuf_queue.expected_frame_id =
            my_obj->bundle.superbuf_queue.attr.user_expected_frame_id;
        my_obj->bundle.superbuf_queue.expected_frame_id_without_led = 0;
        my_obj->bundle.superbuf_queue.led_off_start_frame_id = 0;
        my_obj->bundle.superbuf_queue.led_on_start_frame_id = 0;
        my_obj->bundle.superbuf_queue.led_on_num_frames = 0;
        my_obj->bundle.superbuf_queue.good_frame_id = 0;

        for (i = 0; i < num_streams_to_start; i++) {
            /* Only bundle streams that belong to the channel */
            if(!(s_objs[i]->stream_info->noFrameExpected)) {
                if (s_objs[i]->ch_obj == my_obj) {
                    /* set bundled flag to streams */
                    s_objs[i]->is_bundled = 1;
                }
                my_obj->bundle.superbuf_queue.bundled_streams[j++] = s_objs[i]->my_hdl;
            }
        }

        /* launch cb thread for dispatching super buf through cb */
        snprintf(my_obj->cb_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBuf");
        mm_camera_cmd_thread_launch(&my_obj->cb_thread,
                                    mm_channel_dispatch_super_buf,
                                    (void*)my_obj);

        /* launch cmd thread for super buf dataCB */
        snprintf(my_obj->cmd_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBufCB");
        mm_camera_cmd_thread_launch(&my_obj->cmd_thread,
                                    mm_channel_process_stream_buf,
                                    (void*)my_obj);

        /* set flag to TRUE */
        my_obj->bundle.is_active = TRUE;
    }

    /* link any streams first before starting the rest of the streams */
    for (i = 0; i < num_streams_to_start; i++) {
        if (s_objs[i]->ch_obj != my_obj) {
            /* stream borrowed from another channel: mark it linked to us */
            pthread_mutex_lock(&s_objs[i]->linked_stream->buf_lock);
            s_objs[i]->linked_stream->linked_obj = my_obj;
            s_objs[i]->linked_stream->is_linked = 1;
            pthread_mutex_unlock(&s_objs[i]->linked_stream->buf_lock);
            continue;
        }
    }

    /* for each owned stream: allocate bufs, register bufs, then start */
    for (i = 0; i < num_streams_to_start; i++) {
        if (s_objs[i]->ch_obj != my_obj) {
            continue;
        }
        /* all streams within a channel should be started at the same time */
        if (s_objs[i]->state == MM_STREAM_STATE_ACTIVE) {
            LOGE("stream already started idx(%d)", i);
            rc = -1;
            break;
        }
        /* allocate buf */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_GET_BUF,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("get buf failed at idx(%d)", i);
            break;
        }
        /* reg buf */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_REG_BUF,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("reg buf failed at idx(%d)", i);
            break;
        }
        /* start stream */
        rc = mm_stream_fsm_fn(s_objs[i],
                              MM_STREAM_EVT_START,
                              NULL,
                              NULL);
        if (0 != rc) {
            LOGE("start stream failed at idx(%d)", i);
            break;
        }
    }

    /* error handling */
    if (0 != rc) {
        /* unlink the streams first */
        for (j = 0; j < num_streams_to_start; j++) {
            if (s_objs[j]->ch_obj != my_obj) {
                pthread_mutex_lock(&s_objs[j]->linked_stream->buf_lock);
                s_objs[j]->linked_stream->is_linked = 0;
                s_objs[j]->linked_stream->linked_obj = NULL;
                pthread_mutex_unlock(&s_objs[j]->linked_stream->buf_lock);

                if (TRUE == my_obj->bundle.is_active) {
                    /* NOTE(review): s_objs[i] (the index where starting
                     * failed) is used here inside the j-loop — verify this
                     * is intended and not a typo for s_objs[j]. */
                    mm_channel_flush_super_buf_queue(my_obj, 0,
                        s_objs[i]->stream_info->stream_type);
                }
                memset(s_objs[j], 0, sizeof(mm_stream_t));
                continue;
            }
        }
        /* roll back every owned stream started so far (j <= i) */
        for (j = 0; j <= i; j++) {
            if ((NULL == s_objs[j]) || (s_objs[j]->ch_obj != my_obj)) {
                continue;
            }
            /* stop streams*/
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_STOP,
                             NULL,
                             NULL);
            /* unreg buf */
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_UNREG_BUF,
                             NULL,
                             NULL);
            /* put buf back */
            mm_stream_fsm_fn(s_objs[j],
                             MM_STREAM_EVT_PUT_BUF,
                             NULL,
                             NULL);
        }
        /* destroy super buf cmd thread */
        if (TRUE == my_obj->bundle.is_active) {
            /* first stop bundle thread */
            mm_camera_cmd_thread_release(&my_obj->cmd_thread);
            mm_camera_cmd_thread_release(&my_obj->cb_thread);
            /* deinit superbuf queue */
            mm_channel_superbuf_queue_deinit(&my_obj->bundle.superbuf_queue);
            /* memset super buffer queue info */
            my_obj->bundle.is_active = 0;
            memset(&my_obj->bundle.superbuf_queue, 0, sizeof(mm_channel_queue_t));
        }
    }
    my_obj->bWaitForPrepSnapshotDone = 0;
    if (my_obj->bundle.superbuf_queue.attr.enable_frame_sync) {
        LOGH("registering Channel obj %p", my_obj);
        mm_frame_sync_register_channel(my_obj);
    }
    return rc;
}
}
程式碼中的if判斷if (NULL != my_obj->bundle.super_buf_notify_cb)
中,其實s