Android視訊播放資料讀取的流程
阿新 • • 發佈:2019-02-15
轉自 http://blog.sina.com.cn/foreverlovelost
這裡分析Android4.0.1本地視訊資料讀取的流程,其他過程類似。當播放條件準備妥當之後,就要迴圈讀取視訊的原始資料放到MediaBuffer,將MediaBuffer中的資料輸送到解碼器中解碼,解碼後的資料放到MediaBuffer中,再將這MediaBuffer中的資料進行render顯示。本文主要側重讀取原始資料的流程,主要是程式碼跟蹤,不夾雜個人分析,有些mpeg4的演算法不懂。 1:onVideoEvent中開始讀取資料,具體程式碼如下: void AwesomePlayer::onVideoEvent() { if (!mVideoBuffer) { MediaSource::ReadOptions options; if (mSeeking != NO_SEEK) { LOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6); options.setSeekTo( mSeekTimeUs, mSeeking == SEEK_VIDEO_ONLY ? MediaSource::ReadOptions::SEEK_NEXT_SYNC : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC); } for (;;) { status_t err = mVideoSource->read(&mVideoBuffer, &options);
}
}
}
藍色為核心程式碼,如果是正常讀取,options為null,否則這個結構體中包含了seek到的時間和seek的模式,用於具體從檔案中哪裡開始讀取,傳入的mVideoBuffer引用用來裝解碼後的資料
2:藍色部分呼叫的是OMXCodec::read函式,這個函式中核心的程式碼如下:
// Excerpt from OMXCodec::read(): hands the caller one buffer of *decoded*
// data.  Flow: record any pending seek request, kick the input side via
// drainInputBuffers(), then return the first filled output buffer.
// NOTE(review): this is a trimmed blog excerpt of the AOSP 4.0.1 code, not
// the complete implementation.
status_t OMXCodec::read(
        MediaBuffer **buffer, const ReadOptions *options) {
    status_t err = OK;
    *buffer = NULL;

    bool seeking = false;
    int64_t seekTimeUs;
    ReadOptions::SeekMode seekMode;
    if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
        seeking = true;
    }

    if (seeking) {
        CODEC_LOGV("seeking to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);

        CHECK(seekTimeUs >= 0);
        // Stash the seek target; drainInputBuffer() consumes it on its next
        // read from the source.
        mSeekTimeUs = seekTimeUs;
        mSeekMode = seekMode;
    }

    // Feed encoded source data into the OMX component's input buffers.
    drainInputBuffers();

    // mFilledBuffers is a list of indices into mPortBuffers[kPortIndexOutput]
    // whose buffers now hold decoded data; take the first one.
    size_t index = *mFilledBuffers.begin();
    mFilledBuffers.erase(mFilledBuffers.begin());

    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
    CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
    info->mStatus = OWNED_BY_CLIENT;

    // The caller co-owns the buffer until it releases it.
    info->mMediaBuffer->add_ref();
    *buffer = info->mMediaBuffer;

    return OK;
}
兩點:
a,drainInputBuffers開始了資料的讀取;
b,mFilledBuffers從這個佇列中讀取已經解碼後的資料放入到傳入的MediaBuffer中,mFilledBuffers佇列中的MediaBuffer就是drainInputBuffers中寫進去的
3:跟到drainInputBuffer中看看
// Excerpt from OMXCodec::drainInputBuffer(): fills one OMX input buffer with
// encoded data read from the source and submits it to the component for
// decoding.
// NOTE(review): trimmed blog excerpt — the elided AOSP code computes `size`
// and `flags` and breaks out of the read loop; they are undeclared here.
bool OMXCodec::drainInputBuffer(BufferInfo *info) {
    CODEC_LOGV("calling emptyBuffer with codec specific data");

    // First submit codec-specific configuration data (e.g. SPS/PPS) to the
    // component.
    status_t err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, size,
            OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_CODECCONFIG,
            0);
    CHECK_EQ(err, (status_t)OK);

    info->mStatus = OWNED_BY_COMPONENT;

    bool signalEOS = false;
    int64_t timestampUs = 0;

    size_t offset = 0;
    int32_t n = 0;

    for (;;) {
        MediaBuffer *srcBuffer;
        if (mSeekTimeUs >= 0) {
            // A seek is pending: forward it to the extractor exactly once,
            // then reset to the "no seek" sentinel.
            MediaSource::ReadOptions options;
            options.setSeekTo(mSeekTimeUs, mSeekMode);

            mSeekTimeUs = -1;
            mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;

            err = mSource->read(&srcBuffer, &options);

            if (err == OK) {
                int64_t targetTimeUs;
                if (srcBuffer->meta_data()->findInt64(
                            kKeyTargetTime, &targetTimeUs)
                        && targetTimeUs >= 0) {
                    CODEC_LOGV("targetTimeUs = %lld us", targetTimeUs);
                    mTargetTimeUs = targetTimeUs;
                } else {
                    mTargetTimeUs = -1;
                }
            }
        }
    }

    CODEC_LOGV("Calling emptyBuffer on buffer %p (length %d), "
               "timestamp %lld us (%.2f secs)",
               info->mBuffer, offset,
               timestampUs, timestampUs / 1E6);

    // Hand the freshly filled input buffer to the OMX component; decoded
    // output will later appear in mFilledBuffers.
    err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, offset,
            flags, timestampUs);

    info->mStatus = OWNED_BY_COMPONENT;

    return true;
}
兩點:
a,呼叫err= mSource->read(&srcBuffer,&options);從原始檔案中讀取原始資料,
b,往srcBuffer中讀取資料前後,都呼叫omx轉移已經讀取到該info中的資料,目的是解碼,解碼後的資料就放入了mFilledBuffers這個佇列中;
4:針對mpeg4型別的視訊,上面的read函式呼叫的是MPEG4Source的read函式,核心程式碼如下:
// Excerpt from MPEG4Source::read(): returns one sample (or, for AVC without
// mWantsNALFragments handling here, one NAL fragment) from the MP4 file.
// NOTE(review): trimmed blog excerpt — the full AOSP function declares
// targetSampleTimeUs and has further branches not shown here.
status_t MPEG4Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        // Seek path:
        // 1: find the sampleIndex nearest the requested seek time;
        // 2: find the sync (key-frame) sample syncSampleIndex near it;
        // 3: resolve that index to sampleTime — the position playback will
        //    actually resume from;
        // 4: mSampleTable->getMetaDataForSample() yields the sample's file
        //    offset and size;
        // 5: with offset/size, mDataSource->readAt(offset, data, size) reads
        //    the raw sample bytes into the buffer.
        uint32_t findFlags = 0;
        switch (mode) {
            case ReadOptions::SEEK_PREVIOUS_SYNC:
                findFlags = SampleTable::kFlagBefore;
                break;
            case ReadOptions::SEEK_NEXT_SYNC:
                findFlags = SampleTable::kFlagAfter;
                break;
            case ReadOptions::SEEK_CLOSEST_SYNC:
            case ReadOptions::SEEK_CLOSEST:
                findFlags = SampleTable::kFlagClosest;
                break;
            default:
                CHECK(!"Should not be here.");
                break;
        }

        uint32_t sampleIndex;
        status_t err = mSampleTable->findSampleAtTime(
                seekTimeUs * mTimescale / 1000000,
                &sampleIndex, findFlags);

        uint32_t syncSampleIndex;
        if (err == OK) {
            err = mSampleTable->findSyncSampleNear(
                    sampleIndex, &syncSampleIndex, findFlags);
        }

        uint32_t sampleTime;
        if (err == OK) {
            err = mSampleTable->getMetaDataForSample(
                    sampleIndex, NULL, NULL, &sampleTime);
        }

        if (mode == ReadOptions::SEEK_CLOSEST) {
            targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
        }

        // Decoding must start at the key frame.
        mCurrentSampleIndex = syncSampleIndex;
    }

    off64_t offset;
    size_t size;
    uint32_t cts;
    bool isSyncSample;
    bool newBuffer = false;
    if (mBuffer == NULL) {
        newBuffer = true;

        // Look up where the current sample lives in the file and how big it is.
        status_t err =
            mSampleTable->getMetaDataForSample(
                    mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample);

        if (err != OK) {
            return err;
        }

        err = mGroup->acquire_buffer(&mBuffer);

        if (err != OK) {
            CHECK(mBuffer == NULL);
            return err;
        }
    }

    if (!mIsAVC || mWantsNALFragments) {
        if (newBuffer) {
            // Read the raw sample bytes straight from the data source.
            ssize_t num_bytes_read =
                mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);

            CHECK(mBuffer != NULL);
            mBuffer->set_range(0, size);
            mBuffer->meta_data()->clear();
            mBuffer->meta_data()->setInt64(
                    kKeyTime, ((int64_t)cts * 1000000) / mTimescale);

            if (isSyncSample) {
                mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
            }

            ++mCurrentSampleIndex;
        }

        if (!mIsAVC) {
            *out = mBuffer;
            mBuffer = NULL;

            return OK;
        }

        // Each NAL unit is split up into its constituent fragments and
        // each one of them returned in its own buffer.
        CHECK(mBuffer->range_length() >= mNALLengthSize);

        const uint8_t *src =
            (const uint8_t *)mBuffer->data() + mBuffer->range_offset();

        size_t nal_size = parseNALSize(src);

        MediaBuffer *clone = mBuffer->clone();
        CHECK(clone != NULL);

        clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);

        CHECK(mBuffer != NULL);
        // Advance past the fragment just cloned out.
        mBuffer->set_range(
                mBuffer->range_offset() + mNALLengthSize + nal_size,
                mBuffer->range_length() - mNALLengthSize - nal_size);

        if (mBuffer->range_length() == 0) {
            // Whole sample consumed; release so the next call fetches a new one.
            mBuffer->release();
            mBuffer = NULL;
        }

        *out = clone;

        return OK;
    }
}
藍色部分為主要的流程
5:後續就是開始呼叫SampleTable.cpp和SampleIterator.cpp這兩個類的相關函式解析檔案和讀取資料,最主要的函式是通過sampleIndex獲取offset和size資訊,程式碼如下:
// Resolves a sample index to its file offset, byte size, composition time
// and key-frame status.  Any of the output pointers may be NULL when the
// caller does not need that piece of metadata.
status_t SampleTable::getMetaDataForSample(
        uint32_t sampleIndex,
        off64_t *offset,
        size_t *size,
        uint32_t *compositionTime,
        bool *isSyncSample) {
    Mutex::Autolock autoLock(mLock);

    status_t err;
    if ((err = mSampleIterator->seekTo(sampleIndex)) != OK) {
        return err;
    }

    if (offset) {
        *offset = mSampleIterator->getSampleOffset();
    }

    if (size) {
        *size = mSampleIterator->getSampleSize();
    }

    if (compositionTime) {
        *compositionTime = mSampleIterator->getSampleTime();
    }

    if (isSyncSample) {
        *isSyncSample = false;
        if (mSyncSampleOffset < 0) {
            // No sync-sample ('stss') table: every sample is a sync sample.
            *isSyncSample = true;
        } else {
            // mSyncSamples is sorted; resume the scan from the cached index
            // when it is still a valid lower bound, otherwise from 0.
            size_t i = (mLastSyncSampleIndex < mNumSyncSamples)
                    && (mSyncSamples[mLastSyncSampleIndex] <= sampleIndex)
                ? mLastSyncSampleIndex : 0;

            while (i < mNumSyncSamples && mSyncSamples[i] < sampleIndex) {
                ++i;
            }

            if (i < mNumSyncSamples && mSyncSamples[i] == sampleIndex) {
                *isSyncSample = true;
            }

            // Cache the scan position to speed up the next (likely nearby)
            // lookup.
            mLastSyncSampleIndex = i;
        }
    }

    return OK;
}
下面這個函式沒有看懂,對具體的mpeg4壓縮協議沒有進行深入瞭解
status_tSampleIterator::findSampleTime(
uint32_t sampleIndex,uint32_t *time) {
if(sampleIndex >=mTable->mNumSampleSizes) {
returnERROR_OUT_OF_RANGE;
}
while(sampleIndex >= mTTSSampleIndex + mTTSCount){
if (mTimeToSampleIndex ==mTable->mTimeToSampleCount) {
return ERROR_OUT_OF_RANGE;
}
mTTSSampleIndex +=mTTSCount;
mTTSSampleTime += mTTSCount *mTTSDuration;
mTTSCount =mTable->mTimeToSample[2 *mTimeToSampleIndex];
mTTSDuration =mTable->mTimeToSample[2 * mTimeToSampleIndex +1];
++mTimeToSampleIndex;
}
*time =mTTSSampleTime + mTTSDuration * (sampleIndex -mTTSSampleIndex);
*time+=mTable->getCompositionTimeOffset(sampleIndex);
returnOK;
}
這裡分析Android4.0.1本地視訊資料讀取的流程,其他過程類似。當播放條件準備妥當之後,就要迴圈讀取視訊的原始資料放到MediaBuffer,將MediaBuffer中的資料輸送到解碼器中解碼,解碼後的資料放到MediaBuffer中,再將這MediaBuffer中的資料進行render顯示。本文主要側重讀取原始資料的流程,主要是程式碼跟蹤,不夾雜個人分析,有些mpeg4的演算法不懂。 1:onVideoEvent中開始讀取資料,具體程式碼如下: void AwesomePlayer::onVideoEvent() { if (!mVideoBuffer) { MediaSource::ReadOptions options; if (mSeeking != NO_SEEK) { LOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6); options.setSeekTo( mSeekTimeUs, mSeeking == SEEK_VIDEO_ONLY ? MediaSource::ReadOptions::SEEK_NEXT_SYNC : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC); } for (;;) { status_t err = mVideoSource->read(&mVideoBuffer, &options);