Android rtsp 流媒體音視訊幀的處理流程
阿新 • • 發佈:2018-12-30
轉自 http://blog.sina.com.cn/foreverlovelost
先把從收到rtp包到封裝成完整的一幀涉及的相關函式從上到下羅列一遍, 後續在忘記的情況下理清的時候可以作為線索，不用從頭去分析程式碼 (MyHandler.h) onMessageReceived(case 'setu') sp<AMessage> notify = new AMessage('accu', id()); void ARTPConnection::addStream; void ARTPConnection::onAddStream(const sp<AMessage> &msg); void ARTPConnection::onPollStreams(); status_t ARTPConnection::receive(StreamInfo *s, bool receiveRTP); status_t ARTPConnection::parseRTP(StreamInfo *s, const sp<ABuffer> &buffer); void ARTPSource::processRTPPacket(const sp<ABuffer> &buffer); void ARTPAssembler::onPacketReceived(const sp<ARTPSource> &source); ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::assembleMore(const sp<ARTPSource> &source); ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::addPacket(const sp<ARTPSource> &source); void AMPEG4AudioAssembler::submitAccessUnit() 中 sp<AMessage> msg = mNotifyMsg->dup();
msg->setBuffer("access-unit",accessUnit);
msg->post();
看紅色部分,從構造一個訊息,到最後通過這個訊息將封裝好的buffer,返回到MyHandler中來處理,並且前面一篇部落格分析dlna問題的時候大致講解了如何把從伺服器端接受到的一些rtp包封裝一個完整的視訊幀。
下面是封裝好的一個完整的幀如何打上時間戳放到待解碼的佇列中去的
MyHandler中的onMessageReceived函式會收到上面post過來的訊息，然後會呼叫onAccessUnitComplete函式
if (mFirstAccessUnit) {
如果是第一個到達的資料包，會給RTSPSource.cpp發一個訊息，表示收到資料了，已經連線上了，然後更改一下狀態
sp<AMessage> msg = mNotify->dup();
msg->setInt32("what", kWhatConnected);
msg->post();
if(mSeekable) {
如果是點播的rtsp流媒體,會進入到這裡來,而直播流媒體卻進不來
for (size_t i = 0; i < mTracks.size(); ++i) {
TrackInfo *info = &mTracks.editItemAt(i);
postNormalPlayTimeMapping(i,
info->mNormalPlayTimeRTP, info->mNormalPlayTimeUs);
}
}
mFirstAccessUnit = false;
}
下面這個處理是J版本新增加的，也就是在音視訊任何一個還沒有建立時間戳的時候，會將收到的這個幀暫時儲存在mPackets中，具體需要研究一下 mAllTracksHaveTime這個變數的賦值，涉及rtsp流媒體RTCP包中的sr資訊，專門用於同步，下一篇部落格再分析。
if (!mAllTracksHaveTime) {
ALOGV("storing accessUnit, no time established yet");
track->mPackets.push_back(accessUnit);
return;
}
當上面的if條件不滿足，也即音視訊都已經建立了同步的時間機制後，就會進入下面的程式碼對這個幀做處理，首先把先前暫時存放在mPackets中的幀全部取出來呼叫addMediaTimestamp打上時間戳，然後傳送出去。
while (!track->mPackets.empty()) {
sp<ABuffer> accessUnit = *track->mPackets.begin();
track->mPackets.erase(track->mPackets.begin());
if (addMediaTimestamp(trackIndex, track, accessUnit)) {
postQueueAccessUnit(trackIndex, accessUnit);
}
}
將mPackets中的幀打上時間戳後,還要將剛來的這一幀也打上時間戳然後呼叫postQueueAccessUnit傳送出去
if (addMediaTimestamp(trackIndex, track, accessUnit)) {
postQueueAccessUnit(trackIndex, accessUnit);
}
至於上面怎麼打時間戳也會在後面的部落格中講解。
上面的傳送出去，跟蹤訊息流程知道是傳送到了RTSPSource.cpp的onMessageReceived函式中了
case MyHandler::kWhatAccessUnit:
{
size_t trackIndex;
判斷是音訊還是視訊
CHECK(msg->findSize("trackIndex", &trackIndex));
CHECK_LT(trackIndex, mTracks.size());
取出從MyHandler中打好時間戳的幀
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
int32_t damaged;
判斷是否是已經破壞的幀，如果是已經破壞的幀就不放到待解碼的佇列中去
if (accessUnit->meta()->findInt32("damaged", &damaged)
&& damaged) {
ALOGI("dropping damaged access unit.");
break;
}
TrackInfo *info = &mTracks.editItemAt(trackIndex);
sp<AnotherPacketSource> source = info->mSource;
if (source != NULL) {
uint32_t rtpTime;
CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
下面的這個if條件是J版本修改過的,只有直播流媒體會進入到這個if中去。
if(!info->mNPTMappingValid) {
// This is a live stream, wedidn't receive any normal
// playtime mapping. We won'tmap to npt time.
source->queueAccessUnit(accessUnit);
break;
}
下面這段程式碼是給這一幀重新計算時間戳，只對點播的rtsp流媒體會起作用。
int64_t nptUs =
((double)rtpTime - (double)info->mRTPTime)
/ info->mTimeScale
* 1000000ll
+ info->mNormalPlaytimeUs;
accessUnit->meta()->setInt64("timeUs", nptUs);
source->queueAccessUnit(accessUnit);
}
break;
}
無論對於直播還是點播流媒體,最後都會將這個AccessUnit放到AnotherPacketSource的待解碼佇列中去了。
下面跟蹤一下如何從待解碼的佇列中取出完整的幀進行解碼
初始化解碼器後會呼叫ACodec的postFillThisBuffer函式
CHECK_EQ((int)info->mStatus,(int)BufferInfo::OWNED_BY_US);
sp<AMessage> notify = mCodec->mNotify->dup();
取出初始化Acodec從NuPlayerDecoder.cpp中傳遞過來的notify這個訊息，用於往NuPlayerDecoder.cpp中傳送訊息
notify->setInt32("what", ACodec::kWhatFillThisBuffer); 設定好了what
notify->setPointer("buffer-id", info->mBufferID);
info->mData->meta()->clear();
notify->setBuffer("buffer", info->mData);
設定了返回的訊息，用於等下fillBuffer之後通知ACodec
sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id());
reply->setPointer("buffer-id", info->mBufferID);
notify->setMessage("reply", reply);
notify->post();
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
post之後就被NuPlayerDecoder.cpp接收到了
case kWhatCodecNotify:
{
int32_t what;
CHECK(msg->findInt32("what", &what));
if (what == ACodec::kWhatFillThisBuffer) {
onFillThisBuffer(msg);
}
break;
}
void NuPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) {
sp<AMessage> reply;
CHECK(msg->findMessage("reply", &reply));
sp<ABuffer> outBuffer;
sp<AMessage> notify = mNotify->dup(); 取出從NuPlayer中傳遞過來的notify，用於給其傳送訊息
notify->setMessage("codec-request", msg);
notify->post();
}
下面就是NuPlayer中接收到了訊息
case kWhatVideoNotify:
case kWhatAudioNotify:
{
bool audio = msg->what() == kWhatAudioNotify;
sp<AMessage> codecRequest;
CHECK(msg->findMessage("codec-request", &codecRequest));
int32_t what;
CHECK(codecRequest->findInt32("what", &what));
if (what == ACodec::kWhatFillThisBuffer) {
status_t err = feedDecoderInputData(
audio, codecRequest);
if (err == -EWOULDBLOCK) {
if (mSource->feedMoreTSData() == OK) {
msg->post(10000ll);
}
}
}
status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) {
sp<AMessage> reply;
CHECK(msg->findMessage("reply", &reply));
sp<ABuffer> accessUnit;
bool dropAccessUnit;
do {
status_t err = mSource->dequeueAccessUnit(audio, &accessUnit);
if (err == -EWOULDBLOCK) {
return err;
}
dropAccessUnit = false;
if (!audio
&& mVideoLateByUs > 100000ll
&& mVideoIsAVC
&& !IsAVCReferenceFrame(accessUnit)) {
dropAccessUnit = true;
++mNumFramesDropped;
}
} while (dropAccessUnit);
status_t NuPlayer::RTSPSource::dequeueAccessUnit(
bool audio, sp<ABuffer> *accessUnit) {
下面這段程式碼是J版本新增加的，目的是等到音訊和視訊都緩衝到2秒資料之後才會從待解碼的佇列裡取出幀去解碼
if (mStartingUp) {
if (!haveSufficientDataOnAllTracks()) {
return -EWOULDBLOCK;
}
mStartingUp = false;
}
sp<AnotherPacketSource> source = getSource(audio);
status_t finalResult;
if (!source->hasBufferAvailable(&finalResult)) {
return finalResult == OK ? -EWOULDBLOCK : finalResult;
}
return source->dequeueAccessUnit(accessUnit);
到這裡就從AnotherPacketSource的待解碼佇列中取出幀然後扔到ACodec傳給解碼器去解碼
}
先把從收到rtp包到封裝成完整的一幀涉及的相關函式從上到下羅列一遍, 後續在忘記的情況下理清的時候可以作為線索，不用從頭去分析程式碼 (MyHandler.h) onMessageReceived(case 'setu') sp<AMessage> notify = new AMessage('accu', id()); void ARTPConnection::addStream; void ARTPConnection::onAddStream(const sp<AMessage> &msg); void ARTPConnection::onPollStreams(); status_t ARTPConnection::receive(StreamInfo *s, bool receiveRTP); status_t ARTPConnection::parseRTP(StreamInfo *s, const sp<ABuffer> &buffer); void ARTPSource::processRTPPacket(const sp<ABuffer> &buffer); void ARTPAssembler::onPacketReceived(const sp<ARTPSource> &source); ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::assembleMore(const sp<ARTPSource> &source); ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::addPacket(const sp<ARTPSource> &source); void AMPEG4AudioAssembler::submitAccessUnit() 中 sp<AMessage> msg = mNotifyMsg->dup();