[live555] Decoding testRTSPClient output with ffmpeg (notes)
Getting each frame with live555
In testRTSPClient:
Boolean DummySink::continuePlaying() {
  if (fSource == NULL) return False; // sanity check (should not happen)

  // Request the next frame of data from our input source. "afterGettingFrame()" will get called later, when it arrives:
  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                        afterGettingFrame, this,
                        onSourceClosure, this);
  return True;
}
fReceiveBuffer then holds one complete frame of data, which you can process in afterGettingFrame().
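For orientation, the per-frame hook in testRTSPClient is the DummySink::afterGettingFrame() member function (the static callback forwards to it); roughly, the frame handling described in the rest of this post goes where the placeholder comment is:

void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds) {
  // fReceiveBuffer now holds one frame of frameSize bytes; handle it here
  // (save to file / decode, as shown below)

  // Then request the next frame:
  continuePlaying();
}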
For decoding I used ffmpeg.
First, save each received frame into a raw H.264 elementary-stream file (playable with VLC).
// Open the output file file.h264
FILE *fp_h264 = fopen("file.h264", "ab+"); // open the file stream in append mode
// Initialize ffmpeg and set up the H.264 decoder
av_register_all();
AVCodec *m_pAVCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
AVFrame *m_pAVFrame = av_frame_alloc();
AVCodecContext *m_pAVCodecContext = avcodec_alloc_context3(m_pAVCodec);
// (The decoder itself is opened with avcodec_open2() later, once the SPS/PPS extradata has been set.)
Next comes turning the NALUs into an H.264 byte stream: NALUs are separated by the start code 0x00 00 00 01. This is handled in testRTSPClient's afterGettingFrame:
// A struct to hold one frame
typedef struct {
  unsigned char *frameBuf;
  int framebufsize;
} FrameUnit;
FrameUnit *pFrameunit = NULL;
if (!strcmp(fSubsession.mediumName(), "video")) // only handle the video stream
{
  // Allocate the struct
  pFrameunit = (FrameUnit *)malloc(sizeof(FrameUnit));
  char *pbuf = (char *)fReceiveBuffer;
  // Start code placed between frames
  char head[4] = { 0x00, 0x00, 0x00, 0x01 };
  // Prepend the start code to each frame, so the file looks like
  // 0x00000001+sps+0x00000001+pps+0x00000001+NALU1+0x00000001+NALU2...
  pFrameunit->framebufsize = frameSize + 4;
  pFrameunit->frameBuf = (unsigned char *)malloc(pFrameunit->framebufsize);
  memcpy(pFrameunit->frameBuf, head, 4);
  memcpy(pFrameunit->frameBuf + 4, pbuf, frameSize);
  // Write the frame to the file
  fwrite(pFrameunit->frameBuf, pFrameunit->framebufsize, 1, fp_h264);
  // Release the memory
  free(pFrameunit->frameBuf);
  pFrameunit->frameBuf = NULL;
  pFrameunit->framebufsize = 0;
  free(pFrameunit);
  // Call fclose(fp_h264) once all frames have been written
  fclose(fp_h264);
}
That is the complete procedure; file.h264 can then be played with VLC.
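One caveat: if the server only signals the SPS/PPS out of band in the SDP (rather than in-band in the RTP stream), the raw file will not decode until they are written first. A minimal sketch, reusing live555's parseSPropParameterSets() exactly as in the YUV section below, run once right after fp_h264 is opened:

// Write SDP-signalled SPS/PPS (with start codes) once at the start of file.h264
unsigned numSPropRecords = 0;
SPropRecord *records = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSPropRecords);
unsigned char startcode[4] = { 0x00, 0x00, 0x00, 0x01 };
for (unsigned i = 0; i < numSPropRecords; ++i) {
  fwrite(startcode, 1, 4, fp_h264);
  fwrite(records[i].sPropBytes, 1, records[i].sPropLength, fp_h264);
}
delete[] records;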
What is worth learning next is decoding to YUV with ffmpeg.
The flow is the same, except that when handling each frame, ffmpeg's AVCodecContext needs its extradata set, i.e. the SPS and PPS.
Before decoding any frame, parse the SPS/PPS out of the SDP and attach them to the AVCodecContext.
// Open the output file file.yuv
FILE *fp_yuv = fopen("file.yuv", "ab+");
// Set up the SPS and PPS needed for decoding
if (!strcmp(fSubsession.mediumName(), "video"))
{
  // Only fill in AVCodecContext->extradata if it has not been set yet
  if (m_pAVCodecContext->extradata == NULL)
  {
    unsigned int SPropRecords = 0;
    SPropRecord *p_record = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), SPropRecords);
    // The SPS and PPS are returned as an array of SPropRecord
    SPropRecord &sps = p_record[0];
    SPropRecord &pps = p_record[1];
    int totalsize = 0;
    unsigned char *tmp = NULL;
    unsigned char nalu_header[4] = { 0, 0, 0, 1 };
    totalsize = 8 + sps.sPropLength + pps.sPropLength;
    // Prepend a start code to both the SPS and the PPS
    tmp = (unsigned char *)realloc(tmp, totalsize);
    memcpy(tmp, nalu_header, 4);
    memcpy(tmp + 4, sps.sPropBytes, sps.sPropLength);
    memcpy(tmp + 4 + sps.sPropLength, nalu_header, 4);
    memcpy(tmp + 4 + sps.sPropLength + 4, pps.sPropBytes, pps.sPropLength);
    // Hand the SPS/PPS to ffmpeg's H.264 decoder context
    m_pAVCodecContext->extradata_size = totalsize;
    m_pAVCodecContext->extradata = tmp;
    // Now that the extradata is in place, open the decoder
    avcodec_open2(m_pAVCodecContext, m_pAVCodec, NULL);
  }
  // The remaining steps are the same as when saving H.264, except that where the frame was written to the file, it is now decoded
  if (fp_yuv) {
    AVPacket *avpkt = (AVPacket *)malloc(sizeof(AVPacket));
    AVFrame *pFrameYUV = av_frame_alloc();
    AVFrame *pFrameVideo = av_frame_alloc();
    int got_picture = 0;
    av_init_packet(avpkt);
    // Hand the current frame to the AVPacket
    avpkt->data = pFrameunit->frameBuf;
    avpkt->size = pFrameunit->framebufsize;
    if (avpkt->size > 0)
    {
      // Decode one frame; on success got_picture is set to 1 and the decoded picture is in pFrameVideo
      int ret = avcodec_decode_video2(m_pAVCodecContext, pFrameVideo, &got_picture, avpkt);
      if (ret < 0) {
        env << "Decode: Error.\n";
        goto error_handler;
      }
    }
    // Decoding succeeded; convert to YUV420P
    if (got_picture)
    {
      // Convert pFrameVideo into pFrameYUV
      uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, m_pAVCodecContext->width, m_pAVCodecContext->height));
      avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, m_pAVCodecContext->width, m_pAVCodecContext->height);
      struct SwsContext *img_convert_ctx = sws_getContext(m_pAVCodecContext->width, m_pAVCodecContext->height
          , m_pAVCodecContext->pix_fmt, m_pAVCodecContext->width, m_pAVCodecContext->height
          , AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
      sws_scale(img_convert_ctx, (const uint8_t *const *)pFrameVideo->data, pFrameVideo->linesize, 0
          , m_pAVCodecContext->height, pFrameYUV->data, pFrameYUV->linesize);
      // Write the YUV planes to file.yuv
      int y_size = m_pAVCodecContext->width * m_pAVCodecContext->height;
      if (pFrameYUV->data[0])
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     // Y
      if (pFrameYUV->data[1])
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); // U
      if (pFrameYUV->data[2])
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); // V
      sws_freeContext(img_convert_ctx);
      av_free(out_buffer);
    }
    // Release resources
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrameVideo);
    av_free_packet(avpkt);
    free(avpkt);
    // As above, call fclose(fp_yuv) once the last frame has been written
    fclose(fp_yuv);
  }
....
}
Open file.yuv in a YUV player and select the 720p format to play it.
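If no dedicated YUV player is at hand, ffplay can also show the raw file; a usage example, assuming the stream really is 1280x720 (adjust -video_size to the actual resolution):

ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 file.yuv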
The last step would be to find something like SDL to render and play the frames.
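A minimal sketch of that last step, assuming SDL2 (the window/renderer/texture names and the two helper functions are illustrative, not part of testRTSPClient); it creates an IYUV streaming texture, which matches the planar YUV420P output of sws_scale above, and updates it once per decoded frame:

#include <SDL.h>
extern "C" {
#include <libavutil/frame.h>
}

// Hypothetical display state, created once at startup
static SDL_Window   *sdlWindow   = NULL;
static SDL_Renderer *sdlRenderer = NULL;
static SDL_Texture  *sdlTexture  = NULL;

// Call once, with the width/height taken from m_pAVCodecContext
int initDisplay(int width, int height) {
  if (SDL_Init(SDL_INIT_VIDEO) < 0) return -1;
  sdlWindow = SDL_CreateWindow("live555 + ffmpeg", SDL_WINDOWPOS_CENTERED,
                               SDL_WINDOWPOS_CENTERED, width, height, 0);
  sdlRenderer = SDL_CreateRenderer(sdlWindow, -1, 0);
  sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV,
                                 SDL_TEXTUREACCESS_STREAMING, width, height);
  return 0;
}

// Call once per decoded frame, e.g. right after sws_scale() instead of fwrite()
void showFrame(AVFrame *pFrameYUV) {
  SDL_UpdateYUVTexture(sdlTexture, NULL,
                       pFrameYUV->data[0], pFrameYUV->linesize[0],
                       pFrameYUV->data[1], pFrameYUV->linesize[1],
                       pFrameYUV->data[2], pFrameYUV->linesize[2]);
  SDL_RenderClear(sdlRenderer);
  SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
  SDL_RenderPresent(sdlRenderer);
}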