FFmpeg解碼H264視訊流
阿新 • 發佈:2019-02-19
1、寫在前面
此文章主要包含解碼H264視訊流資料,主要有以下幾點:
1、H264視訊幀為Annex B格式,若使用AVCC格式可自行研究;
2、H264視訊裸流,非解碼視訊檔案(若有需要我後期可新增這部分程式碼);
3、支援輸出RGB24或YUV420格式,其他可自行修改;
4、FFmpeg官網程式碼迭代及介面變更較大,程式碼適應於FFmpeg3.4.2"Cantor"、3.3.7"Hilbert"等版本,較舊介面請看舊版本程式碼;
2、新版本
FFmpegVideoDecoder.h
#include <libavcodec/avcodec.h>

/**
 * Initialize the video-stream decoder from existing codec parameters.
 *
 * @param ctx decoding parameters (AVCodecParameters)
 * @see FFmpeg_H264DecoderInit, which sets this up for an H.264 stream
 * @return 0 on successful initialization, <0 otherwise
 */
int FFmpeg_VideoDecoderInit(AVCodecParameters *ctx);

/**
 * Initialize the H.264 video-stream decoder.
 *
 * @return 0 on successful initialization, <0 otherwise
 */
int FFmpeg_H264DecoderInit(void);

/**
 * Release the decoder and all associated resources.
 *
 * @return 0 on success, <0 otherwise
 */
int FFmpeg_VideoDecoderRelease(void);

/* Return value: 0 = no decoded data yet, -1 = decode failed, 1 = decode succeeded. */
/**
 * Decode raw video-stream data (Annex B H.264 elementary stream).
 *
 * @param inbuf      raw video stream data
 * @param inbufSize  size of the raw stream data in bytes
 * @param framePara  receives frame parameters: {width, height, linesize1, linesize2, linesize3}
 * @param outRGBBuf  output RGB buffer (caller-allocated), or NULL
 * @param outYUVBuf  receives YUV data pointer, or NULL
 * @return size/flag of the decoded frame on success, otherwise <=0
 */
int FFmpeg_H264Decode(unsigned char * inbuf, int inbufSize, int *framePara, unsigned char *outRGBBuf, unsigned char **outYUVBuf);
FFmpegVideoDecoder.c
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include "FFmpegVideoDecoder.h"

/*
 * Single global decoder instance. NOTE(review): not thread-safe — one
 * decoder per process; callers must serialize access.
 */
struct AVCodecContext *pAVCodecCtx_decoder = NULL;
struct AVCodec *pAVCodec_decoder;
struct AVPacket mAVPacket_decoder;
struct AVFrame *pAVFrame_decoder = NULL;
struct SwsContext *pImageConvertCtx_decoder = NULL;
struct AVFrame *pFrameYUV_decoder = NULL;

/**
 * Initialize the video-stream decoder from existing codec parameters.
 *
 * @param codecParameters source parameters (codec id, extradata, ...)
 * @return 0 on success, <0 on failure
 */
int FFmpeg_VideoDecoderInit(AVCodecParameters *codecParameters)
{
    if (!codecParameters) {
        printf("Source codec parameters are NULL.\n");
        return -1;
    }

    /* Drop any previous decoder instance before re-initializing. */
    FFmpeg_VideoDecoderRelease();

    avcodec_register_all();

    pAVCodec_decoder = avcodec_find_decoder(codecParameters->codec_id);
    if (!pAVCodec_decoder) {
        printf("Can not find codec:%d\n", codecParameters->codec_id);
        return -2;
    }

    pAVCodecCtx_decoder = avcodec_alloc_context3(pAVCodec_decoder);
    if (!pAVCodecCtx_decoder) {
        printf("Failed to alloc codec context.\n");
        FFmpeg_VideoDecoderRelease();
        return -3;
    }

    if (avcodec_parameters_to_context(pAVCodecCtx_decoder, codecParameters) < 0) {
        printf("Failed to copy avcodec parameters to codec context.\n");
        FFmpeg_VideoDecoderRelease();
        return -3;
    }

    if (avcodec_open2(pAVCodecCtx_decoder, pAVCodec_decoder, NULL) < 0) {
        printf("Failed to open h264 decoder\n");
        FFmpeg_VideoDecoderRelease();
        return -4;
    }

    av_init_packet(&mAVPacket_decoder);
    pAVFrame_decoder = av_frame_alloc();
    pFrameYUV_decoder = av_frame_alloc();
    if (!pAVFrame_decoder || !pFrameYUV_decoder) {
        /* Fix: allocation results were previously unchecked. */
        printf("Failed to alloc frames.\n");
        FFmpeg_VideoDecoderRelease();
        return -5;
    }
    return 0;
}

/**
 * Initialize an H.264 decoder by building default codec parameters and
 * delegating to FFmpeg_VideoDecoderInit().
 *
 * @return 0 on success, <0 on failure
 */
int FFmpeg_H264DecoderInit(void)
{
    avcodec_register_all();

    AVCodec *pAVCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!pAVCodec) {
        printf("can not find H264 codec\n");
        return -1;
    }

    AVCodecContext *pAVCodecCtx = avcodec_alloc_context3(pAVCodec);
    if (pAVCodecCtx == NULL) {
        printf("Could not alloc video context!\n");
        return -2;
    }

    /* Fix: avcodec_parameters_alloc() result was previously unchecked. */
    AVCodecParameters *codecParameters = avcodec_parameters_alloc();
    if (!codecParameters ||
        avcodec_parameters_from_context(codecParameters, pAVCodecCtx) < 0) {
        printf("Failed to copy avcodec parameters from codec context.\n");
        avcodec_parameters_free(&codecParameters);
        avcodec_free_context(&pAVCodecCtx);
        return -3;
    }

    int ret = FFmpeg_VideoDecoderInit(codecParameters);

    /* The temporary context/parameters are only templates; free them. */
    avcodec_parameters_free(&codecParameters);
    avcodec_free_context(&pAVCodecCtx);
    return ret;
}

/**
 * Release every resource owned by the global decoder. Safe to call
 * repeatedly and before initialization.
 *
 * @return 0 always
 */
int FFmpeg_VideoDecoderRelease(void)
{
    if (pAVCodecCtx_decoder != NULL) {
        avcodec_free_context(&pAVCodecCtx_decoder); /* also NULLs the pointer */
    }
    av_packet_unref(&mAVPacket_decoder);
    if (pAVFrame_decoder != NULL) {
        /* Fix: use av_frame_free (unrefs buffers) instead of bare av_free. */
        av_frame_free(&pAVFrame_decoder);
    }
    if (pFrameYUV_decoder != NULL) {
        av_frame_free(&pFrameYUV_decoder);
    }
    if (pImageConvertCtx_decoder) {
        sws_freeContext(pImageConvertCtx_decoder);
        /* Fix: NULL after free to prevent dangling pointer / double free. */
        pImageConvertCtx_decoder = NULL;
    }
    return 0;
}

/* Return value: 0 = no decoded data yet, -1 = decode failed, 1 = decode succeeded. */
/**
 * Decode one chunk of raw H.264 stream data.
 *
 * @param inbuf      raw Annex B stream data
 * @param inbufSize  size of inbuf in bytes
 * @param framePara  receives {width, height, linesize0, linesize1, linesize2}
 * @param outRGBBuf  caller-allocated RGB24 buffer (width*height*3), or NULL
 * @param outYUVBuf  receives a pointer to the decoded Y plane, or NULL
 * @return 1 on a decoded frame, 0 when more input is needed, -1 on error
 */
int FFmpeg_H264Decode(unsigned char *inbuf, int inbufSize, int *framePara,
                      unsigned char *outRGBBuf, unsigned char **outYUVBuf)
{
    if (!pAVCodecCtx_decoder || !pAVFrame_decoder || !inbuf || inbufSize <= 0 ||
        !framePara || (!outRGBBuf && !outYUVBuf)) {
        return -1;
    }

    av_frame_unref(pAVFrame_decoder);
    av_frame_unref(pFrameYUV_decoder);
    framePara[0] = framePara[1] = 0;

    mAVPacket_decoder.data = inbuf;
    mAVPacket_decoder.size = inbufSize;

    int ret = avcodec_send_packet(pAVCodecCtx_decoder, &mAVPacket_decoder);
    if (ret < 0) {
        return (ret == AVERROR(EAGAIN)) ? 0 : -1;
    }

    ret = avcodec_receive_frame(pAVCodecCtx_decoder, pAVFrame_decoder);
    if (ret == AVERROR(EAGAIN)) {
        return 0; /* decoder needs more input before producing a frame */
    }
    if (ret < 0) {
        return -1;
    }

    framePara[0] = pAVFrame_decoder->width;
    framePara[1] = pAVFrame_decoder->height;

    if (outYUVBuf) {
        /*
         * Fix: the original stored the address of the plane-pointer ARRAY
         * ((unsigned char *)frame->data); callers expecting pixel data got
         * garbage. Hand out the Y plane; U/V follow per the linesizes below.
         * NOTE(review): planes are not guaranteed contiguous — callers
         * needing U/V should use the linesize values in framePara.
         */
        *outYUVBuf = pAVFrame_decoder->data[0];
        framePara[2] = pAVFrame_decoder->linesize[0];
        framePara[3] = pAVFrame_decoder->linesize[1];
        framePara[4] = pAVFrame_decoder->linesize[2];
    } else if (outRGBBuf) {
        pFrameYUV_decoder->data[0] = outRGBBuf;
        pFrameYUV_decoder->data[1] = NULL;
        pFrameYUV_decoder->data[2] = NULL;
        pFrameYUV_decoder->data[3] = NULL;
        /* Fix: RGB24 is a single interleaved plane — only linesize[0] is
         * meaningful (the original wrote height*3 into linesize[1]). */
        int linesize[4] = { pAVCodecCtx_decoder->width * 3, 0, 0, 0 };
        /* Fix: reuse a cached context instead of allocating/freeing per
         * frame and leaving the global dangling (double free in Release). */
        pImageConvertCtx_decoder = sws_getCachedContext(
            pImageConvertCtx_decoder,
            pAVCodecCtx_decoder->width, pAVCodecCtx_decoder->height,
            (enum AVPixelFormat)pAVFrame_decoder->format,
            pAVCodecCtx_decoder->width, pAVCodecCtx_decoder->height,
            AV_PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
        if (!pImageConvertCtx_decoder) {
            return -1;
        }
        sws_scale(pImageConvertCtx_decoder,
                  (const uint8_t *const *)pAVFrame_decoder->data,
                  pAVFrame_decoder->linesize, 0, pAVCodecCtx_decoder->height,
                  pFrameYUV_decoder->data, linesize);
    }
    /* Fix: the YUV path previously fell through to "return 0" (meaning
     * "no data yet") even though a frame was successfully produced. */
    return 1;
}
3、舊版本
FFmpegVideoDecoder.h
/* Initialize the decoder from an existing codec context (old FFmpeg API).
 * Returns 0 on success, <0 otherwise. */
int FFmpeg_VideoDecoderInit(AVCodecContext *ctx);
/* Initialize an H.264 decoder. Returns 0 on success, <0 otherwise. */
int FFmpeg_H264DecoderInit(void);
/* Release all decoder resources. Returns 0 on success. */
int FFmpeg_VideoDecoderRelease(void);
/* Decode raw H.264 stream data. framePara receives
 * {width, height, linesize1, linesize2, linesize3}; exactly one of
 * outRGBBuf / outYUVBuf is used. Returns bytes consumed on success, <0 on error. */
int FFmpeg_H264Decode(unsigned char * inbuf, int inbufSize, int *framePara, unsigned char *outRGBBuf, unsigned char **outYUVBuf);
FFmpegVideoDecoder.c
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include "FFmpegVideoDecoder.h"

/*
 * Global single-instance decoder state for the pre-3.x FFmpeg API
 * (avcodec_decode_video2 / avcodec_copy_context). Not thread-safe.
 */
struct AVCodecContext *pAVCodecCtx = NULL;
struct AVCodec *pAVCodec;
struct AVPacket mAVPacket;
struct AVFrame *pAVFrame = NULL;
struct SwsContext *pImageConvertCtx = NULL;
struct AVFrame *pFrameYUV = NULL;

/**
 * Initialize the decoder by copying an existing codec context.
 *
 * @param ctx source codec context (provides codec id and parameters)
 * @return 0 on success, <0 on failure
 */
int FFmpeg_VideoDecoderInit(AVCodecContext *ctx)
{
    if (!ctx) {
        printf("Source codec context is NULL.\n");
        return -1;
    }

    /* Tear down any previous instance first. */
    FFmpeg_VideoDecoderRelease();

    avcodec_register_all();

    pAVCodec = avcodec_find_decoder(ctx->codec_id);
    if (!pAVCodec) {
        printf("Can not find codec:%d\n", ctx->codec_id);
        return -2;
    }

    pAVCodecCtx = avcodec_alloc_context3(pAVCodec);
    if (!pAVCodecCtx || avcodec_copy_context(pAVCodecCtx, ctx) != 0) {
        printf("Failed to alloc codec context.\n");
        FFmpeg_VideoDecoderRelease();
        return -3;
    }

    if (avcodec_open2(pAVCodecCtx, pAVCodec, NULL) < 0) {
        printf("Failed to open h264 decoder\n");
        FFmpeg_VideoDecoderRelease();
        return -4;
    }

    av_init_packet(&mAVPacket);
    pAVFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    if (!pAVFrame || !pFrameYUV) {
        /* Fix: allocation results were previously unchecked. */
        printf("Failed to alloc frames.\n");
        FFmpeg_VideoDecoderRelease();
        return -5;
    }
    return 0;
}

/**
 * Initialize an H.264 decoder using a freshly allocated template context.
 *
 * @return 0 on success, <0 on failure
 */
int FFmpeg_H264DecoderInit(void)
{
    avcodec_register_all();

    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        printf("can not find H264 codec\n");
        return -1;
    }

    AVCodecContext *tmpCtx = avcodec_alloc_context3(codec);
    if (!tmpCtx) {
        printf("Could not alloc video context!\n");
        return -2;
    }

    int ret = FFmpeg_VideoDecoderInit(tmpCtx);

    /* The template context is always ours to free (the init copied it);
     * the original's "if (ctx || ret < 0)" condition obscured that. */
    avcodec_free_context(&tmpCtx);
    return ret;
}

/**
 * Release every resource owned by the global decoder. Safe to call
 * repeatedly and before initialization.
 *
 * @return 0 always
 */
int FFmpeg_VideoDecoderRelease(void)
{
    if (pAVCodecCtx != NULL) {
        avcodec_close(pAVCodecCtx);
        avcodec_free_context(&pAVCodecCtx); /* also NULLs the pointer */
    }
    av_packet_unref(&mAVPacket);
    if (pAVFrame != NULL) {
        /* Fix: use av_frame_free (unrefs buffers) instead of bare av_free. */
        av_frame_free(&pAVFrame);
    }
    if (pFrameYUV) {
        av_frame_free(&pFrameYUV);
    }
    if (pImageConvertCtx) {
        sws_freeContext(pImageConvertCtx);
        /* Fix: NULL after free to prevent dangling pointer / double free. */
        pImageConvertCtx = NULL;
    }
    return 0;
}

/**
 * Decode one chunk of raw H.264 stream data (old avcodec_decode_video2 API).
 *
 * @param inbuf      raw Annex B stream data
 * @param inbufSize  size of inbuf in bytes
 * @param framePara  receives {width, height, linesize0, linesize1, linesize2}
 * @param outRGBBuf  caller-allocated RGB24 buffer (width*height*3), or NULL
 * @param outYUVBuf  receives a pointer to the decoded Y plane, or NULL
 * @return bytes consumed on success (frame data in outputs when a picture
 *         was produced), <0 on decode error
 */
int FFmpeg_H264Decode(unsigned char *inbuf, int inbufSize, int *framePara,
                      unsigned char *outRGBBuf, unsigned char **outYUVBuf)
{
    /* Fix: validate state and input BEFORE touching the frames — the
     * original called av_frame_unref(pAVFrame) even when uninitialized. */
    if (!pAVCodecCtx || !pAVFrame || !pFrameYUV || !framePara ||
        inbuf == NULL || inbufSize <= 0) {
        return -1;
    }

    av_frame_unref(pAVFrame);
    av_frame_unref(pFrameYUV);
    framePara[0] = framePara[1] = 0;

    mAVPacket.data = inbuf;
    mAVPacket.size = inbufSize;

    int got_picture = 0;
    int len = avcodec_decode_video2(pAVCodecCtx, pAVFrame, &got_picture, &mAVPacket);
    if (len < 0) {
        printf("decode error:%d\n", len);
        return len;
    }

    if (got_picture > 0) {
        framePara[0] = pAVFrame->width;
        framePara[1] = pAVFrame->height;

        if (outYUVBuf) {
            /*
             * Fix: the original stored the address of the plane-pointer
             * ARRAY ((unsigned char *)pAVFrame->data). Hand out the Y
             * plane instead. NOTE(review): planes are not guaranteed
             * contiguous — callers should use the linesizes in framePara.
             */
            *outYUVBuf = pAVFrame->data[0];
            framePara[2] = pAVFrame->linesize[0];
            framePara[3] = pAVFrame->linesize[1];
            framePara[4] = pAVFrame->linesize[2];
        } else if (outRGBBuf) {
            pFrameYUV->data[0] = outRGBBuf;
            pFrameYUV->data[1] = NULL;
            pFrameYUV->data[2] = NULL;
            pFrameYUV->data[3] = NULL;
            /* Fix: RGB24 is one interleaved plane — only linesize[0] is
             * meaningful (original wrote height*3 into linesize[1]). */
            int linesize[4] = { pAVCodecCtx->width * 3, 0, 0, 0 };
            /* Fix: cached context; the original freed per frame and left
             * the global dangling for Release to double-free. */
            pImageConvertCtx = sws_getCachedContext(
                pImageConvertCtx,
                pAVCodecCtx->width, pAVCodecCtx->height, AV_PIX_FMT_YUV420P,
                pAVCodecCtx->width, pAVCodecCtx->height, AV_PIX_FMT_RGB24,
                SWS_FAST_BILINEAR, NULL, NULL, NULL);
            if (!pImageConvertCtx) {
                return -1;
            }
            sws_scale(pImageConvertCtx,
                      (const uint8_t *const *)pAVFrame->data,
                      pAVFrame->linesize, 0, pAVCodecCtx->height,
                      pFrameYUV->data, linesize);
        }
    }
    return len;
}