
VS2017 + FFmpeg: decoding and scaling

Overview:

This program is a modified version of the simplest_ffmpeg_decoder project from 雷神 (Lei Xiaohua)'s GitHub, updated for the FFmpeg 3 API.

Environment: Windows 10 + VS2017 + FFmpeg 3.4.2

Function reference:

//Usually the first FFmpeg call made; initializes libavformat and registers all muxers, demuxers and protocols.

//If this function is not called, you can instead register only the formats you want to support with

//av_register_input_format()

//av_register_output_format()

void av_register_all(void);

//Initializes the networking components; optional, but recommended when network input/output is needed

int avformat_network_init(void);

//Allocates an AVFormatContext

AVFormatContext *avformat_alloc_context(void);

//Opens the input media and reads its header information

int avformat_open_input(AVFormatContext **ps, const char *url, 
AVInputFormat *fmt, AVDictionary **options);

//Reads packets to gather stream information; call it after avformat_open_input() to obtain more complete information

int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
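
Putting the open/probe calls together, a minimal sketch (the file name input.mkv is a placeholder; error handling is reduced to a message):

AVFormatContext *fmt_ctx = NULL; //allocated by avformat_open_input() when passed as NULL
if (avformat_open_input(&fmt_ctx, "input.mkv", NULL, NULL) < 0) {
    printf("Couldn't open input\n");
    return -1;
}
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    printf("Couldn't read stream information\n");
    return -1;
}
av_dump_format(fmt_ctx, 0, "input.mkv", 0); //print the detected streams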

//Allocates an AVCodecContext and fills it with default values

AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);

//Copies the stream parameters from an AVCodecParameters into the AVCodecContext

int avcodec_parameters_to_context(AVCodecContext *codec,
                                  const AVCodecParameters *par);

//Initializes the AVCodecContext to use the given AVCodec

int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
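
Taken together, opening a decoder for a stream in the FFmpeg 3.x API follows this pattern (a sketch assuming fmt_ctx is an opened AVFormatContext and videoindex a valid video stream index):

AVStream *st = fmt_ctx->streams[videoindex];
AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id); //look up the decoder by id
AVCodecContext *dec_ctx = avcodec_alloc_context3(dec);       //allocate a context with defaults
avcodec_parameters_to_context(dec_ctx, st->codecpar);        //copy the stream parameters
if (avcodec_open2(dec_ctx, dec, NULL) < 0) {                 //initialize the decoder
    printf("Could not open codec\n");
    return -1;
}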

//Allocates an AVFrame and initializes its fields to default values; used to hold the raw data decoded from an AVPacket;

//must be released with av_frame_free()

AVFrame *av_frame_alloc(void);
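
A frame allocated this way contains only the frame structure itself; the decoder attaches the actual picture buffers during decoding, and the frame is eventually released with av_frame_free(). A minimal sketch:

AVFrame *frame = av_frame_alloc(); //holds decoded pictures
if (!frame)
    return AVERROR(ENOMEM);
/* ... decode into frame ... */
av_frame_free(&frame);             //releases the frame and any buffers it references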

//Reads the next frame of the stream as a packet

int av_read_frame(AVFormatContext *s, AVPacket *pkt);

//Unreferences the buffer held by the packet and resets its remaining fields

void av_packet_unref(AVPacket *pkt);

//Sends a packet of compressed data to the decoder

int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);

//Receives a decoded frame from the decoder

int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
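
avcodec_send_packet()/avcodec_receive_frame() form the decoupled decode loop introduced in FFmpeg 3.x: one packet can produce zero or several frames, so receive should be called in a loop, and a NULL packet is sent once at the end to drain the decoder. A minimal sketch (pkt, frame, dec_ctx and videoindex as set up above):

while (av_read_frame(fmt_ctx, pkt) >= 0) {
    if (pkt->stream_index == videoindex && avcodec_send_packet(dec_ctx, pkt) == 0) {
        while (avcodec_receive_frame(dec_ctx, frame) == 0) {
            /* ... use the decoded frame ... */
        }
    }
    av_packet_unref(pkt); //release the packet data before reusing it
}
avcodec_send_packet(dec_ctx, NULL); //enter draining mode
while (avcodec_receive_frame(dec_ctx, frame) == 0) {
    /* ... use the remaining frames ... */
}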

//Allocates an SwsContext used for scaling and pixel-format conversion

struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
                                  int dstW, int dstH, enum AVPixelFormat dstFormat,
                                  int flags, SwsFilter *srcFilter,
                                  SwsFilter *dstFilter, const double *param);

//Performs the scaling

int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
              const int srcStride[], int srcSliceY, int srcSliceH,
              uint8_t *const dst[], const int dstStride[]);
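
For a YUV420P destination the output buffer needs av_image_get_buffer_size(AV_PIX_FMT_YUV420P, dst_w, dst_h, 1) bytes, i.e. dst_w * dst_h * 3 / 2 (a full-size Y plane plus quarter-size U and V planes). A minimal scaling sketch, assuming dec_ctx and frame come from the decode step above and dst_frame is an AVFrame allocated with av_frame_alloc():

struct SwsContext *sws = sws_getContext(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                                        dst_w, dst_h, AV_PIX_FMT_YUV420P,
                                        SWS_BICUBIC, NULL, NULL, NULL);
uint8_t *buf = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, dst_w, dst_h, 1));
av_image_fill_arrays(dst_frame->data, dst_frame->linesize, buf, AV_PIX_FMT_YUV420P, dst_w, dst_h, 1);
sws_scale(sws, (const uint8_t* const*)frame->data, frame->linesize, 0, dec_ctx->height,
          dst_frame->data, dst_frame->linesize);
sws_freeContext(sws); //release the scaler when done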

Program example:

/* ffmpeg_decoder_v2.cpp: entry point of the console application.
Function: decode an MKV file (640x272) ---> raw YUV frames ---> scale to 1080p ---> save to output.yuv

Main functions:
av_register_all()
avformat_alloc_context()
avformat_open_input()
avcodec_alloc_context3()
avcodec_open2()
av_read_frame()
avcodec_send_packet()
avcodec_receive_frame()
sws_scale()

*/
#include "stdafx.h"

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
};

//Link libraries
#pragma comment(lib , "avformat.lib")
#pragma comment(lib , "avcodec.lib")
#pragma comment(lib , "avdevice.lib")
#pragma comment(lib , "avfilter.lib")
#pragma comment(lib , "avutil.lib")
#pragma comment(lib , "postproc.lib")
#pragma comment(lib , "swresample.lib")
#pragma comment(lib , "swscale.lib")

#define dst_w		1920
#define dst_h		1080

int main(int argc, char* argv[])
{

	AVFormatContext	*pFormatCtx = NULL;
	int				videoindex = -1;
	AVCodecContext	*pCodecCtx = NULL;
	AVCodec			*pCodec = NULL;
	AVFrame	*pFrame = NULL, *pFrameYUV = NULL;
	uint8_t *out_buffer = NULL;
	AVPacket *packet = NULL;
	struct SwsContext *img_convert_ctx;

	int y_size = 0;
	int ret = -1;

	char filepath[] = "Titanic.mkv";

	FILE *fp_yuv = fopen("output.yuv", "wb+");


	//avcodec_register_all(); //not needed separately: av_register_all() already registers the codecs as well
	av_register_all();
	//avformat_network_init();
	if (!(pFormatCtx = avformat_alloc_context()))
	{
		printf("avformat_alloc_context error!!,ret=%d\n", AVERROR(ENOMEM));
		return -1;
	}

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
		printf("Couldn't find stream information.\n");
		return -1;
	}

/*
	//another way to get the stream id
	for (i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	}
*/

	/* select the video stream */
    ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
        return ret;
    }
	videoindex = ret; //video stream id

	pCodec = avcodec_find_decoder(pFormatCtx->streams[videoindex]->codecpar->codec_id);
	if (pCodec == NULL) {
		printf("Codec not found.\n");
		return -1;
	}
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (pCodecCtx == NULL)
	{
		printf("Could not allocate AVCodecContext\n");
		return -1;
	}
	if ((ret = avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoindex]->codecpar)) < 0)
	{
		printf("Failed to copy codec parameters to decoder context\n");
		return ret;
	}

	if (avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
		printf("Could not open codec.\n");
		return -1;
	}
	printf("www=%d,hhh=%d\n", pCodecCtx->width, pCodecCtx->height);
	pFrame = av_frame_alloc(); //the data after decoder
	pFrameYUV = av_frame_alloc(); //the data after scale
	out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, dst_w, dst_h, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, dst_w, dst_h, 1);
	packet = av_packet_alloc();
	if (!packet) {
		fprintf(stderr, "Can not alloc packet\n");
		return -1;
	}

	av_dump_format(pFormatCtx, 0, filepath, 0);

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		dst_w, dst_h, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	while (av_read_frame(pFormatCtx, packet) >= 0) {
		if (packet->stream_index == videoindex) {
			ret = avcodec_send_packet(pCodecCtx, packet);
			if (ret != 0)
			{
				printf("send pkt error.\n");
				return ret;
			}

			while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

				y_size = dst_w * dst_h; //luma plane size of the scaled output frame
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
				printf("Decoded one frame.\n");

			}
		}
		av_packet_unref(packet);
	}
	//flush decoder
	//FIX: flush the frames remaining in the decoder by sending a NULL packet once, then draining

	avcodec_send_packet(pCodecCtx, NULL); //enter draining mode
	while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);

		y_size = dst_w * dst_h;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V

		printf("Flush decoder: decoded one frame.\n");
	}

	sws_freeContext(img_convert_ctx);

	fclose(fp_yuv);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_free_context(&pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
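
The resulting output.yuv contains raw planar YUV420P data with no header, so a player has to be told the frame geometry explicitly. With ffplay the file can be checked like this (matching the 1920x1080 output of this program):

ffplay -f rawvideo -pixel_format yuv420p -video_size 1920x1080 output.yuv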