OpenCV + ffmpeg + rtmp 實現攝像頭採集資料直播功能
阿新 • 發佈:2019-05-24
採用OpenCV獲取影象資料,通過ffmpeg推流給rtmp伺服器
OpenCV獲取的影象資料為BGR格式,需要轉換成YUV格式,再將其編碼為h264格式,通過ffmpeg推流
標頭檔案
extern "C" { #include <libavcodec/avcodec.h> #include <libavdevice/avdevice.h> #include <libavformat/avformat.h> #include <libavutil/mathematics.h> #include <libavutil/opt.h> #include <libavutil/time.h> #include <libavutil/timestamp.h> #include <libswscale/swscale.h> }
主要實現
// RTMP push URL on the nginx-rtmp live server
char *outUrl = "rtmp://localhost:6666/live/Cam";
// Register all codecs (no-op/deprecated since FFmpeg 4.0)
avcodec_register_all();
// Register all muxers/demuxers (no-op/deprecated since FFmpeg 4.0)
av_register_all();
// Initialize network protocol support (required for rtmp://)
avformat_network_init();
Mat orgFrame;
// Pixel-format conversion context (BGR24 -> YUV420P)
SwsContext *vsc = NULL;
// Output frame that receives the converted YUV data
AVFrame *yuv = NULL;
// H.264 encoder context
AVCodecContext *avctx = NULL;
// RTMP/FLV muxer context
AVFormatContext *ofmt_ctx = NULL;
// Open the capture device if it is not open yet.
if(m_cap) //VideoCapture* m_cap;
{
if(m_cap->isOpened() == false)
{
// Try the default camera index first ...
bool ret = m_cap->open(0);
if(ret == false && QFile::exists(DEFAULT_CAMERA_DEV))
{
// ... then fall back to the explicit device node if it exists
ret = m_cap->open(DEFAULT_CAMERA_DEV);
}
// if(ret)
// {
// m_cap->set(CV_CAP_PROP_FRAME_WIDTH, 640);
// m_cap->set(CV_CAP_PROP_FRAME_HEIGHT, 480);
// m_cap->set(CV_CAP_PROP_FPS, 30);
// }
}
}
else
return -1;
int framecnt = 0;
// Query the actual capture geometry and frame rate from the driver
int inWidth = m_cap->get(CAP_PROP_FRAME_WIDTH);
int inHeight = m_cap->get(CAP_PROP_FRAME_HEIGHT);
int fps = m_cap->get(CAP_PROP_FPS);
qDebug() << "m_cap info" << inWidth << inHeight << fps;
// NOTE(review): the reported rate is overridden with a fixed 25 fps —
// many cameras report 0 for CAP_PROP_FPS; confirm 25 matches the real rate.
fps = 25;
/// 2. Initialize the pixel-format conversion context (BGR24 -> YUV420P)
vsc = sws_getCachedContext(vsc,
inWidth, inHeight, AV_PIX_FMT_BGR24, // source width, height, pixel format
inWidth, inHeight, AV_PIX_FMT_YUV420P,// destination width, height, pixel format
SWS_BICUBIC, // scaling algorithm (sizes are equal here, so no actual resize)
0, 0, 0
);
if (!vsc)
{
return -2;
}
/// 3. Allocate the output frame that will hold the converted YUV planes
yuv = av_frame_alloc();
yuv->format = AV_PIX_FMT_YUV420P;
yuv->width = inWidth;
yuv->height = inHeight;
yuv->pts = 0;
// Allocate the plane buffers with 32-byte alignment
int ret = av_frame_get_buffer(yuv, 32);
if (ret != 0)
{
ERROR_INFO "av_frame_get_buffer fail";
return -3;
}
/// 4. Initialize the H.264 encoder context.
// a) find the encoder (typically libx264)
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec)
{
ERROR_INFO ("Can`t find h264 encoder!");
return -13;
}
// b) allocate an encoder context for it
avctx = avcodec_alloc_context3(codec);
if (!avctx)
{
ERROR_INFO ("avcodec_alloc_context3 failed!");
return -8;
}
qDebug() << "init avctx--------------------";
// c) configure the encoder
avctx->codec_id = codec->id;
avctx->thread_count = 8;
// avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
AVDictionary *param = 0;
// BUG FIX: the original text had the HTML-mangled token "¶m"
// ("&param" rendered as "&para;m"); restored the address-of operator so
// the x264 private options are actually passed and the code compiles.
av_dict_set(&param, "preset", "superfast", 0); // x264 preset: favor encode speed
av_dict_set(&param, "tune", "zerolatency", 0); // real-time / low-latency tuning
avctx->width = inWidth;
avctx->height = inHeight;
// avctx->bit_rate = 400000;
avctx->bit_rate = 50 * 1024 * 8; // ~400 kbps target bitrate
avctx->time_base.num = 1;
avctx->time_base.den = fps;
avctx->framerate.num = fps;
avctx->framerate.den = 1;
// Quantizer range: lower qmin -> better quality but slower / more bits;
// higher values shrink the output at the cost of clarity.
avctx->qmin = 10;
avctx->qmax = 51;
// GOP size: one keyframe every 50 frames. A large GOP makes players that
// wait for an I-frame join later, silently adding startup latency.
avctx->gop_size = 50;
// No B-frames: decoding B-frames requires buffering future frames, so keep
// only I- and P-frames for low latency.
avctx->max_b_frames = 0;
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
// d) open the encoder with the private options dictionary
ret = avcodec_open2(avctx, codec, &param);
if (ret != 0)
{
qDebug() << "avcodec_open2 fail";
return -5;
}
qDebug() << "avcodec_open2 success! --------------------";
qDebug() << "avcodec_open2 success! --------------------";
/// 5. Configure the output muxer and the video stream.
// a) create the FLV muxer context (RTMP transports FLV); the URL is
// supplied later through avio_open()
// ret = avformat_alloc_output_context2(&ofmt_ctx, 0, "flv", outUrl);
ret = avformat_alloc_output_context2(&ofmt_ctx, 0, "flv", NULL); //++Huey
if (ret != 0)
{
ERROR_INFO "avformat_alloc_output_context2 fail";
return -6;
}
qDebug() << "avformat_new_stream --------------------";
// b) add the video stream to the muxer
AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream)
{
qDebug() << ("avformat_new_stream failed");
return -7;
}
// vs->codecpar->codec_tag = 0;
// NOTE(review): AVStream::codec and avcodec_copy_context() are deprecated;
// current FFmpeg expects avcodec_parameters_from_context(out_stream->codecpar,
// avctx) instead — confirm against the FFmpeg version in use.
out_stream->codec->codec_tag = 0; //++Huey
// Copy the encoder parameters into the output stream
qDebug() << "avcodec_copy_context --------------------";
// avcodec_parameters_from_context(vs->codecpar, vc);
//++Huey
avcodec_copy_context(out_stream->codec,avctx);
out_stream->time_base.num = 1;
out_stream->time_base.den = fps;
//End++
av_dump_format(ofmt_ctx, 0, outUrl, 1);
qDebug() << "avio_open --------------------";
/// Open the RTMP network output IO (must precede avformat_write_header)
ret = avio_open(&ofmt_ctx->pb, outUrl, AVIO_FLAG_WRITE);
if (ret != 0)
{
qDebug() << "avio_open fail";
return -9;
}
qDebug() << "avformat_write_header --------------------";
// Send the container header to the server
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret != 0)
{
qDebug() << "avformat_write_header fail";
return -10;
}
qDebug() << "run CamPush --------------------";
// Capture -> convert -> encode -> push loop (runs until the process ends).
AVPacket pkt;
av_init_packet(&pkt);
int vpts = 0;
while(1)
{
ERROR_INFO "m_cap->grab --------------------";
// Grab the next camera frame; skip this iteration on failure
if (!m_cap->grab())
{
continue;
}
ERROR_INFO "m_cap->retrieve --------------------";
// Decode the grabbed frame into a BGR cv::Mat
if (!m_cap->retrieve(orgFrame))
{
continue;
}
// Local preview window; waitKey(20) adds up to ~20 ms of latency per frame
imshow("video", orgFrame);
waitKey(20);
ERROR_INFO "rgb to yuv --------------------";
/// BGR -> YUV420P conversion.
// BGR24 is a packed format, so only source plane 0 is used.
uint8_t *indata[AV_NUM_DATA_POINTERS] = { 0 };
indata[0] = orgFrame.data;
int insize[AV_NUM_DATA_POINTERS] = { 0 };
// Bytes per source row (stride)
insize[0] = orgFrame.cols * orgFrame.elemSize();
int h = sws_scale(vsc, indata, insize, 0, orgFrame.rows, // source
yuv->data, yuv->linesize);
if (h <= 0)
{
continue;
}
ERROR_INFO "AVFrame to AVPacket --------------------";
/// H.264 encode: monotonically increasing pts in encoder time base units
yuv->pts = vpts;
vpts++;
int got_packet = 0;
av_init_packet(&pkt);
pkt.data = NULL; // let the encoder allocate the packet buffer
pkt.size = 0;
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
ret = avcodec_encode_video2(avctx, &pkt, yuv, &got_packet);
qDebug() << "AVPacket Bef --"
<< pkt.size << pkt.pts << pkt.dts << pkt.duration
<< (pkt.data == NULL) << (pkt.buf == NULL);
if (got_packet == 0 || ret != 0)
{
// Either a hard encode error, or the encoder buffered the frame
// (normal startup delay) and produced no packet yet.
qDebug() << "avcodec_encode_video2 fail -------------";
continue;
}
/// Push the encoded packet to the RTMP server.
// BUG FIX: stream_index was never set on the active code path; it only
// worked by luck because the video stream happens to be stream 0.
pkt.stream_index = out_stream->index;
// Rescale timestamps from the encoder time base to the muxer time base
pkt.pts = av_rescale_q(pkt.pts, avctx->time_base, out_stream->time_base);
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(pkt.duration, avctx->time_base, out_stream->time_base);
// ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
ret = av_write_frame(ofmt_ctx, &pkt); //++Huey
// BUG FIX: av_write_frame() does NOT take ownership of the packet, so the
// original code leaked the encoded buffer on every frame. Release it here.
av_packet_unref(&pkt);
qDebug() << "end --------------------" << ret;
}
return 0;