
Camera Live Streaming (Push) Based on FFmpeg

This article implements reading video data from a PC camera and publishing it as a live stream over the RTMP protocol. The example covers:
1. Using FFmpeg's libavdevice
2. The basic workflow of video decoding, encoding, and streaming

To use the libavdevice functions, the relevant components must first be registered:
avdevice_register_all()
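
A minimal, self-contained sketch of the registration step (the include paths assume a standard FFmpeg installation):

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

int main(void)
{
    av_register_all();        //register all muxers/demuxers (needed in older FFmpeg versions)
    avformat_network_init();  //initialize networking, needed later for RTMP output
    avdevice_register_all();  //register capture devices such as dshow
    return 0;
}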

List the DShow devices available on the machine:

AVFormatContext *pFmtCtx = avformat_alloc_context();
    AVDeviceInfoList *device_info = NULL;
    AVDictionary* options = NULL;
    av_dict_set(&options, "list_devices", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("Device Info=============\n");
    avformat_open_input(&pFmtCtx, "video=dummy", iformat, &options);
    printf("========================\n");

The same listing can be obtained directly with the FFmpeg command-line tool:
ffmpeg -list_devices true -f dshow -i dummy
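
The capture itself can also be tested end to end from the command line; the device name and server URL below are hypothetical placeholders:

ffmpeg -f dshow -i video="Integrated Camera" -pix_fmt yuv420p -c:v libx264 -preset fast -tune zerolatency -f flv rtmp://localhost/live/stream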

PS: avdevice provides an avdevice_list_devices function that enumerates the system's capture devices (device name and description), which can be used to let the user pick a device, but it does not support DShow devices.
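
For reference, a sketch of how avdevice_list_devices might be called for a device type whose demuxer does support enumeration (fmt_ctx is assumed to be an AVFormatContext already opened on such a device):

AVDeviceInfoList *dev_list = NULL;
int n = avdevice_list_devices(fmt_ctx, &dev_list);
if (n >= 0) {
    for (int i = 0; i < dev_list->nb_devices; i++)
        printf("%d: %s (%s)\n", i,
            dev_list->devices[i]->device_name,
            dev_list->devices[i]->device_description);
    avdevice_free_list_devices(&dev_list);  //release the device list
}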

The chosen device name is then opened as input exactly like an ordinary file, followed by the corresponding initialization:

av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();

    //Show Dshow Device
    show_dshow_device();

    printf("\nChoose capture device: ");
    //fgets replaces the unsafe gets(); strip the trailing newline afterwards
    if (fgets(capture_name, sizeof(capture_name), stdin) == NULL){
        printf("Error reading device name\n");
        return -1;
    }
    capture_name[strcspn(capture_name, "\n")] = '\0';
    sprintf(device_name, "video=%s", capture_name);

    ifmt = av_find_input_format("dshow");
    //Set own video device's name
    if (avformat_open_input(&ifmt_ctx, device_name, ifmt, NULL) != 0){
        printf("Couldn't open input stream.\n");
        return -1;
    }
    //input initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0){
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
            videoindex = i;
            break;
        }
    if (videoindex == -1){
        printf("Couldn't find a video stream.\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec,
        avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0){
        printf("Could not open codec.\n");
        return -1;
    }

Once the input device is initialized, the output must be initialized as well. FFmpeg treats network protocols the same as files, and since RTMP is used for transport, the output format is set to FLV with H.264 as the encoder:

//output initialize  
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);  
    //output encoder initialize  
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);  
    if (!pCodec){  
        printf("Can not find encoder! (沒有找到合適的編碼器!)\n");  
        return -1;  
    }  
    pCodecCtx=avcodec_alloc_context3(pCodec);  
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;  
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;  
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;  
    pCodecCtx->time_base.num = 1;  
    pCodecCtx->time_base.den = 25;  
    pCodecCtx->bit_rate = 400000;  
    pCodecCtx->gop_size = 250;  
    /* Some formats, for example flv, want stream headers to be separate. */  
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)  
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;  

    //H264 codec param  
    //pCodecCtx->me_range = 16;  
    //pCodecCtx->max_qdiff = 4;  
    //pCodecCtx->qcompress = 0.6;  
    pCodecCtx->qmin = 10;  
    pCodecCtx->qmax = 51;  
    //Optional Param  
    pCodecCtx->max_b_frames = 3;  
    // Set H264 preset and tune  
    AVDictionary *param = 0;  
    av_dict_set(&param, "preset", "fast", 0);  
    av_dict_set(&param, "tune", "zerolatency", 0);  

    if (avcodec_open2(pCodecCtx, pCodec,&param) < 0){  
        printf("Failed to open encoder! (編碼器開啟失敗!)\n");  
        return -1;  
    }  

    //Add a new stream to output; must be called before avformat_write_header() for muxing  
    video_st = avformat_new_stream(ofmt_ctx, pCodec);  
    if (video_st == NULL){  
        return -1;  
    }  
    video_st->time_base.num = 1;  
    video_st->time_base.den = 25;  
    video_st->codec = pCodecCtx;  

    //Open output URL,set before avformat_write_header() for muxing  
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){  
        printf("Failed to open output file!\n");  
        return -1;  
    }  

    //Show some Information  
    av_dump_format(ofmt_ctx, 0, out_path, 1);  

    //Write File Header  
    avformat_write_header(ofmt_ctx,NULL); 
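
A hypothetical out_path for testing against a local RTMP server (the address is an assumption, not part of the original):

const char *out_path = "rtmp://localhost:1935/live/livestream";

Note that for FLV the muxer replaces the video stream's time_base with its own {1, 1000} (milliseconds) inside avformat_write_header, which is why the timestamp code below reads the time base back from the output stream rather than reusing the value set before the header was written.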

With input and output both initialized, the decode-encode-stream loop can start.
Note that camera data usually arrives in an RGB pixel format and has to be converted to YUV420P before it can be encoded for streaming, so the following preparation is done first:

//prepare before decode and encode  
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));  
    //enc_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));  
    //camera data often has an RGB pixel format; convert it to YUV420P  
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,   
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);  
    pFrameYUV = avcodec_alloc_frame();  
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));  
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);  

Now decoding, encoding, and streaming can begin:

//start decode and encode  
    int64_t start_time=av_gettime();  
    while (av_read_frame(ifmt_ctx, dec_pkt) >= 0){     
        if (exit_thread)  
            break;  
        av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");  
        pframe = av_frame_alloc();  
        if (!pframe) {  
            ret = AVERROR(ENOMEM);  
            return -1;  
        }  
        //av_packet_rescale_ts(dec_pkt, ifmt_ctx->streams[dec_pkt->stream_index]->time_base,  
        //  ifmt_ctx->streams[dec_pkt->stream_index]->codec->time_base);  
        ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,  
            &dec_got_frame, dec_pkt);  
        if (ret < 0) {  
            av_frame_free(&pframe);  
            av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");  
            break;  
        }  
        if (dec_got_frame){  
            sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);     

            enc_pkt.data = NULL;  
            enc_pkt.size = 0;  
            av_init_packet(&enc_pkt);  
            ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);  
            av_frame_free(&pframe);  
            if (enc_got_frame == 1){  
                //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);  
                framecnt++;   
                enc_pkt.stream_index = video_st->index;  

                //Write PTS  
                AVRational time_base = video_st->time_base;//{ 1, 1000 } for flv after write_header  
                AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;// { 50, 2 };  
                AVRational time_base_q = { 1, AV_TIME_BASE };  
                //Duration between 2 frames (us)  
                int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  //frame duration in the internal time base (us)  
                //Parameters  
                //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));  
                enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);  
                enc_pkt.dts = enc_pkt.pts;  
                enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));  
                enc_pkt.pos = -1;  

                //Delay  
                int64_t pts_time = av_rescale_q(enc_pkt.dts, time_base, time_base_q);  
                int64_t now_time = av_gettime() - start_time;  
                if (pts_time > now_time)  
                    av_usleep(pts_time - now_time);  

                ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);  
                av_free_packet(&enc_pkt);  
            }  
        }  
        else {  
            av_frame_free(&pframe);  
        }  
        av_free_packet(dec_pkt);  
    }  

Decoding is straightforward; the encoding side is more involved because PTS and DTS have to be computed manually.
Here they are derived from the frame rate: the interval between two frames is first computed in FFmpeg's internal time base (AV_TIME_BASE, i.e. microseconds), and it then has to be converted, via av_rescale_q, into the output stream's time_base before being written into the packet.
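
As a concrete example of that conversion, assuming a 25 fps camera and the FLV stream time base of {1, 1000}:

//Frame interval in FFmpeg's internal time base {1, AV_TIME_BASE}:
//    calc_duration = AV_TIME_BASE * (1 / 25) = 1000000 / 25 = 40000 (microseconds)
//Rescaled to the flv stream time base {1, 1000}:
//    enc_pkt.duration = av_rescale_q(40000, (AVRational){1, AV_TIME_BASE}, (AVRational){1, 1000}) = 40
//The n-th encoded frame then gets
//    enc_pkt.pts = enc_pkt.dts = n * 40   (one tick every 40 ms)

The excerpt ends at the capture loop. Because max_b_frames is set to 3, the encoder still holds delayed frames at shutdown, so a complete program would also flush the encoder and write the FLV trailer. A sketch using the same (old) API, with per-packet timestamp handling omitted for brevity:

//flush delayed frames out of the encoder
int flush_got = 0;
do {
    AVPacket flush_pkt;
    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL;
    flush_pkt.size = 0;
    if (avcodec_encode_video2(pCodecCtx, &flush_pkt, NULL, &flush_got) < 0)
        break;
    if (flush_got) {
        flush_pkt.stream_index = video_st->index;
        av_interleaved_write_frame(ofmt_ctx, &flush_pkt);  //PTS/DTS handling omitted
        av_free_packet(&flush_pkt);
    }
} while (flush_got);

//write the trailer and release resources
av_write_trailer(ofmt_ctx);
sws_freeContext(img_convert_ctx);
avio_close(ofmt_ctx->pb);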