程式人生 > FFMPEG+SDL2 實現播放器功能

FFMPEG+SDL2 實現播放器功能

一、介面

    1 開啟的pushbutton

    2 停止/開始的pushbutton

    3 進度條  QSlider

    4 播放視窗widget

二、建立videothread執行緒

    該執行緒用於讀取視訊資料流   具體實現過程可以參考

    其中需要新增SDL處理音訊的部分進去

   2.1 SDL初始化

if (SDL_Init(SDL_INIT_AUDIO))
    {
        fprintf(stderr,"Could not initialize SDL - %s. \n", SDL_GetError());
        exit(1);
    }

   2.2 獲取音訊流資訊

for (i = 0; i < pFormatCtx->nb_streams; i++)
   {    
       if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO  && audioStream < 0)
       {
           audioStream = i;
       }

   }

///如果audioStream為-1 說明沒有找到音訊流
   if (audioStream == -1) {
       printf("Didn't find a audio stream.\n");
       return;
   }

if (audioStream >= 0)
   {
       /// 設定SDL音訊流資訊 音訊處理初始化
       audio_stream_component_open(&mVideoState, audioStream);  //該函式後續會進行完成
   }

audio_stream_component_open函式在3.3.1中會涉及

2.3  查詢音訊解碼器


///================================ 查詢音訊解碼器 =================================//
   AVCodecContext *aCodecCtx;
   aCodecCtx = pFormatCtx->streams[audioStream]->codec;
   AVCodec *aCodec;
   aCodec = avcodec_find_decoder(aCodecCtx->codec_id);

   if (aCodec == NULL)
   {
       printf("ACodec not found.\n");
       return;
   }

   ///================================ 開啟音訊解碼器 ==================================//
   if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0)
   {
       printf("Could not open audio codec.\n");
       return;
   }
   vs->audio_st = pFormatCtx->streams[audioStream]; ///設定音訊解碼

   packet_queue_init(&vs->videoq); //音訊處理:初始化佇列

 2.4 建立視訊解碼的執行緒  由SDL進行建立  video_thread函式後續階段完成

 vs->video_tid = SDL_CreateThread(video_thread, "video_thread", &mVideoState);
 vs->player = this; //執行緒指標


 2.5  分配AVPacket用於存放讀取的視訊

 AVPacket *packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一個packet 用於存放讀取的視訊

三、開始進行音視訊的讀取

    3.1 音視訊統一讀取

 //===========================  讀取視訊資訊 ===============================//
       if (av_read_frame(pFormatCtx, packet) < 0) //讀取的是一幀視訊  資料存入一個AVPacket的結構中
       {
           qDebug()  << "read error." ;
           return ;
       }
       //此時資料儲存在packet中

       if (packet->stream_index == videoStream)
       {
           packet_queue_put(&vs->videoq, packet); //這裡我們將視訊資料存入佇列

       }
       else if( packet->stream_index == audioStream )
       {
           packet_queue_put(&vs->audioq, packet);//這裡我們將音訊資料存入佇列

       }

    3.2 視訊讀取執行緒   將視訊轉換成QImage顯示

        3.2.1 視訊相關結構體的分配

 ///解碼視訊相關
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *out_buffer_rgb; //解碼後的rgb資料
    struct SwsContext *img_convert_ctx;  //用於解碼後的視訊格式轉換

    AVCodecContext *pCodecCtx = is->video_st->codec; //視訊解碼器

    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    ///這裡我們改成了 將解碼後的YUV資料轉換成RGB32
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

    numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);

    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer_rgb, PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
    // 會將pFrameRGB的資料按RGB格式自動"關聯"到out_buffer_rgb  即pFrameRGB中的資料改變了 out_buffer中的資料也會相應的改變

        3.2.2  從佇列中取出資料

  if (packet_queue_get(&is->videoq, packet, 1) <= 0)
            break;//佇列裡面沒有資料了  讀取完畢了


        3.2.3 解碼 

       ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
        {
            video_pts = *(uint64_t *) pFrame->opaque;
        }
        else if (packet->dts != AV_NOPTS_VALUE)
        {
            video_pts = packet->dts;
        }
        else
        {
            video_pts = 0;
        }


        3.2.4 音視訊同步  關鍵函式synchronize_video( )

        video_pts *= av_q2d(is->video_st->time_base);
        video_pts = synchronize_video(is, pFrame, video_pts);
        while(1)
        {
            audio_pts = is->audio_clock;
            if (video_pts <= audio_pts) //等待匹配   兩者時間匹配 跳出該迴圈
                break;

            int delayTime = (video_pts - audio_pts) * 1000;

            delayTime = delayTime > 5 ? 5:delayTime;

            SDL_Delay(delayTime);
        }


        3.2.5  將影象縮放 轉換格式 輸出到QImage  並且啟用訊號

 if (got_picture)
        {
            sws_scale(img_convert_ctx,(uint8_t const * const *) pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                    pFrameRGB->linesize);

            //把這個RGB資料 用QImage載入
            QImage tmpImg((uchar *)out_buffer_rgb,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
            QImage image = tmpImg.copy(); //把影象複製一份 傳遞給介面顯示
            is->player->disPlayVideo(image); //呼叫激發訊號的函式
        }


  3.3 音訊處理過程

        3.3.1 audio_stream_component_open 函式 

/**
 * Open and configure the audio component of stream `stream_index`.
 *
 * Negotiates an output format with SDL (retrying with fallback channel
 * counts), records the negotiated parameters in the VideoState, opens the
 * FFmpeg audio decoder, initializes the audio packet queue, and starts
 * playback (which begins driving audio_callback).
 *
 * @param is            player state (must have is->ic set)
 * @param stream_index  index of the audio stream inside is->ic
 * @return 0 on success, -1 on any failure
 */
int audio_stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext  *codecCtx;
    AVCodec         *codec;
    SDL_AudioSpec   wanted_spec, spec;  /* wanted_spec: what we request; spec: what SDL grants */
    int64_t wanted_channel_layout = 0;
    int wanted_nb_channels;             /* channel count of the source stream */
    /* SDL supports 1, 2, 4 or 6 channels; this table maps an unsupported
     * count to the next candidate to try. */
    const int next_nb_channels[] = { 0, 0, 1, 6, 2, 6, 4, 6 };

    if (stream_index < 0 || stream_index >= (int)ic->nb_streams)
    {
        return -1;
    }

    codecCtx           = ic->streams[stream_index]->codec;
    wanted_nb_channels = codecCtx->channels;

    /* Derive the desired channel layout from the desired channel count. */
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout))
    {
        wanted_channel_layout  = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }

    wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.freq     = codecCtx->sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0)
    {
        return -1;
    }

    /* Requested output properties. */
    wanted_spec.format   = AUDIO_S16SYS;           // 16-bit signed, native byte order
    wanted_spec.silence  = 0;                      // silence value for the buffer
    wanted_spec.samples  = SDL_AUDIO_BUFFER_SIZE;  // SDL buffer size in samples
    wanted_spec.callback = audio_callback;         // pull-model decode callback
    wanted_spec.userdata = is;                     // opaque pointer handed to the callback

    /* Try to open the audio device; on failure, walk next_nb_channels[]
     * until a supported channel count opens or we run out of candidates. */
    for (;;)
    {
        is->audioID = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(0, 0), 0, &wanted_spec, &spec, 0);
        fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
        qDebug() << QString("SDL_OpenAudio (%1 channels): %2").arg(wanted_spec.channels).arg(SDL_GetError());

        if (is->audioID != 0)
        {
            /* BUGFIX: break immediately on success. The original loop fell
             * through and clobbered wanted_spec.channels / the layout even
             * when the open succeeded, skewing the spec.channels comparison
             * below. */
            break;
        }

        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels)
        {
            /* BUGFIX: bail out instead of falling through — `spec` is not
             * filled in when every open attempt failed, so reading it below
             * would be undefined behavior. (Also fixed "tyu" -> "try".) */
            fprintf(stderr, "No more channel combinations to try, audio open failed\n");
            return -1;
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }

    /* Validate the configuration SDL actually gave us (filled into `spec`). */
    if (spec.format != AUDIO_S16SYS)
    {
        fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
        return -1;
    }

    /* If SDL changed the channel count, recompute the final layout. */
    if (spec.channels != wanted_spec.channels)
    {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if (!wanted_channel_layout)
        {
            fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
            return -1;
        }
    }

    is->audio_hw_buf_size = spec.size;

    /* Record the negotiated source/target audio parameters in VideoState. */
    is->audio_src_fmt            = is->audio_tgt_fmt            = AV_SAMPLE_FMT_S16;
    is->audio_src_freq           = is->audio_tgt_freq           = spec.freq;
    is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
    is->audio_src_channels       = is->audio_tgt_channels       = spec.channels;

    /* Find and open the FFmpeg audio decoder. */
    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0))
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; // drop only zero-size packets

    switch (codecCtx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            is->audio_st        = ic->streams[stream_index];
            is->audio_buf_size  = 0;
            is->audio_buf_index = 0;
            memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
            packet_queue_init(&is->audioq);          // zero-initialize the packet queue
            SDL_PauseAudioDevice(is->audioID, 0);    // unpause: audio_callback starts firing
            break;
        default:
            break;
    }

    return 0;
}


        該函式基本上是固定模式  需要注意的是回撥函式的設定

    wanted_spec.callback = audio_callback;         // 音訊解碼的關鍵回撥函式

    3.3.2  回撥函式  在開始播放後呼叫audio_callback

/*
 * SDL audio callback: fill `stream` with exactly `len` bytes of audio.
 *
 * SDL calls this whenever its device buffer needs data. We drain our own
 * staging buffer (is->audio_buf, bounded by audio_buf_index/audio_buf_size);
 * whenever it runs empty we refill it by calling audio_decode_frame().
 * If decoding fails we substitute a short run of zeroed (silent) samples
 * so playback never stalls.
 */
static void audio_callback(void *userdata, Uint8 *stream, int len)
{
    VideoState *is = (VideoState *) userdata;  /* player state passed at open time */
    double pts;

    /* Keep copying until SDL's buffer is completely filled. */
    while (len > 0)
    {
        if (is->audio_buf_index >= is->audio_buf_size)
        {
            /* Staging buffer drained: decode the next chunk into is->audio_buf. */
            int decoded = audio_decode_frame(is, &pts);
            if (decoded < 0)
            {
                /* No data decoded — emit silence instead. */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            }
            else
            {
                is->audio_buf_size = decoded;
            }
            is->audio_buf_index = 0;  /* restart reading from the top */
        }

        /* Copy as much as fits: min(remaining staged bytes, remaining SDL space). */
        int chunk = is->audio_buf_size - is->audio_buf_index;
        if (chunk > len)
        {
            chunk = len;
        }

        memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, chunk);
        stream              += chunk;
        len                 -= chunk;
        is->audio_buf_index += chunk;
    }
}


    3.3.3  解碼的過程audio_decode_frame

 /*
  * Decode one chunk of audio from the queued packets.
  *
  * Pulls packets from is->audioq, decodes them into is->audio_frame, and
  * (when the decoded format differs from the negotiated SDL format)
  * resamples via is->swr_ctx into is->audio_buf2. On success, is->audio_buf
  * points at the playable bytes and the byte count is returned; the audio
  * clock (is->audio_clock) is advanced and the chunk's pts is stored in
  * *pts_ptr. Returns -1 when the packet queue is exhausted.
  *
  * NOTE(review): uses deprecated FFmpeg APIs (avcodec_alloc_frame,
  * avcodec_get_frame_defaults, avcodec_decode_audio4, av_free_packet) —
  * consistent with the rest of this tutorial, but tied to old FFmpeg.
  */
 static int audio_decode_frame(VideoState *is, double *pts_ptr) // decode audio
    {
    int len1, len2, decoded_data_size;
    AVPacket *pkt = &is->audio_pkt;      // current packet being consumed
    int got_frame = 0;
    int64_t dec_channel_layout;          // layout actually present in the decoded frame
    int wanted_nb_samples, resampled_data_size, n;

    double pts;

    for (;;)
    {
        /* Inner loop: consume the bytes remaining in the current packet. */
        while (is->audio_pkt_size > 0)
        {

            // paused: sleep briefly and re-check instead of decoding
            if (is->isPause == true)
            {
                SDL_Delay(10);
                continue;
            }


            /* Lazily allocate the reusable decode frame, or reset it. */
            if (!is->audio_frame)
            {
                if (!(is->audio_frame = avcodec_alloc_frame()))
                {
                    return AVERROR(ENOMEM);
                }
            }
            else
                avcodec_get_frame_defaults(is->audio_frame);

            // decode: output lands in is->audio_frame; len1 = bytes consumed
            len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame,&got_frame, pkt);
            if (len1 < 0)  // error, skip the frame
            {
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1; // advance read pointer past consumed bytes
            is->audio_pkt_size -= len1; // shrink remaining packet size

            if (!got_frame)
                continue;   // decoder needs more input before emitting a frame

            /* Byte size of the decoded frame's raw samples. */
            decoded_data_size = av_samples_get_buffer_size(NULL,
                    is->audio_frame->channels, is->audio_frame->nb_samples,
                    (AVSampleFormat)is->audio_frame->format, 1);

            /* Trust the frame's layout only if it matches its channel count;
             * otherwise derive a default layout from the count. */
            dec_channel_layout =
                    (is->audio_frame->channel_layout && is->audio_frame->channels
                         == av_get_channel_layout_nb_channels( is->audio_frame->channel_layout))
                    ?is->audio_frame->channel_layout :av_get_default_channel_layout(is->audio_frame->channels);

            wanted_nb_samples = is->audio_frame->nb_samples;

            /* (Re)create the resampler whenever the source format changed.
             * NOTE(review): the last subcondition can never be true here —
             * wanted_nb_samples was just set equal to nb_samples above. In
             * ffplay (where this originates) wanted_nb_samples can differ
             * for sync adjustment. */
            if (is->audio_frame->format   != is->audio_src_fmt
                    || dec_channel_layout != is->audio_src_channel_layout
                    || is->audio_frame->sample_rate != is->audio_src_freq
                    || (wanted_nb_samples != is->audio_frame->nb_samples
                            && !is->swr_ctx))
            {
                if (is->swr_ctx)
                    swr_free(&is->swr_ctx);
                is->swr_ctx = swr_alloc_set_opts(NULL,
                        is->audio_tgt_channel_layout, (AVSampleFormat)is->audio_tgt_fmt,
                        is->audio_tgt_freq, dec_channel_layout,
                        (AVSampleFormat)is->audio_frame->format, is->audio_frame->sample_rate,
                        0, NULL);
                if (!is->swr_ctx || swr_init(is->swr_ctx) < 0)
                {
                    //fprintf(stderr,"swr_init() failed\n");
                    break;
                }
                /* Remember the source format we configured the resampler for. */
                is->audio_src_channel_layout = dec_channel_layout;
                is->audio_src_channels = is->audio_st->codec->channels;
                is->audio_src_freq = is->audio_st->codec->sample_rate;
                is->audio_src_fmt = is->audio_st->codec->sample_fmt;
            }

            /* Sample-count compensation hook — usable for A/V sync tweaks
             * (a no-op today; see NOTE above). */
            if (is->swr_ctx)
            {
                const uint8_t **in =(const uint8_t **) is->audio_frame->extended_data;
                uint8_t *out[] = { is->audio_buf2 };

                if (wanted_nb_samples != is->audio_frame->nb_samples)
                {
                    if (swr_set_compensation(is->swr_ctx,
                            (wanted_nb_samples - is->audio_frame->nb_samples)
                                    * is->audio_tgt_freq
                                    / is->audio_frame->sample_rate,
                            wanted_nb_samples * is->audio_tgt_freq
                                    / is->audio_frame->sample_rate) < 0)
                    {
                        //fprintf(stderr,"swr_set_compensation() failed\n");
                        break;
                    }
                }

                 //=================================== resample ===========================================//
                // converted samples land in `out`, i.e. is->audio_buf2;
                // the second argument is audio_buf2's capacity in samples per channel
                len2 = swr_convert(is->swr_ctx, out,
                        sizeof(is->audio_buf2) / is->audio_tgt_channels
                                / av_get_bytes_per_sample(is->audio_tgt_fmt),
                        in, is->audio_frame->nb_samples);
                if (len2 < 0)
                {
                    //fprintf(stderr,"swr_convert() failed\n");
                    break;
                }
                if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels
                                / av_get_bytes_per_sample(is->audio_tgt_fmt))
                {
                    // output buffer exactly filled — likely too small; reset the resampler
                    //fprintf(stderr,"warning: audio buffer is probably too small\n");
                    swr_init(is->swr_ctx);
                }
                is->audio_buf = is->audio_buf2; // expose the converted data for playback
                resampled_data_size = len2 * is->audio_tgt_channels
                        * av_get_bytes_per_sample(is->audio_tgt_fmt);
            }
            else
            {
                /* No conversion needed: play the decoded frame directly. */
                resampled_data_size = decoded_data_size;
                is->audio_buf = is->audio_frame->data[0];
            }

            /* Advance the audio clock by this chunk's duration.
             * n = bytes per sample-frame (2 bytes/sample * channels, S16). */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double) resampled_data_size
                    / (double) (n * is->audio_st->codec->sample_rate);

            // We have data, return it and come back for more later
            return resampled_data_size;
        }


        // paused: sleep briefly before fetching the next packet
        if (is->isPause == true)
        {
            SDL_Delay(10);
            continue;
        }

        /* Current packet fully consumed — release it and fetch the next. */
        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt, 0, sizeof(*pkt));

        if (packet_queue_get(&is->audioq, pkt, 0) <= 0) // pop the next packet from is->audioq into pkt
            return -1;

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update, update the audio clock w/pts */
        if (pkt->pts != AV_NOPTS_VALUE)
        {
            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
        }
    }

    return 0;
}


3.4  synchronize_video( )音視訊同步函式

/*
 * Update the video clock from a decoded frame and return the frame's pts.
 *
 * If pts is 0 (unknown), the current video clock is used as the pts;
 * otherwise the video clock is synced to the given pts. The clock is then
 * advanced by one frame duration, plus half a frame duration for each
 * repeated field reported in src_frame->repeat_pict.
 */
static double synchronize_video(VideoState *is, AVFrame *src_frame, double pts)
{
    if (pts == 0)
    {
        /* no pts supplied: fall back to the running video clock */
        pts = is->video_clock;
    }
    else
    {
        /* explicit pts: resync the video clock to it */
        is->video_clock = pts;
    }

    /* advance the clock by the frame's display duration */
    double delay = av_q2d(is->video_st->codec->time_base);
    delay += src_frame->repeat_pict * (delay * 0.5);
    is->video_clock += delay;

    return pts;
}

總結:

總體來看系統由三個執行緒

1 音視訊處理匯流排程  該執行緒由繼承的QT類得到

2 視訊處理執行緒   由SDL建立   vs->video_tid = SDL_CreateThread(video_thread, "video_thread", &mVideoState);

3 音訊處理執行緒  由SDL建立  audio_stream_component_open( )中有音訊資料的回撥函式

同步過程  線上程2 視訊處理執行緒中同步

/*===========================================音視訊同步===============================================*/
        video_pts = synchronize_video(is, pFrame, video_pts);

        while(1)
        {
            audio_pts = is->audio_clock;
            if (video_pts <= audio_pts) //等待匹配   兩者時間匹配 跳出該迴圈
                break;

            int delayTime = (video_pts - audio_pts) * 1000;

            delayTime = delayTime > 5 ? 5:delayTime;

            SDL_Delay(delayTime);
        }
視訊執行緒會等待音訊執行緒,比較兩者之間的pts,從而繼續進行下一幀的播放

目前待解決問題:

1 AVI格式視訊無法播放

參考文章:http://blog.yundiantech.com/?log=blog&scat=182

原始碼下載地址: