
The Overall WebRTC Voice Framework

Figure 1: overall voice framework

As the figure above shows, apart from libjingle, which handles the p2p data transport, the audio processing framework consists mainly of the VOE (Voice Engine) and the Channel adaptation layer.

Figure 2: sequence diagram for creating the communication channels

The diagram above shows the complete local-side process: the VOE is created starting from CreateMediaEngine_w, while the Channel adaptation layer is created from SetLocalDescription according to the SDP. The two processes are analyzed below.

VOE creation process

/*src\talk\app\webrtc\peerconnectionfactory.cc*/
bool PeerConnectionFactory::Initialize() {
  ......
  default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
  .....
  cricket::MediaEngineInterface* media_engine =
      worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
          &PeerConnectionFactory::CreateMediaEngine_w, this));
  // Invoke simply runs CreateMediaEngine_w on the worker_thread_ thread.
  .....
  channel_manager_.reset(
      new cricket::ChannelManager(media_engine, worker_thread_));
  ......
}

cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
  ASSERT(worker_thread_ == rtc::Thread::Current());
  return cricket::WebRtcMediaEngineFactory::Create(
      default_adm_.get(), video_encoder_factory_.get(),
      video_decoder_factory_.get());
}

MediaEngineInterface* WebRtcMediaEngineFactory::Create(
    webrtc::AudioDeviceModule* adm,
    WebRtcVideoEncoderFactory* encoder_factory,
    WebRtcVideoDecoderFactory* decoder_factory) {
  return CreateWebRtcMediaEngine(adm, encoder_factory, decoder_factory);
}

// CreateWebRtcMediaEngine actually returns a WebRtcMediaEngine2, which derives from the
// CompositeMediaEngine template class; implemented in webrtcmediaengine.cc.
namespace cricket {
class WebRtcMediaEngine2
    : public CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine2> {
 public:
  WebRtcMediaEngine2(webrtc::AudioDeviceModule* adm,
                     WebRtcVideoEncoderFactory* encoder_factory,
                     WebRtcVideoDecoderFactory* decoder_factory) {
    voice_.SetAudioDeviceModule(adm);
    video_.SetExternalDecoderFactory(decoder_factory);
    video_.SetExternalEncoderFactory(encoder_factory);
  }
};
}  // namespace cricket

template <class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  virtual ~CompositeMediaEngine() {}
  virtual bool Init(rtc::Thread* worker_thread) {
    if (!voice_.Init(worker_thread))  // voice_ here is the WebRtcVoiceEngine
      return false;
    video_.Init();  // video_ is WebRtcVideoEngine2, analyzed separately later
    return true;
  }
  ......
}

The related class diagram:

Figure 3: VOE engine class diagram

WebRtcVoiceEngine::WebRtcVoiceEngine()
    : voe_wrapper_(new VoEWrapper()),  // proxy for the lower-level Voice Engine; all lower-level calls from the upper layer go through it
      tracing_(new VoETraceWrapper()), // tracing / debugging helper
      adm_(NULL),
      log_filter_(SeverityToFilter(kDefaultLogSeverity)),
      is_dumping_aec_(false) {
  Construct();
}

Now look at the classes and methods involved in constructing WebRtcVoiceEngine:

// VoEWrapper is effectively a proxy for VoiceEngine --> voice_engine_impl.cc
/* webrtcvoe.h */
class VoEWrapper {
 public:
  VoEWrapper()
      : engine_(webrtc::VoiceEngine::Create()),
        processing_(engine_),
        base_(engine_),
        codec_(engine_),
        dtmf_(engine_),
        hw_(engine_),
        neteq_(engine_),
        network_(engine_),
        rtp_(engine_),
        sync_(engine_),
        volume_(engine_) {}

/*webrtcvoiceengine.cc*/
void WebRtcVoiceEngine::Construct() {
  ......
  // Register the engine state observer so lower-level errors are reported back to WebRtcVoiceEngine.
  if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
    LOG_RTCERR0(RegisterVoiceEngineObserver);
  }
  ....
  // Load our audio codec list.
  ConstructCodecs();  // pick codecs from the lower layer according to the kCodecPrefs table, ordered from highest to lowest quality
  .....
  options_ = GetDefaultEngineOptions();  // default audio options: echo cancellation, noise suppression, AGC, whether to dump, etc.
}

// WebRtcVoiceEngine initialization
bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) {
  ......
  bool res = InitInternal();
  ......
}

bool WebRtcVoiceEngine::InitInternal() {
  ......
  // Initialize the lower-level AudioDeviceModule. In this WebRTC demo, adm_ passed in here is NULL.
  // voe_wrapper_ is the proxy for VoiceEngine, implemented in voice_engine_impl.cc;
  // VoiceEngineImpl derives from VoiceEngine, and Create() actually builds a VoiceEngineImpl
  // (its base interface is implemented in voe_base_impl.cc), which is handed to VoEWrapper.
  // So voe_wrapper_->base() is really the VoiceEngineImpl object; its Init is analyzed next.
  if (voe_wrapper_->base()->Init(adm_) == -1) {
    ......
  }
  ......
}

/*voe_base_impl.cc*/
int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ......
  if (external_adm == nullptr) {  // as noted above, the demo passes null
#if !defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE)
    return -1;
#else
    // Create the internal ADM implementation.
    // Creates the local AudioDeviceModuleImpl object; on Android, capture and playout
    // are done through AudioRecord and AudioTrack.
    shared_->set_audio_device(AudioDeviceModuleImpl::Create(
        VoEId(shared_->instance_id(), -1), shared_->audio_device_layer()));
    if (shared_->audio_device() == nullptr) {
      shared_->SetLastError(VE_NO_MEMORY, kTraceCritical,
                            "Init() failed to create the ADM");
      return -1;
    }
#endif  // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE
  } else {
    // Use the already existing external ADM implementation.
    shared_->set_audio_device(external_adm);
    LOG_F(LS_INFO) << ...;
  }
  if (shared_->process_thread()) {
    shared_->process_thread()->RegisterModule(shared_->audio_device());
  }

  bool available = false;

  // --------------------
  // Reinitialize the ADM

  // Register an event observer for the audio device.
  if (shared_->audio_device()->RegisterEventObserver(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register event observer for the ADM");
  }

  // Register the AudioTransport implementation with the audio device; this is what moves the audio data.
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }

  // Initialize the audio device itself.
  if (shared_->audio_device()->Init() != 0) {
    shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
                          "Init() failed to initialize the ADM");
    return -1;
  }
  ......
}

AudioDeviceModule* AudioDeviceModuleImpl::Create(const int32_t id,
                                                 const AudioLayer audioLayer) {
  ......
  RefCountImpl<AudioDeviceModuleImpl>* audioDevice =
      new RefCountImpl<AudioDeviceModuleImpl>(id, audioLayer);

  // Check whether the platform is supported.
  if (audioDevice->CheckPlatform() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Pick the implementation for the current platform. On Android this goes through JNI
  // (audio_record_jni.cc, audio_track_jni.cc) to org/webrtc/voiceengine/WebRtcAudioRecord.java
  // and org/webrtc/voiceengine/WebRtcAudioTrack.java for capture and playout.
  if (audioDevice->CreatePlatformSpecificObjects() == -1) {
    delete audioDevice;
    return NULL;
  }

  // Attach the shared audio buffer; audio data is passed through the AudioTransport.
  if (audioDevice->AttachAudioBuffer() == -1) {
    delete audioDevice;
    return NULL;
  }
  ......
}
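The Invoke(rtc::Bind(...)) calls above, and the ones in the channel-creation path below, all marshal a call onto the worker thread and block until it returns. The following is a minimal, self-contained sketch of that pattern with a hypothetical WorkerThread class (it is not the actual rtc::Thread API), only to illustrate what the factory is doing:

#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <mutex>
#include <thread>

// Hypothetical stand-in for rtc::Thread: a single thread draining a task queue.
class WorkerThread {
 public:
  WorkerThread() : runner_([this] { Loop(); }) {}
  ~WorkerThread() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      done_ = true;
    }
    cv_.notify_one();
    runner_.join();
  }

  // Run |fn| on the worker thread and block the caller until it returns,
  // in the spirit of worker_thread_->Invoke<R>(rtc::Bind(...)).
  template <typename R>
  R Invoke(std::function<R()> fn) {
    std::packaged_task<R()> task(std::move(fn));
    std::future<R> result = task.get_future();
    {
      std::lock_guard<std::mutex> lock(mu_);
      tasks_.emplace_back([&task] { task(); });
    }
    cv_.notify_one();
    return result.get();  // caller blocks here until the worker ran the task
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return done_ || !tasks_.empty(); });
        if (done_ && tasks_.empty()) return;
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      task();
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  bool done_ = false;
  std::thread runner_;
};

With such a helper, Initialize() would read roughly like media_engine = worker.Invoke<MediaEngineInterface*>([&] { return CreateMediaEngine_w(); });, which is all the rtc::Bind macro-like call amounts to.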
Channel creation process

In the sequence diagram of Figure 2, SetLocalDescription calls CreateChannels, which creates the channels the session needs according to the SDP. This opens the transport paths for audio, video and user data. The audio channel creation is examined in detail below; the others are similar.

The related class diagram is as follows:

/* webrtcsession.cc */
bool WebRtcSession::CreateChannels(const SessionDescription* desc) {
  // Creating the media channels and transport proxies.
  // Create the VoiceChannel according to the SDP.
  const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(desc);
  if (voice && !voice->rejected && !voice_channel_) {
    if (!CreateVoiceChannel(voice)) {
      LOG(LS_ERROR) << ...;
      return false;
    }
  }
  const cricket::ContentInfo* video = cricket::GetFirstVideoContent(desc);
  if (video && !video->rejected && !video_channel_) {
    if (!CreateVideoChannel(video)) {
      LOG(LS_ERROR) << ...;
      return false;
    }
  }
  const cricket::ContentInfo* data = cricket::GetFirstDataContent(desc);
  if (data && !data->rejected && !data_channel_) {
    if (!CreateDataChannel(data)) {
      LOG(LS_ERROR) << ...;
      return false;
    }
  }
  ......
}

bool WebRtcSession::CreateVoiceChannel(const cricket::ContentInfo* content) {
  voice_channel_.reset(channel_manager_->CreateVoiceChannel(
      media_controller_.get(), transport_controller(), content->name, true,
      audio_options_));
  if (!voice_channel_) {
    return false;
  }
  ......
  return true;
}

/* webrtc\src\talk\session\media\channelmanager.cc*/
VoiceChannel* ChannelManager::CreateVoiceChannel(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  // Again marshalling: run ChannelManager::CreateVoiceChannel_w on worker_thread_.
  return worker_thread_->Invoke<VoiceChannel*>(
      Bind(&ChannelManager::CreateVoiceChannel_w, this, media_controller,
           transport_controller, content_name, rtcp, options));
}

VoiceChannel* ChannelManager::CreateVoiceChannel_w(
    webrtc::MediaControllerInterface* media_controller,
    TransportController* transport_controller,
    const std::string& content_name,
    bool rtcp,
    const AudioOptions& options) {
  ......
  // media_engine_ here is the WebRtcMediaEngine2 created in peerconnectionfactory.cc;
  // the call ends up in WebRtcVoiceEngine::CreateChannel.
  VoiceMediaChannel* media_channel =
      media_engine_->CreateChannel(media_controller->call_w(), options);
  if (!media_channel)
    return nullptr;
  // VoiceChannel derives from BaseChannel: it pulls data from libjingle and sends data
  // to the remote side through libjingle.
  VoiceChannel* voice_channel =
      new VoiceChannel(worker_thread_, media_engine_.get(), media_channel,
                       transport_controller, content_name, rtcp);
  if (!voice_channel->Init()) {
    delete voice_channel;
    return nullptr;
  }
  voice_channels_.push_back(voice_channel);
  return voice_channel;
}

VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call,
                                                    const AudioOptions& options) {
  WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this, options, call);
  if (!ch->valid()) {
    delete ch;
    return nullptr;
  }
  return ch;
}

WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine,
                                                 const AudioOptions& options,
                                                 webrtc::Call* call)
    : engine_(engine),
      voe_channel_(engine->CreateMediaVoiceChannel()),  // calls WebRtcVoiceEngine::CreateMediaVoiceChannel()
      ...... {
  // Register this WebRtcVoiceMediaChannel with the WebRtcVoiceEngine, which keeps it in its channel list.
  engine->RegisterChannel(this);
  ......
  // Register this WebRtcVoiceMediaChannel with the newly created low-level channel.
  // WebRtcVoiceMediaChannel acts as a bridge: the low-level channel sends and receives
  // its data through the Transport registered here.
  ConfigureSendChannel(voe_channel());
  SetOptions(options);
}

int WebRtcVoiceEngine::CreateVoiceChannel(VoEWrapper* voice_engine_wrapper) {
  // VoEWrapper wraps VoiceEngine; think of it as VoiceEngine's proxy.
  // From voice_engine_impl.cc one can see that VoiceEngine is in fact a facade over
  // VoiceEngineImpl, so voice_engine_wrapper->base() yields the VoiceEngineImpl object.
  return voice_engine_wrapper->base()->CreateChannel(voe_config_);
}

/* voe_base_impl.cc */
int VoEBaseImpl::CreateChannel() {
  .....
  // Create the Channel object through the ChannelManager.
  voe::ChannelOwner channel_owner = shared_->channel_manager().CreateChannel();
  return InitializeChannel(&channel_owner);
}

/* android\webrtc\src\webrtc\voice_engine\channel_manager.cc*/
ChannelOwner ChannelManager::CreateChannel() {
  return CreateChannelInternal(config_);
}

ChannelOwner ChannelManager::CreateChannelInternal(const Config& config) {
  Channel* channel;
  // Create the new Channel object.
  Channel::CreateChannel(channel, ++last_channel_id_, instance_id_,
                         event_log_.get(), config);
  ChannelOwner channel_owner(channel);

  CriticalSectionScoped crit(lock_.get());
  // The ChannelManager keeps track of every channel it creates.
  channels_.push_back(channel_owner);
  // Return the wrapping ChannelOwner.
  return channel_owner;
}

Audio sending flow

Capture

In the Android WebRTC demo, audio is still captured through the platform recorder class (org/webrtc/voiceengine/WebRtcAudioRecord.java). As described for VoEBaseImpl::Init, a data-transport callback is registered on the AudioDeviceModule:

int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
                      AudioProcessing* audioproc) {
  ......
  // Register the AudioTransport implementation
  if (shared_->audio_device()->RegisterAudioCallback(this) != 0) {
    shared_->SetLastError(
        VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
        "Init() failed to register audio callback for the ADM");
  }
  ......
}

int32_t AudioDeviceModuleImpl::RegisterAudioCallback(AudioTransport* audioCallback) {
  CriticalSectionScoped lock(&_critSectAudioCb);
  // The VoEBaseImpl implementation ends up registered in the device's AudioDeviceBuffer.
  _audioDeviceBuffer.RegisterAudioCallback(audioCallback);
  return 0;
}

Captured audio therefore flows as follows, ending in the AudioTransport callbacks implemented by VoEBaseImpl (which also supply playout data):

nativeDataIsRecorded (org/webrtc/voiceengine/WebRtcAudioRecord.java) --->
(audio_record_jni.cc) AudioRecordJni::DataIsRecorded --> OnDataIsRecorded -->
AudioDeviceBuffer::DeliverRecordedData --->
AudioTransport::RecordedDataIsAvailable --->
(voe_base_impl.cc) VoEBaseImpl::RecordedDataIsAvailable
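To make the direction of that callback chain concrete, here is a small, self-contained sketch (own types, not WebRTC's actual AudioTransport signatures) of a device buffer pushing captured 10 ms PCM frames to a registered sink:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for webrtc::AudioTransport: the engine implements this,
// and the audio device calls it for every captured 10 ms frame.
class RecordedDataSink {
 public:
  virtual ~RecordedDataSink() = default;
  virtual void OnRecordedFrame(const int16_t* samples,
                               size_t samples_per_channel,
                               size_t channels,
                               int sample_rate_hz) = 0;
};

// Illustrative stand-in for AudioDeviceBuffer: the platform recorder hands it raw
// PCM, and it forwards the data to whatever sink was registered.
class DeviceBuffer {
 public:
  void RegisterCallback(RecordedDataSink* sink) { sink_ = sink; }

  // Called from the platform capture path (on Android, via JNI from WebRtcAudioRecord.java).
  void DeliverRecordedData(const std::vector<int16_t>& pcm,
                           size_t channels,
                           int sample_rate_hz) {
    if (!sink_ || channels == 0) return;
    sink_->OnRecordedFrame(pcm.data(), pcm.size() / channels, channels,
                           sample_rate_hz);
  }

 private:
  RecordedDataSink* sink_ = nullptr;
};

In the real code the sink is VoEBaseImpl, whose RecordedDataIsAvailable immediately forwards the frame to ProcessRecordedDataWithAPM, as shown next.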
Processing

/* voe_base_impl.cc */
int32_t VoEBaseImpl::RecordedDataIsAvailable(
    const void* audioSamples, size_t nSamples, size_t nBytesPerSample,
    uint8_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS,
    int32_t clockDrift, uint32_t micLevel, bool keyPressed,
    uint32_t& newMicLevel) {
  newMicLevel = static_cast<uint32_t>(ProcessRecordedDataWithAPM(
      nullptr, 0, audioSamples, samplesPerSec, nChannels, nSamples,
      totalDelayMS, clockDrift, micLevel, keyPressed));
  return 0;
}

// Data coming up from the Java layer is fed straight into ProcessRecordedDataWithAPM.
int VoEBaseImpl::ProcessRecordedDataWithAPM(
    const int voe_channels[], int number_of_voe_channels,
    const void* audio_data, uint32_t sample_rate, uint8_t number_of_channels,
    size_t number_of_frames, uint32_t audio_delay_milliseconds,
    int32_t clock_drift, uint32_t volume, bool key_pressed) {
  ......
  // Adjust the microphone level.
  if (volume != 0) {
    // Scale from ADM to VoE level range
    if (shared_->audio_device()->MaxMicrophoneVolume(&max_volume) == 0) {
      if (max_volume) {
        voe_mic_level = static_cast<uint16_t>(
            (volume * kMaxVolumeLevel + static_cast<int>(max_volume / 2)) /
            max_volume);
      }
    }
    // We learned that on certain systems (e.g Linux) the voe_mic_level
    // can be greater than the maxVolumeLevel therefore
    // we are going to cap the voe_mic_level to the maxVolumeLevel
    // and change the maxVolume to volume if it turns out that
    // the voe_mic_level is indeed greater than the maxVolumeLevel.
    if (voe_mic_level > kMaxVolumeLevel) {
      voe_mic_level = kMaxVolumeLevel;
      max_volume = volume;
    }
  }

  // A whole series of operations on the audio happens here: recording to file,
  // resampling, echo cancellation, AGC, and so on.
  shared_->transmit_mixer()->PrepareDemux(
      audio_data, number_of_frames, number_of_channels, sample_rate,
      static_cast<uint16_t>(audio_delay_milliseconds), clock_drift,
      voe_mic_level, key_pressed);

  // Copy the audio frame to each sending channel and perform
  // channel-dependent operations (file mixing, mute, etc.), encode and
  // packetize+transmit the RTP packet. When |number_of_voe_channels| == 0,
  // do the operations on all the existing VoE channels; otherwise the
  // operations will be done on specific channels.
  if (number_of_voe_channels == 0) {
    shared_->transmit_mixer()->DemuxAndMix();
    shared_->transmit_mixer()->EncodeAndSend();
  } else {
    shared_->transmit_mixer()->DemuxAndMix(voe_channels, number_of_voe_channels);
    shared_->transmit_mixer()->EncodeAndSend(voe_channels,
                                             number_of_voe_channels);
  }
  ......
  // Return 0 to indicate no change on the volume.
  return 0;
}

Encoding

// shared_->transmit_mixer()->EncodeAndSend()
// encodes the data; after encoding, packetization and sending are triggered.
void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())  // only channels currently in the sending state
      channel_ptr->EncodeAndSend();
  }
}

uint32_t Channel::EncodeAndSend() {
  ......
  // Encode (compress) the audio data.
  if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::EncodeAndSend() ACM encoding failed");
    return 0xFFFFFFFF;
  }
  ......
}

int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
  InputData input_data;
  CriticalSectionScoped lock(acm_crit_sect_.get());
  // Pre-encode processing: resample if needed and wrap the data in InputData.
  int r = Add10MsDataInternal(audio_frame, &input_data);
  // Then encode.
  return r < 0 ? r : Encode(input_data);
}

int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
  ......
  // Get the encoder currently selected in the CodecManager.
  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
  ......
  // Encode.
  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
  encoded_info = audio_encoder->Encode(
      rtp_timestamp, input_data.audio, input_data.length_per_channel,
      encode_buffer_.size(), encode_buffer_.data());
  encode_buffer_.SetSize(encoded_info.encoded_bytes);
  ......
  {
    CriticalSectionScoped lock(callback_crit_sect_.get());
    if (packetization_callback_) {
      // Trigger sending. packetization_callback_ is the Channel, which implements
      // AudioPacketizationCallback and registers itself in Channel::Init() via
      // audio_coding_->RegisterTransportCallback(this).
      packetization_callback_->SendData(
          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
          encode_buffer_.data(), encode_buffer_.size(),
          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
                                                       : nullptr);
    }
    if (vad_callback_) {
      // Voice-activity (VAD/DTX) callback.
      vad_callback_->InFrameType(frame_type);
    }
  }
}
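Add10MsData always consumes exactly one 10 ms frame, so the number of samples per call is sample_rate / 100 per channel (160 at 16 kHz, 480 at 48 kHz). A rough sketch of how a capture path would keep feeding an ACM-like object in those units (hypothetical AcmLike interface, not the real AudioCodingModule API):

#include <cstdint>
#include <vector>

// Hypothetical minimal "ACM" interface, only to show the 10 ms contract.
struct AcmLike {
  // Accepts exactly 10 ms of interleaved PCM; returns <0 on error.
  virtual int Add10MsData(const int16_t* pcm, size_t samples_per_channel,
                          size_t channels, int sample_rate_hz) = 0;
  virtual ~AcmLike() = default;
};

// Slices an arbitrary captured buffer into 10 ms frames before handing it over.
inline int FeedCapturedAudio(AcmLike* acm, const std::vector<int16_t>& pcm,
                             size_t channels, int sample_rate_hz) {
  const size_t samples_per_channel_10ms = sample_rate_hz / 100;  // e.g. 480 at 48 kHz
  const size_t frame_len = samples_per_channel_10ms * channels;
  for (size_t off = 0; off + frame_len <= pcm.size(); off += frame_len) {
    if (acm->Add10MsData(pcm.data() + off, samples_per_channel_10ms, channels,
                         sample_rate_hz) < 0) {
      return -1;  // encoding failed
    }
  }
  return 0;
}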
Packetization

After encoding, the AudioPacketizationCallback::SendData implemented by Channel triggers packetization and sending. SendData looks like this:

/* android\webrtc\src\webrtc\voice_engine\channel.cc*/
int32_t Channel::SendData(FrameType frameType,
                          uint8_t payloadType,
                          uint32_t timeStamp,
                          const uint8_t* payloadData,
                          size_t payloadSize,
                          const RTPFragmentationHeader* fragmentation) {
  ......
  // RTP packetization and sending.
  if (_rtpRtcpModule->SendOutgoingData(
          (FrameType&)frameType, payloadType, timeStamp,
          // Leaving the time when this frame was
          // received from the capture device as
          // undefined for voice for now.
          -1, payloadData, payloadSize, fragmentation) == -1) {
    _engineStatisticsPtr->SetLastError(
        VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
        "Channel::SendData() failed to send data to RTP/RTCP module");
    return -1;
  }
  ......
}

/* android\webrtc\src\webrtc\modules\rtp_rtcp\source\rtp_rtcp_impl.cc*/
// RTP packetization and sending are ultimately done by RTPSender.
int32_t ModuleRtpRtcpImpl::SendOutgoingData(
    FrameType frame_type,
    int8_t payload_type,
    uint32_t time_stamp,
    int64_t capture_time_ms,
    const uint8_t* payload_data,
    size_t payload_size,
    const RTPFragmentationHeader* fragmentation,
    const RTPVideoHeader* rtp_video_hdr) {
  rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
  if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
    rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
  }
  return rtp_sender_.SendOutgoingData(
      frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
      payload_size, fragmentation, rtp_video_hdr);
}

/*android\webrtc\src\webrtc\modules\rtp_rtcp\source\rtp_sender.cc*/
int32_t RTPSender::SendOutgoingData(FrameType frame_type,
                                    int8_t payload_type,
                                    uint32_t capture_timestamp,
                                    int64_t capture_time_ms,
                                    const uint8_t* payload_data,
                                    size_t payload_size,
                                    const RTPFragmentationHeader* fragmentation,
                                    const RTPVideoHeader* rtp_hdr) {
  ......
  // Determine whether the payload is audio or video.
  if (CheckPayloadType(payload_type, &video_type) != 0) {
    LOG(LS_ERROR) << ...;
    return -1;
  }
  // For audio:
  ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                              payload_data, payload_size, fragmentation);
  // For video:
  ret_val = video_->SendVideo(video_type, frame_type, payload_type,
                              capture_timestamp, capture_time_ms, payload_data,
                              payload_size, fragmentation, rtp_hdr);
}

/*android\webrtc\src\webrtc\modules\rtp_rtcp\source\rtp_sender_audio.cc*/
int32_t RTPSenderAudio::SendAudio(const FrameType frameType,
                                  const int8_t payloadType,
                                  const uint32_t captureTimeStamp,
                                  const uint8_t* payloadData,
                                  const size_t dataSize,
                                  const RTPFragmentationHeader* fragmentation) {
  ......
  // The encoded audio is packed into RTP packets according to the protocol here.
  // The full flow is fairly involved and is not analyzed in this post; see the source for details.
  ......
  // Send.
  return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength, -1,
                                   kAllowRetransmission,
                                   RtpPacketSender::kHighPriority);
}
Sending

As seen above, once the RTP packet is built, RTPSender carries out the send:

int32_t RTPSender::SendToNetwork(uint8_t* buffer,
                                 size_t payload_length,
                                 size_t rtp_header_length,
                                 int64_t capture_time_ms,
                                 StorageType storage,
                                 RtpPacketSender::Priority priority) {
  ......
  // After some timing and retransmission bookkeeping the packet is sent directly.
  bool sent = SendPacketToNetwork(buffer, length);
  .....
  // Update send statistics.
  UpdateRtpStats(buffer, length, rtp_header, false, false);
  ......
}

bool RTPSender::SendPacketToNetwork(const uint8_t* packet, size_t size) {
  int bytes_sent = -1;
  if (transport_) {
    // transport_ here is in fact the Channel; Channel derives from Transport.
    /* In the Channel constructor:
       Channel::Channel(int32_t channelId, uint32_t instanceId,
                        RtcEventLog* const event_log, const Config& config) {
         RtpRtcp::Configuration configuration;
         configuration.audio = true;
         configuration.outgoing_transport = this;  // set the Transport
         configuration.audio_messages = this;
         configuration.receive_statistics = rtp_receive_statistics_.get();
         configuration.bandwidth_callback = rtcp_observer_.get();
         _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
       }
       The ModuleRtpRtcpImpl constructor passes the configuration on to RTPSender:
       ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
           : rtp_sender_(configuration.audio, configuration.clock,
                         configuration.outgoing_transport,
                         configuration.audio_messages,
                         configuration.paced_sender,
                         configuration.transport_sequence_number_allocator,
                         configuration.transport_feedback_callback,
                         configuration.send_bitrate_observer,
                         configuration.send_frame_count_observer,
                         configuration.send_side_delay_observer),
             rtcp_sender_(configuration.audio, configuration.clock,
                          configuration.receive_statistics,
                          configuration.rtcp_packet_type_counter_observer),
             rtcp_receiver_(configuration.clock, configuration.receiver_only,
                            configuration.rtcp_packet_type_counter_observer,
                            configuration.bandwidth_callback,
                            configuration.intra_frame_callback,
                            configuration.transport_feedback_callback, this)......)
    */
    bytes_sent =
        transport_->SendRtp(packet, size) ? static_cast<int>(size) : -1;
  }
  ......
  return true;
}

The analysis above shows that the final send happens in Channel::SendRtp:

bool Channel::SendRtp(const uint8_t* data, size_t len) {
  ......
  // _transportPtr is registered via int32_t Channel::RegisterExternalTransport(Transport& transport).
  // Recalling the channel-creation flow: in webrtcvoiceengine.cc, the WebRtcVoiceMediaChannel
  // constructor calls ConfigureSendChannel(voe_channel()):
  /*
  void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
    // VoENetworkImpl looks up the Channel through its ChannelOwner and registers the Transport.
    if (engine()->voe()->network()->RegisterExternalTransport(channel, *this) == -1) {
      LOG_RTCERR2(RegisterExternalTransport, channel, this);
    }
    // Enable RTCP (for quality stats and feedback messages)
    EnableRtcp(channel);
    // Reset all recv codecs; they will be enabled via SetRecvCodecs.
    ResetRecvCodecs(channel);
    // Set RTP header extension for the new channel.
    SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
  }
  */
  if (!_transportPtr->SendRtp(bufferToSendPtr, bufferLength)) {
    std::string transport_name =
        _externalTransport ? "external transport" : "WebRtc sockets";
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::SendPacket() RTP transmission using %s failed",
                 transport_name.c_str());
    return false;
  }
  ......
}

The Transport registered with Channel is therefore the WebRtcVoiceMediaChannel:

/*android\webrtc\src\talk\media\webrtc\webrtcvoiceengine.h*/
class WebRtcVoiceMediaChannel : public VoiceMediaChannel,
                                public webrtc::Transport {
  ......
  // implements Transport interface
  bool SendRtp(const uint8_t* data, size_t len) override {
    rtc::Buffer packet(reinterpret_cast<const uint8_t*>(data), len,
                       kMaxRtpPacketLen);
    return VoiceMediaChannel::SendPacket(&packet);
  }
  ......
}

/*android\webrtc\src\talk\media\base\mediachannel.h*/
class VoiceMediaChannel : public MediaChannel {
  ......
  // Base method to send packet using NetworkInterface.
  bool SendPacket(rtc::Buffer* packet) { return DoSendPacket(packet, false); }
  bool SendRtcp(rtc::Buffer* packet) { return DoSendPacket(packet, true); }
  // Sets the abstract interface class for sending RTP/RTCP data.
  virtual void SetInterface(NetworkInterface* iface) {
    rtc::CritScope cs(&network_interface_crit_);
    network_interface_ = iface;
  }

 private:
  bool DoSendPacket(rtc::Buffer* packet, bool rtcp) {
    rtc::CritScope cs(&network_interface_crit_);
    if (!network_interface_)
      return false;
    // network_interface_ is set through SetInterface; it is implemented by
    // android\webrtc\src\talk\session\media\channel.h and registered in BaseChannel::Init().
    return (!rtcp) ? network_interface_->SendPacket(packet)
                   : network_interface_->SendRtcp(packet);
  }
  ......
}

/*android\webrtc\src\talk\media\base\channel.h*/
class BaseChannel : public rtc::MessageHandler,
                    public sigslot::has_slots<>,
                    public MediaChannel::NetworkInterface,
                    public ConnectionStatsGetter {
};

/*android\webrtc\src\talk\media\base\channel.cc*/
bool BaseChannel::Init() {
  ......
  // Set up the TransportChannel for this BaseChannel.
  if (!SetTransport(content_name())) {
    return false;
  }
  // Both RTP and RTCP channels are set, we can call SetInterface on
  // media channel and it can set network options.
  media_channel_->SetInterface(this);
  return true;
}

bool BaseChannel::SendPacket(rtc::Buffer* packet, rtc::DiffServCodePoint dscp) {
  return SendPacket(false, packet, dscp);
}

bool BaseChannel::SendPacket(bool rtcp, rtc::Buffer* packet,
                             rtc::DiffServCodePoint dscp) {
  ......
  // Pick the TransportChannel that carries the data; it was set up in Init() via SetTransport.
  TransportChannel* channel = (!rtcp || rtcp_mux_filter_.IsActive())
                                  ? transport_channel_
                                  : rtcp_transport_channel_;
  if (!channel || !channel->writable()) {
    return false;
  }
  ......
  int ret = channel->SendPacket(packet->data(), packet->size(), options,
                                (secure() && secure_dtls()) ? PF_SRTP_BYPASS : 0);
}

bool BaseChannel::SetTransport(const std::string& transport_name) {
  // Effectively: run SetTransport_w on the worker thread.
  return worker_thread_->Invoke<bool>(
      Bind(&BaseChannel::SetTransport_w, this, transport_name));
}

bool BaseChannel::SetTransport_w(const std::string& transport_name) {
  ......
  // First create the corresponding TransportChannel through the TransportController.
  // TransportChannelImpl derives from TransportChannel, P2PTransportChannel derives from
  // TransportChannelImpl; the concrete implementation is P2PTransportChannel.
  set_transport_channel(transport_controller_->CreateTransportChannel_w(
      transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP));
  if (!transport_channel()) {
    return false;
  }
  ......
}

void BaseChannel::set_transport_channel(TransportChannel* new_tc) {
  TransportChannel* old_tc = transport_channel_;
  if (old_tc) {
    // First unhook the old channel's signal handlers,
    DisconnectFromTransportChannel(old_tc);
    // then destroy the unused channel to free resources.
    transport_controller_->DestroyTransportChannel_w(
        transport_name_, cricket::ICE_CANDIDATE_COMPONENT_RTP);
  }

  transport_channel_ = new_tc;

  if (new_tc) {
    // Hook up the signal handlers on the new channel.
    ConnectToTransportChannel(new_tc);
    for (const auto& pair : socket_options_) {
      new_tc->SetOption(pair.first, pair.second);
    }
  }
  // Tell the corresponding MediaChannel that the TransportChannel is in place.
  SetReadyToSend(false, new_tc && new_tc->writable());
}

P2PTransportChannel::SendPacket involves the libjingle p2p implementation and is not analyzed further here. Combined with Figure 1, the analysis above gives a good picture of the whole WebRTC audio sending path.

Audio receive and playout flow

Receiving

As the yellow arrows in Figure 1 show, network data enters BaseChannel from libjingle.

// VoiceChannel::Init() calls BaseChannel::Init()
// ---> BaseChannel::Init()
// ---> bool BaseChannel::SetTransport(const std::string& transport_name)
// ---> bool BaseChannel::SetTransport_w(const std::string& transport_name)
// ---> void BaseChannel::set_transport_channel(TransportChannel* new_tc)
// ---> void BaseChannel::ConnectToTransportChannel(TransportChannel* tc)

/* TransportChannel raises the SignalReadPacket signal for every packet it receives;
   classes communicate with each other through these signals and slots. */
void BaseChannel::ConnectToTransportChannel(TransportChannel* tc) {
  ASSERT(worker_thread_ == rtc::Thread::Current());

  tc->SignalWritableState.connect(this, &BaseChannel::OnWritableState);
  // Every packet libjingle receives triggers BaseChannel::OnChannelRead.
  tc->SignalReadPacket.connect(this, &BaseChannel::OnChannelRead);
  tc->SignalReadyToSend.connect(this, &BaseChannel::OnReadyToSend);
}

void BaseChannel::OnChannelRead(TransportChannel* channel,
                                const char* data, size_t len,
                                const rtc::PacketTime& packet_time,
                                int flags) {
  // OnChannelRead gets called from P2PSocket; now pass data to MediaEngine
  ASSERT(worker_thread_ == rtc::Thread::Current());

  // When using RTCP multiplexing we might get RTCP packets on the RTP
  // transport. We feed RTP traffic into the demuxer to determine if it is RTCP.
  bool rtcp = PacketIsRtcp(channel, data, len);
  rtc::Buffer packet(data, len);
  HandlePacket(rtcp, &packet, packet_time);
}

void BaseChannel::HandlePacket(bool rtcp, rtc::Buffer* packet,
                               const rtc::PacketTime& packet_time) {
  ......
  if (!rtcp) {
    // RTP packet; media_channel_ here is clearly the WebRtcVoiceMediaChannel.
    media_channel_->OnPacketReceived(packet, packet_time);
  } else {
    // RTCP packet.
    media_channel_->OnRtcpReceived(packet, packet_time);
  }
  ......
}
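When RTP and RTCP are multiplexed on one transport, telling them apart comes down to inspecting the byte that would hold the RTP payload type: RTCP packet types land in the range 64-95 there (RFC 5761). A small illustrative check along those lines (not BaseChannel's actual PacketIsRtcp helper, which also consults the RTCP mux filter):

#include <cstddef>
#include <cstdint>

// Rough RTP/RTCP demux for a multiplexed transport, following RFC 5761:
// in an RTCP packet the byte holding the RTP payload type carries the
// RTCP packet type minus 128, which falls into the range 64..95.
inline bool LooksLikeRtcp(const uint8_t* data, size_t len) {
  if (len < 2) return false;
  const uint8_t pt = data[1] & 0x7F;  // low 7 bits of the second byte
  return pt >= 64 && pt < 96;         // e.g. SR=200, RR=201 map to 72, 73
}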
Depacketization

/*android\webrtc\src\talk\media\webrtc\webrtcvoiceengine.cc*/
void WebRtcVoiceMediaChannel::OnPacketReceived(
    rtc::Buffer* packet, const rtc::PacketTime& packet_time) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // Forward packet to Call as well.
  const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp,
                                              packet_time.not_before);
  // Goes through PacketReceiver::DeliveryStatus Call::DeliverPacket
  // ---> Call::DeliverRtp
  // ---> for audio, AudioReceiveStream::DeliverRtp estimates delay and the remote
  //      bitrate and updates the related state; for video, VideoReceiveStream::DeliverRtp.
  call_->Receiver()->DeliverPacket(
      webrtc::MediaType::AUDIO,
      reinterpret_cast<const uint8_t*>(packet->data()), packet->size(),
      webrtc_packet_time);

  // Pick which channel to send this packet to. If this packet doesn't match
  // any multiplexed streams, just send it to the default channel. Otherwise,
  // send it to the specific decoder instance for that stream.
  int which_channel =
      GetReceiveChannelNum(ParseSsrc(packet->data(), packet->size(), false));
  if (which_channel == -1) {
    which_channel = voe_channel();
  }

  // Pass it off to the decoder: depacketize and decode.
  engine()->voe()->network()->ReceivedRTPPacket(
      which_channel, packet->data(), packet->size(),
      webrtc::PacketTime(packet_time.timestamp, packet_time.not_before));
}

/*android\webrtc\src\webrtc\audio\audio_receive_stream.cc*/
bool AudioReceiveStream::DeliverRtp(const uint8_t* packet, size_t length,
                                    const PacketTime& packet_time) {
  ......
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(packet, length, &header)) {
    return false;
  }
  ......
  // Estimate delay and the incoming bitrate.
  remote_bitrate_estimator_->IncomingPacket(arrival_time_ms, payload_size,
                                            header, false);
}

/*android\webrtc\src\webrtc\voice_engine\voe_network_impl.cc*/
int VoENetworkImpl::ReceivedRTPPacket(int channel, const void* data,
                                      size_t length,
                                      const PacketTime& packet_time) {
  ......
  // As established earlier, channelPtr here is the Channel implemented in
  // android\webrtc\src\webrtc\voice_engine\channel.cc.
  return channelPtr->ReceivedRTPPacket((const int8_t*)data, length, packet_time);
}

/*android\webrtc\src\webrtc\voice_engine\channel.cc*/
int32_t Channel::ReceivedRTPPacket(const int8_t* data, size_t length,
                                   const PacketTime& packet_time) {
  ......
  const uint8_t* received_packet = reinterpret_cast<const uint8_t*>(data);
  RTPHeader header;
  // Parse the RTP header.
  if (!rtp_header_parser_->Parse(received_packet, length, &header)) {
    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
                 "Incoming packet: invalid RTP header");
    return -1;
  }
  ......
  // Depacketize and decode.
  return ReceivePacket(received_packet, length, header, in_order) ? 0 : -1;
}

bool Channel::ReceivePacket(const uint8_t* packet, size_t packet_length,
                            const RTPHeader& header, bool in_order) {
  ......
  const uint8_t* payload = packet + header.headerLength;
  ......
  // Hand the payload over to the receiver/decoder side.
  return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
                                          payload_specific, in_order);
}
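GetReceiveChannelNum(ParseSsrc(...)) above routes each incoming packet to a VoE channel keyed by the stream's SSRC, which sits at bytes 8-11 of the RTP header. A small illustrative version of that lookup (hypothetical routing map, not the real bookkeeping in WebRtcVoiceMediaChannel):

#include <cstddef>
#include <cstdint>
#include <unordered_map>

// Reads the SSRC field (bytes 8..11, network byte order) from an RTP packet.
inline bool ReadSsrc(const uint8_t* packet, size_t len, uint32_t* ssrc) {
  if (len < 12) return false;  // shorter than the fixed RTP header
  *ssrc = (uint32_t{packet[8]} << 24) | (uint32_t{packet[9]} << 16) |
          (uint32_t{packet[10]} << 8) | uint32_t{packet[11]};
  return true;
}

// Hypothetical SSRC -> VoE channel id routing table.
inline int PickReceiveChannel(const std::unordered_map<uint32_t, int>& by_ssrc,
                              const uint8_t* packet, size_t len,
                              int default_channel) {
  uint32_t ssrc = 0;
  if (!ReadSsrc(packet, len, &ssrc)) return default_channel;
  auto it = by_ssrc.find(ssrc);
  return it == by_ssrc.end() ? default_channel : it->second;
}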
Decoding

/*android\webrtc\src\webrtc\modules\rtp_rtcp\source\rtp_receiver_impl.cc*/
bool RtpReceiverImpl::IncomingRtpPacket(const RTPHeader& rtp_header,
                                        const uint8_t* payload,
                                        size_t payload_length,
                                        PayloadUnion payload_specific,
                                        bool in_order) {
  // Trigger our callbacks.
  CheckSSRCChanged(rtp_header);
  ......
  // Hand the data to the media-specific receiver, which pushes it towards the decoder.
  int32_t ret_val = rtp_media_receiver_->ParseRtpPacket(
      &webrtc_rtp_header, payload_specific, is_red, payload, payload_length,
      clock_->TimeInMilliseconds(), is_first_packet_in_frame);
}

/*android\webrtc\src\webrtc\modules\rtp_rtcp\source\rtp_receiver_audio.cc*/
int32_t RTPReceiverAudio::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
                                         const PayloadUnion& specific_payload,
                                         bool is_red,
                                         const uint8_t* payload,
                                         size_t payload_length,
                                         int64_t timestamp_ms,
                                         bool is_first_packet) {
  ......
  return ParseAudioCodecSpecific(rtp_header, payload, payload_length,
                                 specific_payload.Audio, is_red);
}

int32_t RTPReceiverAudio::ParseAudioCodecSpecific(
    WebRtcRTPHeader* rtp_header,
    const uint8_t* payload_data,
    size_t payload_length,
    const AudioPayload& audio_specific,
    bool is_red) {
  // DTMF (telephone-event) handling.
  bool telephone_event_packet =
      TelephoneEventPayloadType(rtp_header->header.payloadType);
  if (telephone_event_packet) {
    CriticalSectionScoped lock(crit_sect_.get());

    // RFC 4733 2.3
    //  0                   1                   2                   3
    //  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    // |     event     |E|R| volume    |          duration             |
    // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    if (payload_length % 4 != 0) {
      return -1;
    }
    size_t number_of_events = payload_length / 4;

    // sanity
    if (number_of_events >= MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS) {
      number_of_events = MAX_NUMBER_OF_PARALLEL_TELEPHONE_EVENTS;
    }
    for (size_t n = 0; n < number_of_events; ++n) {
      bool end = (payload_data[(4 * n) + 1] & 0x80) ? true : false;
      std::set<uint8_t>::iterator event =
          telephone_event_reported_.find(payload_data[4 * n]);
      if (event != telephone_event_reported_.end()) {
        // we have already seen this event
        if (end) {
          telephone_event_reported_.erase(payload_data[4 * n]);
        }
      } else {
        if (end) {
          // don't add if it's a end of a tone
        } else {
          telephone_event_reported_.insert(payload_data[4 * n]);
        }
      }
    }
  ......
  // Feed the payload to the decoder side.
  // TODO(holmer): Break this out to have RED parsing handled generically.
  if (is_red && !(payload_data[0] & 0x80)) {
    // we receive only one frame packed in a RED packet, remove the RED wrapper
    rtp_header->header.payloadType = payload_data[0];
    // only one frame in the RED, strip the one byte to help NetEq
    return data_callback_->OnReceivedPayloadData(payload_data + 1,
                                                 payload_length - 1, rtp_header);
  }
  rtp_header->type.Audio.channel = audio_specific.channels;
  return data_callback_->OnReceivedPayloadData(payload_data, payload_length,
                                               rtp_header);
  // data_callback_ above is of type RtpData and is implemented by Channel.
}

/*android\webrtc\src\webrtc\voice_engine\Channel.cc*/
int32_t Channel::OnReceivedPayloadData(const uint8_t* payloadData,
                                       size_t payloadSize,
                                       const WebRtcRTPHeader* rtpHeader) {
  ......
  if (audio_coding_->IncomingPacket(payloadData, payloadSize, *rtpHeader) != 0) {
    _engineStatisticsPtr->SetLastError(
        VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
        "Channel::OnReceivedPayloadData() unable to push data to the ACM");
    return -1;
  }
  ......
}

/*android\webrtc\src\webrtc\modules\audio_coding\main\acm2\audio_coding_module_impl.cc*/
int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
                                          const size_t payload_length,
                                          const WebRtcRTPHeader& rtp_header) {
  return receiver_.InsertPacket(rtp_header, incoming_payload, payload_length);
}

/*\android\webrtc\src\webrtc\modules\audio_coding\main\acm2\acm_receiver.cc*/
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
                              const uint8_t* incoming_payload,
                              size_t length_payload) {
  ......
  // Pick the right decoder from the DecoderDatabase according to the RTP header.
  const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload);
  ..... // sync-packet handling
  ......
  // android\webrtc\src\webrtc\modules\audio_coding\neteq\neteq_impl.cc
  // NetEq is the core GIPS audio technology (jitter buffer plus decoding), later acquired
  // by Google; see a dedicated NetEq write-up for the details.
  if (neteq_->InsertPacket(rtp_header, incoming_payload, length_payload,
                           receive_timestamp) < 0) {
    LOG(LERROR) << ...;
    return -1;
  }
  ......
}

Inside NetEqImpl the payload is copied into a Packet object and queued:

// NetEqImpl::InsertPacketInternal (excerpt)
  ......
  packet->header.markerBit = false;
  packet->header.payloadType = rtp_header.header.payloadType;
  packet->header.sequenceNumber = rtp_header.header.sequenceNumber;
  packet->header.timestamp = rtp_header.header.timestamp;
  packet->header.ssrc = rtp_header.header.ssrc;
  packet->header.numCSRCs = 0;
  packet->payload_length = length_bytes;
  packet->primary = true;
  packet->waiting_time = 0;
  packet->payload = new uint8_t[packet->payload_length];
  packet->sync_packet = is_sync_packet;
  if (!packet->payload) {
    LOG_F(LS_ERROR) << ...;
  }
  memcpy(packet->payload, payload, packet->payload_length);
  // Insert packet in a packet list.
  packet_list.push_back(packet);
  // Save main payloads header for later.
  memcpy(&main_header, &packet->header, sizeof(main_header));

  // DTMF-related events are parsed out and pushed onto the DtmfEvent queue.
  PacketList::iterator it = packet_list.begin();
  while (it != packet_list.end()) {
    Packet* current_packet = (*it);
    assert(current_packet);
    assert(current_packet->payload);
    if (decoder_database_->IsDtmf(current_packet->header.payloadType)) {
      assert(!current_packet->sync_packet);  // We had a sanity check for this.
      DtmfEvent event;
      int ret = DtmfBuffer::ParseEvent(current_packet->header.timestamp,
                                       current_packet->payload,
                                       current_packet->payload_length, &event);
      if (ret != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfParsingError;
      }
      if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
        PacketBuffer::DeleteAllPackets(&packet_list);
        return kDtmfInsertError;
      }
      // TODO(hlundin): Let the destructor of Packet handle the payload.
      delete[] current_packet->payload;
      delete current_packet;
      it = packet_list.erase(it);
    } else {
      ++it;
    }
  }
  ......
  // Update bandwidth estimate, if the packet is not sync-packet.
  if (!packet_list.empty() && !packet_list.front()->sync_packet) {
    // The list can be empty here if we got nothing but DTMF payloads.
    AudioDecoder* decoder =
        decoder_database_->GetDecoder(main_header.payloadType);
    assert(decoder);  // Should always get a valid object, since we have
                      // already checked that the payload types are known.
    // (IncomingPacket appears to be a no-op in the concrete decoders.)
    decoder->IncomingPacket(packet_list.front()->payload,
                            packet_list.front()->payload_length,
                            packet_list.front()->header.sequenceNumber,
                            packet_list.front()->header.timestamp,
                            receive_timestamp);

    // The packets that need decoding are put into the PacketBuffer list.
    const size_t buffer_length_before_insert =
        packet_buffer_->NumPacketsInBuffer();
    ret = packet_buffer_->InsertPacketList(&packet_list, *decoder_database_,
                                           &current_rtp_payload_type_,
                                           &current_cng_rtp_payload_type_);
    if (ret == PacketBuffer::kFlushed) {
      // Reset DSP timestamp etc. if packet buffer flushed.
      new_codec_ = true;
      update_sample_rate_and_channels = true;
    } else if (ret != PacketBuffer::kOK) {
      PacketBuffer::DeleteAllPackets(&packet_list);
      return kOtherError;
    }
    ......
  }

PCM data is then pulled out through NetEqImpl::GetAudio:

int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
                        size_t* samples_per_channel, int* num_channels,
                        NetEqOutputType* type) {
  ......
  int error = GetAudioInternal(max_length, output_audio, samples_per_channel,
                               num_channels);
  ......
}

int NetEqImpl::GetAudioInternal(size_t max_length, int16_t* output,
                                size_t* samples_per_channel, int* num_channels) {
  ......
  // Decode.
  int decode_return_value =
      Decode(&packet_list, &operation, &length, &speech_type);
  ......
}

int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
                      int* decoded_length,
                      AudioDecoder::SpeechType* speech_type) {
  ......
  // Get the currently active decoder.
  AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
  ......
  // Decode.
  if (*operation == kCodecInternalCng) {
    RTC_DCHECK(packet_list->empty());
    return_value = DecodeCng(decoder, decoded_length, speech_type);
  } else {
    return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
                              speech_type);
  }
  ......
}

What GetAudio ultimately returns is PCM data.
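Seen from the outside, NetEq behaves like a jitter buffer with a pull interface: packets are inserted whenever they arrive from the network, and the playout side asks for exactly 10 ms of PCM at a fixed cadence, with NetEq deciding internally whether to decode, stretch, accelerate or conceal. A toy illustration of that usage pattern (hypothetical JitterBufferLike interface, not the real NetEq API):

#include <cstdint>
#include <cstddef>

// Hypothetical minimal jitter-buffer interface in the spirit of NetEq.
struct JitterBufferLike {
  // Called from the network thread for every received, depacketized frame.
  virtual int InsertPacket(uint16_t seq, uint32_t rtp_timestamp,
                           const uint8_t* payload, size_t size) = 0;
  // Called every 10 ms from the playout side; always fills one 10 ms PCM frame,
  // producing concealment when no packet is available.
  virtual int Get10MsAudio(int16_t* pcm, size_t max_samples,
                           size_t* samples_per_channel, int* channels) = 0;
  virtual ~JitterBufferLike() = default;
};

// Playout-side pull loop: one 10 ms request per audio-device callback.
inline void OnPlayoutCallback(JitterBufferLike* neteq_like, int16_t* out,
                              size_t max_samples) {
  size_t samples_per_channel = 0;
  int channels = 0;
  neteq_like->Get10MsAudio(out, max_samples, &samples_per_channel, &channels);
  // |out| now holds decoded (or concealed) PCM, ready for the mixer / audio track.
}

This is exactly the shape of the playout path described next: the Android audio track keeps pulling, and the pull bottoms out in NetEqImpl::GetAudio.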
Playout

org/webrtc/voiceengine/WebRtcAudioTrack.java runs an AudioTrackThread playout thread that keeps pulling PCM data from native code and feeds it into the platform AudioTrack:

nativeGetPlayoutData (WebRtcAudioTrack.java) -->
AudioTrackJni::GetPlayoutData (audio_track_jni.cc) -->
AudioTrackJni::OnGetPlayoutData(size_t length) (audio_track_jni.cc)

void AudioTrackJni::OnGetPlayoutData(size_t length) {
  ......
  // Pull decoded data (in 16-bit PCM format) from jitter buffer.
  int samples = audio_device_buffer_->RequestPlayoutData(frames_per_buffer_);
  if (samples <= 0) {
    ......
  }
  RTC_DCHECK_EQ(static_cast<size_t>(samples), frames_per_buffer_);
  // Copy decoded data into common byte buffer to ensure that it can be
  // written to the Java based audio track.
  samples = audio_device_buffer_->GetPlayoutData(direct_buffer_address_);
  ......
}

int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
  ......
  if (_ptrCbAudioTransport) {
    uint32_t res(0);
    int64_t elapsed_time_ms = -1;
    int64_t ntp_time_ms = -1;
    // Ask the registered AudioTransport for more playout data.
    res = _ptrCbAudioTransport->NeedMorePlayData(
        _playSamples, playBytesPerSample, playChannels, playSampleRate,
        &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms);
    if (res != 0) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   "NeedMorePlayData() failed");
    }
  }
  ......
}

The AudioTransport is implemented by VoEBaseImpl; its registration was covered above.

/*android\webrtc\src\webrtc\voice_engine\voe_base_impl.cc*/
int32_t VoEBaseImpl::NeedMorePlayData(size_t nSamples, size_t nBytesPerSample,
                                      uint8_t nChannels, uint32_t samplesPerSec,
                                      void* audioSamples, size_t& nSamplesOut,
                                      int64_t* elapsed_time_ms,
                                      int64_t* ntp_time_ms) {
  GetPlayoutData(static_cast<int>(samplesPerSec), static_cast<int>(nChannels),
                 nSamples, true, audioSamples, elapsed_time_ms, ntp_time_ms);
  nSamplesOut = audioFrame_.samples_per_channel_;
  return 0;
}

void VoEBaseImpl::GetPlayoutData(int sample_rate, int number_of_channels,
                                 size_t number_of_frames, bool feed_data_to_apm,
                                 void* audio_data, int64_t* elapsed_time_ms,
                                 int64_t* ntp_time_ms) {
  // Pull and mix the decoded audio from all active channels (mixing and resampling).
  shared_->output_mixer()->MixActiveChannels();

  // Additional operations on the combined signal
  shared_->output_mixer()->DoOperationsOnCombinedSignal(feed_data_to_apm);

  // Retrieve the final output mix (resampled to match the ADM)
  shared_->output_mixer()->GetMixedAudio(sample_rate, number_of_channels,
                                         &audioFrame_);

  // Copy the PCM data out.
  memcpy(audio_data, audioFrame_.data_,
         sizeof(int16_t) * number_of_frames * number_of_channels);
}

shared_->output_mixer()->MixActiveChannels() pulls PCM data from the decoders through each channel:

/*android\webrtc\src\webrtc\voice_engine\output_mixer.cc*/
int32_t OutputMixer::MixActiveChannels() {
  return _mixerModule.Process();
}

/*android\webrtc\src\webrtc\modules\audio_conference_mixer\source\audio_conference_mixer_impl.cc*/
int32_t AudioConferenceMixerImpl::Process() {
  ......
  UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
              &remainingParticipantsAllowedToMix);
  ......
}

void AudioConferenceMixerImpl::UpdateToMix(
    AudioFrameList* mixList, AudioFrameList* rampOutList,
    std::map<int, MixerParticipant*>* mixParticipantList,
    size_t* maxAudioFrameCounter) {
  ......
  for (MixerParticipantList::const_iterator participant =
           _participantList.begin();
       participant != _participantList.end(); ++participant) {
    ......
    // Pull PCM data from the MixerParticipant; MixerParticipant is implemented by Channel.
    if ((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
                   "failed to GetAudioFrame() from participant");
      _audioFramePool->PushMemory(audioFrame);
      continue;
    }
    ......
  }
  ......
}

/*android\webrtc\src\webrtc\voice_engine\channel.cc*/
int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame) {
  ......
  // Get decoded PCM data from the AudioCodingModule.
  if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_, audioFrame) == -1) {
    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
    // In all likelihood, the audio in this frame is garbage. We return an
    // error so that the audio mixer module doesn't add it to the mix. As
    // a result, it won't be played out and the actions skipped here are
    // irrelevant.
    return -1;
  }
  ......
}

/*android\webrtc\src\webrtc\modules\audio_coding\main\acm2\audio_coding_module_impl.cc*/
int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
                                           AudioFrame* audio_frame) {
  // GetAudio always returns 10 ms, at the requested sample rate.
  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "PlayoutData failed, RecOut Failed");
    return -1;
  }
  audio_frame->id_ = id_;
  return 0;
}

/*android\webrtc\src\webrtc\modules\audio_coding\main\acm2\acm_receiver.cc*/
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
  ......
  // As the decoding analysis showed, this pulls buffered packets from the jitter buffer,
  // decodes them and hands out PCM.
  if (neteq_->GetAudio(AudioFrame::kMaxDataSizeSamples, audio_buffer_.get(),
                       &samples_per_channel, &num_channels, &type) != NetEq::kOK) {
    LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
    return -1;
  }
  ......
}
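Before wrapping up, the mixing step itself is worth a small illustration: each participant contributes one 10 ms int16 frame, and the mixer sums them with saturation into the frame that goes to the device. A simplified sketch (not the actual AudioConferenceMixer logic, which also ramps, limits and selects the loudest participants):

#include <algorithm>
#include <cstdint>
#include <vector>

// Mixes several equally sized 10 ms PCM frames into one, clamping to the int16 range.
inline std::vector<int16_t> MixFrames(
    const std::vector<std::vector<int16_t>>& frames) {
  if (frames.empty()) return {};
  std::vector<int16_t> mixed(frames.front().size(), 0);
  for (size_t i = 0; i < mixed.size(); ++i) {
    int32_t acc = 0;
    for (const auto& frame : frames) {
      acc += frame[i];  // accumulate in 32 bits to avoid overflow
    }
    acc = std::max<int32_t>(-32768, std::min<int32_t>(32767, acc));
    mixed[i] = static_cast<int16_t>(acc);
  }
  return mixed;
}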
The analysis above shows that WebRTC's layering is very clear. Together with Figure 1 and the relevant source code, the whole framework is easy to follow.

---------------------
This post is from Jalon007's CSDN blog; the original article is at: https://blog.csdn.net/u010657219/article/details/54931154?utm_source=copy