
WebRTC Native Development in Practice: Data Capture from the Camera

1. Main Steps of Real-Time Audio and Video Development

2. Data Capture

Audio is captured mainly from the microphone.
Video has two main capture sources: 1. the camera; 2. the screen.

This post first covers how to capture camera data.

2.1 Environment

I am using Ubuntu here, so a few details differ from Windows, but on either platform you can follow along easily by comparing against the peerconnection example under examples/.

Since my machine has no camera, I again use v4l2loopback and ffmpeg on Ubuntu to simulate one.

2.2 Getting Device Information

Before capturing from the camera, we need to know which cameras are available.
In WebRTC, webrtc::VideoCaptureModule::DeviceInfo is used to enumerate the devices:

std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
    webrtc::VideoCaptureFactory::CreateDeviceInfo());
if (!info) {
  RTC_LOG(LERROR) << "CreateDeviceInfo failed";
  return -1;
}

int num_devices = info->NumberOfDevices();
for (int i = 0; i < num_devices; ++i) {
  // Create a capture object for device index i.
}

On Linux, device enumeration is done by DeviceInfoLinux::NumberOfDevices:

uint32_t DeviceInfoLinux::NumberOfDevices() {
  RTC_LOG(LS_INFO) << __FUNCTION__;

  uint32_t count = 0;
  char device[20];
  int fd = -1;
  struct v4l2_capability cap;

  /* detect /dev/video [0-63]VideoCaptureModule entries */
  for (int n = 0; n < 64; n++) {
    sprintf(device, "/dev/video%d", n);
    if ((fd = open(device, O_RDONLY)) != -1) {
      // query device capabilities and make sure this is a video capture device
      if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
          !(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
        close(fd);
        continue;
      }

      close(fd);
      count++;
    }
  }

  return count;
}

It is quite simple: the function just walks through the /dev/video* device files.
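
NumberOfDevices only tells us how many devices exist. To see which device each index refers to, a short sketch like the one below (assumed usage, mirroring the GetDeviceName call used later in Init) prints the readable name and unique id of every device:

// Sketch: list the name and unique id of every capture device.
std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
    webrtc::VideoCaptureFactory::CreateDeviceInfo());
for (uint32_t i = 0; i < info->NumberOfDevices(); ++i) {
  char device_name[256] = {0};
  char unique_name[256] = {0};
  if (info->GetDeviceName(i, device_name, sizeof(device_name), unique_name,
                          sizeof(unique_name)) == 0) {
    RTC_LOG(LS_INFO) << "Device " << i << ": " << device_name << " ("
                     << unique_name << ")";
  }
}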

2.3 Implementing the Sink

First, let us restate a few concepts:

  • In a streaming system, the device that produces data is called the Source and the device that receives data is called the Sink.
  • WebRTC abstracts these as VideoSourceInterface and VideoSinkInterface respectively. They are relative concepts: a given component may be a Sink toward the layer below it and a Source toward the layer above it.
  • To provide video data, implement VideoSourceInterface. This interface exposes AddOrUpdateSink, which registers a Sink with the Source.
  • To receive video data, implement VideoSinkInterface, which exposes the OnFrame function. Once a Sink has been registered with a Source via AddOrUpdateSink, the Source delivers its data to the Sink through OnFrame.
  • When capturing from a camera, VideoCapture is both a VideoSinkInterface and a VideoSourceInterface (a minimal sketch of this contract follows the list).
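
To make the contract concrete, here is a minimal sketch (my own illustration, not code from the demo) of a Sink that merely logs every frame it receives; any rtc::VideoSourceInterface<webrtc::VideoFrame> can feed it once it has been registered via AddOrUpdateSink:

#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "rtc_base/logging.h"

class LoggingSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  // The Source calls OnFrame for every frame once this sink is registered.
  void OnFrame(const webrtc::VideoFrame& frame) override {
    RTC_LOG(LS_INFO) << "Frame " << frame.width() << "x" << frame.height();
  }
};

// Usage, given some rtc::VideoSourceInterface<webrtc::VideoFrame>* source:
//   LoggingSink sink;
//   source->AddOrUpdateSink(&sink, rtc::VideoSinkWants());
//   ...
//   source->RemoveSink(&sink);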

With that in mind, we can write the rough camera-capture code by following the peer_connection/client example.

First, implement VideoSinkInterface:

// vcm_capturer_test.h

#ifndef EXAMPLES_VIDEO_CAPTURE_VCM_CAPTURER_TEST_H_
#define EXAMPLES_VIDEO_CAPTURE_VCM_CAPTURER_TEST_H_

#include <memory>

#include "modules/video_capture/video_capture.h"
#include "examples/video_capture/video_capturer_test.h"

namespace webrtc_demo {

class VcmCapturerTest : public VideoCapturerTest,
                        public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  static VcmCapturerTest* Create(size_t width,
                                 size_t height,
                                 size_t target_fps,
                                 size_t capture_device_index);

  virtual ~VcmCapturerTest();

  void OnFrame(const webrtc::VideoFrame& frame) override;

 private:
  VcmCapturerTest();

  bool Init(size_t width,
            size_t height,
            size_t target_fps,
            size_t capture_device_index);

  void Destroy();

  rtc::scoped_refptr<webrtc::VideoCaptureModule> vcm_;
  webrtc::VideoCaptureCapability capability_;
};

}  // namespace webrtc_demo

#endif  // EXAMPLES_VIDEO_CAPTURE_VCM_CAPTURER_TEST_H_

// vcm_capturer_test.cc

#include "examples/video_capture/vcm_capturer_test.h"

#include "modules/video_capture/video_capture_factory.h"
#include "rtc_base/logging.h"

namespace webrtc_demo {

VcmCapturerTest::VcmCapturerTest() : vcm_(nullptr) {}

VcmCapturerTest::~VcmCapturerTest() {
  Destroy();
}

bool VcmCapturerTest::Init(size_t width,
                           size_t height,
                           size_t target_fps,
                           size_t capture_device_index) {
  std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> device_info(
      webrtc::VideoCaptureFactory::CreateDeviceInfo());

  char device_name[256];
  char unique_name[256];

  if (device_info->GetDeviceName(static_cast<uint32_t>(capture_device_index),
                                 device_name, sizeof(device_name), unique_name,
                                 sizeof(unique_name)) != 0) {
    Destroy();
    return false;
  }

  vcm_ = webrtc::VideoCaptureFactory::Create(unique_name);
  if (!vcm_) {
    return false;
  }
  vcm_->RegisterCaptureDataCallback(this);

  device_info->GetCapability(vcm_->CurrentDeviceName(), 0, capability_);
  capability_.width = static_cast<int32_t>(width);
  capability_.height = static_cast<int32_t>(height);
  capability_.maxFPS = static_cast<int32_t>(target_fps);
  capability_.videoType = webrtc::VideoType::kI420;

  if (vcm_->StartCapture(capability_) != 0) {
    Destroy();
    return false;
  }

  RTC_CHECK(vcm_->CaptureStarted());
  return true;
}

VcmCapturerTest* VcmCapturerTest::Create(size_t width,
                                         size_t height,
                                         size_t target_fps,
                                         size_t capture_device_index) {
  std::unique_ptr<VcmCapturerTest> vcm_capturer(new VcmCapturerTest());
  if (!vcm_capturer->Init(width, height, target_fps, capture_device_index)) {
    RTC_LOG(LS_WARNING) << "Failed to create VcmCapturer(w = " << width
                        << ", h = " << height << ", fps = " << target_fps
                        << ")";
    return nullptr;
  }
  return vcm_capturer.release();
}

void VcmCapturerTest::Destroy() {
  if (!vcm_)
    return;

  vcm_->StopCapture();
  vcm_->DeRegisterCaptureDataCallback();
  // Release reference to VCM.
  vcm_ = nullptr;
}

void VcmCapturerTest::OnFrame(const webrtc::VideoFrame& frame) {
  // Count incoming frames and log the measured FPS roughly once per second.
  static auto timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();
  static size_t cnt = 0;

  RTC_LOG(LS_INFO) << "OnFrame";
  VideoCapturerTest::OnFrame(frame);

  cnt++;
  auto timestamp_curr = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();
  if(timestamp_curr - timestamp > 1000) {
    RTC_LOG(LS_INFO) << "FPS: " << cnt;
    cnt = 0;
    timestamp = timestamp_curr;
  }
}

}  // namespace webrtc_demo

2.4 Implementing the Source

// video_capturer_test.h

#ifndef EXAMPLES_VIDEO_CAPTURE_VIDEO_CPTURE_TEST_H_
#define EXAMPLES_VIDEO_CAPTURE_VIDEO_CPTURE_TEST_H_

#include <memory>
#include <mutex>

#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_adapter.h"
#include "media/base/video_broadcaster.h"

namespace webrtc_demo {

class VideoCapturerTest : public rtc::VideoSourceInterface<webrtc::VideoFrame> {
 public:
  class FramePreprocessor {
   public:
    virtual ~FramePreprocessor() = default;
    virtual webrtc::VideoFrame Preprocess(const webrtc::VideoFrame& frame) = 0;
  };

 public:
  ~VideoCapturerTest() override;

  void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
                       const rtc::VideoSinkWants& wants) override;

  void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;

  void SetFramePreprocessor(std::unique_ptr<FramePreprocessor> preprocessor) {
    std::lock_guard<std::mutex> lock(mutex_);
    preprocessor_ = std::move(preprocessor);
  }

 protected:
  void OnFrame(const webrtc::VideoFrame& frame);
  rtc::VideoSinkWants GetSinkWants();

 private:
  void UpdateVideoAdapter();
  webrtc::VideoFrame MaybePreprocess(const webrtc::VideoFrame& frame);

 private:
  std::unique_ptr<FramePreprocessor> preprocessor_;
  std::mutex mutex_;
  rtc::VideoBroadcaster broadcaster_;
  cricket::VideoAdapter video_adapter_;
};

}  // namespace webrtc_demo

#endif  // EXAMPLES_VIDEO_CAPTURE_VIDEO_CPTURE_TEST_H_

// video_capturer_test.cc

#include "examples/video_capture/video_capturer_test.h"

#include "api/video/i420_buffer.h"
#include "api/video/video_rotation.h"
#include "rtc_base/logging.h"

namespace webrtc_demo {

VideoCapturerTest::~VideoCapturerTest() = default;

void VideoCapturerTest::OnFrame(const webrtc::VideoFrame& original_frame) {
  int cropped_width = 0;
  int cropped_height = 0;
  int out_width = 0;
  int out_height = 0;

  webrtc::VideoFrame frame = MaybePreprocess(original_frame);

  // AdaptFrameResolution expects a timestamp in nanoseconds, hence us * 1000.
  if (!video_adapter_.AdaptFrameResolution(
          frame.width(), frame.height(), frame.timestamp_us() * 1000,
          &cropped_width, &cropped_height, &out_width, &out_height)) {
    // Drop frame in order to respect frame rate constraint.
    return;
  }

  if (out_height != frame.height() || out_width != frame.width()) {
    // Video adapter has requested a down-scale. Allocate a new buffer and
    // return scaled version.
    // For simplicity, only scale here without cropping.
    rtc::scoped_refptr<webrtc::I420Buffer> scaled_buffer =
        webrtc::I420Buffer::Create(out_width, out_height);
    scaled_buffer->ScaleFrom(*frame.video_frame_buffer()->ToI420());
    webrtc::VideoFrame::Builder new_frame_builder =
        webrtc::VideoFrame::Builder()
            .set_video_frame_buffer(scaled_buffer)
            .set_rotation(webrtc::kVideoRotation_0)
            .set_timestamp_us(frame.timestamp_us())
            .set_id(frame.id());
    if (frame.has_update_rect()) {
      webrtc::VideoFrame::UpdateRect new_rect =
          frame.update_rect().ScaleWithFrame(frame.width(), frame.height(), 0,
                                             0, frame.width(), frame.height(),
                                             out_width, out_height);
      new_frame_builder.set_update_rect(new_rect);
    }
    broadcaster_.OnFrame(new_frame_builder.build());
  } else {
    // No adaptations needed, just return the frame as is.
    broadcaster_.OnFrame(frame);
  }
}

rtc::VideoSinkWants VideoCapturerTest::GetSinkWants() {
  return broadcaster_.wants();
}

void VideoCapturerTest::AddOrUpdateSink(
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
    const rtc::VideoSinkWants& wants) {
  broadcaster_.AddOrUpdateSink(sink, wants);
  UpdateVideoAdapter();
}

void VideoCapturerTest::RemoveSink(
    rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) {
  broadcaster_.RemoveSink(sink);
  UpdateVideoAdapter();
}

void VideoCapturerTest::UpdateVideoAdapter() {
  video_adapter_.OnSinkWants(broadcaster_.wants());
}

webrtc::VideoFrame VideoCapturerTest::MaybePreprocess(
    const webrtc::VideoFrame& frame) {
  std::lock_guard<std::mutex> lock(mutex_);
  if (preprocessor_ != nullptr) {
    return preprocessor_->Preprocess(frame);
  } else {
    return frame;
  }
}
}  // namespace webrtc_demo

2.5 The main Function

Finally, the main function:

#include "modules/video_capture/video_capture_factory.h"
#include "rtc_base/logging.h"
#include "examples/video_capture/vcm_capturer_test.h"
#include "test/video_renderer.h"

#include <iostream>
#include <thread>

int main() {
  const size_t kWidth = 1920;
  const size_t kHeight = 1080;
  const size_t kFps = 30;

  std::unique_ptr<webrtc_demo::VcmCapturerTest> capturer;
  std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
      webrtc::VideoCaptureFactory::CreateDeviceInfo());
  std::unique_ptr<webrtc::test::VideoRenderer> renderer;

  if (!info) {
    RTC_LOG(LERROR) << "CreateDeviceInfo failed";
    return -1;
  }
  int num_devices = info->NumberOfDevices();
  for (int i = 0; i < num_devices; ++i) {
    capturer.reset(
        webrtc_demo::VcmCapturerTest::Create(kWidth, kHeight, kFps, i));
    if (capturer) {
      break;
    }
  }

  if (!capturer) {
    RTC_LOG(LERROR) << "Cannot found available video device";
    return -1;
  }

  renderer.reset(webrtc::test::VideoRenderer::Create("Camera", kWidth, kHeight));
  capturer->AddOrUpdateSink(renderer.get(), rtc::VideoSinkWants());

  std::this_thread::sleep_for(std::chrono::seconds(30));
  capturer->RemoveSink(renderer.get());

  RTC_LOG(WARNING) << "Demo exit";
  return 0;
}

Here the rendering side is implemented with the ready-made webrtc::test::VideoRenderer. If you look at its code, webrtc::test::VideoRenderer simply implements rtc::VideoSinkInterface, so it can be handed straight to our own VideoCapturerTest via AddOrUpdateSink, because the VideoCapturerTest we expose to the upper layer is an rtc::VideoSourceInterface implementation.

Demo code branch: https://github.com/243286065/webrtc-cpp-demo/tree/496545116dd6d44c10a0c9b96f1420f54b540abb

Commit diff: https://github.com/243286065/webrtc-cpp-demo/commit/496545116dd6d44c10a0c9b96f1420f54b540abb

3. VideoCaptureFactory

Using webrtc::test::VideoRenderer for the rendering side in the example above is a convenient shortcut. Implementing one yourself is also quite simple; just model it on src/test/video_renderer.h:

// src/test/video_renderer.h

/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#ifndef TEST_VIDEO_RENDERER_H_
#define TEST_VIDEO_RENDERER_H_

#include <stddef.h>

#include "api/video/video_sink_interface.h"

namespace webrtc {
class VideoFrame;

namespace test {
class VideoRenderer : public rtc::VideoSinkInterface<VideoFrame> {
 public:
  // Creates a platform-specific renderer if possible, or a null implementation
  // if failing.
  static VideoRenderer* Create(const char* window_title,
                               size_t width,
                               size_t height);
  // Returns a renderer rendering to a platform specific window if possible,
  // NULL if none can be created.
  // Creates a platform-specific renderer if possible, returns NULL if a
  // platform renderer could not be created. This occurs, for instance, when
  // running without an X environment on Linux.
  static VideoRenderer* CreatePlatformRenderer(const char* window_title,
                                               size_t width,
                                               size_t height);
  virtual ~VideoRenderer() {}

 protected:
  VideoRenderer() {}
};
}  // namespace test
}  // namespace webrtc

#endif  // TEST_VIDEO_RENDERER_H_

However, CreatePlatformRenderer has to be implemented separately for each platform. WebRTC's default implementations live in src/test/linux/video_renderer_linux.cc and src/test/win/d3d_renderer.cc, and they create the window themselves with GLX or D3D.
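
If you do not want to deal with windowing at all, the sink does not have to be a renderer. Below is a minimal sketch (my own example, with an assumed I420FileSink class name) that dumps every frame as raw I420 to a file, which can later be inspected with any raw-video player:

#include <cstdint>
#include <cstdio>

#include "api/scoped_refptr.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_sink_interface.h"

class I420FileSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  explicit I420FileSink(const char* path) : file_(fopen(path, "wb")) {}
  ~I420FileSink() override {
    if (file_)
      fclose(file_);
  }

  void OnFrame(const webrtc::VideoFrame& frame) override {
    if (!file_)
      return;
    // Convert to I420 and append the Y, U and V planes back to back.
    rtc::scoped_refptr<webrtc::I420BufferInterface> buffer =
        frame.video_frame_buffer()->ToI420();
    WritePlane(buffer->DataY(), buffer->StrideY(), buffer->width(),
               buffer->height());
    WritePlane(buffer->DataU(), buffer->StrideU(), buffer->ChromaWidth(),
               buffer->ChromaHeight());
    WritePlane(buffer->DataV(), buffer->StrideV(), buffer->ChromaWidth(),
               buffer->ChromaHeight());
  }

 private:
  // Write one plane row by row, skipping any stride padding.
  void WritePlane(const uint8_t* data, int stride, int width, int height) {
    for (int i = 0; i < height; ++i) {
      fwrite(data + i * stride, 1, width, file_);
    }
  }

  FILE* file_;
};

Such a sink attaches exactly like the renderer did in main: capturer->AddOrUpdateSink(&sink, rtc::VideoSinkWants()).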

On the surface, the implementation above has us writing the capture-side Source and Sink ourselves, but in fact it relies on a convenient set of interfaces that WebRTC already provides: webrtc::VideoCaptureFactory.

//video_capture_factory.h

#ifndef MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
#define MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_

#include "api/scoped_refptr.h"
#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_defines.h"

namespace webrtc {

class VideoCaptureFactory {
 public:
  // Create a video capture module object
  // id - unique identifier of this video capture module object.
  // deviceUniqueIdUTF8 - name of the device.
  //                      Available names can be found by using GetDeviceName
  static rtc::scoped_refptr<VideoCaptureModule> Create(
      const char* deviceUniqueIdUTF8);

  static VideoCaptureModule::DeviceInfo* CreateDeviceInfo();

 private:
  ~VideoCaptureFactory();
};

}  // namespace webrtc

#endif  // MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_

Because WebRTC has wrapped this interface, it is very concise and cross-platform; our VcmCapturerTest is merely a further wrapper around it. For an even simpler implementation, you could perform the capture with VideoCaptureFactory alone and then register a webrtc::test::VideoRenderer via VideoCaptureModule::RegisterCaptureDataCallback to play back the video.
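
As a rough sketch of that simpler path (untested; the constants and the capability index are my own choices), the whole demo can be reduced to a VideoCaptureModule with a webrtc::test::VideoRenderer registered as its data callback:

#include <chrono>
#include <memory>
#include <thread>

#include "modules/video_capture/video_capture_factory.h"
#include "test/video_renderer.h"

int main() {
  std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
      webrtc::VideoCaptureFactory::CreateDeviceInfo());
  if (!info || info->NumberOfDevices() == 0)
    return -1;

  // Use the first enumerated device.
  char device_name[256] = {0};
  char unique_name[256] = {0};
  info->GetDeviceName(0, device_name, sizeof(device_name), unique_name,
                      sizeof(unique_name));

  rtc::scoped_refptr<webrtc::VideoCaptureModule> vcm =
      webrtc::VideoCaptureFactory::Create(unique_name);
  if (!vcm)
    return -1;

  // Take the device's first reported capability instead of forcing a format.
  webrtc::VideoCaptureCapability capability;
  info->GetCapability(vcm->CurrentDeviceName(), 0, capability);

  // webrtc::test::VideoRenderer is itself an rtc::VideoSinkInterface, so it
  // can be registered directly as the capture data callback.
  std::unique_ptr<webrtc::test::VideoRenderer> renderer(
      webrtc::test::VideoRenderer::Create("Camera", capability.width,
                                          capability.height));
  vcm->RegisterCaptureDataCallback(renderer.get());

  if (vcm->StartCapture(capability) != 0)
    return -1;

  std::this_thread::sleep_for(std::chrono::seconds(10));
  vcm->StopCapture();
  vcm->DeRegisterCaptureDataCallback();
  return 0;
}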