Windows

This article describes how to output raw audio and video stream data on Windows.

Output Raw Video Data

1. Enable the raw video data callback by calling the following API:

int EnableVideoFrameObserver(bool enabled, unsigned int position);
Note
  • enabled: true to subscribe to raw video data; false to unsubscribe.

  • position: the position(s) in the video pipeline to observe, as a bitmask of the following values:

    typedef enum {
      /** Captured video data, delivered via the OnCaptureVideoFrame callback. */
      RtcEnginePositionPostCapture = 1 << 0,
      /** Pre-render (remote) video data, delivered via the OnRemoteVideoFrame callback. */
      RtcEnginePositionPreRender = 1 << 1,
      /** Pre-encode video data, delivered via the OnPreEncodeVideoFrame callback. */
      RtcEnginePositionPreEncoder = 1 << 2,
    } RtcEngineVideoObservePosition;

After calling this API, the subscription takes effect and raw video data is delivered through the observer callbacks.
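
Because position is a bitmask, several positions can be combined with bitwise OR. A minimal sketch (assumptions: engine is a valid engine instance obtained elsewhere, and the position flags live in the ding::rtc namespace):

// Observe captured, remote, and pre-encode frames in a single call.
unsigned int positions = ding::rtc::RtcEnginePositionPostCapture
                       | ding::rtc::RtcEnginePositionPreRender
                       | ding::rtc::RtcEnginePositionPreEncoder;
engine->EnableVideoFrameObserver(true, positions);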

2. Register the observer that receives the raw video data callbacks:

int RegisterVideoFrameObserver(RtcEngineVideoFrameObserver* observer);
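
A registration sketch (DemoVideoObserver is the sample observer class implemented in step 4; engine is assumed to be a valid engine instance):

// The observer object must stay alive for as long as it is registered.
static DemoVideoObserver s_videoObserver;
engine->RegisterVideoFrameObserver(&s_videoObserver);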

3. Receive raw video data through the RtcEngineVideoFrameObserver callbacks:

/**
* @brief Preferred pixel format for the delivered video frames.
*/
RtcEngineVideoPixelFormat GetVideoFormatPreference();

/**
* @brief Locally captured video data.
* @param frame video frame
* @return
* - true: write the frame back to the SDK (the default);
* - false: do not write the frame back to the SDK.
*/
bool OnCaptureVideoFrame(RtcEngineVideoFrame &frame);

/**
* @brief Local pre-encode video data.
* @param videoTrack video track
* @param frame video frame
* @return
* - true: write the frame back to the SDK (the default);
* - false: do not write the frame back to the SDK.
*/
bool OnPreEncodeVideoFrame(RtcEngineVideoTrack videoTrack, RtcEngineVideoFrame &frame);

/**
* @brief Decoded remote video data.
* @param userId user ID
* @param videoTrack video track
* @param frame video frame
* @return
* - true: write the frame back to the SDK (the default);
* - false: do not write the frame back to the SDK.
*/
bool OnRemoteVideoFrame(ding::rtc::String userId, RtcEngineVideoTrack videoTrack, RtcEngineVideoFrame &frame);
Note

After calling EnableVideoFrameObserver with enabled = true and RegisterVideoFrameObserver:

  • The following callback tells the engine which pixel format to deliver:

    • GetVideoFormatPreference

  • The following three callbacks deliver the corresponding raw video data:

    • OnCaptureVideoFrame delivers local preview data; frames arrive once preview has started.

    • OnPreEncodeVideoFrame delivers local pre-encode data; frames arrive once publishing has started.

    • OnRemoteVideoFrame delivers remote stream data; frames arrive once the subscription succeeds.

4. Sample code that writes subscribed raw video data to local YUV files:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <map>
#include <mutex>
#include <string>

class DemoVideoObserver : public ding::rtc::RtcEngineVideoFrameObserver
{
public:
  ~DemoVideoObserver();
  ding::rtc::RtcEngineVideoPixelFormat GetVideoFormatPreference() override {
    return ding::rtc::RtcEngineVideoPixelFormat::RtcEngineVideoI420;
  }

  bool OnCaptureVideoFrame(ding::rtc::RtcEngineVideoFrame &frame) override {
    std::string streamId = "capture";
    SaveToFile(streamId, frame);
    return false;
  }

  bool OnRemoteVideoFrame(ding::rtc::String uid,
    ding::rtc::RtcEngineVideoTrack track,
    ding::rtc::RtcEngineVideoFrame &frame) override
  {
    std::string streamId = std::string(uid.c_str()) + "-" + (track == ding::rtc::RtcEngineVideoTrack::RtcEngineVideoTrackCamera ? "camera" : "screen");
    SaveToFile(streamId, frame);
    return false;
  }

  bool OnPreEncodeVideoFrame(
    ding::rtc::RtcEngineVideoTrack track,
    ding::rtc::RtcEngineVideoFrame &frame) override
  {
    std::string streamId = std::string("publish") + "-" + (track == ding::rtc::RtcEngineVideoTrack::RtcEngineVideoTrackCamera ? "camera" : "screen");
    SaveToFile(streamId, frame);
    return false;
  }

  // to save disk usage, scale down to 320x240, max 200 frames
  class StreamInfo {
  public:
    std::string streamId; // uid # tracktype
    int framesSaved_;
    FILE *fp_;
  };
  void SaveToFile(const std::string &fileName, ding::rtc::RtcEngineVideoFrame &frame);

  std::map<std::string, StreamInfo> streamMap_; // guarded by mutex_
  std::mutex mutex_;
};

DemoVideoObserver::~DemoVideoObserver()
{
    for (auto it=streamMap_.begin(); it!=streamMap_.end(); it++) {
        fclose(it->second.fp_);
    }
}

void DemoVideoObserver::SaveToFile(const std::string &streamId,
    ding::rtc::RtcEngineVideoFrame &frame)
{
    bool found = false;
    mutex_.lock();
    if (streamMap_.find(streamId) != streamMap_.end()) {
        found = true;
    }
    mutex_.unlock();

    if (!found) {
        // GetGlobalContext() and recursive_mkdir() are demo-app helpers that
        // supply the channel name / working directory and create directories.
        std::string channel = GetGlobalContext()->dc_.appserver_param_.channelName;
        std::string dir = GetGlobalContext()->dc_.utf8_working_path + "/record/" + channel;
        recursive_mkdir(dir.c_str());

        std::string fileName = dir + "/" + streamId + "-320x240.yuv";
        FILE *fp = fopen(fileName.c_str(), "wb");
        if (fp == NULL) {
            printf("failed to create file %s\n", fileName.c_str());
            return;
        }

        StreamInfo si;
        si.streamId = streamId;
        si.framesSaved_ = 0;
        si.fp_ = fp;

        mutex_.lock();
        streamMap_[streamId] = si;
        mutex_.unlock();
    }

    mutex_.lock();
    FILE *fp = streamMap_[streamId].fp_;
    int count = streamMap_[streamId].framesSaved_;
    mutex_.unlock();

    // Enforce the per-stream cap from the class comment: stop after 200 frames.
    if (count >= 200) {
        return;
    }

    if (frame.frameType != ding::rtc::RtcEngineVideoFrameType::RtcEngineVideoFrameRaw) {
        return;
    }

    if (frame.pixelFormat != ding::rtc::RtcEngineVideoPixelFormat::RtcEngineVideoI420) {
        return;
    }

    // scale down to 320x240
    ding::rtc::RtcEngineVideoFrame tmp;
    memset(&tmp, 0, sizeof(ding::rtc::RtcEngineVideoFrame));
    tmp.frameType = ding::rtc::RtcEngineVideoFrameType::RtcEngineVideoFrameRaw;
    tmp.pixelFormat = ding::rtc::RtcEngineVideoPixelFormat::RtcEngineVideoI420;
    tmp.width = 320;
    tmp.height = 240;
    int ystride = (tmp.width + 1) /  2 * 2;
    int hstride = (tmp.height + 1) / 2 * 2;
    tmp.stride[0] = ystride;
    tmp.stride[1] = ystride / 2;
    tmp.stride[2] = ystride / 2;
    tmp.offset[0] = 0;
    tmp.offset[1] = ystride * hstride;
    tmp.offset[2] = ystride * hstride * 5 / 4;
    int size = ystride * hstride * 3 / 2;
    tmp.data = malloc(size);

    const uint8_t *src[3];
    src[0] = (uint8_t *)frame.data + frame.offset[0];
    src[1] = (uint8_t *)frame.data + frame.offset[1];
    src[2] = (uint8_t *)frame.data + frame.offset[2];
    uint32_t src_strides[3];
    src_strides[0] = frame.stride[0];
    src_strides[1] = frame.stride[1];
    src_strides[2] = frame.stride[2];
    uint8_t *dst[3];
    dst[0] = (uint8_t *)tmp.data + tmp.offset[0];
    dst[1] = (uint8_t *)tmp.data + tmp.offset[1];
    dst[2] = (uint8_t *)tmp.data + tmp.offset[2];
    uint32_t dst_strides[3];
    dst_strides[0] = tmp.stride[0];
    dst_strides[1] = tmp.stride[1];
    dst_strides[2] = tmp.stride[2];
    ResizeYuvBuffers(src, src_strides,
        frame.width, frame.height,
        dst, dst_strides,
        tmp.width, tmp.height);

    ystride = tmp.stride[0];
    hstride = (tmp.height + 1) / 2 * 2;

    // y buffer
    size = ystride * hstride;
    const uint8_t *y = (const uint8_t *)tmp.data + tmp.offset[0];
    fwrite(y, 1, size, fp);

    // u buffer
    size = size / 4;
    const uint8_t *u = (const uint8_t *)tmp.data + tmp.offset[1];
    fwrite(u, 1, size, fp);

    // v buffer
    const uint8_t *v = (const uint8_t *)tmp.data + tmp.offset[2];
    fwrite(v, 1, size, fp);

    mutex_.lock();
    streamMap_[streamId].framesSaved_++;
    mutex_.unlock();

    free(tmp.data);
}
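
Putting the pieces together (a sketch: the RtcEngine type name and the semantics of disabling with the same position mask are assumptions; s_videoObserver is the instance from the registration sketch in step 2):

void StartVideoDump(ding::rtc::RtcEngine *engine)
{
    unsigned int positions = ding::rtc::RtcEnginePositionPostCapture
                           | ding::rtc::RtcEnginePositionPreEncoder
                           | ding::rtc::RtcEnginePositionPreRender;
    engine->RegisterVideoFrameObserver(&s_videoObserver);
    engine->EnableVideoFrameObserver(true, positions);
}

void StopVideoDump(ding::rtc::RtcEngine *engine)
{
    // Assumption: passing enabled = false with the same mask stops delivery.
    unsigned int positions = ding::rtc::RtcEnginePositionPostCapture
                           | ding::rtc::RtcEnginePositionPreEncoder
                           | ding::rtc::RtcEnginePositionPreRender;
    engine->EnableVideoFrameObserver(false, positions);
}

Each saved file contains headerless I420 frames and can be previewed with, for example: ffplay -f rawvideo -pixel_format yuv420p -video_size 320x240 <file>.yuv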

Output Raw Audio Data

1. Enable the raw audio data callback by calling the following API:

int EnableAudioFrameObserver(bool enabled, unsigned int position, const RtcEngineAudioFrameObserverConfig& config);
Note
  • enabled: true to enable the subscription; false to disable it.

  • position: the position(s) in the audio pipeline to observe, as a bitmask of the following values:

    typedef enum {
        /** Captured audio data, delivered via the OnCapturedAudioFrame callback. */
        RtcEngineAudioPositionCaptured = 1 << 0,
        /** Audio data after 3A processing, delivered via the OnProcessCapturedAudioFrame callback. */
        RtcEngineAudioPositionProcessCaptured = 1 << 1,
        /** Published audio data, delivered via the OnPublishAudioFrame callback. */
        RtcEngineAudioPositionPub = 1 << 2,
        /** Playback audio data, delivered via the OnPlaybackAudioFrame callback. */
        RtcEngineAudioPositionPlayback = 1 << 3,
        /** Remote per-user audio data, delivered via the OnRemoteUserAudioFrame callback. */
        RtcEngineAudioPositionRemoteUser = 1 << 4,
    } RtcEngineAudioObservePosition;
  • config: the format of the delivered raw audio data

    typedef struct RtcEngineAudioFrameObserverConfig {
        /** Sample rate of the delivered audio. */
        RtcEngineAudioSampleRate sampleRate = RtcEngineAudioSampleRate_48000;
        /** Channel layout of the delivered audio. */
        RtcEngineAudioNumChannelType channels = RtcEngineStereoAudio;
        /** Read/write option: read-only (only inspect the raw data), or read-write (modify the audio content in place). */
        RtcEngineAudioFramePermissions permission = RtcEngineAudioFrameReadOnly;
    } RtcEngineAudioFrameObserverConfig;

Calling this API subscribes to or unsubscribes from raw audio data accordingly.
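
A minimal sketch (assumption: engine is a valid engine instance; the default config fields shown above are set explicitly for clarity):

// Observe captured and playback audio at 48 kHz stereo, read-only.
ding::rtc::RtcEngineAudioFrameObserverConfig config;
config.sampleRate = ding::rtc::RtcEngineAudioSampleRate_48000;
config.channels = ding::rtc::RtcEngineStereoAudio;
config.permission = ding::rtc::RtcEngineAudioFrameReadOnly;

unsigned int positions = ding::rtc::RtcEngineAudioPositionCaptured
                       | ding::rtc::RtcEngineAudioPositionPlayback;
engine->EnableAudioFrameObserver(true, positions, config);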

2. Register the observer that receives the raw audio data callbacks:

int RegisterAudioFrameObserver(RtcEngineAudioFrameObserver* observer);

3. Receive raw audio data through the RtcEngineAudioFrameObserver callbacks:

/**
 * @brief Remote audio data, per remote user.
 * @param uid Remote user ID.
 * @param frame Audio frame; see {@link RtcEngineAudioFrame}.
 * @note Do not perform time-consuming work in this callback, or audio glitches may occur.
 */
void OnRemoteUserAudioFrame(const char *uid, RtcEngineAudioFrame &frame);

/**
 * @brief Playback audio data.
 * @details The mixed audio of all remote users, about to be played back locally.
 * @param frame Audio frame.
 */
void OnPlaybackAudioFrame(RtcEngineAudioFrame &frame);

/**
 * @brief Locally captured audio data.
 * @param frame Audio frame.
 */
void OnCapturedAudioFrame(RtcEngineAudioFrame &frame);

/**
 * @brief Captured audio data after 3A processing.
 * @param frame Audio frame.
 */
void OnProcessCapturedAudioFrame(RtcEngineAudioFrame &frame);

/**
 * @brief Published (local) audio data.
 * @param frame Audio frame.
 */
void OnPublishAudioFrame(RtcEngineAudioFrame &frame);
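
When config.permission is RtcEngineAudioFrameReadWrite, a callback may modify the frame in place before returning. A hypothetical sketch that publishes silence by zeroing the PCM payload (MutingAudioSink is not an SDK class, and the write-back behavior is inferred from the config description above):

#include <cstring>  // memset

// Assumes the observer was enabled with RtcEngineAudioFrameReadWrite.
void MutingAudioSink::OnPublishAudioFrame(ding::rtc::RtcEngineAudioFrame &frame)
{
    size_t bytes = frame.samples * frame.bytesPerSample * frame.channels;
    memset(frame.buffer, 0, bytes);  // zero the 16-bit PCM samples -> silence
}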

4. Sample code that subscribes to remote raw audio data and writes it to local PCM files:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

class DemoAudioSink : public ding::rtc::RtcEngineAudioFrameObserver
{
public:
    DemoAudioSink();
    ~DemoAudioSink();

    void OnPlaybackAudioFrame(ding::rtc::RtcEngineAudioFrame &frame) override;

    void OnCapturedAudioFrame(ding::rtc::RtcEngineAudioFrame &frame) override;

    void OnProcessCapturedAudioFrame(ding::rtc::RtcEngineAudioFrame &frame) override;

    void OnPublishAudioFrame(ding::rtc::RtcEngineAudioFrame &frame) override;

    void OnRemoteUserAudioFrame(const char *uid, ding::rtc::RtcEngineAudioFrame &frame) override;

private:
    void saveOneFrame(const std::string &id, ding::rtc::RtcEngineAudioFrame &frame);

    int64_t max_frames_; // per-stream cap on frames written to disk
    struct audio_track_record_context {
        FILE *fp;
        bool file_error;
        int64_t frame_count;
    };
    std::map<std::string, struct audio_track_record_context> record_contexts;
};


DemoAudioSink::DemoAudioSink()
{
  // 50 frames/s for 30 s per stream (assuming 20 ms audio frames).
  max_frames_ = 50 * 30;
}

DemoAudioSink::~DemoAudioSink()
{
  for (auto it = record_contexts.begin(); it != record_contexts.end(); it++) {
    if (it->second.fp != NULL) {
      fclose(it->second.fp);
    }
  }
}

void DemoAudioSink::saveOneFrame(const std::string &id, ding::rtc::RtcEngineAudioFrame &frame)
{
    if (record_contexts.find(id) == record_contexts.end()) {
        struct audio_track_record_context empty;
        empty.fp = NULL;
        empty.file_error = false;
        empty.frame_count = 0;
        record_contexts[id] = empty;
    }
    if (record_contexts[id].frame_count >= max_frames_) {
        return;
    }

    if (record_contexts[id].file_error) {
        return;
    }

    if (record_contexts[id].fp == NULL) {
        std::string file;
        std::string channel = GetGlobalContext()->dc_.appserver_param_.channelName;
        std::string dir = GetGlobalContext()->dc_.utf8_working_path + "/record/" + channel;
        recursive_mkdir(dir.c_str());
        char t[128];
        snprintf(t, sizeof(t), "%d_%s_%s.pcm", frame.samplesPerSec, frame.channels == 2 ? "stereo" : "mono", id.c_str());
        file = dir + "/" + t;

        record_contexts[id].fp = fopen(file.c_str(), "wb");
        if (record_contexts[id].fp == NULL) {
            printf("failed to create file %s\n", file.c_str());
            record_contexts[id].file_error = true;
            return;
        }
        else {
            printf("created file %s\n", file.c_str());
        }
    }

    if (frame.type != ding::rtc::RtcEngineAudioFrameType::RtcEngineAudioFramePcm16) {
        return;
    }

    size_t size = frame.samples * frame.bytesPerSample * frame.channels;
    if (fwrite(frame.buffer, 1, size, record_contexts[id].fp) != size) {
        record_contexts[id].file_error = true;
        fclose(record_contexts[id].fp);
        record_contexts[id].fp = NULL;
        return;
    }

    record_contexts[id].frame_count++;
}

void DemoAudioSink::OnRemoteUserAudioFrame(const char *uid, ding::rtc::RtcEngineAudioFrame &frame)
{
    saveOneFrame(uid, frame);
}

void DemoAudioSink::OnPlaybackAudioFrame(ding::rtc::RtcEngineAudioFrame &frame)
{
    saveOneFrame("playback", frame);
}

void DemoAudioSink::OnCapturedAudioFrame(ding::rtc::RtcEngineAudioFrame &frame)
{
    saveOneFrame("capture", frame);
}

void DemoAudioSink::OnProcessCapturedAudioFrame(ding::rtc::RtcEngineAudioFrame &frame)
{
    saveOneFrame("processed", frame);
}

void DemoAudioSink::OnPublishAudioFrame(ding::rtc::RtcEngineAudioFrame &frame)
{
    saveOneFrame("publish", frame);
}
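
Wiring the audio path mirrors the video case (a sketch under the same assumption about the RtcEngine type name):

static DemoAudioSink s_audioSink;

void StartAudioDump(ding::rtc::RtcEngine *engine)
{
    ding::rtc::RtcEngineAudioFrameObserverConfig config; // defaults: 48 kHz, stereo, read-only
    unsigned int positions = ding::rtc::RtcEngineAudioPositionCaptured
                           | ding::rtc::RtcEngineAudioPositionPlayback
                           | ding::rtc::RtcEngineAudioPositionRemoteUser;
    engine->RegisterAudioFrameObserver(&s_audioSink);
    engine->EnableAudioFrameObserver(true, positions, config);
}

The saved files are headerless 16-bit PCM; a 48 kHz stereo file can be played with, for example: ffplay -f s16le -ar 48000 -ac 2 <file>.pcm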