This topic describes the main workflow for integrating audio and video calls on the Linux client.
Initialize the engine
Specify the audio and video input and output formats.
//Audio: input and output are currently 16 kHz mono PCM; 48 kHz stereo input and output are also supported
//Video: I420P YUV and H264 data are currently supported for both input and output
//The following example specifies YUV for both video input and output at a resolution of 640x360, encoded as H264 before sending
//It also sets the encoding frame rate to 24 fps and the encoding bitrate to 600 kbps
RtcVideoFormat video;
video.fps = 24;
video.width = 640;
video.height = 360;
video.kbps = 600;
video.videoCodec = VIDEO_CODEC_YUV420P;
video.yuvCodec = VIDEO_CODEC_H264;
//The audio format defaults to mono at a 16 kHz sample rate
RtcAudioFormat audio;
audio.bytesPerSample = 2;
audio.channels = 1;
audio.sampleRate = 16000;
audio.audioCodec = AUDIO_CODEC_PCM;
//Connect to the room server in the development environment
//For production, switch to the online environment: wss://artvcroom.alipay.com/ws
MRtcEngineInitParam param;
param.roomUrl = "wss://artvcroomdev.dl.alipaydev.com/ws";
param.audioFormat = audio;
param.videoFormat = video;
param.logLevel = "info";
//Initialize the engine; an engine event listener must be specified at the same time
MRtcEngine* engine = MRtcEngine::Create(this);
engine->Init(param);
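The this pointer passed to Create must implement the engine event callback shown in the next step. A minimal sketch of such a class follows; the listener base-class name MRtcEngineListener is an assumption, so inherit from the listener type declared in the SDK headers.
#include <atomic>

//Sketch only: the base-class name MRtcEngineListener is an assumption;
//use the listener type declared in the SDK headers
class Demo : public MRtcEngineListener {
public:
    //Receives INIT, WSS_LINK and other engine events (see the handler below)
    void OnEngineEvent(RtcEvent event);

    std::atomic<bool> initOK{false};
};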
Handle engine events
Wait for the engine to finish initializing.
void Demo::OnEngineEvent(RtcEvent event) {
    if (event.type == INIT) {
        if (event.code == 0) {
            initOK = true;
        } else {
            std::cout << "Engine Init Fail,code:" << event.code << std::endl;
        }
    } else if (event.type == WSS_LINK) { //Notifies that the signaling channel to the room server was disconnected or successfully reconnected during the call
        if (event.code == 0) {
            std::cout << "WSS_LINK OK!" << std::endl;
        } else {
            std::cout << "WSS_LINK Fail,code:" << event.code << std::endl;
        }
    }
}
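Init completes asynchronously, so wait for the INIT event before creating a session. A minimal sketch using the initOK flag set in the handler above (polling is used here for brevity; a condition variable works just as well):
#include <chrono>
#include <thread>

//Block until OnEngineEvent has reported INIT with code 0
while (!initOK) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
//The engine is ready; a session can be created now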
Create a session
Each session represents one audio/video call.
//Create the session; a session event listener must be specified at the same time
MRtcSession* session = engine->CreateSession(this);
Create a regular room or join an existing room
//When creating a room, business-related information must be specified; apply for it in the mPaaS console
//When creating a room, you can also choose to enable or disable recording
CreatRoomParam param;
param.autoSubscribe = true;
param.bizName = "demo";
param.subBiz = "default";
param.sign = "signature";
param.uid = "uidxxx";
param.workspaceId = "default";
param.ext = R"({"defaultRecord":false,"recordStrongDepend":false})";
session->CreateRoom(param);
or
JoinRoomParam param;
param.autoSubscribe = true;
param.bizName = "demo";
param.subBiz = "default";
param.sign = "signature";
param.token = "token";
param.roomId = "roomxxx";
param.uid = "uidxxx";
param.workspaceId = "default";
session->JoinRoom(param);
Listen for session events
Wait for the underlying data link to be established (ICE_LINK).
void LinuxSession::OnSessionEvent(RtcEvent event) {
    if (event.type == CREATE_ROOM) {
        if (event.code == 0) {
            std::cout << "CREATE_ROOM OK,roomId and token:" << event.ext << std::endl;
            if (doPub && !isP2P) {
                PublishParam param;
                param.enableAudio = true;
                param.enableVideo = true;
                std::cout << "Do publish" << std::endl;
                session->Publish(param);
            }
            StartRecord();
        } else {
            std::cout << "CREATE_ROOM FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == JOIN_ROOM) {
        if (event.code == 0) {
            std::cout << "JOIN_ROOM OK,Begin Do publish" << std::endl;
            if (doPub) {
                PublishParam param;
                param.enableAudio = enableAudio;
                param.enableVideo = enableVideo;
                session->Publish(param);
            }
            StartRecord();
        } else {
            std::cout << "JOIN_ROOM FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == PUBLISH) {
        if (event.code == 0) {
            localPubStreamId = event.ext;
            if (sourceConfig.videoType != AV_FILE_H264) {
                std::string pcmFileName = sourceConfig.audioFile;
                pcmFileReader.reset(new PCMFile());
                pcmFileReader->Open(pcmFileName, sourceConfig.audioSampleRate, sourceConfig.audioChannels, "rb");
                std::string yuvFileName = sourceConfig.videoFile;
                yuvFileReader.reset(new YuvFile());
                yuvFileReader->Open(yuvFileName, sourceConfig.yuvWidth, sourceConfig.yuvHeight, "rb");
            }
            std::cout << "PUBLISH OK" << std::endl;
        } else {
            std::cout << "PUBLISH FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == SUBSCRIBE) {
        if (event.code == 0) {
            std::string pcmFileName = event.ext + ".pcm";
            pcmFileWriter.reset(new PCMFile());
            pcmFileWriter->Open(pcmFileName, 16000, 1, "wb+");
            if (sourceConfig.videoType == AV_FILE_YUV) {
                std::string yuvFileName = event.ext + ".yuv";
                yuvFileWriter.reset(new YuvFile());
                yuvFileWriter->Open(yuvFileName, "wb+");
            } else if (sourceConfig.videoType == AV_FILE_H264) {
                std::string h264FileName = event.ext + ".264";
                h264File = fopen(h264FileName.c_str(), "wb+");
            }
            std::cout << "SUBSCRIBE OK,Begin to recv data" << std::endl;
        } else {
            std::cout << "SUBSCRIBE FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == ICE_LINK) {
        if (event.code == 0) {
            if (localPubStreamId == event.ext) {
                std::cout << "ICE OK,Begin to send data" << std::endl;
                if (sourceConfig.videoType != AV_FILE_H264) {
                    if (audioThread == 0 && enableAudio) {
                        pthread_create((pthread_t*)&audioThread, (const pthread_attr_t*)NULL, PcmThread, (void*)this);
                    }
                }
                if (videoThread == 0 && enableVideo) {
                    if (sourceConfig.videoType == AV_FILE_YUV) {
                        pthread_create((pthread_t*)&videoThread, (const pthread_attr_t*)NULL, YuvThread, (void*)this);
                    } else if (sourceConfig.videoType == AV_FILE_H264) {
                        pthread_create((pthread_t*)&videoThread, (const pthread_attr_t*)NULL, H264Thread, (void*)this);
                    }
                }
            }
        } else {
            std::cout << "ICE_LINK FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == EXIT_ROOM) {
        if (session) {
            engine->DestroySession(session);
            session = nullptr;
        }
        if (event.code == 0) {
            std::cout << "EXIT_ROOM OK" << std::endl;
        } else {
            std::cout << "EXIT_ROOM FAIL,code:" << event.code << std::endl;
        }
    } else if (event.type == START_RECORD) {
        if (event.code == 0) {
            std::cout << "Start record OK" << std::endl;
        } else {
            std::cout << "Start record FAIL" << std::endl;
        }
    }
}
Send audio and video data after ICE_LINK completes
//Integrators are advised to send video data at the video frame rate and audio data at the audio sample period
//If the video frame rate is 25 fps, send one video frame every 1/25 second
//If the audio sample period is 10 ms, send audio data every 10 ms
- In a TTS scenario, you can also send the audio data for a short piece of text all at once. The SDK has an internal audio buffer, so the data is cached first and then sent to the peer gradually at 10 ms intervals
- If the audio is captured by a phone microphone (no TTS involved), feed the captured PCM data at 10 ms intervals
session->FeedAudio(audio);
session->FeedVideo(video);
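A minimal sketch of the pacing described above. ReadYuvFrame and ReadPcmChunk are hypothetical helpers that produce whatever frame objects FeedVideo and FeedAudio expect; take the actual data types from the SDK headers.
#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> sending{true};

//Video paced at the configured frame rate (24 fps in this example)
void VideoSendLoop(MRtcSession* session) {
    const auto interval = std::chrono::microseconds(1000000 / 24);
    auto next = std::chrono::steady_clock::now();
    while (sending) {
        auto video = ReadYuvFrame();   //hypothetical helper: one 640x360 I420 frame
        session->FeedVideo(video);
        next += interval;
        std::this_thread::sleep_until(next);  //keep a steady 1/24 s cadence
    }
}

//Audio paced at the 10 ms sample period (160 samples at 16 kHz mono)
void AudioSendLoop(MRtcSession* session) {
    const auto interval = std::chrono::milliseconds(10);
    auto next = std::chrono::steady_clock::now();
    while (sending) {
        auto audio = ReadPcmChunk();   //hypothetical helper: 10 ms of 16 kHz mono PCM
        session->FeedAudio(audio);
        next += interval;
        std::this_thread::sleep_until(next);
    }
}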
Receive audio and video data after ICE_LINK completes
//Received audio and video data are delivered through the session listener callbacks
listener->OnAudio(audio, streamId);
listener->OnVideo(video, streamId);
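A minimal sketch of handling these callbacks by writing the received media to the files opened in the SUBSCRIBE handler above. The parameter types RtcAudioData/RtcVideoData, their data/size members, and the Write methods of the demo's file helpers are assumptions; use the signatures from the SDK headers and the demo sources.
//Sketch only: the callback signatures and payload accessors below are assumptions
void LinuxSession::OnAudio(RtcAudioData audio, std::string streamId) {
    if (pcmFileWriter) {
        pcmFileWriter->Write(audio.data, audio.size);   //16 kHz mono PCM from the remote stream
    }
}

void LinuxSession::OnVideo(RtcVideoData video, std::string streamId) {
    if (yuvFileWriter) {
        yuvFileWriter->Write(video.data, video.size);   //I420 frames from the remote stream
    }
}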
Destroy the session after the call ends
//The engine only needs to be created once; as long as the application keeps running, there is no need to destroy it
engine->DestroySession(session);
//When the whole application exits, destroy the engine
engine->Destory();