点击蓝字 ╳ 关注我们
巴延兴
深圳开鸿数字产业发展有限公司
资深OS框架开发工程师
一、简介
二、目录
audio_framework
├── frameworks
│ ├── js #js 接口
│ │ └── napi
│ │ └── audio_renderer #audio_renderer NAPI接口
│ │ ├── include
│ │ │ ├── audio_renderer_callback_napi.h
│ │ │ ├── renderer_data_request_callback_napi.h
│ │ │ ├── renderer_period_position_callback_napi.h
│ │ │ └── renderer_position_callback_napi.h
│ │ └── src
│ │ ├── audio_renderer_callback_napi.cpp
│ │ ├── audio_renderer_napi.cpp
│ │ ├── renderer_data_request_callback_napi.cpp
│ │ ├── renderer_period_position_callback_napi.cpp
│ │ └── renderer_position_callback_napi.cpp
│ └── native #native 接口
│ └── audiorenderer
│ ├── BUILD.gn
│ ├── include
│ │ ├── audio_renderer_private.h
│ │ └── audio_renderer_proxy_obj.h
│ ├── src
│ │ ├── audio_renderer.cpp
│ │ └── audio_renderer_proxy_obj.cpp
│ └── test
│ └── example
│ └── audio_renderer_test.cpp
├── interfaces
│ ├── inner_api #native实现的接口
│ │ └── native
│ │ └── audiorenderer #audio渲染本地实现的接口定义
│ │ └── include
│ │ └── audio_renderer.h
│ └── kits #js调用的接口
│ └── js
│ └── audio_renderer #audio渲染NAPI接口的定义
│ └── include
│ └── audio_renderer_napi.h
└── services #服务端
└── audio_service
├── BUILD.gn
├── client #IPC调用中的proxy端
│ ├── include
│ │ ├── audio_manager_proxy.h
│ │ ├── audio_service_client.h
│ └── src
│ ├── audio_manager_proxy.cpp
│ ├── audio_service_client.cpp
└── server #IPC调用中的server端
├── include
│ └── audio_server.h
└── src
├── audio_manager_stub.cpp
└── audio_server.cpp
三、音频渲染总体流程
四、Native接口使用
bool TestPlayback(int argc, char *argv[]) const
{
FILE* wavFile = fopen(path, "rb");
//读取wav文件头信息
size_t bytesRead = fread(&wavHeader, 1, headerSize, wavFile);
//设置AudioRenderer参数
AudioRendererOptions rendererOptions = {};
rendererOptions.streamInfo.encoding = AudioEncodingType::ENCODING_PCM;
rendererOptions.streamInfo.samplingRate = static_cast(wavHeader.SamplesPerSec);
rendererOptions.streamInfo.format = GetSampleFormat(wavHeader.bitsPerSample);
rendererOptions.streamInfo.channels = static_cast(wavHeader.NumOfChan);
rendererOptions.rendererInfo.contentType = contentType;
rendererOptions.rendererInfo.streamUsage = streamUsage;
rendererOptions.rendererInfo.rendererFlags = 0;
//创建AudioRender实例
unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions);
shared_ptr cb1 = make_shared();
//设置音频渲染回调
ret = audioRenderer->SetRendererCallback(cb1);
//InitRender方法主要调用了audioRenderer实例的Start方法,启动音频渲染
if (!InitRender(audioRenderer)) {
AUDIO_ERR_LOG("AudioRendererTest: Init render failed");
fclose(wavFile);
return false;
}
//StartRender方法主要是读取wavFile文件的数据,然后通过调用audioRenderer实例的Write方法进行播放
if (!StartRender(audioRenderer, wavFile)) {
AUDIO_ERR_LOG("AudioRendererTest: Start render failed");
fclose(wavFile);
return false;
}
//停止渲染
if (!audioRenderer->Stop()) {
AUDIO_ERR_LOG("AudioRendererTest: Stop failed");
}
//释放渲染
if (!audioRenderer->Release()) {
AUDIO_ERR_LOG("AudioRendererTest: Release failed");
}
//关闭wavFile
fclose(wavFile);
return true;
}
五、调用流程
std::unique_ptr AudioRenderer::Create(const std::string cachePath,
const AudioRendererOptions &rendererOptions, const AppInfo &appInfo)
{
ContentType contentType = rendererOptions.rendererInfo.contentType;
StreamUsage streamUsage = rendererOptions.rendererInfo.streamUsage;
AudioStreamType audioStreamType = AudioStream::GetStreamType(contentType, streamUsage);
auto audioRenderer = std::make_unique(audioStreamType, appInfo);
if (!cachePath.empty()) {
AUDIO_DEBUG_LOG("Set application cache path");
audioRenderer->SetApplicationCachePath(cachePath);
}
audioRenderer->rendererInfo_.contentType = contentType;
audioRenderer->rendererInfo_.streamUsage = streamUsage;
audioRenderer->rendererInfo_.rendererFlags = rendererOptions.rendererInfo.rendererFlags;
AudioRendererParams params;
params.sampleFormat = rendererOptions.streamInfo.format;
params.sampleRate = rendererOptions.streamInfo.samplingRate;
params.channelCount = rendererOptions.streamInfo.channels;
params.encodingType = rendererOptions.streamInfo.encoding;
if (audioRenderer->SetParams(params) != SUCCESS) {
AUDIO_ERR_LOG("SetParams failed in renderer");
audioRenderer = nullptr;
return nullptr;
}
return audioRenderer;
}
int32_t AudioRendererPrivate::SetRendererCallback(const std::shared_ptr &callback)
{
RendererState state = GetStatus();
if (state == RENDERER_NEW || state == RENDERER_RELEASED) {
return ERR_ILLEGAL_STATE;
}
if (callback == nullptr) {
return ERR_INVALID_PARAM;
}
// Save reference for interrupt callback
if (audioInterruptCallback_ == nullptr) {
return ERROR;
}
std::shared_ptr cbInterrupt =
std::static_pointer_cast(audioInterruptCallback_);
cbInterrupt->SaveCallback(callback);
// Save and Set reference for stream callback. Order is important here.
if (audioStreamCallback_ == nullptr) {
audioStreamCallback_ = std::make_shared();
if (audioStreamCallback_ == nullptr) {
return ERROR;
}
}
std::shared_ptr cbStream =
std::static_pointer_cast(audioStreamCallback_);
cbStream->SaveCallback(callback);
(void)audioStream_->SetStreamCallback(audioStreamCallback_);
return SUCCESS;
}
// Starts rendering: activates the audio interrupt for this stream with the
// policy service, then starts the underlying audio stream.
// NOTE(review): both switch cases were flattened to "case InterruptMode:" by
// the article's HTML extraction; the SHARE_MODE / INDEPENDENT_MODE enum
// values are restored from the OpenHarmony audio framework sources.
bool AudioRendererPrivate::Start(StateChangeCmdType cmdType) const
{
    AUDIO_INFO_LOG("AudioRenderer::Start");
    RendererState state = GetStatus();

    // Pick the interrupt descriptor that matches the configured interrupt mode.
    AudioInterrupt audioInterrupt;
    switch (mode_) {
        case InterruptMode::SHARE_MODE:
            audioInterrupt = sharedInterrupt_;
            break;
        case InterruptMode::INDEPENDENT_MODE:
            audioInterrupt = audioInterrupt_;
            break;
        default:
            break;
    }
    AUDIO_INFO_LOG("AudioRenderer: %{public}d, streamType: %{public}d, sessionID: %{public}d",
        mode_, audioInterrupt.streamType, audioInterrupt.sessionID);

    // An unset stream type or invalid session means the renderer was never
    // properly prepared; refuse to start.
    if (audioInterrupt.streamType == STREAM_DEFAULT || audioInterrupt.sessionID == INVALID_SESSION_ID) {
        return false;
    }

    // Tell the audio policy service this stream is taking (audio) focus.
    int32_t ret = AudioPolicyManager::GetInstance().ActivateAudioInterrupt(audioInterrupt);
    if (ret != 0) {
        AUDIO_ERR_LOG("AudioRendererPrivate::ActivateAudioInterrupt Failed");
        return false;
    }

    return audioStream_->StartAudioStream(cmdType);
}
// Starts the underlying service-client stream, resets timing state, and (in
// callback mode) spawns the write/read worker thread.
// NOTE(review): this article excerpt never checks `ret`; the full source
// presumably bails out on StartStream failure — confirm against the repo.
bool AudioStream::StartAudioStream(StateChangeCmdType cmdType)
{
    int32_t ret = StartStream(cmdType);
    // Re-base the latency/position clock from now.
    resetTime_ = true;
    int32_t retCode = clock_gettime(CLOCK_MONOTONIC, &baseTimestamp_);
    // In callback mode the client does not call Write()/Read() directly; a
    // dedicated thread pumps data through the registered callback instead.
    if (renderMode_ == RENDER_MODE_CALLBACK) {
        isReadyToWrite_ = true;
        writeThread_ = std::make_unique<std::thread>(&AudioStream::WriteCbTheadLoop, this);
    } else if (captureMode_ == CAPTURE_MODE_CALLBACK) {
        isReadyToRead_ = true;
        readThread_ = std::make_unique<std::thread>(&AudioStream::ReadCbThreadLoop, this);
    }
    // First write/read after (re)start gets special handling (e.g. prebuffer).
    isFirstRead_ = true;
    isFirstWrite_ = true;
    state_ = RUNNING;
    AUDIO_INFO_LOG("StartAudioStream SUCCESS");
    // Report the RUNNING transition to the stream tracker, if one is attached.
    if (audioStreamTracker_) {
        AUDIO_DEBUG_LOG("AudioStream:Calling Update tracker for Running");
        audioStreamTracker_->UpdateTracker(sessionId_, state_, rendererInfo_, capturerInfo_);
    }
    return true;
}
// Uncorks (resumes) the PulseAudio stream and blocks until the server
// acknowledges the state change via PAStreamStartSuccessCb.
// NOTE(review): `error` and `state` are unused in this excerpt — presumably
// checked in the full source; confirm against the repo.
int32_t AudioServiceClient::StartStream(StateChangeCmdType cmdType)
{
    int error;
    // Serialize against concurrent data-path operations.
    lock_guard lockdata(dataMutex);
    pa_operation *operation = nullptr;
    // The PA threaded mainloop must be locked around any pa_stream_* call.
    pa_threaded_mainloop_lock(mainLoop);
    pa_stream_state_t state = pa_stream_get_state(paStream);
    // streamCmdStatus is set by PAStreamStartSuccessCb when the server replies.
    streamCmdStatus = 0;
    stateChangeCmdType_ = cmdType;
    // cork=0 resumes playback; the callback signals the mainloop when done.
    operation = pa_stream_cork(paStream, 0, PAStreamStartSuccessCb, (void *)this);
    // Wait (releasing the mainloop lock) until the operation completes.
    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING) {
        pa_threaded_mainloop_wait(mainLoop);
    }
    pa_operation_unref(operation);
    pa_threaded_mainloop_unlock(mainLoop);
    if (!streamCmdStatus) {
        AUDIO_ERR_LOG("Stream Start Failed");
        // Tear down the PA client state so a later retry starts clean.
        ResetPAAudioClient();
        return AUDIO_CLIENT_START_STREAM_ERR;
    } else {
        AUDIO_INFO_LOG("Stream Started Successfully");
        return AUDIO_CLIENT_SUCCESS;
    }
}
// Thin pass-through: hand the caller's PCM buffer to the owned audio stream
// and surface its result (bytes written, or an error code) unchanged.
int32_t AudioRendererPrivate::Write(uint8_t *buffer, size_t bufferSize)
{
    const int32_t result = audioStream_->Write(buffer, bufferSize);
    return result;
}
// Writes one buffer of PCM data to the stream. On the very first write after
// start, a silence prebuffer is rendered first (RenderPrebuf).
// Returns the number of bytes written, or ERR_WRITE_FAILED on error.
size_t AudioStream::Write(uint8_t *buffer, size_t buffer_size)
{
    int32_t writeError;
    StreamBuffer stream;
    stream.buffer = buffer;
    stream.bufferLen = buffer_size;
    isWriteInProgress_ = true;
    if (isFirstWrite_) {
        if (RenderPrebuf(stream.bufferLen)) {
            // Fix: clear the in-progress flag before bailing out; the original
            // early return left isWriteInProgress_ stuck at true on failure.
            isWriteInProgress_ = false;
            return ERR_WRITE_FAILED;
        }
        isFirstWrite_ = false;
    }
    // WriteStream always assigns writeError before returning.
    size_t bytesWritten = WriteStream(stream, writeError);
    isWriteInProgress_ = false;
    if (writeError != 0) {
        AUDIO_ERR_LOG("WriteStream fail,writeError:%{public}d", writeError);
        return ERR_WRITE_FAILED;
    }
    return bytesWritten;
}
// Writes caller data through the client-side cache: data is staged into
// acache and only flushed to PulseAudio (PaWriteStream) once the cache fills.
// Returns the number of caller bytes consumed; pError carries the PA error.
size_t AudioServiceClient::WriteStream(const StreamBuffer &stream, int32_t &pError)
{
    // Stage as much of the caller's buffer as fits into the cache.
    size_t cachedLen = WriteToAudioCache(stream);
    // Cache not yet full: nothing to flush, report bytes consumed so far.
    if (!acache.isFull) {
        pError = error;
        return cachedLen;
    }

    // Cache is full — flush it to PA under the mainloop lock.
    pa_threaded_mainloop_lock(mainLoop);

    const uint8_t *buffer = acache.buffer.get();
    size_t length = acache.totalCacheSize;

    error = PaWriteStream(buffer, length);
    acache.readIndex += acache.totalCacheSize;
    acache.isFull = false;

    // NOTE(review): `length >= 0` is always true for a size_t, so this
    // condition reduces to `!error` — likely a leftover from a signed type.
    if (!error && (length >= 0) && !acache.isFull) {
        uint8_t *cacheBuffer = acache.buffer.get();
        uint32_t offset = acache.readIndex;
        uint32_t size = (acache.writeIndex - acache.readIndex);
        // Compact any unflushed tail to the front of the cache buffer.
        if (size > 0) {
            if (memcpy_s(cacheBuffer, acache.totalCacheSize, cacheBuffer + offset, size)) {
                AUDIO_ERR_LOG("Update cache failed");
                pa_threaded_mainloop_unlock(mainLoop);
                pError = AUDIO_CLIENT_WRITE_STREAM_ERR;
                return cachedLen;
            }
            AUDIO_INFO_LOG("rearranging the audio cache");
        }
        acache.readIndex = 0;
        acache.writeIndex = 0;

        // Stage whatever portion of the caller's buffer did not fit earlier.
        if (cachedLen < stream.bufferLen) {
            StreamBuffer str;
            str.buffer = stream.buffer + cachedLen;
            str.bufferLen = stream.bufferLen - cachedLen;
            AUDIO_DEBUG_LOG("writing pending data to audio cache: %{public}d", str.bufferLen);
            cachedLen += WriteToAudioCache(str);
        }
    }

    pa_threaded_mainloop_unlock(mainLoop);
    pError = error;
    return cachedLen;
}
六、总结
原文标题:OpenHarmony 3.2 Beta Audio——音频渲染
文章出处:【微信公众号:OpenAtom OpenHarmony】欢迎添加关注!文章转载请注明出处。
声明:本文内容及配图由入驻作者撰写或者入驻合作网站授权转载。文章观点仅代表作者本人,不代表电子发烧友网立场。文章及其配图仅供工程师学习之用,如有内容侵权或者其他违规问题,请联系本站处理。
举报投诉
-
鸿蒙
+关注
关注
55文章
1637浏览量
42120 -
OpenHarmony
+关注
关注
23文章
3311浏览量
15159
原文标题:OpenHarmony 3.2 Beta Audio——音频渲染
文章出处:【微信号:gh_e4f28cfa3159,微信公众号:OpenAtom OpenHarmony】欢迎添加关注!文章转载请注明出处。
发布评论请先 登录
相关推荐
OpenHarmony Sheet 表格渲染引擎
基于 Canvas 实现的高性能 Excel 表格引擎组件 [OpenHarmonySheet]。
由于大部分前端项目渲染层是使用框架根据排版模型树结构逐层渲染的,整棵渲染树也是与排版
发表于 01-05 16:32
OpenHarmony开源GPU库Mesa3D适配说明
本文档主要讲解在OpenHarmony中,Mesa3D的适配方法及原理说明。
环境说明:
OHOS版本: 适用3.2-Beta3及以上
内核版本: linux-5.10
硬件环境
发表于 12-25 11:38
Unity中国、Cocos为OpenHarmony游戏生态插上腾飞的翅膀
需求,结合本土文化元素和市场特色,为创作提供全面支持。团结引擎已兼容适配了中国科技生态内的众多软硬件平台,并联合基金会共同研发适配,已经完成了OpenHarmony3.2Release和 4.0 beta
发表于 10-23 16:15
HarmonyOS音频开发指导:使用AudioRenderer开发音频播放功能
: audio.StreamUsage.STREAM_USAGE_MEDIA, // 音频流使用类型
rendererFlags: 0 // 音频渲染器标志
}
private au
发表于 10-23 14:21
润开鸿DAYU200及DAYU210联袂通过OpenHarmony 3.2 Release版本兼容性测评
近日,润开鸿HH-SCDAYU200及HH-SCDAYU210两款开发平台同时通过OpenHarmony 3.2 Release版本兼容性测评,在引领技术共建、有力推动OpenHarmony新版本
发表于 09-22 11:11
润开鸿DAYU200及DAYU210联袂通过OpenHarmony 3.2 Release版本兼容性测评
3.2 Release版本兼容性测评,在引领技术共建、有力推动OpenHarmony新版本先行示范的同时,为基于3.2 Release版本开发商用富设备及相关行业应用提供了成熟、便捷的硬件平台
【触觉智能 Purple Pi OH 开发板体验】OpenHarmony音频播放应用
1 注意事项
DevEco Studio 4.0 Beta2(Build Version: 4.0.0.400)
OpenHarmony SDK API 9
创建工程类型选择Application
发表于 09-11 10:46
OpenHarmony 4.0 Beta2新版本发布,邀您体验
2023年8月3日,OpenAtom OpenHarmony(简称“OpenHarmony”)发布了Beta2版本,相较于历史版本我们持续完善ArkUI、文件管理、媒体、窗口、安全等系统能力、提升
发表于 08-25 09:49
HarmonyOS/OpenHarmony应用开发-ArkTS语言渲染控制if/else条件渲染
ArkTS提供了渲染控制的能力。条件渲染可根据应用的不同状态,使用if、else和else if渲染对应状态下的UI内容。说明:从API version 9开始,该接口支持在ArkTS卡片中使用。一
发表于 08-21 14:29
OpenHarmony 4.0 Beta1发布,邀您体验
点击蓝字 ╳ 关注我们 开源项目 OpenHarmony 是每个人的 OpenHarmony 初夏之际,OpenAtom OpenHarmony(简称“OpenHarmony”) 4.
OpenHarmony 4.0 Beta1发布,邀您体验
初夏之际,OpenAtom OpenHarmony(简称“OpenHarmony”) 4.0 Beta1版本如期而至。4.0 Beta1版本在3.2
发表于 06-08 14:14
OpenHarmony 3.2 Release新特性解读之驱动HCS
“OpenHarmony”)开源社区,在今年4月正式发布了OpenHarmony 3.2 Release版本,标准系统能力进一步完善,提升了系统的整体性能、稳定性和安全性。此次版本对于驱动也提供了一些
一加6T适配OpenHarmony 3.2
OpenHarmony-3.2-Release 已发布,并且已经有好一段时间,本次释放一加 6/6T 相关刷机包,主要升级为 OpenHarmony-3.2-Release 正式版并初步支持部分外设功能(如音频播放和蓝牙),及更
突破性进展|软通动力子公司率先完成OpenHarmony操作系统适配PC端
鸿湖万联应邀出席此次大会,并在本次大会上重磅发布了基于OpenHarmony 3.2 Release版本,面向PC端的SwanLinkOS商业PC发行版(Beta版)。
OpenHarmony开发者大会 | 技术分论坛:OpenHarmony 3.2 Release技术底座深度解析
2023(以下简称“大会”)在北京圆满落幕。大会正式对外发布了OpenAtom OpenHarmony(以下简称“OpenHarmony”)3.2 Release版本,该版本全面提升了复杂带屏设备体验,可以
评论