OpenSL ES錄製PCM音訊資料
前面已經介紹過了OpenSL ES播放音訊資料(Android通過OpenSL ES播放音訊套路詳解)和結合SoundTouch來實現變速變調播放(OpenSL ES利用SoundTouch實現PCM音訊的變速和變調),今天我們繼續用OpenSL ES來實現PCM音訊資料的錄製。開始之前我們先來看一下最終效果,文末有例項下載:
主體思路:
總的來說OpenSL ES的錄音要比播放簡單一些,在建立好引擎後,再建立好錄音介面基本就可以錄音了,只是我們現在是流式錄音,所以需要用至少2個buffer來快取錄製好的PCM資料,這裡我們可以動態建立一個二維陣列,裡面有2個buffer,然後每次錄音取出一個,錄製好後再寫入檔案就可以了,2個buffer依次來儲存PCM資料,這樣就可以連續錄製流式音訊資料了,二維陣列裡面自己維護了一個索引,來標識當前處於哪個buffer錄製狀態,暴露給外部的只是呼叫方法而已,細節對外也是隱藏的。
開始編碼:
1、編寫快取buffer佇列:RecordBuffer.h、RecordBuffer.cpp
// // Created by ywl on 2018/3/27. // #ifndef OPENSLRECORD_RECORDBUFFER_H #define OPENSLRECORD_RECORDBUFFER_H class RecordBuffer { public: short **buffer; int index = -1; public: RecordBuffer(int buffersize); ~RecordBuffer(); /** * 得到一個新的錄製buffer * @return */ short* getRecordBuffer(); /** * 得到當前錄製buffer * @return */ short* getNowBuffer(); }; #endif //OPENSLRECORD_RECORDBUFFER_H
// // Created by ywl on 2018/3/27. // #include "RecordBuffer.h" RecordBuffer::RecordBuffer(int buffersize) { buffer = new short *[2]; for(int i = 0; i < 2; i++) { buffer[i] = new short[buffersize]; } } RecordBuffer::~RecordBuffer() { } short *RecordBuffer::getRecordBuffer() { index++; if(index > 1) { index = 0; } return buffer[index]; } short *RecordBuffer::getNowBuffer() { return buffer[index]; }
這個佇列其實就是PCM儲存的buffer,getRecordBuffer()為即將要錄入PCM資料的buffer,getNowBuffer()是當前錄製好的PCM資料的buffer,可以寫入檔案,即我們得到的PCM資料。
2、使用OpenSL ES錄製PCM資料,過程分為:建立引擎->初始化IO裝置(自動檢測麥克風等音訊輸入裝置)->設定快取佇列->設定錄製PCM資料規格->設定錄音器介面->設定佇列介面並設定錄音狀態為錄製->開始錄音:
const char *path = env->GetStringUTFChars(path_, 0);
/**
 * Output PCM file
 */
pcmFile = fopen(path, "w");
/**
 * Ping-pong PCM buffer pair
 */
recordBuffer = new RecordBuffer(RECORDER_FRAMES * 2);
SLresult result;
/**
 * Create and realize the engine object, then fetch its interface
 */
result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
/**
 * Audio input device (microphone) as the data source
 */
SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
SLDataSource audioSrc = {&loc_dev, NULL};
/**
 * Android simple buffer queue with two buffers (ping-pong)
 */
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
/**
 * Recording format: PCM, 2 channels, 44100 Hz, 16-bit, little endian
 */
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_44_1,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
SLDataSink audioSnk = {&loc_bq, &format_pcm};
const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
const SLboolean req[1] = {SL_BOOLEAN_TRUE};
/**
 * Create and realize the audio recorder
 */
result = (*engineEngine)->CreateAudioRecorder(engineEngine, &recorderObject, &audioSrc,
&audioSnk, 1, id, req);
if (SL_RESULT_SUCCESS != result) {
return;
}
result = (*recorderObject)->Realize(recorderObject, SL_BOOLEAN_FALSE);
if (SL_RESULT_SUCCESS != result) {
return;
}
result = (*recorderObject)->GetInterface(recorderObject, SL_IID_RECORD, &recorderRecord);
result = (*recorderObject)->GetInterface(recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
&recorderBufferQueue);
finished = false;
// Prime the queue with the first empty buffer; the callback keeps it fed.
result = (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
recorderSize);
result = (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, bqRecorderCallback, NULL);
LOGD("開始錄音");
/**
 * Start recording
 */
(*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
env->ReleaseStringUTFChars(path_, path);
錄音回撥如下:
// Buffer-queue callback: fires each time OpenSL has filled one buffer.
// Streaming recording: write out the buffer that was just filled, then
// re-enqueue the other slot so capture continues until `finished` is set.
// (The original comment about a "one-time buffer" was copied from the Google
// sample and does not apply here — this version re-enqueues.)
void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
LOGD("record size is %d", recorderSize);
fwrite(recordBuffer->getNowBuffer(), 1, recorderSize, pcmFile);
if(finished)
{
(*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
fclose(pcmFile);
LOGD("停止錄音");
} else{
(*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
recorderSize);
}
}
這樣就完成了OpenSL ES的PCM音訊資料錄製,我們這裡拿到了錄製的PCM資料,可以用MediaCodec或FFmpeg來編碼成AAC格式的音訊,也可以直接推流到伺服器來實現音訊直播。
完整程式碼如下:
#include <jni.h>
#include <string>
#include "AndroidLog.h"
#include "RecordBuffer.h"
#include "unistd.h"
extern "C"
{
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
}
//Engine object (note: the original comments had object/interface labels swapped)
static SLObjectItf engineObject = NULL;
//Engine interface
static SLEngineItf engineEngine;
//Recorder object
static SLObjectItf recorderObject = NULL;
//Recorder interface
static SLRecordItf recorderRecord;
//Recorder buffer-queue interface
static SLAndroidSimpleBufferQueueItf recorderBufferQueue;
//2048 frames of 16-bit samples => recorderSize = 4096 bytes per buffer
#define RECORDER_FRAMES (2048)
static unsigned recorderSize = RECORDER_FRAMES * 2;
//Output PCM file
FILE *pcmFile;
//Ping-pong recording buffers
RecordBuffer *recordBuffer;
//Set to true (from rdStop) to make the callback stop recording and close the file
bool finished = false;
/**
 * Buffer-queue callback: fires each time OpenSL has filled one capture buffer.
 * Writes the just-filled buffer to the PCM file and, unless `finished` was
 * requested, re-enqueues the other ping-pong slot so capture continues.
 *
 * Fix: after stopping, `pcmFile` is set to NULL and checked on entry — the
 * original left the closed FILE* dangling, so any late callback would
 * fwrite() into freed stdio state.
 */
void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    LOGD("record size is %d", recorderSize);
    // Guard against a callback arriving after the file was already closed.
    if (pcmFile == NULL) {
        return;
    }
    fwrite(recordBuffer->getNowBuffer(), 1, recorderSize, pcmFile);
    if (finished)
    {
        (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
        fclose(pcmFile);
        pcmFile = NULL;   // prevent use-after-close on any further callback
        LOGD("停止錄音");
    } else {
        (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
                                        recorderSize);
    }
}
extern "C"
JNIEXPORT void JNICALL
Java_ywl5320_com_openslrecord_MainActivity_rdSound(JNIEnv *env, jobject instance, jstring path_) {
    const char *path = env->GetStringUTFChars(path_, 0);
    /**
     * Open the output PCM file in binary mode — "wb" instead of "w", since raw
     * PCM is binary data. Fix: the original never checked the result, so a
     * failed open would crash later when the callback fwrite()s to NULL.
     */
    pcmFile = fopen(path, "wb");
    if (pcmFile == NULL) {
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    /**
     * Ping-pong buffer pair for streaming capture.
     * NOTE(review): a previously allocated RecordBuffer is leaked if recording
     * is started twice — consider freeing it once the recorder is fully stopped.
     */
    recordBuffer = new RecordBuffer(RECORDER_FRAMES * 2);
    SLresult result;
    /**
     * Create and realize the engine object, then fetch its interface.
     */
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    /**
     * Audio input device (microphone) as the data source.
     */
    SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
                                      SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
    SLDataSource audioSrc = {&loc_dev, NULL};
    /**
     * Android simple buffer queue with two buffers (ping-pong).
     */
    SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    /**
     * Recording format: PCM, 2 channels, 44100 Hz, 16-bit, little endian.
     */
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_44_1,
                                   SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
                                   SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,
                                   SL_BYTEORDER_LITTLEENDIAN};
    SLDataSink audioSnk = {&loc_bq, &format_pcm};
    const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
    const SLboolean req[1] = {SL_BOOLEAN_TRUE};
    /**
     * Create and realize the audio recorder. Fix: the original early returns
     * leaked the open FILE*, the RecordBuffer, and the JNI string.
     */
    result = (*engineEngine)->CreateAudioRecorder(engineEngine, &recorderObject, &audioSrc,
                                                  &audioSnk, 1, id, req);
    if (SL_RESULT_SUCCESS != result) {
        fclose(pcmFile);
        pcmFile = NULL;
        delete recordBuffer;
        recordBuffer = NULL;
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    result = (*recorderObject)->Realize(recorderObject, SL_BOOLEAN_FALSE);
    if (SL_RESULT_SUCCESS != result) {
        fclose(pcmFile);
        pcmFile = NULL;
        delete recordBuffer;
        recordBuffer = NULL;
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_RECORD, &recorderRecord);
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                             &recorderBufferQueue);
    finished = false;
    // Prime the queue with the first empty buffer; the callback keeps it fed.
    result = (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recordBuffer->getRecordBuffer(),
                                             recorderSize);
    result = (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, bqRecorderCallback, NULL);
    LOGD("開始錄音");
    /**
     * Start recording.
     */
    (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
    env->ReleaseStringUTFChars(path_, path);
}
extern "C"
JNIEXPORT void JNICALL
Java_ywl5320_com_openslrecord_MainActivity_rdStop(JNIEnv *env, jobject instance) {
    // Nothing to stop if the recorder interface was never obtained.
    if (recorderRecord == NULL) {
        return;
    }
    // Signal the buffer-queue callback to stop recording and close the file
    // on its next invocation; the actual teardown happens in the callback.
    finished = true;
}