process__genericResampling
process__OneTrack16BitsStereoNoResampling process__TwoTracks16BitsStereoNoResampling
AudioMixer构造的时候,hook是process__nop,有几个地方会改变这个函数指针的指向。 这部分涉及到数字音频技术,我就无力讲解了。我们看看最接近的函数 process__OneTrack16BitsStereoNoResampling
void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state, void* output) {
    // Single track, 16-bit stereo, no resampling — by far the most common case.
    // Find the index of the (single) enabled track from the bitmask.
    const int i = 31 - __builtin_clz(state->enabledTracks);
    const track_t& t = state->tracks[i];

    AudioBufferProvider::Buffer& b(t.buffer);

    int32_t* out = static_cast<int32_t*>(output);
    size_t numFrames = state->frameCount;

    const int16_t vl = t.volume[0];
    const int16_t vr = t.volume[1];
    const uint32_t vrl = t.volumeRL;
    while (numFrames) {
        // Request up to numFrames frames from the provider (the Track).
        b.frameCount = numFrames;
        t.bufferProvider->getNextBuffer(&b);
        int16_t const *in = b.i16;
        size_t outFrames = b.frameCount;

        // NOTE(review): the original has an UNLIKELY() branch here for the
        // rare boosted-volume case; this excerpt only shows the common path.
        do {
            // Read one stereo frame (two packed 16-bit samples) at once.
            uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
            in += 2;
            // Apply per-channel volume in fixed point (vrl packs L and R
            // gains), then repack right<<16 | left into the output word.
            int32_t l = mulRL(1, rl, vrl) >> 12;
            int32_t r = mulRL(0, rl, vrl) >> 12;
            *out++ = (r << 16) | (l & 0xFFFF);
        } while (--outFrames);

        numFrames -= b.frameCount;
        // Hand the consumed buffer back to the provider.
        t.bufferProvider->releaseBuffer(&b);
    }
}
好像挺简单的啊,不就是把数据处理下嘛。这里注意下buffer。到现在,我们还没看到取共享内存里AT端write的数据呐。 那只能到bufferProvider去看了。
注意,这里用的是AudioBufferProvider基类,实际的对象是Track。它从AudioBufferProvider派生。
我们用的是PlaybackThread的这个Track,看它的getNextBuffer实现: status_t
AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer) {
    // The shared control block — the same cblk the client-side AudioTrack
    // writes into. This is where the cross-process data finally surfaces.
    audio_track_cblk_t* cblk = this->cblk();
    uint32_t framesReady;
    uint32_t framesReq = buffer->frameCount;

    // How many frames has the client made available so far?
    framesReady = cblk->framesReady();

    if (LIKELY(framesReady)) {
        uint32_t s = cblk->server;
        uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;

        // Honor a loop point if it falls before the physical buffer end.
        bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
        // Clamp the request to what is actually available...
        if (framesReq > framesReady) {
            framesReq = framesReady;
        }
        // ...and to the end of the contiguous region (no wrap within one call).
        if (s + framesReq > bufferEnd) {
            framesReq = bufferEnd - s;
        }
        // Translate the server index into a real address inside shared memory.
        buffer->raw = getBuffer(s, framesReq);
        if (buffer->raw == 0) goto getNextBuffer_exit;

        buffer->frameCount = framesReq;
        return NO_ERROR;
    }

getNextBuffer_exit:
    buffer->raw = 0;
    buffer->frameCount = 0;
    return NOT_ENOUGH_DATA;
}
再看看释放缓冲的地方:releaseBuffer,这个直接在ThreadBase中实现了 void
AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer) {
    // Record how many frames the mixer just consumed; step() uses this
    // count to advance the server-side read position in the control block.
    mFrameCount = buffer->frameCount;
    step();
    // Invalidate the buffer so a stale pointer cannot be reused.
    buffer->raw = 0;
    buffer->frameCount = 0;
}
看看step吧。mFrameCount表示我已经用完了这么多帧。 bool AudioFlinger::ThreadBase::TrackBase::step() { bool result;
audio_track_cblk_t* cblk = this->cblk();
result = cblk->stepServer(mFrameCount);//哼哼,调用cblk的stepServer,更新 服务端的使用位置 return result;
}
到这里,大伙应该都明白了吧。原来AudioTrack中write的数据,最终是这么被使用的呀! 嗯,只看一个process__OneTrack16BitsStereoNoResampling不过瘾,再看看 process__TwoTracks16BitsStereoNoResampling。
void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state, void* output)
int i;
uint32_t en = state->enabledTracks;
i = 31 - __builtin_clz(en);
const track_t& t0 = state->tracks[i]; AudioBufferProvider::Buffer& b0(t0.buffer);
en &= ~(1<
i = 31 - __builtin_clz(en);
const track_t& t1 = state->tracks[i]; AudioBufferProvider::Buffer& b1(t1.buffer);
int16_t const *in0;
const int16_t vl0 = t0.volume[0]; const int16_t vr0 = t0.volume[1]; size_t frameCount0 = 0;
int16_t const *in1;
const int16_t vl1 = t1.volume[0]; const int16_t vr1 = t1.volume[1]; size_t frameCount1 = 0;
int32_t* out = static_cast
while (numFrames) {
if (frameCount0 == 0) { b0.frameCount = numFrames;
t0.bufferProvider->getNextBuffer(&b0); if (b0.i16 == NULL) { if (buff == NULL) {
buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount]; }
in0 = buff;
b0.frameCount = numFrames; } else { in0 = b0.i16; }
frameCount0 = b0.frameCount; }
if (frameCount1 == 0) { b1.frameCount = numFrames;
t1.bufferProvider->getNextBuffer(&b1); if (b1.i16 == NULL) { if (buff == NULL) {
buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount]; }
in1 = buff;
b1.frameCount = numFrames; } else { in1 = b1.i16; }
frameCount1 = b1.frameCount; }