static void audioThreadFunc(void *arg) {
	Audio::MixerImpl *mixer = (Audio::MixerImpl *)arg;
	OSystem_3DS *osys = (OSystem_3DS *)g_system;

	int i;
	const int channel = 0;
	int bufferIndex = 0;
	const int bufferCount = 3;
	const int bufferSize = 80000; // Can't be too small, based on delayMillis duration
	const int sampleRate = mixer->getOutputRate();
	int sampleLen = 0;
	uint32 lastTime = osys->getMillis(true);
	uint32 time = lastTime;
	ndspWaveBuf buffers[bufferCount];

	for (i = 0; i < bufferCount; ++i) {
		memset(&buffers[i], 0, sizeof(ndspWaveBuf));
		buffers[i].data_vaddr = linearAlloc(bufferSize);
		buffers[i].looping = false;
		buffers[i].status = NDSP_WBUF_FREE;
	}

	ndspChnReset(channel);
	ndspChnSetInterp(channel, NDSP_INTERP_LINEAR);
	ndspChnSetRate(channel, sampleRate);
	ndspChnSetFormat(channel, NDSP_FORMAT_STEREO_PCM16);

	while (!osys->exiting) {
		osys->delayMillis(100); // Note: Increasing the delay requires a bigger buffer

		time = osys->getMillis(true);
		sampleLen = (time - lastTime) * 22 * 4; // sampleRate / 1000 * channelCount * sizeof(int16)
		lastTime = time;

		if (!osys->sleeping && sampleLen > 0) {
			bufferIndex++;
			bufferIndex %= bufferCount;
			ndspWaveBuf *buf = &buffers[bufferIndex];

			buf->nsamples = mixer->mixCallback(buf->data_adpcm, sampleLen);
			if (buf->nsamples > 0) {
				DSP_FlushDataCache(buf->data_vaddr, bufferSize);
				ndspChnWaveBufAdd(channel, buf);
			}
		}
	}

	for (i = 0; i < bufferCount; ++i)
		linearFree(buffers[i].data_pcm8);
}
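For context, a thread entry point like this is only useful once NDSP has been initialized, and it has to be joined before NDSP is shut down. The following is a minimal sketch of that lifecycle using libctru's threadCreate()/threadJoin(); the wrapper function names, stack size, priority and core values are illustrative assumptions, not taken from the backend.

#include <3ds.h>

static Thread audioThread;

// Hypothetical setup helper: load the DSP component and spawn the mixer thread.
void startAudio(Audio::MixerImpl *mixer) {
	if (R_FAILED(ndspInit()))	// fails without a DSP firmware dump on the SD card
		return;

	// Priority 0x18 and core -2 ("don't care") are illustrative values.
	audioThread = threadCreate(audioThreadFunc, mixer, 32 * 1024, 0x18, -2, false);
}

// Hypothetical teardown helper: audioThreadFunc returns once osys->exiting is set.
void stopAudio() {
	threadJoin(audioThread, U64_MAX);
	threadFree(audioThread);
	ndspExit();
}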
void refillAudioBuffers(void) {
	if (!_audioEnabled)
		return;

	OSystem_N64 *osys = (OSystem_N64 *)g_system;
	byte *sndBuf;

	Audio::MixerImpl *localmixer = (Audio::MixerImpl *)osys->getMixer();

	// Mix directly into the next free Audio Interface buffer until every
	// pending hardware slot has been refilled.
	while (_requiredSoundSlots) {
		sndBuf = (byte *)getAIBuffer();

		localmixer->mixCallback(sndBuf, osys->_audioBufferSize);

		putAIBuffer();

		_requiredSoundSlots--;
	}
}
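refillAudioBuffers() only mixes while _requiredSoundSlots is non-zero, so the counter has to be incremented elsewhere (typically from an interrupt when the Audio Interface drains a buffer), and the function has to be polled frequently from the main thread. The call site below is a hedged sketch of that polling, not the backend's literal delayMillis(); only getMillis() and refillAudioBuffers() are taken from the surrounding code.

// Hypothetical call site: keep the Audio Interface topped up while the
// engine is idle. _requiredSoundSlots is assumed to be bumped from an
// audio interrupt each time a hardware buffer is consumed.
void OSystem_N64::delayMillis(uint msecs) {
	uint32 endTime = getMillis() + msecs;

	do {
		refillAudioBuffers();
	} while (getMillis() < endTime);
}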
static void *sfx_thread_func(void *arg) {
	Audio::MixerImpl *mixer = (Audio::MixerImpl *)arg;
	u8 sb_sw;

	while (true) {
		LWP_ThreadSleep(sfx_queue);

		if (sfx_thread_quit)
			break;

		// the hardware uses two buffers: a front and a back buffer
		// we use 3 buffers here: two are being pushed to the DSP,
		// and the free one is the one our mixer writes into
		// thus the latency of our stream is:
		// 8192 [frag size] / 48000 / 2 [16bit] / 2 [stereo] * 2 [hw buffers]
		// -> 85.3ms
		sb_sw = (sb_hw + 1) % SFX_BUFFERS;
		mixer->mixCallback(sound_buffer[sb_sw], SFX_THREAD_FRAG_SIZE);
		DCFlushRange(sound_buffer[sb_sw], SFX_THREAD_FRAG_SIZE);
	}

	return NULL;
}
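The thread above sleeps on sfx_queue and always mixes into the buffer after the one the hardware currently owns (sb_hw), so the wake-up has to come from the audio DMA completion interrupt, which also advances sb_hw and restarts the DMA. Below is a sketch of such a callback built from libogc's AUDIO_* and LWP_ThreadSignal() calls and registered via AUDIO_RegisterDMACallback(); treat it as an assumed counterpart to the thread, not a quote of the backend.

#include <gccore.h>

static void sfx_dma_callback(void) {
	AUDIO_StopDMA();

	if (sfx_thread_quit)
		return;

	// Hand the next filled buffer to the hardware, then wake the mixer
	// thread so it can refill the buffer that just became free.
	sb_hw = (sb_hw + 1) % SFX_BUFFERS;

	AUDIO_InitDMA((u32)sound_buffer[sb_hw], SFX_THREAD_FRAG_SIZE);
	AUDIO_StartDMA();

	LWP_ThreadSignal(sfx_queue);
}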
void *OSystem_Android::audioThreadFunc(void *arg) {
	JNI::attachThread();

	OSystem_Android *system = (OSystem_Android *)arg;
	Audio::MixerImpl *mixer = system->_mixer;

	uint buf_size = system->_audio_buffer_size;

	JNIEnv *env = JNI::getEnv();

	jbyteArray bufa = env->NewByteArray(buf_size);

	bool paused = true;

	byte *buf;
	int offset, left, written;
	int samples, i;

	struct timespec tv_delay;
	tv_delay.tv_sec = 0;
	tv_delay.tv_nsec = 20 * 1000 * 1000;

	uint msecs_full = buf_size * 1000 / (mixer->getOutputRate() * 2 * 2);

	struct timespec tv_full;
	tv_full.tv_sec = 0;
	tv_full.tv_nsec = msecs_full * 1000 * 1000;

	bool silence;
	uint silence_count = 33;

	while (!system->_audio_thread_exit) {
		if (JNI::pause) {
			JNI::setAudioStop();

			paused = true;
			silence_count = 33;

			LOGD("audio thread going to sleep");
			sem_wait(&JNI::pause_sem);
			LOGD("audio thread woke up");
		}

		buf = (byte *)env->GetPrimitiveArrayCritical(bufa, 0);
		assert(buf);

		samples = mixer->mixCallback(buf, buf_size);

		silence = samples < 1;

		// looks stupid, and it is, but currently there's no way to detect
		// silence-only buffers from the mixer
		if (!silence) {
			silence = true;

			for (i = 0; i < samples; i += 2)
				// SID streams constant crap
				if (READ_UINT16(buf + i) > 32) {
					silence = false;
					break;
				}
		}

		env->ReleasePrimitiveArrayCritical(bufa, buf, 0);

		if (silence) {
			if (!paused)
				silence_count++;

			// only pause after a while to prevent toggle mania
			if (silence_count > 32) {
				if (!paused) {
					LOGD("AudioTrack pause");

					JNI::setAudioPause();
					paused = true;
				}

				nanosleep(&tv_full, 0);

				continue;
			}
		}

		if (paused) {
			LOGD("AudioTrack play");

			JNI::setAudioPlay();
			paused = false;
			silence_count = 0;
		}

		offset = 0;
		left = buf_size;
		written = 0;

		while (left > 0) {
			written = JNI::writeAudio(env, bufa, offset, left);

			if (written < 0) {
				LOGE("AudioTrack error: %d", written);
				break;
			}

			// buffer full
			if (written < left)
				nanosleep(&tv_delay, 0);

			offset += written;
			left -= written;
		}

		if (written < 0)
			break;

		// prepare the next buffer, and run into the blocking AudioTrack.write
	}

	JNI::setAudioStop();

	env->DeleteLocalRef(bufa);

	JNI::detachThread();

	return 0;
}
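audioThreadFunc() runs on its own pthread and drives the Java-side AudioTrack through the JNI wrappers. A hedged sketch of how the backend could start and stop it is below; _audio_thread, _audio_thread_exit and JNI::pause_sem follow the fields referenced above, while the two method names are illustrative assumptions.

#include <pthread.h>
#include <semaphore.h>

void OSystem_Android::startAudioThread() {	// method name is illustrative
	_audio_thread_exit = false;

	if (pthread_create(&_audio_thread, 0, audioThreadFunc, this))
		LOGE("couldn't create audio thread");
}

void OSystem_Android::stopAudioThread() {	// method name is illustrative
	// The exit flag is polled once per mixed buffer; post the pause semaphore
	// first in case the thread is currently asleep in sem_wait().
	_audio_thread_exit = true;
	sem_post(&JNI::pause_sem);

	pthread_join(_audio_thread, 0);
}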