/* I2S extension (RX) DMA interrupt for double-buffered full-duplex audio.
 * The DMA runs in circular mode over a buffer of 2*szbuf samples; the
 * half-transfer (HT) flag fires when the 1st half is ready and the
 * transfer-complete (TC) flag when the 2nd half is ready, so audioCallback
 * always processes the half the DMA is NOT currently filling.
 * NOTE(review): TC is tested before HT in an if/else, so if both flags are
 * pending only TC is serviced on this entry — presumably the IRQ re-fires
 * for the remaining flag; confirm against the STM32 DMA behavior. */
void DMA1_Stream3_IRQHandler(void){
    if(DMA_GetFlagStatus(AUDIO_I2S_EXT_DMA_STREAM, AUDIO_I2S_EXT_DMA_FLAG_TC) != RESET) {
        /* Transfer complete interrupt */
        /* Handle 2nd half: rx/tx pointers offset by szbuf samples */
        audioCallback(rxbuf + szbuf, txbuf + szbuf, szbuf);
        /* Clear the Interrupt flag so the handler is not re-entered for this event */
        DMA_ClearFlag(AUDIO_I2S_EXT_DMA_STREAM, AUDIO_I2S_EXT_DMA_FLAG_TC);
    }else if (DMA_GetFlagStatus(AUDIO_I2S_EXT_DMA_STREAM, AUDIO_I2S_EXT_DMA_FLAG_HT) != RESET) {
        /* Half Transfer complete interrupt */
        /* Handle 1st half: buffers from their start, szbuf samples */
        audioCallback(rxbuf, txbuf, szbuf);
        /* Clear the Interrupt flag */
        DMA_ClearFlag(AUDIO_I2S_EXT_DMA_STREAM, AUDIO_I2S_EXT_DMA_FLAG_HT);
    }
}
/// Process-thread entry point: applies any pending port-management commands,
/// refreshes the per-port JACK buffer pointers for this cycle, and hands the
/// buffers to the user's audioCallback.
/// @param nframes number of frames JACK wants processed this cycle
/// @return the value returned by the user's audioCallback
int JackCpp::AudioIO::jackToClassAudioCallback(jack_nframes_t nframes){
    // Drain every command queued by the non-realtime thread. Capacity was
    // verified at enqueue time, so a resize here only grows the pointer
    // vectors to match ports that were already registered.
    while(mCmdBuffer.getReadSpace() > 0){
        cmd_t cmd;
        mCmdBuffer.read(cmd);
        if (cmd == add_in_port) {
            // Grow the input-buffer vector to cover the newly added port.
            mJackInBuf.resize(mJackInBuf.size() + 1);
            mNumInputPorts++;
        } else if (cmd == add_out_port) {
            // Grow the output-buffer vector to cover the newly added port.
            mJackOutBuf.resize(mJackOutBuf.size() + 1);
            mNumOutputPorts++;
        }
    }
    // JACK buffer addresses are only valid for the current cycle, so they
    // must be re-fetched on every callback before use.
    for(unsigned int port = 0; port < mNumInputPorts; ++port) {
        mJackInBuf[port] =
            (jack_default_audio_sample_t *) jack_port_get_buffer(mInputPorts[port], nframes);
    }
    for(unsigned int port = 0; port < mNumOutputPorts; ++port) {
        mJackOutBuf[port] =
            (jack_default_audio_sample_t *) jack_port_get_buffer(mOutputPorts[port], nframes);
    }
    // Delegate the actual DSP work to the user-provided callback.
    return audioCallback(nframes, mJackInBuf, mJackOutBuf);
}
/// Advance the emulated TED sound core by `msec` milliseconds by rendering
/// that much audio into a scratch buffer that is then discarded.
/// @param msec milliseconds of audio to render (clamped so the request
///             never exceeds the 32 KiB scratch buffer)
virtual void sleep(unsigned int msec) override {
    // Scratch sink for the rendered audio; contents are never read.
    static unsigned char buffer[32768];
    // 44100 bytes of audio per second. Use 64-bit math: the original
    // 32-bit `44100 * msec` wraps for msec greater than ~97 seconds,
    // and comparing a signed int against sizeof() mixed signedness.
    long long bytes = 44100LL * msec / 1000;
    if (bytes > (long long)sizeof(buffer))
        bytes = (long long)sizeof(buffer);
    LOGD("SLEEP %d msec = %d bytes", msec, (int)bytes);
    // Render (and discard) the requested span of audio.
    audioCallback((void*)ted, buffer, (int)bytes);
}
// Buffer-queue completion callback: renders the next chunk of audio into the
// idle half of the double buffer, pads any unrendered tail with silence, and
// re-enqueues the buffer for playback.
void OpenSLContext::BqPlayerCallback(SLAndroidSimpleBufferQueueItf bq) {
    // Sanity check: this callback should only ever fire for our own queue.
    if (bq != bqPlayerBufferQueue) {
        ELOG("Wrong bq!");
        return;
    }

    short *out = buffer[curBuffer];
    const int framesRendered = audioCallback(out, framesPerBuffer);

    // Stereo 16-bit: 2 channels * sizeof(short) = 4 bytes per frame.
    const int sizeInBytes = framesPerBuffer * 2 * sizeof(short);
    const int silenceBytes = (framesPerBuffer - framesRendered) * 4;
    if (silenceBytes > 0) {
        // The callback came up short; zero the remainder so stale data
        // from the previous cycle is not played.
        memset(out + framesRendered * 2, 0, silenceBytes);
    }

    SLresult result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, out, sizeInBytes);
    // Comment from sample code:
    // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
    // which for this code example would indicate a programming error
    if (result != SL_RESULT_SUCCESS) {
        ELOG("OpenSL ES: Failed to enqueue! %i %i", framesRendered, sizeInBytes);
    }

    curBuffer ^= 1; // Switch buffer
}
// Fetch mono samples from the TED core and expand them in place into
// interleaved stereo (L and R both carry the mono value).
void getSamples(int16_t *target, int noSamples) {
    // Fill the front of the buffer with mono source samples.
    audioCallback((void*)ted, (unsigned char*)target, noSamples);
    // Expand back-to-front so no mono sample is overwritten before it
    // has been duplicated into its stereo slots.
    int idx = (noSamples - 1) / 2;
    while (idx >= 0) {
        const int16_t mono = target[idx];
        target[idx * 2] = mono;
        target[idx * 2 + 1] = mono;
        --idx;
    }
    // NOTE(review): for odd noSamples the final write lands at index
    // noSamples, one past the requested count — confirm callers always
    // pass an even sample count / size the buffer accordingly.
}
bool OpenSLContext::Init() { SLresult result; // create engine result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL); if (result != SL_RESULT_SUCCESS) { ELOG("OpenSL ES: Failed to create the engine: %d", (int)result); return false; } result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE); assert(SL_RESULT_SUCCESS == result); result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine); assert(SL_RESULT_SUCCESS == result); result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, 0, 0); assert(SL_RESULT_SUCCESS == result); result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE); assert(SL_RESULT_SUCCESS == result); SLuint32 sr = SL_SAMPLINGRATE_44_1; if (sampleRate == 48000) { sr = SL_SAMPLINGRATE_48; } // Don't allow any other sample rates. SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2}; SLDataFormat_PCM format_pcm = { SL_DATAFORMAT_PCM, 2, sr, SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16, SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN }; SLDataSource audioSrc = {&loc_bufq, &format_pcm}; // configure audio sink SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject}; SLDataSink audioSnk = {&loc_outmix, NULL}; // create audio player const SLInterfaceID ids[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME}; const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE}; result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk, 2, ids, req); if (result != SL_RESULT_SUCCESS) { ELOG("OpenSL ES: CreateAudioPlayer failed: %d", (int)result); // Should really tear everything down here. Sigh. 
return false; } result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE); assert(SL_RESULT_SUCCESS == result); result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay); assert(SL_RESULT_SUCCESS == result); result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE, &bqPlayerBufferQueue); assert(SL_RESULT_SUCCESS == result); result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, &bqPlayerCallbackWrap, this); assert(SL_RESULT_SUCCESS == result); result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_VOLUME, &bqPlayerVolume); assert(SL_RESULT_SUCCESS == result); result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING); assert(SL_RESULT_SUCCESS == result); // Render and enqueue a first buffer. (or should we just play the buffer empty?) buffer[0] = new short[framesPerBuffer * 2]; buffer[1] = new short[framesPerBuffer * 2]; curBuffer = 0; audioCallback(buffer[curBuffer], framesPerBuffer); result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, buffer[curBuffer], sizeof(buffer[curBuffer])); if (SL_RESULT_SUCCESS != result) { return false; } curBuffer ^= 1; return true; }
// JUCE AudioSource hook: hands the block's channel pointers, channel count,
// and sample count straight to the user's audioCallback.
void AudioCallback::getNextAudioBlock (const AudioSourceChannelInfo &bufferToFill)
{
    auto *block = bufferToFill.buffer;
    audioCallback (block->getArrayOfWritePointers(),
                   block->getNumChannels(),
                   block->getNumSamples());
}