void AudioNode::processIfNecessary(size_t framesToProcess)
{
    ASSERT(context()->isAudioThread());

    if (!isInitialized())
        return;

    // Ensure that we only process once per rendering quantum.
    // This handles the "fanout" problem where an output is connected to multiple inputs.
    // The first time we're called during this time slice we process, but after that we don't want to re-process;
    // instead our output(s) will already have the results cached in their bus.
    double currentTime = context()->currentTime();
    if (m_lastProcessingTime != currentTime) {
        m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph
        pullInputs(framesToProcess);

        bool silentInputs = inputsAreSilent();
        if (!silentInputs)
            m_lastNonSilentTime = (context()->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);

        if (silentInputs && propagatesSilence())
            silenceOutputs();
        else {
            process(framesToProcess);
            unsilenceOutputs();
        }
    }
}
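The `m_lastProcessingTime` stamp is doing all the work here: the first pull in a rendering quantum runs `process()`, and every later pull within the same quantum returns immediately and reads the cached bus, which is also what lets feedback loops terminate. A minimal standalone sketch of that guard, with a toy `Node` and bus standing in for WebKit's types (the names and the 44100 Hz clock are illustrative only):

#include <cassert>
#include <vector>

// Minimal sketch of the once-per-quantum guard, assuming a toy graph where
// one node's output feeds two downstream inputs ("fanout").
struct Node {
    double lastProcessingTime = -1.0;   // sentinel: never processed
    int processCount = 0;
    std::vector<float> bus;             // cached output for this quantum

    void processIfNecessary( double currentTime, size_t framesToProcess )
    {
        if( lastProcessingTime == currentTime )
            return;                       // already rendered this quantum; bus holds the result
        lastProcessingTime = currentTime; // stamp first, so feedback loops terminate
        bus.assign( framesToProcess, 0.5f ); // stand-in for real DSP
        processCount++;
    }
};

int main()
{
    Node source;
    double quantumTime = 0.0;
    // Two inputs pull from the same source during one quantum...
    source.processIfNecessary( quantumTime, 128 );
    source.processIfNecessary( quantumTime, 128 );
    assert( source.processCount == 1 );  // ...but it only processed once.
    // The next quantum advances the clock, so it processes again.
    source.processIfNecessary( quantumTime + 128 / 44100.0, 128 );
    assert( source.processCount == 2 );
}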
void OutputDeviceNodeXAudio::submitNextBuffer()
{
    auto ctx = getContext();
    if( ! ctx )
        return;

    lock_guard<mutex> lock( ctx->getMutex() );

    // verify context still exists, since its destructor may have been holding the lock
    ctx = getContext();
    if( ! ctx )
        return;

    ctx->preProcess();

    auto internalBuffer = getInternalBuffer();
    internalBuffer->zero();
    pullInputs( internalBuffer );

    if( checkNotClipping() )
        internalBuffer->zero();

    if( getNumChannels() == 2 )
        dsp::interleaveStereoBuffer( internalBuffer, &mBufferInterleaved );

    HRESULT hr = mSourceVoice->SubmitSourceBuffer( &mXAudioBuffer );
    CI_ASSERT( hr == S_OK );

    ctx->postProcess();
}
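The double `getContext()` around the lock is the load-bearing detail: the context's destructor may acquire the same mutex during teardown, so by the time this audio-thread callback actually gets the lock, the context it null-checked a moment ago may already be going away. A sketch of that re-check pattern, under the assumption that `getContext()` reads a registry slot which teardown clears while holding the lock (names and ownership model are illustrative, not Cinder's actual implementation):

#include <mutex>

// Sketch of the "re-check after locking" pattern from submitNextBuffer(), assuming
// getContext() reads a registry slot that teardown clears while holding the mutex.
struct Context {
    std::mutex mutex;
    void preProcess() {}
    void postProcess() {}
};

static Context *sCurrentContext = nullptr; // registry slot

static Context *getContext() { return sCurrentContext; }

// Teardown side: clear the slot while holding the lock, so a callback that was
// blocked on the mutex wakes up, re-checks, and bails out instead of touching
// a context that is being destroyed.
void destroyContext( Context *ctx )
{
    {
        std::lock_guard<std::mutex> lock( ctx->mutex );
        sCurrentContext = nullptr;
    }
    delete ctx; // the mutex must not be freed while anyone can still block on it
}

// Audio-callback side, mirroring submitNextBuffer().
void submitNextBuffer()
{
    auto ctx = getContext();
    if( ! ctx )
        return;

    std::lock_guard<std::mutex> lock( ctx->mutex );

    // Re-check: the teardown above may have been the one holding the lock.
    if( ! getContext() )
        return;

    ctx->preProcess();
    // ... pull inputs and submit the buffer to the platform audio API here ...
    ctx->postProcess();
}

The same ordering caveat applies to the real code: the mutex returned by `getMutex()` has to stay valid for as long as the audio callback can still be blocked on it, which the surrounding ownership presumably guarantees.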
void OutputDeviceNodeWebAudio::renderInputs( emscripten::val e )
{
    auto outputBuffer = e["outputBuffer"];
    auto inputBuffer = e["inputBuffer"];

    auto ctx = getContext();
    if( ! ctx ) {
        CI_LOG_I( "can't get context" );
        return;
    }

    // this technically doesn't do anything but leave it here for now.
    lock_guard<mutex> lock( ctx->getMutex() );

    ctx->preProcess();

    auto internalBuffer = getInternalBuffer();
    internalBuffer->zero();
    pullInputs( internalBuffer );

    if( checkNotClipping() )
        internalBuffer->zero();

    const size_t numFrames = internalBuffer->getNumFrames();
    const size_t numChannels = internalBuffer->getNumChannels();
    //dsp::interleave( internalBuffer->getData(), outputBuffer, numFrames, numChannels, numFrames );

    ctx->postProcess();

    // ===========================================

    // make sure that the ScriptProcessorNode's bufferSize prop is set properly.
    mImpl->setOutputBufferSize( numFrames );

    // the output array is what gets directed towards the speakers
    val output = outputBuffer.call<val>( "getChannelData", 0 );

    // input is only going to be populated when using native WebAudio nodes
    val input = inputBuffer.call<val>( "getChannelData", 0 );

    // check the first 100 values to see whether the input buffer holds any data
    // (all zeros means empty). If it's empty, don't bother mixing it in below.
    bool inputIsEmpty = true;
    for( int i = 0; i < 100; ++i ) {
        if( input[i].as<float>() != 0.0f ) {
            inputIsEmpty = false;
            break;
        }
    }

    // internal cinder data
    float *data = static_cast<float *>( internalBuffer->getData() );
    const float *finalData = data; // if there's no input data, just pass the cinder data through

    if( ! inputIsEmpty ) {
        // copy the Float32Array (aka "input") into a vector so it can be handed to dsp methods
        std::vector<float> inputData;
        ci::em::helpers::copyToVector( input, inputData );

        // mix the input data into the cinder data in place; finalData already points at the
        // result. (The original wrote through an uninitialized finalData pointer here, which
        // is undefined behavior.)
        dsp::add( inputData.data(), data, data, numFrames );
    }

    // loop through and set everything from the finalData pointer onto the output buffer
    for( size_t i = 0; i < numFrames; ++i )
        output.set<float>( i, finalData[i] );

    // No manual free() here: `data` is owned by the internal buffer and the input copy by
    // its vector, so the original free( finalData )/free( idata ) calls crashed because
    // they released memory this function never allocated.
}
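Two of the moving parts above are worth seeing in isolation: marshalling a JS `Float32Array` into C++ through `emscripten::val`, and writing the mixed result back. Here is a guess at what a helper like `ci::em::helpers::copyToVector()` boils down to; the function bodies and names are assumptions, and only the embind calls (`operator[]`, `as<T>()`, `set()`) are the real API:

#include <emscripten/val.h>
#include <vector>

using emscripten::val;

// Presumed shape of ci::em::helpers::copyToVector(): pull a JS Float32Array into
// a std::vector<float> one element at a time through embind.
std::vector<float> float32ArrayToVector( const val &array )
{
    const unsigned length = array["length"].as<unsigned>();
    std::vector<float> out( length );
    for( unsigned i = 0; i < length; ++i )
        out[i] = array[i].as<float>();
    return out;
}

// The mirror image, matching the output loop in renderInputs() above.
void vectorToFloat32Array( const std::vector<float> &in, val array )
{
    for( size_t i = 0; i < in.size(); ++i )
        array.set( i, in[i] );
}

Element-wise `val` access crosses the JS/WASM boundary on every sample, which is slow; the usual optimization is to view the WASM heap as a typed array on the JS side and copy in bulk, but the element-wise form is the simplest one that is obviously correct.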