void collectLiveEvents( BaseInstrument* instrument )
{
    AudioChannel* channel = instrument->audioChannel;
    std::vector<BaseAudioEvent*>* liveEvents = instrument->getLiveEvents();

    // removal queue
    std::vector<BaseAudioEvent*> removes;

    for ( size_t i = 0; i < liveEvents->size(); i++ )
    {
        BaseAudioEvent* audioEvent = liveEvents->at( i );

        if ( !audioEvent->isDeletable())
            channel->addLiveEvent( audioEvent );
        else
            removes.push_back( audioEvent );
    }

    // removal queue filled ? process it so we can safely
    // remove "deleted" AudioEvents without errors occurring
    for ( size_t i = 0; i < removes.size(); i++ )
    {
        BaseAudioEvent* audioEvent = removes[ i ];
        instrument->removeEvent( audioEvent, true );
    }
}
void ChannelMergerNode::process(size_t framesToProcess)
{
    AudioNodeOutput* output = this->output(0);
    ASSERT(output);
    ASSERT_UNUSED(framesToProcess, framesToProcess == output->bus()->length());

    // Output bus not updated yet, so just output silence.
    if (m_desiredNumberOfOutputChannels != output->numberOfChannels()) {
        output->bus()->zero();
        return;
    }

    // Merge all the channels from all the inputs into one output.
    unsigned outputChannelIndex = 0;
    for (unsigned i = 0; i < numberOfInputs(); ++i) {
        AudioNodeInput* input = this->input(i);
        if (input->isConnected()) {
            unsigned numberOfInputChannels = input->bus()->numberOfChannels();

            // Merge channels from this particular input.
            for (unsigned j = 0; j < numberOfInputChannels; ++j) {
                AudioChannel* inputChannel = input->bus()->channel(j);
                AudioChannel* outputChannel = output->bus()->channel(outputChannelIndex);
                outputChannel->copyFrom(inputChannel);

                ++outputChannelIndex;
            }
        }
    }

    ASSERT(outputChannelIndex == output->numberOfChannels());
}
void ChannelMergerHandler::process(size_t framesToProcess)
{
    AudioNodeOutput& output = this->output(0);
    DCHECK_EQ(framesToProcess, output.bus()->length());

    unsigned numberOfOutputChannels = output.numberOfChannels();
    DCHECK_EQ(numberOfInputs(), numberOfOutputChannels);

    // Merge multiple inputs into one output.
    for (unsigned i = 0; i < numberOfOutputChannels; ++i) {
        AudioNodeInput& input = this->input(i);
        DCHECK_EQ(input.numberOfChannels(), 1u);
        AudioChannel* outputChannel = output.bus()->channel(i);
        if (input.isConnected()) {
            // The mixing rules will be applied so multiple channels are down-
            // mixed to mono (when the mixing rule is defined). Note that only
            // the first channel will be taken for the undefined input channel
            // layout.
            //
            // See:
            // http://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing
            AudioChannel* inputChannel = input.bus()->channel(0);
            outputChannel->copyFrom(inputChannel);
        } else {
            // If input is unconnected, fill zeros in the channel.
            outputChannel->zero();
        }
    }
}
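The down-mix comment above can be made concrete: for a stereo input with a defined channel layout, the Web Audio mixing rules reduce a stereo pair to mono by averaging the two channels. A minimal sketch of that rule on raw buffers (the helper name is illustrative, not Blink API):

#include <cstddef>

// Hypothetical helper: down-mix a stereo pair to mono following the
// Web Audio rule mono = 0.5 * (left + right).
void downMixStereoToMono(const float* left, const float* right,
                         float* mono, size_t frames)
{
    for (size_t i = 0; i < frames; ++i)
        mono[i] = 0.5f * (left[i] + right[i]);
}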
void EqualPowerPanner::pan(double azimuth, double /*elevation*/, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
{
    // FIXME: implement stereo sources
    bool isInputSafe = inputBus && inputBus->numberOfChannels() == 1 && framesToProcess <= inputBus->length();
    ASSERT(isInputSafe);
    if (!isInputSafe)
        return;

    bool isOutputSafe = outputBus && outputBus->numberOfChannels() == 2 && framesToProcess <= outputBus->length();
    ASSERT(isOutputSafe);
    if (!isOutputSafe)
        return;

    AudioChannel* channel = inputBus->channel(0);
    float* sourceP = channel->data();
    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();

    if (!sourceP || !destinationL || !destinationR)
        return;

    // Pan smoothly from left to right with azimuth going from -30 -> +30 degrees.
    double desiredPanPosition;
    if (azimuth > 30.0)
        desiredPanPosition = 1.0;
    else if (azimuth < -30.0)
        desiredPanPosition = 0.0;
    else
        desiredPanPosition = (azimuth + 30.0) / 60.0;

    double desiredGainL = 0.5 * cos(piDouble * desiredPanPosition) + 0.5;
    double desiredGainR = sqrt(1.0 - desiredGainL * desiredGainL);

    // Don't de-zipper on first render call.
    if (m_isFirstRender) {
        m_isFirstRender = false;
        m_gainL = desiredGainL;
        m_gainR = desiredGainR;
    }

    // Cache in local variables.
    double gainL = m_gainL;
    double gainR = m_gainR;

    // Get local copy of smoothing constant.
    const double SmoothingConstant = m_smoothingConstant;

    int n = framesToProcess;

    while (n--) {
        float input = *sourceP++;
        gainL += (desiredGainL - gainL) * SmoothingConstant;
        gainR += (desiredGainR - gainR) * SmoothingConstant;
        *destinationL++ = static_cast<float>(input * gainL);
        *destinationR++ = static_cast<float>(input * gainR);
    }

    m_gainL = gainL;
    m_gainR = gainR;
}
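The gain computation above is the equal-power pan law. Writing the pan position as p in [0, 1]:

    g_L = \tfrac{1}{2} \cos(\pi p) + \tfrac{1}{2}, \qquad g_R = \sqrt{1 - g_L^2}

By construction g_L^2 + g_R^2 = 1 for every p, so the summed signal power stays constant as the source sweeps between the speakers; the per-sample smoothing in the render loop merely de-zippers transitions between successive desired positions.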
std::vector<AudioChannel*> getAudioEvents( std::vector<AudioChannel*> channels, int bufferPosition, int bufferEnd, bool addLiveInstruments )
{
    // clear previous channel contents (note we don't delete the channels anywhere as we re-use them)
    channels.clear();

    // note we update the channels' mix properties here as they might change during playback
    for ( int i = 0, l = ( int ) instruments.size(); i < l; ++i )
    {
        BaseInstrument* instrument      = instruments.at( i );
        AudioChannel* instrumentChannel = instrument->audioChannel;

        instrumentChannel->reset();
        instrumentChannel->mixVolume = instrument->volume;

        if ( !instrumentChannel->muted )
        {
            if ( AudioEngine::playing )
                collectSequencedEvents( instrument, bufferPosition, bufferEnd );

            if ( addLiveInstruments && instrument->hasLiveEvents() )
                collectLiveEvents( instrument );

            channels.push_back( instrumentChannel );
        }
    }
    return channels;
}
void synthesize(float hz, float timbre, float volume)
{
    LOG("hz=%f timbre=%f volume=%f\n", hz, timbre, volume);

    whiteNoiseChannel.setVolume(volume * 96.f);
    whiteNoiseChannel.setSpeed(hz * arraysize(whiteNoise));
}
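A note on the `setSpeed` call above: assuming it sets the channel's playback rate in samples per second (which the multiplication by the table length suggests), the usual wavetable relation applies,

    \text{speed} = f \cdot N \quad\Longleftrightarrow\quad f = \frac{\text{speed}}{N},

so the N-sample noise table is traversed hz times per second, transposing the noise up and down with the requested frequency.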
bool Sequencer::getAudioEvents( std::vector<AudioChannel*>* channels, int bufferPosition, int bufferSize, bool addLiveInstruments, bool flushChannels )
{
    channels->clear();

    int bufferEnd    = bufferPosition + ( bufferSize - 1 );          // the highest SampleEnd value we'll query
    bool loopStarted = bufferEnd > AudioEngine::max_buffer_position; // whether this request exceeds the min_buffer_position - max_buffer_position range

    // note we update the channels' mix properties here as they might change during playback
    for ( int i = 0, l = ( int ) instruments.size(); i < l; ++i )
    {
        BaseInstrument* instrument      = instruments.at( i );
        AudioChannel* instrumentChannel = instrument->audioChannel;

        // clear previous channel contents when requested
        if ( flushChannels )
            instrumentChannel->reset();

        if ( !instrumentChannel->muted )
        {
            if ( playing )
                collectSequencedEvents( instrument, bufferPosition, bufferEnd );

            if ( addLiveInstruments && instrument->hasLiveEvents() )
                collectLiveEvents( instrument );

            channels->push_back( instrumentChannel );
        }
    }
    return loopStarted;
}
/**
 * used by the getAudioEvents-method of the sequencer, this validates
 * the present AudioEvents against the requested position
 * and updates and flushes the removal queue
 *
 * @param instrument     {BaseInstrument*} instrument to gather events from
 * @param bufferPosition {int} the current buffer's start pointer
 * @param bufferEnd      {int} the current buffer's end pointer
 */
void collectSequencedEvents( BaseInstrument* instrument, int bufferPosition, int bufferEnd )
{
    if ( !instrument->hasEvents() )
        return;

    AudioChannel* channel = instrument->audioChannel;
    std::vector<BaseAudioEvent*>* audioEvents = instrument->getEvents();

    // removal queue
    std::vector<BaseAudioEvent*> removes;

    // channel has an internal loop (e.g. drum machine) ? recalculate the requested
    // buffer position by subtracting all measures above the first
    if ( channel->maxBufferPosition > 0 )
    {
        int samplesPerBar = AudioEngine::samples_per_bar;

        while ( bufferPosition >= channel->maxBufferPosition )
        {
            bufferPosition -= samplesPerBar;
            bufferEnd      -= samplesPerBar;
        }
    }

    for ( size_t i = 0, amount = audioEvents->size(); i < amount; i++ )
    {
        BaseAudioEvent* audioEvent = audioEvents->at( i );

        if ( audioEvent->isEnabled() )
        {
            int sampleStart = audioEvent->getSampleStart();
            int sampleEnd   = audioEvent->getSampleEnd();

            if ( audioEvent->isLoopeable() ||
               ( sampleStart >= bufferPosition && sampleStart <= bufferEnd ) ||
               ( sampleStart <  bufferPosition && sampleEnd   >= bufferPosition ))
            {
                if ( !audioEvent->isDeletable())
                    channel->addEvent( audioEvent );
                else
                    removes.push_back( audioEvent );
            }
        }
    }

    // removal queue filled ? process it so we can safely
    // remove "deleted" AudioEvents without errors occurring
    for ( size_t i = 0; i < removes.size(); i++ )
    {
        BaseAudioEvent* audioEvent = removes[ i ];
        instrument->removeEvent( audioEvent, false );
    }
}
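The selection condition in the loop above is an interval-intersection test: given sampleEnd >= sampleStart, the two bracketed clauses are together equivalent to sampleStart <= bufferEnd && sampleEnd >= bufferPosition. A minimal standalone sketch of that predicate (the helper name is illustrative, not part of the engine):

// true when the inclusive event range [sampleStart, sampleEnd]
// overlaps the inclusive render window [bufferPosition, bufferEnd]
bool eventOverlapsWindow( int sampleStart, int sampleEnd,
                          int bufferPosition, int bufferEnd )
{
    return sampleStart <= bufferEnd && sampleEnd >= bufferPosition;
}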
// Pulls on our provider to get rendered audio stream.
OSStatus AudioDestinationMac::render(UInt32 numberOfFrames, AudioBufferList* ioData)
{
    AudioBuffer* buffers = ioData->mBuffers;
    m_renderBus.setChannelMemory(0, (float*) buffers[0].mData, numberOfFrames);
    m_renderBus.setChannelMemory(1, (float*) buffers[1].mData, numberOfFrames);

    //@tofix - add support for local/live audio input.
    m_callback.render(m_input->m_audioBus, &m_renderBus, numberOfFrames);

    // Clamp values at 0 dB (i.e., [-1.0, 1.0])
    for (unsigned i = 0; i < m_renderBus.numberOfChannels(); ++i) {
        AudioChannel* channel = m_renderBus.channel(i);
        VectorMath::vclip(channel->data(), 1, &kLowThreshold, &kHighThreshold, channel->mutableData(), 1, numberOfFrames);
    }

    return noErr;
}
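`VectorMath::vclip` applies the clamp element-wise (the extra arguments are strides); a scalar sketch of the semantics the call relies on, not the LabSound implementation:

#include <algorithm>
#include <cstddef>

// Scalar equivalent of the vclip call above: clamp each sample to [low, high].
void clipSamples(const float* source, float* destination, size_t n,
                 float low = -1.0f, float high = 1.0f)
{
    for (size_t i = 0; i < n; ++i)
        destination[i] = std::min(high, std::max(low, source[i]));
}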
void synthInit()
{
    Random gen = Random();

    // generate the white noise table
    for (int i = 0; i != arraysize(whiteNoise); i++) {
        whiteNoise[i] = gen.random() * 0x7fff;
    }

    whiteNoiseChannel.play(noiseAsset);
}
void ChannelMergerNode::process(size_t framesToProcess)
{
    AudioNodeOutput* output = this->output(0);
    ASSERT(output);
    ASSERT_UNUSED(framesToProcess, framesToProcess == output->bus()->length());

    // Output bus not updated yet, so just output silence.
    if (m_desiredNumberOfOutputChannels != output->numberOfChannels()) {
        output->bus()->zero();
        return;
    }

    // Merge all the channels from all the inputs into one output.
    unsigned outputChannelIndex = 0;
    unsigned maxAllowedOutputChannels = output->numberOfChannels();

    for (unsigned i = 0; i < numberOfInputs(); ++i) {
        AudioNodeInput* input = this->input(i);
        if (input->isConnected()) {
            unsigned numberOfInputChannels = input->bus()->numberOfChannels();

            // Merge channels from this particular input, but be careful not to exceed the number of
            // output channels. (This can happen if there are many inputs with each input
            // containing many channels.)
            for (unsigned j = 0; j < numberOfInputChannels; ++j) {
                if (outputChannelIndex < maxAllowedOutputChannels) {
                    AudioChannel* inputChannel = input->bus()->channel(j);
                    AudioChannel* outputChannel = output->bus()->channel(outputChannelIndex);
                    outputChannel->copyFrom(inputChannel);

                    ++outputChannelIndex;
                }
            }
        }
        if (outputChannelIndex >= maxAllowedOutputChannels)
            break;
    }

    ASSERT(outputChannelIndex == output->numberOfChannels());
}
void collectLiveEvents( BaseInstrument* instrument )
{
    AudioChannel* channel = instrument->audioChannel;
    std::vector<BaseAudioEvent*>* liveEvents = instrument->getLiveEvents();

    // removal queue
    std::vector<BaseAudioEvent*> removes;

    for ( size_t i = 0; i < liveEvents->size(); i++ )
    {
        BaseAudioEvent* audioEvent = liveEvents->at( i );

        if ( !audioEvent->deletable())
            channel->addLiveEvent( audioEvent );
        else
            removes.push_back( audioEvent );
    }

    // removal queue filled ? process it so we can safely
    // remove "deleted" AudioEvents without errors occurring
    for ( size_t i = 0; i < removes.size(); i++ )
    {
        BaseAudioEvent* audioEvent = removes[ i ];

        // remove the audio event from the live event list
        // (look the iterator up once instead of twice)
        std::vector<BaseAudioEvent*>::iterator it = std::find( liveEvents->begin(), liveEvents->end(), audioEvent );

        if ( it != liveEvents->end())
            liveEvents->erase( it );

        instrument->removeEvent( audioEvent );
    }
}
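Both `collectLiveEvents` variants defer removal to a separate queue so the vector is never erased from while it is being traversed. With C++11, the same effect can be had with `std::partition`; a sketch under the assumption that `instrument->removeEvent` only performs bookkeeping for events already taken off the list (the helper name is hypothetical):

#include <algorithm>
#include <vector>

void flushDeletableEvents( std::vector<BaseAudioEvent*>* liveEvents,
                           BaseInstrument* instrument )
{
    // move deletable events to the back of the vector
    std::vector<BaseAudioEvent*>::iterator first = std::partition(
        liveEvents->begin(), liveEvents->end(),
        []( BaseAudioEvent* e ) { return !e->deletable(); });

    // copy them aside and shrink the vector before notifying the
    // instrument, so removeEvent never sees a half-mutated list
    std::vector<BaseAudioEvent*> removes( first, liveEvents->end() );
    liveEvents->erase( first, liveEvents->end() );

    for ( BaseAudioEvent* audioEvent : removes )
        instrument->removeEvent( audioEvent );
}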
void ChannelMergerNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    auto output = this->output(0);
    ASSERT_UNUSED(framesToProcess, framesToProcess == output->bus(r)->length());

    // Output bus not updated yet, so just output silence. See Note * in checkNumberOfChannelsForInput
    if (m_desiredNumberOfOutputChannels != output->numberOfChannels()) {
        output->bus(r)->zero();
        return;
    }

    // Merge all the channels from all the inputs into one output.
    uint32_t outputChannelIndex = 0;
    for (uint32_t i = 0; i < numberOfInputs(); ++i) {
        auto input = this->input(i);

        if (input->isConnected()) {
            uint32_t numberOfInputChannels = input->bus(r)->numberOfChannels();

            // Merge channels from this particular input.
            for (uint32_t j = 0; j < numberOfInputChannels; ++j) {
                AudioChannel* inputChannel = input->bus(r)->channel(j);
                AudioChannel* outputChannel = output->bus(r)->channel(outputChannelIndex);
                outputChannel->copyFrom(inputChannel);

                ++outputChannelIndex;
            }
        }
    }

    ASSERT(outputChannelIndex == output->numberOfChannels());
}
int PulseAudioDriver::setup(bool capture, bool playback, const QString& )
{
    PENTER;

    sample_spec.rate     = frame_rate;
    sample_spec.channels = 2;
    sample_spec.format   = PA_SAMPLE_FLOAT32NE;

    assert(pa_sample_spec_valid(&sample_spec));

    if (channel_map_set && channel_map.channels != sample_spec.channels) {
        fprintf(stderr, "Channel map doesn't match file.\n");
        return -1;
    }

    /* Set up a new main loop */
    if (!(mainloop = pa_mainloop_new())) {
        fprintf(stderr, "pa_mainloop_new() failed.\n");
        return -1;
    }

    mainloop_api = pa_mainloop_get_api(mainloop);

    int r = pa_signal_init(mainloop_api);
    assert(r == 0);

    /* Create a new connection context */
    if (!(context = pa_context_new(mainloop_api, "Traverso"))) {
        fprintf(stderr, "pa_context_new() failed.\n");
        return -1;
    }

    pa_context_set_state_callback(context, context_state_callback, this);

    /* Connect the context */
    pa_context_connect(context, "", (pa_context_flags_t)0, NULL);

    /* Run the main loop */
//	int ret;
//	if (pa_mainloop_run(mainloop, &ret) < 0) {
//		fprintf(stderr, "pa_mainloop_run() failed.\n");
//		return -1;
//	}

    AudioChannel* audiochannel;
    int port_flags = 0; // note: never assigned a meaningful value before being passed on
    char buf[32];

    // TODO use the found maxchannel count for the playback stream, instead of assuming 2 !!
    for (int chn = 0; chn < 2; chn++) {
        snprintf(buf, sizeof(buf) - 1, "playback_%d", chn + 1);

        audiochannel = device->register_playback_channel(buf, "32 bit float audio", port_flags, frames_per_cycle, chn);
        audiochannel->set_latency(frames_per_cycle + capture_frame_latency);
        playbackChannels.append(audiochannel);
    }

    // TODO use the found maxchannel count for the capture stream, instead of assuming 2 !!
    for (int chn = 0; chn < 2; chn++) {
        snprintf(buf, sizeof(buf) - 1, "capture_%d", chn + 1);

        audiochannel = device->register_capture_channel(buf, "32 bit float audio", port_flags, frames_per_cycle, chn);
        audiochannel->set_latency(frames_per_cycle + capture_frame_latency);
        captureChannels.append(audiochannel);
    }

    return 1;
}
void Reverb::process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
{
    // Do a fairly comprehensive sanity check.
    // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
    bool isSafeToProcess = sourceBus && destinationBus && sourceBus->numberOfChannels() > 0 && destinationBus->numberOfChannels() > 0
        && framesToProcess <= MaxFrameSize && framesToProcess <= sourceBus->length() && framesToProcess <= destinationBus->length();

    ASSERT(isSafeToProcess);
    if (!isSafeToProcess)
        return;

    // For now only handle mono or stereo output
    if (destinationBus->numberOfChannels() > 2) {
        destinationBus->zero();
        return;
    }

    AudioChannel* destinationChannelL = destinationBus->channel(0);
    const AudioChannel* sourceChannelL = sourceBus->channel(0);

    // Handle input -> output matrixing...
    size_t numInputChannels = sourceBus->numberOfChannels();
    size_t numOutputChannels = destinationBus->numberOfChannels();
    size_t numReverbChannels = m_convolvers.size();

    if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
        // 2 -> 2 -> 2
        const AudioChannel* sourceChannelR = sourceBus->channel(1);
        AudioChannel* destinationChannelR = destinationBus->channel(1);
        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
        m_convolvers[1]->process(sourceChannelR, destinationChannelR, framesToProcess);
    } else if (numInputChannels == 1 && numOutputChannels == 2 && numReverbChannels == 2) {
        // 1 -> 2 -> 2
        for (int i = 0; i < 2; ++i) {
            AudioChannel* destinationChannel = destinationBus->channel(i);
            m_convolvers[i]->process(sourceChannelL, destinationChannel, framesToProcess);
        }
    } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 2) {
        // 1 -> 1 -> 2
        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);

        // simply copy L -> R
        AudioChannel* destinationChannelR = destinationBus->channel(1);
        bool isCopySafe = destinationChannelL->data() && destinationChannelR->data()
            && destinationChannelL->length() >= framesToProcess && destinationChannelR->length() >= framesToProcess;
        ASSERT(isCopySafe);
        if (!isCopySafe)
            return;
        memcpy(destinationChannelR->mutableData(), destinationChannelL->data(), sizeof(float) * framesToProcess);
    } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
        // 1 -> 1 -> 1
        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
    } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
        // 2 -> 4 -> 2 ("True" stereo)
        const AudioChannel* sourceChannelR = sourceBus->channel(1);
        AudioChannel* destinationChannelR = destinationBus->channel(1);

        AudioChannel* tempChannelL = m_tempBuffer->channel(0);
        AudioChannel* tempChannelR = m_tempBuffer->channel(1);

        // Process left virtual source
        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
        m_convolvers[1]->process(sourceChannelL, destinationChannelR, framesToProcess);

        // Process right virtual source
        m_convolvers[2]->process(sourceChannelR, tempChannelL, framesToProcess);
        m_convolvers[3]->process(sourceChannelR, tempChannelR, framesToProcess);

        destinationBus->sumFrom(*m_tempBuffer);
    } else if (numInputChannels == 1 && numReverbChannels == 4 && numOutputChannels == 2) {
        // 1 -> 4 -> 2 (Processing mono with "True" stereo impulse response)
        // This is an inefficient use of a four-channel impulse response, but we should handle the case.
        AudioChannel* destinationChannelR = destinationBus->channel(1);

        AudioChannel* tempChannelL = m_tempBuffer->channel(0);
        AudioChannel* tempChannelR = m_tempBuffer->channel(1);

        // Process left virtual source
        m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
        m_convolvers[1]->process(sourceChannelL, destinationChannelR, framesToProcess);

        // Process right virtual source
        m_convolvers[2]->process(sourceChannelL, tempChannelL, framesToProcess);
        m_convolvers[3]->process(sourceChannelL, tempChannelR, framesToProcess);

        destinationBus->sumFrom(*m_tempBuffer);
    } else {
        // Handle gracefully any unexpected / unsupported matrixing
        // FIXME: add code for 5.1 support...
        destinationBus->zero();
    }
}
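Reading the convolver indices off the 2 -> 4 -> 2 branch above, the "true stereo" case applies a full 2x2 convolution matrix; with inputs x_L, x_R and impulse responses h_0 ... h_3 (indexed as in m_convolvers):

    y_L = h_0 * x_L + h_2 * x_R, \qquad y_R = h_1 * x_L + h_3 * x_R

where * denotes convolution. The temporary buffer holds the h_2 / h_3 terms until sumFrom() adds them into the destination bus.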
/**
 * starts the render thread
 * NOTE: the render thread is always active, even when the
 * sequencer is paused
 */
void start()
{
    OPENSL_STREAM *p;

    p = android_OpenAudioDevice( AudioEngineProps::SAMPLE_RATE,     AudioEngineProps::INPUT_CHANNELS,
                                 AudioEngineProps::OUTPUT_CHANNELS, AudioEngineProps::BUFFER_SIZE );

    // hardware unavailable ? halt thread, trigger JNI callback for error handler
    if ( p == NULL )
    {
        Observer::handleHardwareUnavailable();
        return;
    }

    // audio hardware available, start render thread
    int buffer_size    = AudioEngineProps::BUFFER_SIZE;
    int outputChannels = AudioEngineProps::OUTPUT_CHANNELS;
    bool isMono        = outputChannels == 1;
    int i, c, ci;

    std::vector<AudioChannel*> channels;
    std::vector<AudioChannel*> channels2; // used when the loop starts, for gathering events at the start range

    bool loopStarted = false; // whether the current buffer will exceed the end offset of the loop (read remaining samples from the start)
    int loopOffset   = 0;     // the offset within the current buffer where we start reading from the current loop's start offset
    int loopAmount   = 0;     // amount of samples we must read from the current loop's start offset

    float recbufferIn[ buffer_size ];                  // used for recording from device input
    float outbuffer  [ buffer_size * outputChannels ]; // the output buffer rendered by the hardware

    // generate buffers for temporary channel buffer writes
    AudioBuffer* channelBuffer = new AudioBuffer( outputChannels, buffer_size );
    AudioBuffer* inbuffer      = new AudioBuffer( outputChannels, buffer_size ); // accumulates all channels ("master strip")
    AudioBuffer* recbuffer     = new AudioBuffer( AudioEngineProps::INPUT_CHANNELS, buffer_size );

    thread = 1;

    // signal processors
    Finalizer* limiter = new Finalizer( 2, 500, AudioEngineProps::SAMPLE_RATE, outputChannels );
    LPFHPFilter* hpf   = new LPFHPFilter(( float ) AudioEngineProps::SAMPLE_RATE, 55, outputChannels );

    while ( thread )
    {
        // erase previous buffer contents
        inbuffer->silenceBuffers();

        // gather the audio events by the buffer range currently being processed
        int endPosition = bufferPosition + buffer_size;
        channels = sequencer::getAudioEvents( channels, bufferPosition, endPosition, true );

        // read pointer exceeds maximum allowed offset ? => sequencer has started its loop
        // we must now also gather extra events at the start position of the seq. range
        loopStarted = endPosition > max_buffer_position;
        loopOffset  = ( max_buffer_position + 1 ) - bufferPosition;
        loopAmount  = buffer_size - loopOffset;

        if ( loopStarted )
        {
            // were we bouncing the audio ? save file and stop rendering
            if ( bouncing )
            {
                DiskWriter::writeBufferToFile( AudioEngineProps::SAMPLE_RATE, AudioEngineProps::OUTPUT_CHANNELS, false );

                // broadcast update via JNI, pass buffer identifier name to identify last recording
                Observer::handleBounceComplete( 1 );

                thread = 0; // stop thread, halts rendering
                break;
            }
            else
            {
                endPosition -= max_buffer_position;
                channels2 = sequencer::getAudioEvents( channels2, min_buffer_position, min_buffer_position + buffer_size, false );

                // note: the invocation above already merges these events into the instrument
                // channels; inserting channels2 into channels here would add the same events twice
                //channels.insert( channels.end(), channels2.begin(), channels2.end() );

                channels2.clear(); // would clear on the next "getAudioEvents"-query... but why wait ?
            }
        }

        // record audio from Android device ?
        if ( recordFromDevice && AudioEngineProps::INPUT_CHANNELS > 0 )
        {
            int recSamps = android_AudioIn( p, recbufferIn, AudioEngineProps::BUFFER_SIZE );
            SAMPLE_TYPE* recBufferChannel = recbuffer->getBufferForChannel( 0 );

            for ( int j = 0; j < recSamps; ++j )
            {
                recBufferChannel[ j ] = recbufferIn[ j ];

                // merge the recording into the current input buffer for instant monitoring
                if ( monitorRecording )
                {
                    for ( int k = 0; k < outputChannels; ++k )
                        inbuffer->getBufferForChannel( k )[ j ] = recBufferChannel[ j ];
                }
            }
        }

        // channel loop
        int channelAmount = ( int ) channels.size();

        for ( int j = 0; j < channelAmount; ++j )
        {
            AudioChannel* channel = channels[ j ];
            bool isCached         = channel->hasCache; // whether this channel has a fully cached buffer
            bool mustCache        = AudioEngineProps::CHANNEL_CACHING &&
                                    channel->canCache() && !isCached; // whether to cache this channel's output
            int cacheReadPos      = 0; // the offset we start reading from in the channel buffer (when writing to cache)

            SAMPLE_TYPE channelVolume = ( SAMPLE_TYPE ) channel->mixVolume;

            std::vector<BaseAudioEvent*> audioEvents = channel->audioEvents;
            int amount = ( int ) audioEvents.size();

            // clear previous channel buffer content
            channelBuffer->silenceBuffers();

            bool useChannelRange  = channel->maxBufferPosition != 0; // channel has its own buffer range (i.e. drum machine)
            int maxBufferPosition = useChannelRange ? channel->maxBufferPosition : max_buffer_position;

            // we make a copy of the current buffer position indicator...
            int bufferPos = bufferPosition;

            // ...in case the AudioChannel's maxBufferPosition differs from the sequencer loop range
            // note that these buffer positions are always a full bar in length (as we loop measures)
            while ( bufferPos > maxBufferPosition )
                bufferPos -= bytes_per_bar;

            // only render sequenced events when the sequencer isn't in the paused state
            // and the channel volume is actually at an audible level ( > 0 )
            if ( playing && amount > 0 && channelVolume > 0.0 )
            {
                if ( !isCached )
                {
                    // write the audioEvent buffers into the main output buffer
                    for ( int k = 0; k < amount; ++k )
                    {
                        BaseAudioEvent* audioEvent = audioEvents[ k ];

                        if ( !audioEvent->isLocked()) // make sure we are allowed to query the contents
                        {
                            audioEvent->lock(); // prevent buffer mutations during this read cycle

                            audioEvent->mixBuffer( channelBuffer, bufferPos, min_buffer_position,
                                                   maxBufferPosition, loopStarted, loopOffset, useChannelRange );

                            audioEvent->unlock(); // release lock
                        }
                    }
                }
                else
                {
                    channel->readCachedBuffer( channelBuffer, bufferPos );
                }
            }

            // perform live rendering for this instrument
            if ( channel->hasLiveEvents )
            {
                int lAmount = ( int ) channel->liveEvents.size();

                // the volume of the live events is divided by the channel mix as a live event
                // is played on the same instrument, but just as a different voice (note the
                // events can have their own mix level)
                float lAmp = channel->mixVolume > 0.0 ? MAX_PHASE / channel->mixVolume : MAX_PHASE;

                for ( int k = 0; k < lAmount; ++k )
                {
                    BaseAudioEvent* vo = channel->liveEvents[ k ];
                    channelBuffer->mergeBuffers( vo->synthesize( buffer_size ), 0, 0, lAmp );
                }
            }

            // apply the processing chain's processors / modulators
            ProcessingChain* chain = channel->processingChain;
            std::vector<BaseProcessor*> processors = chain->getActiveProcessors();

            for ( size_t k = 0; k < processors.size(); k++ )
            {
                BaseProcessor* processor = processors[ k ];
                bool canCacheProcessor   = processor->isCacheable();

                // only apply the processor when we're not caching or cannot cache its output
                if ( !isCached || !canCacheProcessor )
                {
                    // cannot cache this processor and we're caching ? write all contents
                    // of the channelBuffer into the channel's cache
                    if ( mustCache && !canCacheProcessor )
                        mustCache = !writeChannelCache( channel, channelBuffer, cacheReadPos );

                    processors[ k ]->process( channelBuffer, channel->isMono );
                }
            }

            // write the cache if it didn't happen yet ;) (bus processors are (currently) non-cacheable)
            if ( mustCache )
                mustCache = !writeChannelCache( channel, channelBuffer, cacheReadPos );

            // write the channel buffer into the combined output buffer, apply channel volume
            // note live events are always audible as their volume is relative to the instrument
            if ( channel->hasLiveEvents && channelVolume == 0.0 )
                channelVolume = MAX_PHASE;

            inbuffer->mergeBuffers( channelBuffer, 0, 0, channelVolume );
        }

        // TODO: create bus processors for these ?

        // apply high-pass filtering to prevent extreme low rumbling and nasty filter offsets
        hpf->process( inbuffer, buffer_size );

        // limit the audio to prevent clipping
        limiter->process( inbuffer, isMono );

        // write the accumulated buffers into the output buffer
        for ( i = 0, c = 0; i < buffer_size; i++, c += outputChannels )
        {
            for ( ci = 0; ci < outputChannels; ci++ )
            {
                float sample = ( float ) inbuffer->getBufferForChannel( ci )[ i ] * volume; // apply master volume

                // extreme limiting (still above the thresholds ?)
                if ( sample < -MAX_PHASE )
                    sample = -MAX_PHASE;
                else if ( sample > +MAX_PHASE )
                    sample = +MAX_PHASE;

                outbuffer[ c + ci ] = sample;
            }

            // update the buffer pointers and sequencer position
            if ( playing )
            {
                if ( ++bufferPosition % bytes_per_tick == 0 )
                    handleSequencerPositionUpdate( android_GetTimestamp( p ));

                if ( bufferPosition > max_buffer_position )
                    bufferPosition = min_buffer_position;
            }
        }

        // render the buffer in the audio hardware (unless we're bouncing, as writing the output
        // makes it both unnecessarily audible and stalls this thread's execution)
        if ( !bouncing )
            android_AudioOut( p, outbuffer, buffer_size * AudioEngineProps::OUTPUT_CHANNELS );

        // record the output if the recording state is active
        if ( playing && ( recordOutput || recordFromDevice ))
        {
            if ( recordFromDevice ) // recording from device input ? > write the record buffer
                DiskWriter::appendBuffer( recbuffer );
            else                    // recording global output ? > write the combined buffer
                DiskWriter::appendBuffer( inbuffer );

            // exceeded maximum recording buffer amount ? > write current recording
            if ( DiskWriter::bufferFull() || haltRecording )
            {
                int amountOfChannels = recordFromDevice ? AudioEngineProps::INPUT_CHANNELS : outputChannels;
                DiskWriter::writeBufferToFile( AudioEngineProps::SAMPLE_RATE, amountOfChannels, true );

                if ( !haltRecording )
                {
                    DiskWriter::generateOutputBuffer(); // allocate new buffer for the next iteration
                    ++recordingFileId;
                }
                else
                {
                    haltRecording = false;
                }
            }
        }

        // tempo update queued ?
        if ( queuedTempo != tempo )
            handleTempoUpdate( queuedTempo, true );
    }

    android_CloseAudioDevice( p );

    // clear heap memory allocated before the thread loop
    delete inbuffer;
    delete channelBuffer;
    delete recbuffer;
    delete limiter;
    delete hpf;
}