void DiskInUGenInternal::processBlock(bool& shouldDelete, const unsigned int /*blockID*/, const int /*channel*/) throw() { int numChannels = getNumChannels(); int blockSize = uGenOutput.getBlockSize(); for(int i = 0; i < numChannels; i++) { bufferData[i] = proxies[i]->getSampleData(); } AudioSampleBuffer audioSampleBuffer(bufferData, numChannels, blockSize); AudioSourceChannelInfo info; info.buffer = &audioSampleBuffer; info.numSamples = blockSize; info.startSample = 0; if(filePlayer.isPlaying()) filePlayer.getNextAudioBlock(info); else info.clearActiveBufferRegion(); if(isDone()) { shouldDelete = shouldDelete ? true : shouldDeleteValue; } }
void SampleAudioSource::getNextAudioBlock(const AudioSourceChannelInfo& buf) { ScopedLock l(lock_); if (not (source_ and isRunning_)) { buf.clearActiveBufferRegion(); return; } currentTime_ += buf.numSamples; SampleTime overrun(currentTime_ - length_); if (overrun < 0) { source_->getNextAudioBlock(buf); panGainPlayer_->apply(buf); return; } AudioSourceChannelInfo b = buf; b.numSamples -= overrun; source_->getNextAudioBlock(b); panGainPlayer_->apply(b); b.startSample += b.numSamples; b.numSamples = overrun; b.clearActiveBufferRegion(); isRunning_ = false; callback_(callbackData_); // Might block - perhaps we should do this in another thread? }
void LAudioAppComponent::getNextAudioBlock( const AudioSourceChannelInfo& bufferToFill ) { if(stopped || !audioOpen || !hasCallback("getNextAudioBlock") || (audioOpen && audioSourcePlayer.getCurrentSource()==nullptr) ) { bufferToFill.clearActiveBufferRegion(); return; } if(hasCallback("getNextAudioBlock")) { MessageManagerLock mml (Thread::getCurrentThread()); if (! mml.lockWasGained()) { DBG("CAN'T GET LOCK"); return; // another thread is trying to kill us! } LAudioSampleBuffer audioBuffer(Ls, *bufferToFill.buffer); callback("getNextAudioBlock", 0, { bufferToFill.startSample, bufferToFill.numSamples, bufferToFill.buffer->getNumChannels(), new LRefBase("AudioSampleBuffer", &audioBuffer) }); if(volume>-1) { if(volume) bufferToFill.buffer->applyGain(volume); else bufferToFill.clearActiveBufferRegion(); } } }
void SequenceAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
    const ScopedLock sl (lock);

    // Determine which event (if any) should sound at the current playback
    // position, and how many samples remain before the next event boundary.
    updatePlayingEvent (currentPlayingPosition);
    int nextSamples = samplesToNextEvent (currentPlayingPosition);

    if (currentPlayingPart >= 0 && currentPlayingPart < list.size ())
    {
        // Clamp to this block's length; -1 flags "boundary already passed".
        int sampleToDo = jmax (-1, jmin (nextSamples, info.numSamples));

        if (sampleToDo >= 0)
        {
            // Render the head of the block from the currently-playing event.
            AudioSourceChannelInfo subInfo;
            subInfo.buffer = info.buffer;
            subInfo.startSample = info.startSample;
            subInfo.numSamples = sampleToDo;

            AudioEventHolder* event = list.getUnchecked (currentPlayingPart);
            event->resampleSource->getNextAudioBlock (subInfo);

            if (sampleToDo < info.numSamples)
            {
                // The event boundary falls inside this block: advance to the
                // next event and render (or clear) the remainder of the block.
                DBG ("played last: " + String (sampleToDo) + " of " + String (info.numSamples));
                subInfo.startSample = info.startSample + sampleToDo;
                subInfo.numSamples = info.numSamples - sampleToDo;
                currentPlayingPosition += subInfo.numSamples;
                updatePlayingEvent (currentPlayingPosition);

                if (currentPlayingPart >= 0 && currentPlayingPart < list.size ())
                {
                    AudioEventHolder* event = list.getUnchecked (currentPlayingPart);
                    event->resampleSource->getNextAudioBlock (subInfo);
                }
                else
                {
                    subInfo.clearActiveBufferRegion();
                }
                DBG ("played last: " + String (subInfo.numSamples) + " of " + String (info.numSamples));
            }

            // NOTE(review): when the split branch above ran, subInfo.numSamples
            // now holds the tail length, which was already added to
            // currentPlayingPosition — so the position advances by the tail
            // TWICE rather than by head + tail. Looks suspicious; verify
            // against the intended timeline semantics.
            currentPlayingPosition += subInfo.numSamples;
        }
        else
        {
            // Boundary already passed: silence this block and move on.
            info.clearActiveBufferRegion();
            currentPlayingPosition += info.numSamples;
        }
    }
    else
    {
        // No active event: output silence but keep the timeline advancing.
        info.clearActiveBufferRegion();
        currentPlayingPosition += info.numSamples;
    }
}
// Pulls audio from `source` in blocks of up to samplesPerBlock samples and
// writes them through this writer. Returns false as soon as a write fails.
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source, int numSamplesToRead, const int samplesPerBlock)
{
    const int maxChans = 128;
    AudioSampleBuffer tempBuffer (getNumChannels(), samplesPerBlock);
    int* buffers [maxChans];

    while (numSamplesToRead > 0)
    {
        const int numToDo = jmin (numSamplesToRead, samplesPerBlock);

        AudioSourceChannelInfo info;
        info.buffer = &tempBuffer;
        info.startSample = 0;
        info.numSamples = numToDo;
        // Pre-clear so additive sources mix onto silence, not stale data.
        info.clearActiveBufferRegion();
        source.getNextAudioBlock (info);

        // Build a null-terminated array of channel pointers for write().
        // (assumes tempBuffer has fewer than maxChans channels — TODO confirm)
        int i;
        for (i = maxChans; --i >= 0;)
            buffers[i] = 0;
        for (i = tempBuffer.getNumChannels(); --i >= 0;)
            buffers[i] = (int*) tempBuffer.getSampleData (i, 0);

        if (! isFloatingPoint())
        {
            // Convert each channel's float samples to full-scale ints
            // in place, clipping at +/-1.0.
            int** bufferChan = buffers;

            while (*bufferChan != 0)
            {
                int* b = *bufferChan++;

                // float -> int
                for (int j = numToDo; --j >= 0;)
                {
                    const double samp = *(const float*) b;

                    if (samp <= -1.0)
                        *b++ = INT_MIN;
                    else if (samp >= 1.0)
                        *b++ = INT_MAX;
                    else
                        *b++ = roundToInt (INT_MAX * samp);
                }
            }
        }

        if (! write ((const int**) buffers, numToDo))
            return false;

        numSamplesToRead -= numToDo;
    }

    return true;
}
// Audio Processing (split in "processAmbisonicBuffer" and "fillNextAudioBlock" to enable // IR recording: using the same methods as the main thread) void MainContentComponent::getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) { // check if update required if( updateNumFreqBandrequired ){ sourceImagesHandler.setFilterBankSize(numFreqBands); updateNumFreqBandrequired = false; // trigger general update: must re-dimension abs.coeffs and trigger update future->current, see in function sourceImagesHandler.updateFromOscHandler(oscHandler); } // fill buffer with audiofile data audioIOComponent.getNextAudioBlock(bufferToFill); // execute main audio processing if( !isRecordingIr ) { processAmbisonicBuffer( bufferToFill.buffer ); if( audioRecorder.isRecording() ){ recordAmbisonicBuffer(); } fillNextAudioBlock( bufferToFill.buffer ); } // simply clear output buffer else { bufferToFill.clearActiveBufferRegion(); } // check if source images need update (i.e. update called by OSC handler // while source images in the midst of a crossfade if( sourceImageHandlerNeedsUpdate && sourceImagesHandler.crossfadeOver ) { sourceImagesHandler.updateFromOscHandler(oscHandler); requireDelayLineSizeUpdate = true; sourceImageHandlerNeedsUpdate = false; } }
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override { bufferToFill.clearActiveBufferRegion(); const int bufferSize = buffer->getNumSamples(); const int samplesNeeded = bufferToFill.numSamples; const int samplesToCopy = jmin (bufferSize - position, samplesNeeded); if (samplesToCopy > 0) { int maxInChannels = buffer->getNumChannels(); int maxOutChannels = bufferToFill.buffer->getNumChannels(); if (! playAcrossAllChannels) maxOutChannels = jmin (maxOutChannels, maxInChannels); for (int i = 0; i < maxOutChannels; ++i) bufferToFill.buffer->copyFrom (i, bufferToFill.startSample, *buffer, i % maxInChannels, position, samplesToCopy); } position += samplesNeeded; if (looping) position %= bufferSize; }
void MixerAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info) { const ScopedLock sl (lock); if (inputs.size() > 0) { inputs.getUnchecked(0)->getNextAudioBlock (info); if (inputs.size() > 1) { tempBuffer.setSize (jmax (1, info.buffer->getNumChannels()), info.buffer->getNumSamples()); AudioSourceChannelInfo info2 (&tempBuffer, 0, info.numSamples); for (int i = 1; i < inputs.size(); ++i) { inputs.getUnchecked(i)->getNextAudioBlock (info2); for (int chan = 0; chan < info.buffer->getNumChannels(); ++chan) info.buffer->addFrom (chan, info.startSample, tempBuffer, chan, 0, info.numSamples); } } } else { info.clearActiveBufferRegion(); } }
void SampleSynthAudioSource::getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill) { bufferToFill.clearActiveBufferRegion(); MidiBuffer incomingMidi; midiCollector_.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples); process(*bufferToFill.buffer, incomingMidi, bufferToFill.numSamples); }
void BufferingAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
    const ScopedLock sl (bufferStartPosLock);

    // Intersect the requested span [nextPlayPos, nextPlayPos + numSamples)
    // with the span the background thread has filled so far, expressing the
    // result relative to the start of this block.
    const int validStart = (int) (jlimit (bufferValidStart, bufferValidEnd, nextPlayPos) - nextPlayPos);
    const int validEnd = (int) (jlimit (bufferValidStart, bufferValidEnd, nextPlayPos + info.numSamples) - nextPlayPos);

    if (validStart == validEnd)
    {
        // total cache miss
        info.clearActiveBufferRegion();
    }
    else
    {
        if (validStart > 0)
            info.buffer->clear (info.startSample, validStart); // partial cache miss at start

        if (validEnd < info.numSamples)
            info.buffer->clear (info.startSample + validEnd, info.numSamples - validEnd); // partial cache miss at end

        if (validStart < validEnd)
        {
            for (int chan = jmin (numberOfChannels, info.buffer->getNumChannels()); --chan >= 0;)
            {
                jassert (buffer.getNumSamples() > 0);

                // The cache is circular, so the valid span may wrap past the
                // end of the cache buffer and need copying in two pieces.
                const int startBufferIndex = (int) ((validStart + nextPlayPos) % buffer.getNumSamples());
                const int endBufferIndex = (int) ((validEnd + nextPlayPos) % buffer.getNumSamples());

                if (startBufferIndex < endBufferIndex)
                {
                    // Contiguous case: one copy.
                    info.buffer->copyFrom (chan, info.startSample + validStart,
                                           buffer, chan, startBufferIndex,
                                           validEnd - validStart);
                }
                else
                {
                    // Wrapped case: copy the tail of the cache, then the head.
                    const int initialSize = buffer.getNumSamples() - startBufferIndex;

                    info.buffer->copyFrom (chan, info.startSample + validStart,
                                           buffer, chan, startBufferIndex, initialSize);
                    info.buffer->copyFrom (chan, info.startSample + validStart + initialSize,
                                           buffer, chan, 0,
                                           (validEnd - validStart) - initialSize);
                }
            }
        }

        // NOTE(review): the play position only advances on a (partial) hit;
        // a total cache miss leaves nextPlayPos unchanged — confirm intended.
        nextPlayPos += info.numSamples;
    }
}
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override { // Your audio-processing code goes here! // For more details, see the help for AudioProcessor::getNextAudioBlock() // Right now we are not producing any data, in which case we need to clear the buffer // (to prevent the output of random noise) bufferToFill.clearActiveBufferRegion(); }
// Streams audio from `source` into this writer in chunks of up to
// samplesPerBlock samples; returns false on the first failed write.
bool AudioFormatWriter::writeFromAudioSource (AudioSource& source, int numSamplesToRead, const int samplesPerBlock)
{
    AudioSampleBuffer tempBuffer (getNumChannels(), samplesPerBlock);

    for (int remaining = numSamplesToRead; remaining > 0;)
    {
        const int chunk = jmin (remaining, samplesPerBlock);

        // Pre-clear so additive sources mix onto silence.
        AudioSourceChannelInfo info (&tempBuffer, 0, chunk);
        info.clearActiveBufferRegion();
        source.getNextAudioBlock (info);

        if (! writeFromAudioSampleBuffer (tempBuffer, 0, chunk))
            return false;

        remaining -= chunk;
    }

    return true;
}
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) { // the synth always adds its output to the audio buffer, so we have to clear it // first.. bufferToFill.clearActiveBufferRegion(); // fill a midi buffer with incoming messages from the midi input. MidiBuffer incomingMidi; midiCollector.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples); // and now get the synth to process the midi events and generate its output. synth.renderNextBlock (*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples); }
void DrumMachine::getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill)
{
    // std::cout << "drum machine!!" << std::endl;
    auto& transport = audioEngine.getTransport();

    if (transport.isPlaying()) {
        int frameStartSamples = transport.getFrameStartSamples();
        float frameStartTicks = transport.getFrameStartTicks();
        float frameEndTicks = transport.getFrameEndTicks();

        // Only fire pattern steps when this audio frame crosses a whole-tick
        // boundary.
        if ((int) frameStartTicks < (int) frameEndTicks) {
            int tick = (int) frameEndTicks;
            if (patternLength != 0) {
                // Wrap the tick into the pattern.
                int ntick = tick % patternLength;
                for (int voice = 0; voice < NUM_DRUM_VOICES; voice++) {
                    if (mute[voice]) continue;
                    if (pattern[voice][ntick] > 0) {
                        // we need to queue the appropriate note in the drum machine's synth.
                        int offset = transport.ticksToSamples(tick) - frameStartSamples;
                        // NOTE(review): notes whose offset computes to <= 0 are
                        // silently dropped — confirm a tick landing exactly on
                        // the frame start (offset == 0) should not sound.
                        if (offset > 0) {
                            MidiMessage msg = MidiMessage::noteOn(1, voice, (float) 1.0);
                            msg.setTimeStamp(offset);
                            midiCollector.addMessageToQueue(msg);
                        }
                    }
                }
            }
        }
    }

    // the synth always adds its output to the audio buffer, so we have to clear it
    // first..
    bufferToFill.clearActiveBufferRegion();

    // fill a midi buffer with incoming messages from the midi input.
    MidiBuffer incomingMidi;
    midiCollector.removeNextBlockOfMessages(incomingMidi, bufferToFill.numSamples);

    // pass these messages to the keyboard state so that it can update the component
    // to show on-screen which keys are being pressed on the physical midi keyboard.
    // This call will also add midi messages to the buffer which were generated by
    // the mouse-clicking on the on-screen keyboard.
    keyboardState.processNextMidiBuffer(incomingMidi, 0, bufferToFill.numSamples, true);

    // and now get the synth to process the midi events and generate its output.
    synth.renderNextBlock(*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples);

    // Fixed output attenuation on the stereo pair.
    bufferToFill.buffer->applyGain(0, 0, bufferToFill.numSamples, 0.2);
    bufferToFill.buffer->applyGain(1, 0, bufferToFill.numSamples, 0.2);
}
void Source::getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) { // calculations float trackCutoff = parameters->getUnchecked(Controller::getParameterId(Controller::params::cutoff, trackId))->getScaledValue(); float trackDistort = parameters->getUnchecked(Controller::getParameterId(Controller::params::distort, trackId))->getScaledValue(); float trackLevel = parameters->getUnchecked(Controller::getParameterId(Controller::params::level, trackId))->getScaledValue(); float trackMute = parameters->getUnchecked(Controller::getParameterId(Controller::params::mute, trackId))->getScaledValue(); float globalCutoff = parameters->getUnchecked(Controller::getParameterId(Controller::params::cutoff))->getScaledValue(); float globalDistort = parameters->getUnchecked(Controller::getParameterId(Controller::params::distort))->getScaledValue(); float modulationCutoff = sampler.currentModulations != nullptr ? Parameter::scale(Controller::params::cutoff, true, sampler.currentModulations->getUnchecked(Mixer::mods::cutoff)) : 0.0; float cutoff = fmax(0, fmin(1, trackCutoff + globalCutoff + modulationCutoff)); float distort = 1 - fmax(0, fmin(0.93, trackDistort + globalDistort)); float level = trackLevel * !trackMute; // setup bufferToFill.clearActiveBufferRegion(); midiCollector.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples); // render sampler sampler.renderNextBlock(*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples); // dsp: distortion float* outL = bufferToFill.buffer->getWritePointer (0, 0); float* outR = bufferToFill.buffer->getWritePointer (1, 0); for (int i=bufferToFill.numSamples; i>=0; --i) { outL[i] = foldback(outL[i], distort); outR[i] = foldback(outR[i], distort); } // dsp: filter filterL.setCoefficients(IIRCoefficients::makeLowPass(sampleRate, MidiMessage::getMidiNoteInHertz(cutoff * 128))); filterR.setCoefficients(IIRCoefficients::makeLowPass(sampleRate, MidiMessage::getMidiNoteInHertz(cutoff * 128))); 
filterL.processSamples(bufferToFill.buffer->getWritePointer(0), bufferToFill.buffer->getNumSamples()); filterR.processSamples(bufferToFill.buffer->getWritePointer(1), bufferToFill.buffer->getNumSamples()); // dsp: level bufferToFill.buffer->applyGainRamp(0, bufferToFill.numSamples, lastLevel, level); lastLevel = level; }
void ChannelRemappingAudioSource::getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill)
{
    const ScopedLock sl (lock);

    // Resize the scratch buffer to the channel layout the wrapped source expects.
    buffer.setSize (requiredNumberOfChannels, bufferToFill.numSamples, false, false, true);

    const int numChans = bufferToFill.buffer->getNumChannels();

    int i;

    // Copy each mapped input channel into the scratch buffer; channels with
    // no valid mapping are fed silence.
    for (i = 0; i < buffer.getNumChannels(); ++i)
    {
        const int remappedChan = getRemappedInputChannel (i);

        if (remappedChan >= 0 && remappedChan < numChans)
        {
            buffer.copyFrom (i, 0, *bufferToFill.buffer,
                             remappedChan,
                             bufferToFill.startSample,
                             bufferToFill.numSamples);
        }
        else
        {
            buffer.clear (i, 0, bufferToFill.numSamples);
        }
    }

    // Render through the wrapped source (remappedInfo presumably wraps the
    // scratch `buffer` — set up elsewhere; verify in the class ctor).
    remappedInfo.numSamples = bufferToFill.numSamples;

    source->getNextAudioBlock (remappedInfo);

    // Route the source's output channels back out through the output map,
    // summing into a freshly-cleared destination.
    bufferToFill.clearActiveBufferRegion();

    for (i = 0; i < requiredNumberOfChannels; ++i)
    {
        const int remappedChan = getRemappedOutputChannel (i);

        if (remappedChan >= 0 && remappedChan < numChans)
        {
            bufferToFill.buffer->addFrom (remappedChan, bufferToFill.startSample,
                                          buffer, i, 0, bufferToFill.numSamples);
        }
    }
}
void AudioTransportSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
{
    const ScopedLock sl (callbackLock);

    inputStreamEOF = false;

    if (masterSource != nullptr && ! stopped)
    {
        masterSource->getNextAudioBlock (info);

        if (! playing)
        {
            // just stopped playing, so fade out the last block..
            // (256-sample ramp to zero, then silence for the remainder)
            for (int i = info.buffer->getNumChannels(); --i >= 0;)
                info.buffer->applyGainRamp (i, info.startSample, jmin (256, info.numSamples), 1.0f, 0.0f);

            if (info.numSamples > 256)
                info.buffer->clear (info.startSample + 256, info.numSamples - 256);
        }

        // Detect the end of a non-looping stream and notify listeners.
        if (positionableSource->getNextReadPosition() > positionableSource->getTotalLength() + 1
             && ! positionableSource->isLooping())
        {
            playing = false;
            inputStreamEOF = true;
            sendChangeMessage();
        }

        stopped = ! playing;

        // Ramp from last block's gain to the current gain so that gain
        // changes don't produce zipper noise.
        for (int i = info.buffer->getNumChannels(); --i >= 0;)
        {
            info.buffer->applyGainRamp (i, info.startSample, info.numSamples, lastGain, gain);
        }
    }
    else
    {
        // No source attached, or already stopped: output silence.
        info.clearActiveBufferRegion();
        stopped = true;
    }

    lastGain = gain;
}
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override { // the synth always adds its output to the audio buffer, so we have to clear it // first.. bufferToFill.clearActiveBufferRegion(); // fill a midi buffer with incoming messages from the midi input. MidiBuffer incomingMidi; midiCollector.removeNextBlockOfMessages (incomingMidi, bufferToFill.numSamples); // pass these messages to the keyboard state so that it can update the component // to show on-screen which keys are being pressed on the physical midi keyboard. // This call will also add midi messages to the buffer which were generated by // the mouse-clicking on the on-screen keyboard. keyboardState.processNextMidiBuffer (incomingMidi, 0, bufferToFill.numSamples, true); // and now get the synth to process the midi events and generate its output. synth.renderNextBlock (*bufferToFill.buffer, incomingMidi, 0, bufferToFill.numSamples); }
void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override
{
    // Your audio-processing code goes here!
    auto buffer = bufferToFill.buffer;

    // Publish the block's overall magnitude of channel 0 for the gesture
    // interpreter.
    GestureInterpretor::audioRMS = buffer->getMagnitude(0, 0, bufferToFill.numSamples);

    const float* ptr = buffer->getArrayOfReadPointers()[0];

    // For more details, see the help for AudioProcessor::getNextAudioBlock()
    // Copy channel 0 into the interpreter's capture buffer.
    // NOTE(review): this reads from sample 0 rather than
    // bufferToFill.startSample, and assumes audioInBuffer can hold at least
    // numSamples entries — verify both against the callers.
    for (int i = 0; i < bufferToFill.numSamples; i++) {
        GestureInterpretor::audioInBuffer[i] = ptr[i];
    }

    // Right now we are not producing any data, in which case we need to clear the buffer
    // (to prevent the output of random noise)
    bufferToFill.clearActiveBufferRegion();
}
/* This method generates the actual audio samples. In this example the buffer is filled with a sine wave whose frequency and amplitude are controlled by the mouse position. */ void getNextAudioBlock (const AudioSourceChannelInfo& bufferToFill) override { bufferToFill.clearActiveBufferRegion(); const float originalPhase = phase; for (int chan = 0; chan < bufferToFill.buffer->getNumChannels(); ++chan) { phase = originalPhase; float* const channelData = bufferToFill.buffer->getWritePointer (chan, bufferToFill.startSample); for (int i = 0; i < bufferToFill.numSamples ; ++i) { channelData[i] = amplitude * std::sin (phase); // increment the phase step for the next sample phase = std::fmod (phase + phaseDelta, float_Pi * 2.0f); } } }
void ContentComponent::getNextAudioBlock (const AudioSourceChannelInfo& buf)
{
    // No tick source yet: output silence.
    if (! tick)
    {
        buf.clearActiveBufferRegion();
        return;
    }

    tick->getAudioSource().getNextAudioBlock (buf);

    // Apply a flat gain when unchanged; otherwise ramp from the previous
    // gain to the new one to avoid zipper noise, and remember it.
    if (gain == lastGain)
    {
        buf.buffer->applyGain (gain);
    }
    else
    {
        buf.buffer->applyGainRamp (buf.startSample, buf.numSamples, lastGain, gain);
        lastGain = (float) gain;
    }
}
void AudioEngine::getNextAudioBlock(const AudioSourceChannelInfo &buffer_to_fill)
{
    // Time the whole callback so overruns can be logged at the end.
    double start = Time::getMillisecondCounterHiRes();

    if (buffer_to_fill.buffer->getNumChannels() <= 0)
    {
        Logger::getCurrentLogger()->writeToLog("Error: No Input Audio Channel");
    }
    else
    {
        if (_controller.isRecording())
        {
            int buffer_size = buffer_to_fill.numSamples;
            int hop_size = _ring_buffer->getHopSize();

            // Gather write pointers for each input channel.
            float** channel_data = 0;
            channel_data = new float*[_num_input_channels];
            for(int i = 0; i < _num_input_channels; i++)
            {
                channel_data[i] = buffer_to_fill.buffer->getWritePointer (i, buffer_to_fill.startSample);
            }

            // Mix all input channels down to a mono average stream.
            float data = 0;
            for(int i = 0; i < buffer_size; i++)
            {
                data = 0;
                for (int j = 0; j < _num_input_channels; j++)
                {
                    data = data + channel_data[j][i];
                }
                float data_avg = data / _num_input_channels;
                _channel_data_avg.push_back(data_avg);
            }

            // Run pitch tracking on every complete hop-sized frame, pushing
            // the detected MIDI pitch into the model once recording time is
            // past the count-in (beat >= 0).
            while (_channel_data_avg.size() >= hop_size)
            {
                _ring_buffer->addNextBufferToFrame(_channel_data_avg);
                float midi_pitch_of_window = _pitch_tracker->findPitchInMidi(_ring_buffer);
                if (_controller.getRecordingTimeInBeats() >= 0)
                {
                    cout << midi_pitch_of_window << endl;
                    _controller.pushPitchToModel(midi_pitch_of_window, _controller.getRecordingTimeInBeats());
                }
                _channel_data_avg.erase(_channel_data_avg.begin(), _channel_data_avg.begin() + hop_size);
            }

            // The input has been consumed; output silence before (possibly)
            // mixing in the reference pitch / metronome below.
            buffer_to_fill.clearActiveBufferRegion();

            // Update number of samples recorded
            double curr_beat = _controller.getRecordingTimeInBeats();
            _controller.addRecordingSamples(buffer_to_fill.numSamples);
            double next_beat = _controller.getRecordingTimeInBeats();

            // Play Reference Pitch (during the count-in beats only)
            if (next_beat < 0 && next_beat >= -_controller.getTimeSignatureNumerator() )
            {
                playReferencePitch(buffer_to_fill, buffer_to_fill.numSamples);
            }

            // Play Metronome (when this block crosses a beat boundary)
            if (floor(curr_beat) != floor(next_beat))
            {
                playMetronome(buffer_to_fill, floor(next_beat));
            }

            delete [] channel_data;
        }
        else if (_controller.isPlaying())
        {
            // Update number of samples played
            double curr_beat = _controller.getPlaybackTimeInBeats();
            _controller.addPlaybackSamples(buffer_to_fill.numSamples);
            double next_beat = _controller.getPlaybackTimeInBeats();

            // Play Chords (when this block crosses a beat boundary)
            if (floor(curr_beat) != floor(next_beat))
            {
                playChords(buffer_to_fill, floor(next_beat));
            }

            buffer_to_fill.clearActiveBufferRegion();

            // Render queued MIDI (including on-screen keyboard events)
            // through the playback synth.
            MidiBuffer incoming_midi;
            _midi_collector.removeNextBlockOfMessages(incoming_midi, buffer_to_fill.numSamples);
            const int start_sample = 0;
            const bool inject_indirect_events = true;
            _midi_keyboard_state.processNextMidiBuffer(incoming_midi, start_sample, buffer_to_fill.numSamples, inject_indirect_events);
            _playback_synth.renderNextBlock(*buffer_to_fill.buffer, incoming_midi, start_sample, buffer_to_fill.numSamples);

            // Play Metronome
            if (floor(curr_beat) != floor(next_beat))
            {
                playMetronome(buffer_to_fill, floor(next_beat));
            }

            // Check if playback should stop (reached the end of the recording)
            if (next_beat > _controller.getRecordingTimeInBeats())
            {
                _controller.setStopFlag(true);
            }
        }
        else
        {
            // Neither recording nor playing: output silence.
            buffer_to_fill.clearActiveBufferRegion();
            cout << "Entered Limbo State" << endl;
        }
    }

    // Warn when the callback took longer than the available block period
    // (the threshold presumably corresponds to the buffer duration in ms —
    // verify against the device setup).
    double duration = Time::getMillisecondCounterHiRes() - start;
    //cout << duration << endl;
    if (duration > 11.6099773)
    {
        Logger::getCurrentLogger()->writeToLog ("Time Exceeded in Audio Callback");
    }
}
void LoopMachine::getNextAudioBlockFixedBpm(const AudioSourceChannelInfo& bufferToFill)
{
    // Keep the private fixed-BPM transport's play state in sync with the
    // engine's main transport, then advance it by this block.
    auto& transport = audioEngine.getTransport();
    bool mainTransportPlaying = transport.isPlaying();
    if (fixedBpmTransport.isPlaying() != mainTransportPlaying) {
        if (mainTransportPlaying)
            fixedBpmTransport.play();
        else
            fixedBpmTransport.stop();
    }
    fixedBpmTransport.updateTransport(bufferToFill.numSamples);

    // Loops mix additively below, so start from silence.
    bufferToFill.clearActiveBufferRegion();

    if (fixedBpmTransport.isPlaying()) {
        float frameStartTicks = fixedBpmTransport.getFrameStartTicks();
        float frameEndTicks = fixedBpmTransport.getFrameEndTicks();

        // Loop switches are quantised to the next whole tick; fades of
        // FADE_TIME_MS are positioned so they end exactly on that boundary.
        float nextTick = (float) ((int)frameStartTicks + 1);
        float fadeLengthTicks = fixedBpmTransport.millisToTicks(FADE_TIME_MS);
        float fadeStartTicks = nextTick - fadeLengthTicks;
        float fadeEndTicks = nextTick;

        // Just before the fade window opens, pull pending state changes
        // from the UI-side ring buffer.
        if (frameStartTicks < fadeStartTicks && frameEndTicks >= fadeStartTicks)
            drainRingBuffer();

        // std::cout << "MPD: CPP: LoopMachine::getNextAudioBlock: reality check! " << ((int)nextTick/4) << std::endl;

        // Render each loop group according to its (prevState -> state) transition.
        for (int groupIx = 0; groupIx < groupIxToLoopInfo.size(); groupIx++) {
            int state = audioState[groupIx];
            int prevState = prevAudioState[groupIx];

            if (state == LOOP_INACTIVE && prevState == LOOP_INACTIVE) {
                // we were doing nothing last period, and we're still doing nothing: do nothing
            } else if (state == LOOP_INACTIVE && prevState != LOOP_INACTIVE) {
                // for this loop group, we are fading out: going from an active loop to silence.
                processFadeOut(groupIx, prevState, frameStartTicks, frameEndTicks, fadeStartTicks, fadeEndTicks, bufferToFill);
            } else if (!wasPlaying || (state != LOOP_INACTIVE && prevState == LOOP_INACTIVE)) {
                // for this loop group, we are fading in: going from silence to signal.
                setReaderPos(groupIx, state, fadeStartTicks, frameStartTicks);
                processFadeIn(groupIx, state, frameStartTicks, frameEndTicks, fadeStartTicks, fadeEndTicks, bufferToFill);
            } else if (prevState != state) {
                // for this loop group, the loop being played has switched: do a crossfade
                processFadeOut(groupIx, prevState, frameStartTicks, frameEndTicks, fadeStartTicks, fadeEndTicks, bufferToFill);
                setReaderPos(groupIx, state, fadeStartTicks, frameStartTicks);
                processFadeIn(groupIx, state, frameStartTicks, frameEndTicks, fadeStartTicks, fadeEndTicks, bufferToFill);
            } else {
                // we're playing the same thing as in the last period.
                if (!wasPlaying) {
                    // Transport just restarted: rewind the reader.
                    auto src = (*groupIxToLoopInfo[groupIx])[state];
                    src->reader->setNextReadPosition(0);
                }
                processBlock(groupIx, state, 0, bufferToFill.numSamples, bufferToFill);
            }
        }

        // When this frame crosses the tick boundary, retire any finished
        // one-shot loops, publish the change via the end-ring-buffer, and
        // snapshot the audio state for the next block.
        if (frameStartTicks < fadeEndTicks && frameEndTicks >= fadeEndTicks) {
            bool changes = false;
            for (int groupIx = 0; groupIx < groupIxToLoopInfo.size(); groupIx++) {
                int state = audioState[groupIx];
                if (state != LOOP_INACTIVE) {
                    auto type = (*groupIxToLoopInfo[groupIx])[state]->type;
                    auto src = (*groupIxToLoopInfo[groupIx])[state]->reader;
                    if (type == LoopType::ONE_SHOT
                          && audioState[groupIx] != LOOP_INACTIVE
                          && src != nullptr
                          && src->getNextReadPosition() >= src->getTotalLength()) {
                        // (groupIx, prevState) is done playing.
                        // now we need to plop a message in the ring buffer
                        audioState[groupIx] = LOOP_INACTIVE;
                        userState[groupIx] = LOOP_INACTIVE;
                        int ix = ++endReserveIx & RINGBUF_SIZE_M1; // == ++reserveIx % RINGBUF_SIZE
                        endringbuf[ix][0] = groupIx;
                        endringbuf[ix][1] = state;
                        endCommitIx++;
                        changes = true;
                        src->setNextReadPosition(0);
                        // std::cout << "MPD: handling messaages audio thread" << std::endl;
                    }
                }
            }
            if (changes)
                sendChangeMessage();

            std::memcpy(prevAudioState, audioState, sizeof(audioState));
            wasPlaying = true;
        }

        // bufferToFill.buffer->applyGain(0, 0, bufferToFill.numSamples, 0.5);
        // bufferToFill.buffer->applyGain(1, 0, bufferToFill.numSamples, 0.5);
        // wasPlaying = true;
    }

    // Reset the "was playing" latch once the transport has stopped.
    if (wasPlaying && !fixedBpmTransport.isPlaying())
        wasPlaying = false;
}