void FilterGuiDemoAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    const double currentSampleRate = getSampleRate();

    // Handles the filter being added to an already-playing audio track, where some
    // hosts will not call the prepareToPlay() method before the first processBlock().
    if (filter1->getSampleRate() != currentSampleRate)
    {
        filter1->initializeFilter (currentSampleRate, defaultMinFilterFrequency, defaultMaxFilterFrequency);
    }

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // I've added this to avoid people getting screaming feedback
    // when they first compile the plugin, but obviously you don't need to keep
    // this code if your algorithm already fills all the output channels.
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // MAIN AUDIO PROCESSING BLOCK. PROCESS FILTER TWICE FOR STEREO CHANNELS
    for (int channel = 0; channel < getTotalNumInputChannels(); ++channel)
    {
        const float* input = buffer.getReadPointer (channel);
        float* output = buffer.getWritePointer (channel);

        for (int i = 0; i < numSamples; i++)
        {
            output[i] = filter1->processFilter (input[i], channel);
        }
    }
}
void AudioProcessor::setPlayConfigDetails (const int newNumIns, const int newNumOuts,
                                           const double newSampleRate, const int newBlockSize)
{
    const int oldNumInputs  = getTotalNumInputChannels();
    const int oldNumOutputs = getTotalNumOutputChannels();

    // if the user is using this method then they do not want any side-buses or aux outputs
    disableNonMainBuses (true);
    disableNonMainBuses (false);

    if (getTotalNumInputChannels() != newNumIns)
        setPreferredBusArrangement (true, 0, AudioChannelSet::canonicalChannelSet (newNumIns));

    if (getTotalNumOutputChannels() != newNumOuts)
        setPreferredBusArrangement (false, 0, AudioChannelSet::canonicalChannelSet (newNumOuts));

    // the processor may not support this arrangement at all
    jassert (newNumIns == getTotalNumInputChannels() && newNumOuts == getTotalNumOutputChannels());

    setRateAndBufferSizeDetails (newSampleRate, newBlockSize);

    if (oldNumInputs != newNumIns || oldNumOutputs != newNumOuts)
    {
        updateSpeakerFormatStrings();
        numChannelsChanged();
    }
}
void JuceVibAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (bypassed)
    {
        processBlockBypassed (buffer, midiMessages);
    }
    else
    {
        const int totalNumInputChannels  = getTotalNumInputChannels();
        const int totalNumOutputChannels = getTotalNumOutputChannels();

        // Set parameters
        lfoFreq = freqParam->get();
        lfoAmp  = depthParam->get();
        Vib->setFreq (lfoFreq * maxFreq);
        Vib->setDepth (lfoAmp);

        // In case we have more outputs than inputs, this code clears any output
        // channels that didn't contain input data, (because these aren't
        // guaranteed to be empty - they may contain garbage).
        // This is here to avoid people getting screaming feedback
        // when they first compile a plugin, but obviously you don't need to keep
        // this code if your algorithm always overwrites all the output channels.
        for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
            buffer.clear (i, 0, buffer.getNumSamples());

        float** ppfWriteBuffer = buffer.getArrayOfWritePointers();
        Vib->process (ppfWriteBuffer, ppfWriteBuffer, buffer.getNumSamples());
    }
}
void InstanceProcessor::loadPatch(std::string const& name, std::string const& path)
{
    suspendProcessing(true);
    if(isSuspended())
    {
        {
            releaseDsp();
            m_patch = pd::Patch(*this, name, path);
            pd::Patch patch(getPatch());
            if(patch.isValid())
            {
                m_patch_tie = pd::Tie(std::to_string(patch.getDollarZero()) + "-playhead");
            }
            else
            {
                m_patch_tie = pd::Tie();
                sendConsoleError("Camomile can't find the patch : " + name);
            }
        }
        parametersChanged();
        prepareDsp(getTotalNumInputChannels(), getTotalNumOutputChannels(),
                   AudioProcessor::getSampleRate(), getBlockSize());
        pd::PatchManager::notifyListeners();
    }
    suspendProcessing(false);
}
void TestFilterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    M2S.process (buffer);

    _corrCoeff = corrCoeff.findCorrCoeff (buffer.getArrayOfReadPointers(), buffer.getNumSamples());

    pPPM->ppmProcess (buffer.getArrayOfReadPointers(), buffer.getNumSamples());

    for (int channel = 0; channel < buffer.getNumChannels(); channel++)
    {
        _peakVal[channel] = pPPM->getPeak (channel);
    }
}
void processBlock (AudioSampleBuffer& buffer, MidiBuffer&) override
{
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    AudioSampleBuffer mainInputOutput = busArrangement.getBusBuffer (buffer, true, 0);
    AudioSampleBuffer sideChainInput  = busArrangement.getBusBuffer (buffer, true, 1);

    float alphaCopy     = *alpha;
    float thresholdCopy = *threshold;

    for (int j = 0; j < buffer.getNumSamples(); ++j)
    {
        // Mix the side-chain channels down to a single mono control value.
        float mixedSamples = 0.0f;

        for (int i = 0; i < sideChainInput.getNumChannels(); ++i)
            mixedSamples += sideChainInput.getReadPointer (i) [j];

        mixedSamples /= static_cast<float> (sideChainInput.getNumChannels());

        // One-pole low-pass smoothing of the side-chain level.
        lowPassCoeff = (alphaCopy * lowPassCoeff) + ((1.0f - alphaCopy) * mixedSamples);

        // Open the gate for one second whenever the smoothed level crosses the threshold.
        if (lowPassCoeff >= thresholdCopy)
            sampleCountDown = (int) getSampleRate();

        // very inefficient way of doing this
        for (int i = 0; i < mainInputOutput.getNumChannels(); ++i)
            *mainInputOutput.getWritePointer (i, j) = sampleCountDown > 0 ? *mainInputOutput.getReadPointer (i, j)
                                                                          : 0.0f;

        if (sampleCountDown > 0)
            --sampleCountDown;
    }
}
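// The gate above smooths the side-chain level with a one-pole low-pass,
// y[n] = a * y[n-1] + (1 - a) * x[n]. A minimal standalone sketch of that
// smoother, assuming the caller supplies the coefficient directly (the struct
// name and interface below are illustrative, not part of the plugin above):
struct OnePoleSmoother
{
    float coeff = 0.99f;   // closer to 1.0 = slower, smoother response
    float state = 0.0f;

    void setCoefficient (float newCoeff) noexcept   { coeff = newCoeff; }

    float processSample (float input) noexcept
    {
        // weighted average of the previous output and the new input
        state = coeff * state + (1.0f - coeff) * input;
        return state;
    }
};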
void IAAEffectProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer&)
{
    const float gain = *parameters.getRawParameterValue ("gain");

    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();
    const int numSamples = buffer.getNumSamples();

    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // Apply the gain to the samples using a ramp to avoid discontinuities in
    // the audio between processed buffers.
    for (int channel = 0; channel < totalNumInputChannels; ++channel)
    {
        buffer.applyGainRamp (channel, 0, numSamples, previousGain, gain);

        meterListeners.call (&IAAEffectProcessor::MeterListener::handleNewMeterValue,
                             channel,
                             buffer.getMagnitude (channel, 0, numSamples));
    }

    previousGain = gain;

    // Now ask the host for the current time so we can store it to be displayed later.
    updateCurrentTimeInfoFromHost (lastPosInfo);
}
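// For reference, the per-sample equivalent of the applyGainRamp() call above:
// the gain is interpolated linearly from the value used in the previous block
// to the current parameter value, which avoids audible steps ("zipper noise").
// A minimal sketch, assuming a raw float buffer of length numSamples
// (illustrative helper, not part of the plugin above):
static void applyLinearGainRamp (float* samples, int numSamples, float startGain, float endGain)
{
    if (numSamples <= 0)
        return;

    const float increment = (endGain - startGain) / (float) numSamples;
    float gain = startGain;

    for (int i = 0; i < numSamples; ++i)
    {
        samples[i] *= gain;   // scale each sample by the current ramp value
        gain += increment;
    }
}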
void ZenGuitestAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    for (int channel = 0; channel < totalNumInputChannels; ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);

        // ..do something to the data...
    }

    /*float* leftData = buffer.getWritePointer(0);  //leftData references left channel now
    float* rightData = buffer.getWritePointer(1);   //right data references right channel now

    for (long i = 0; i < buffer.getNumSamples(); i++)
    {
        leftData[i] = 0;
        rightData[i] = 0;
    }*/
}
//==============================================================================
void TestFilterAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need.
    pPPM->initInstance (sampleRate, samplesPerBlock, getTotalNumInputChannels());
    M2S.init (sampleRate);
}
void DaalDelAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages)
{
    ScopedNoDenormals noDenormals;
    auto totalNumInputChannels  = getTotalNumInputChannels();
    auto totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (auto i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // ====
    // Lengths for circular buffer
    const int bufferLength = buffer.getNumSamples();
    const int delayBufferLength = _delayBuffer.getNumSamples();

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    // Make sure to reset the state if your inner loop is processing
    // the samples and the outer loop is handling the channels.
    // Alternatively, you can process the samples with the channels
    // interleaved by keeping the same state.
    for (int channel = 0; channel < totalNumInputChannels; ++channel)
    {
        //auto* channelData = buffer.getWritePointer (channel);
        // ..do something to the data...

        // Set up circular buffer
        const float* bufferData = buffer.getReadPointer(channel);
        const float* delayBufferData = _delayBuffer.getReadPointer(channel);
        float* dryBuffer = buffer.getWritePointer(channel);

        // Apply gains (now do this before getting from delay)
        applyDryWetToBuffer(buffer, channel, bufferLength, dryBuffer);

        // Copy data from main to delay buffer
        fillDelayBuffer(channel, bufferLength, delayBufferLength, bufferData, delayBufferData);

        // Copy data from delay buffer to output buffer
        getFromDelayBuffer(buffer, channel, bufferLength, delayBufferLength, bufferData, delayBufferData);

        // Feedback
        feedbackDelay(channel, bufferLength, delayBufferLength, dryBuffer);
    }

    _writePosition += bufferLength;       // Increment
    _writePosition %= delayBufferLength;  // Wrap around position index

    // Update values from tree
    updateTreeParams();
}
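// fillDelayBuffer() and getFromDelayBuffer() above are project helpers that are
// not shown here. A plausible sketch of the "copy the incoming block into the
// ring buffer" half, written as a free function over the same data the member
// version would use (a hypothetical reconstruction under those assumptions,
// not DaalDel's actual implementation):
static void copyIntoDelayBuffer (juce::AudioBuffer<float>& delayBuffer, int channel,
                                 int writePosition, const float* bufferData, int bufferLength)
{
    const int delayBufferLength = delayBuffer.getNumSamples();

    if (delayBufferLength > bufferLength + writePosition)
    {
        // Enough room before the end of the ring buffer: copy in one go.
        delayBuffer.copyFrom (channel, writePosition, bufferData, bufferLength);
    }
    else
    {
        // Wrap around: fill up to the end, then continue from the start.
        const int remaining = delayBufferLength - writePosition;
        delayBuffer.copyFrom (channel, writePosition, bufferData, remaining);
        delayBuffer.copyFrom (channel, 0, bufferData + remaining, bufferLength - remaining);
    }
}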
//==============================================================================
void BeatboxVoxAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const auto totalNumInputChannels  = getTotalNumInputChannels();
    const auto totalNumOutputChannels = getTotalNumOutputChannels();
    const auto sampleRate = getSampleRate();
    const auto numSamples = buffer.getNumSamples();

    //Reset the noise synth if triggered
    midiMessages.addEvent(MidiMessage::noteOff(1, noiseNoteNumber), 0);

    classifier.processAudioBuffer(buffer.getReadPointer(0), numSamples);

    //This is used for configuring the onset detector settings from the GUI
    if (classifier.noteOnsetDetected())
    {
        if (usingOSDTestSound.load())
        {
            triggerOSDTestSound(midiMessages);
        }
        else if (classifier.getNumBuffersDelayed() > 0)
        {
            triggerNoise(midiMessages);
        }
    }

    const auto sound = classifier.classify();

    switch (sound)
    {
        case soundLabel::KickDrum:
            triggerKickDrum(midiMessages);
            break;
        case soundLabel::SnareDrum:
            triggerSnareDrum(midiMessages);
            break;
        case soundLabel::HiHat:
            triggerHiHat(midiMessages);
            break;
        default:
            break;
    }

    /** Now that classification is complete, clear the input buffer/signal.
     *  We only want the synth response as output, not a blend of the input
     *  vocal signal and the synth output.
     **/
    buffer.clear();

    if (usingOSDTestSound.load())
        osdTestSynth.renderNextBlock(buffer, midiMessages, 0, buffer.getNumSamples());
    else
        drumSynth.renderNextBlock(buffer, midiMessages, 0, buffer.getNumSamples());

    //Not outputting midi so clear after rendering synths
    midiMessages.clear();
}
//==============================================================================
void DaalDelAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need.
    const int numInputChannels = getTotalNumInputChannels();
    const int delayBufferSize  = (int) (20 * (sampleRate + samplesPerBlock)); // 20 seconds (plus a bit)

    _delayBuffer.setSize(numInputChannels, delayBufferSize);

    _sampleRate = sampleRate;
}
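// The delay buffer above is sized to roughly 20 seconds of audio at the current
// sample rate; a delay line needs the same time-to-samples relationship for its
// read offset. A minimal sketch of that conversion (illustrative helper, not
// part of the plugin above):
#include <cmath>

static int delayTimeToSamples (double delaySeconds, double sampleRate)
{
    // e.g. 0.5 s at 44.1 kHz -> 22050 samples
    return (int) std::round (delaySeconds * sampleRate);
}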
void PluginProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    (void) midiMessages;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
bool AudioProcessor::applyBusLayouts (const BusesLayout& layouts)
{
    if (layouts == getBusesLayout())
        return true;

    const int numInputBuses  = getBusCount (true);
    const int numOutputBuses = getBusCount (false);

    const int oldNumberOfIns  = getTotalNumInputChannels();
    const int oldNumberOfOuts = getTotalNumOutputChannels();

    if (layouts.inputBuses.size()  != numInputBuses
     || layouts.outputBuses.size() != numOutputBuses)
        return false;

    for (int busIdx = 0; busIdx < numInputBuses; ++busIdx)
    {
        Bus& bus = *getBus (true, busIdx);
        const AudioChannelSet& set = layouts.getChannelSet (true, busIdx);

        bus.layout = set;

        if (! set.isDisabled())
            bus.lastLayout = set;
    }

    for (int busIdx = 0; busIdx < numOutputBuses; ++busIdx)
    {
        Bus& bus = *getBus (false, busIdx);
        const AudioChannelSet& set = layouts.getChannelSet (false, busIdx);

        bus.layout = set;

        if (! set.isDisabled())
            bus.lastLayout = set;
    }

    const bool channelNumChanged = (oldNumberOfIns  != getTotalNumInputChannels()
                                 || oldNumberOfOuts != getTotalNumOutputChannels());
    audioIOChanged (false, channelNumChanged);

    return true;
}
void AudioProcessor::setPlayConfigDetails (const int newNumIns, const int newNumOuts,
                                           const double newSampleRate, const int newBlockSize)
{
    bool success = true;

    if (getTotalNumInputChannels() != newNumIns)
        success &= setChannelLayoutOfBus (true, 0, AudioChannelSet::canonicalChannelSet (newNumIns));

    if (getTotalNumOutputChannels() != newNumOuts)
        success &= setChannelLayoutOfBus (false, 0, AudioChannelSet::canonicalChannelSet (newNumOuts));

    // if the user is using this method then they do not want any side-buses or aux outputs
    success &= disableNonMainBuses();
    jassert (success);

    // the processor may not support this arrangement at all
    jassert (success && newNumIns == getTotalNumInputChannels() && newNumOuts == getTotalNumOutputChannels());

    setRateAndBufferSizeDetails (newSampleRate, newBlockSize);
}
void JuceVibAudioProcessor::processBlockBypassed (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
}
void ATKBassPreampAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Forward any parameter changes to the corresponding ATK filters.
    if (*parameters.getRawParameterValue ("gain") != old_gain)
    {
        old_gain = *parameters.getRawParameterValue ("gain");
        levelFilter.set_volume_db(old_gain);
    }
    if (*parameters.getRawParameterValue ("bass") != old_bass)
    {
        old_bass = *parameters.getRawParameterValue ("bass");
        toneFilter.set_low((old_bass + 1) / 2);
    }
    if (*parameters.getRawParameterValue ("medium") != old_medium)
    {
        old_medium = *parameters.getRawParameterValue ("medium");
        toneFilter.set_middle((old_medium + 1) / 2);
    }
    if (*parameters.getRawParameterValue ("high") != old_high)
    {
        old_high = *parameters.getRawParameterValue ("high");
        toneFilter.set_high((old_high + 1) / 2);
    }
    if (*parameters.getRawParameterValue ("volume") != old_volume)
    {
        old_volume = *parameters.getRawParameterValue ("volume");
        volumeFilter.set_volume(-std::pow(10., old_volume / 20));
    }
    if (*parameters.getRawParameterValue ("drywet") != old_drywet)
    {
        old_drywet = *parameters.getRawParameterValue ("drywet");
        dryWetFilter.set_dry(old_drywet / 100);
    }

    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    assert(totalNumInputChannels == totalNumOutputChannels);
    assert(totalNumOutputChannels == 1);

    inFilter.set_pointer(buffer.getReadPointer(0), buffer.getNumSamples());
    outFilter.set_pointer(buffer.getWritePointer(0), buffer.getNumSamples());

    outFilter.process(buffer.getNumSamples());
}
void Ambix_mirrorAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    int NumSamples = buffer.getNumSamples();

    // save old parameters for interpolation (start ramp)
    _gain_factors = gain_factors;

    calcParams();

    for (int acn = 0; acn < getTotalNumInputChannels(); acn++)
    {
        buffer.applyGainRamp(acn, 0, NumSamples, _gain_factors.getUnchecked(acn), gain_factors.getUnchecked(acn));
    }
}
void Ambix_rotatorAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (_new_params)
    {
        _Sh_transf = Sh_transf; // buffer old values
        calcParams();           // calc new transformation matrix
    }

    int NumSamples = buffer.getNumSamples();

    output_buffer.setSize(buffer.getNumChannels(), NumSamples);
    output_buffer.clear();

    int num_out_ch = jmin(AMBI_CHANNELS, getTotalNumOutputChannels());
    int num_in_ch  = jmin(AMBI_CHANNELS, getTotalNumInputChannels());

    // 0th channel is invariant!
    output_buffer.addFrom(0, 0, buffer, 0, 0, NumSamples);

    for (int out = 1; out < num_out_ch; out++)
    {
        int n = (int) sqrtf(out); // order
        int in_start = n * n;
        int in_end = jmin((n + 1) * (n + 1), num_in_ch);

        for (int in = in_start; in < in_end; in++)
        {
            if (!_new_params)
            {
                if (Sh_transf(in, out) != 0.f)
                    output_buffer.addFrom(out, 0, buffer, in, 0, NumSamples, (float) Sh_transf(in, out));
            }
            else
            {
                if (_Sh_transf(in, out) != 0.f || Sh_transf(in, out) != 0.f)
                    output_buffer.addFromWithRamp(out, 0, buffer.getReadPointer(in), NumSamples,
                                                  (float) _Sh_transf(in, out), (float) Sh_transf(in, out));
            }
        }
    }

    if (_new_params)
        _new_params = false;

    buffer = output_buffer;
}
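// The rotator loop above relies on the ACN channel convention: the spherical
// harmonic with indices (n, m) sits at channel acn = n*n + n + m, so all
// channels sharing n occupy indices n*n .. (n+1)*(n+1) - 1 and n = floor(sqrt(acn)).
// A small sketch of that index arithmetic, analogous to the ACNtoLM() helper
// used by the widening processor further down (illustrative, not the project's
// own implementation):
#include <cmath>

static void acnToNM (int acn, int& n, int& m)
{
    n = (int) std::floor (std::sqrt ((double) acn));
    m = acn - n * n - n;   // ranges from -n to +n
}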
void InstanceProcessor::closePatch()
{
    suspendProcessing(true);
    if(isSuspended())
    {
        {
            releaseDsp();
            m_patch = pd::Patch();
            m_patch_tie = pd::Tie();
        }
        parametersChanged();
        prepareDsp(getTotalNumInputChannels(), getTotalNumOutputChannels(),
                   AudioProcessor::getSampleRate(), getBlockSize());
        pd::PatchManager::notifyListeners();
    }
    suspendProcessing(false);
}
//==============================================================================
bool AudioProcessor::setPreferredBusArrangement (bool isInput, int busIndex, const AudioChannelSet& preferredSet)
{
    const int oldNumInputs  = getTotalNumInputChannels();
    const int oldNumOutputs = getTotalNumOutputChannels();

    Array<AudioProcessorBus>& buses = isInput ? busArrangement.inputBuses
                                              : busArrangement.outputBuses;

    const int numBuses = buses.size();

    if (! isPositiveAndBelow (busIndex, numBuses))
        return false;

#ifdef JucePlugin_MaxNumInputChannels
    if (isInput && preferredSet.size() > JucePlugin_MaxNumInputChannels)
        return false;
#endif

#ifdef JucePlugin_MaxNumOutputChannels
    if (! isInput && preferredSet.size() > JucePlugin_MaxNumOutputChannels)
        return false;
#endif

    AudioProcessorBus& bus = buses.getReference (busIndex);

#ifdef JucePlugin_PreferredChannelConfigurations
    // the user is using the deprecated way to specify channel configurations
    if (numBuses > 0 && busIndex == 0)
    {
        const short channelConfigs[][2] = { JucePlugin_PreferredChannelConfigurations };
        const int numChannelConfigs = sizeof (channelConfigs) / sizeof (*channelConfigs);

        // we need the main bus in the opposite direction
        Array<AudioProcessorBus>& oppositeBuses = isInput ? busArrangement.outputBuses
                                                          : busArrangement.inputBuses;

        AudioProcessorBus* oppositeBus = (busIndex < oppositeBuses.size()) ? &oppositeBuses.getReference (0)
                                                                           : nullptr;

        // get the target number of channels
        const int mainBusNumChannels      = preferredSet.size();
        const int mainBusOppositeChannels = (oppositeBus != nullptr) ? oppositeBus->channels.size() : 0;
        const int dir = isInput ? 0 : 1;

        // find a compatible channel configuration on the opposite bus which is the closest match
        // to the current number of channels on that bus
        int distance = std::numeric_limits<int>::max();
        int bestConfiguration = -1;

        for (int i = 0; i < numChannelConfigs; ++i)
        {
            // is the configuration compatible with the preferred set
            if (channelConfigs[i][dir] == mainBusNumChannels)
            {
                const int configChannels = channelConfigs[i][dir ^ 1];
                const int channelDifference = std::abs (configChannels - mainBusOppositeChannels);

                if (channelDifference < distance)
                {
                    distance = channelDifference;
                    bestConfiguration = configChannels;

                    // we can exit if we found a perfect match
                    if (distance == 0)
                        break;
                }
            }
        }

        // unable to find a good configuration
        if (bestConfiguration == -1)
            return false;

        // did the number of channels change on the opposite bus?
        if (mainBusOppositeChannels != bestConfiguration && oppositeBus != nullptr)
        {
            // if the channels on the opposite bus are the same as the preferred set
            // then also copy over the layout information. If not, then assume
            // a canonical channel layout
            if (bestConfiguration == mainBusNumChannels)
                oppositeBus->channels = preferredSet;
            else
                oppositeBus->channels = AudioChannelSet::canonicalChannelSet (bestConfiguration);
        }
    }
#endif

    bus.channels = preferredSet;

    if (oldNumInputs != getTotalNumInputChannels() || oldNumOutputs != getTotalNumOutputChannels())
    {
        updateSpeakerFormatStrings();
        numChannelsChanged();
    }

    return true;
}
void Processor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{
    const int numInputChannels  = getTotalNumInputChannels();
    const int numOutputChannels = getTotalNumOutputChannels();
    const size_t samplesToProcess = buffer.getNumSamples();

    // Determine channel data
    const float* channelData0 = nullptr;
    const float* channelData1 = nullptr;
    if (numInputChannels == 1)
    {
        channelData0 = buffer.getReadPointer(0);
        channelData1 = buffer.getReadPointer(0);
    }
    else if (numInputChannels == 2)
    {
        channelData0 = buffer.getReadPointer(0);
        channelData1 = buffer.getReadPointer(1);
    }

    // Convolution
    _wetBuffer.clear();
    if (numInputChannels > 0 && numOutputChannels > 0)
    {
        float autoGain = 1.0f;
        if (getParameter(Parameters::AutoGainOn))
        {
            autoGain = DecibelScaling::Db2Gain(getParameter(Parameters::AutoGainDecibels));
        }

        // Convolve
        IRAgent* irAgent00 = getAgent(0, 0);
        if (irAgent00 && irAgent00->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 1)
        {
            irAgent00->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
            _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
        }

        IRAgent* irAgent01 = getAgent(0, 1);
        if (irAgent01 && irAgent01->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 2)
        {
            irAgent01->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
            _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
        }

        IRAgent* irAgent10 = getAgent(1, 0);
        if (irAgent10 && irAgent10->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 1)
        {
            irAgent10->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
            _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
        }

        IRAgent* irAgent11 = getAgent(1, 1);
        if (irAgent11 && irAgent11->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 2)
        {
            irAgent11->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
            _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
        }
    }

    // Stereo width
    if (numOutputChannels >= 2)
    {
        _stereoWidth.updateWidth(getParameter(Parameters::StereoWidth));
        _stereoWidth.process(_wetBuffer.getWritePointer(0), _wetBuffer.getWritePointer(1), samplesToProcess);
    }

    // Dry/wet gain
    {
        float dryGain0, dryGain1;
        _dryGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::DryDecibels)));
        _dryGain.getSmoothValues(samplesToProcess, dryGain0, dryGain1);
        buffer.applyGainRamp(0, samplesToProcess, dryGain0, dryGain1);
    }
    {
        float wetGain0, wetGain1;
        _wetGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::WetDecibels)));
        _wetGain.getSmoothValues(samplesToProcess, wetGain0, wetGain1);
        _wetBuffer.applyGainRamp(0, samplesToProcess, wetGain0, wetGain1);
    }

    // Level measurement (dry)
    if (numInputChannels == 1)
    {
        _levelMeasurementsDry[0].process(samplesToProcess, buffer.getReadPointer(0));
        _levelMeasurementsDry[1].reset();
    }
    else if (numInputChannels == 2)
    {
        _levelMeasurementsDry[0].process(samplesToProcess, buffer.getReadPointer(0));
        _levelMeasurementsDry[1].process(samplesToProcess, buffer.getReadPointer(1));
    }

    // Sum wet to dry signal
    {
        float dryOnGain0, dryOnGain1;
        _dryOn.updateValue(getParameter(Parameters::DryOn) ? 1.0f : 0.0f);
        _dryOn.getSmoothValues(samplesToProcess, dryOnGain0, dryOnGain1);
        buffer.applyGainRamp(0, samplesToProcess, dryOnGain0, dryOnGain1);
    }
    {
        float wetOnGain0, wetOnGain1;
        _wetOn.updateValue(getParameter(Parameters::WetOn) ? 1.0f : 0.0f);
        _wetOn.getSmoothValues(samplesToProcess, wetOnGain0, wetOnGain1);
        if (numOutputChannels > 0)
        {
            buffer.addFromWithRamp(0, 0, _wetBuffer.getReadPointer(0), samplesToProcess, wetOnGain0, wetOnGain1);
        }
        if (numOutputChannels > 1)
        {
            buffer.addFromWithRamp(1, 0, _wetBuffer.getReadPointer(1), samplesToProcess, wetOnGain0, wetOnGain1);
        }
    }

    // Level measurement (wet/out)
    if (numOutputChannels == 1)
    {
        _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getReadPointer(0));
        _levelMeasurementsWet[1].reset();
        _levelMeasurementsOut[0].process(samplesToProcess, buffer.getReadPointer(0));
        _levelMeasurementsOut[1].reset();
    }
    else if (numOutputChannels == 2)
    {
        _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getReadPointer(0));
        _levelMeasurementsWet[1].process(samplesToProcess, _wetBuffer.getReadPointer(1));
        _levelMeasurementsOut[0].process(samplesToProcess, buffer.getReadPointer(0));
        _levelMeasurementsOut[1].process(samplesToProcess, buffer.getReadPointer(1));
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = numInputChannels; i < numOutputChannels; ++i)
    {
        buffer.clear(i, 0, buffer.getNumSamples());
    }

    // Update beats per minute info
    float beatsPerMinute = 0.0f;
    juce::AudioPlayHead* playHead = getPlayHead();
    if (playHead)
    {
        juce::AudioPlayHead::CurrentPositionInfo currentPositionInfo;
        if (playHead->getCurrentPosition(currentPositionInfo))
        {
            beatsPerMinute = static_cast<float>(currentPositionInfo.bpm);
        }
    }
    if (::fabs(_beatsPerMinute.exchange(beatsPerMinute) - beatsPerMinute) > 0.001f)
    {
        notifyAboutChange();
    }
}
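// DecibelScaling::Db2Gain() above is the standard decibel-to-linear conversion,
// gain = 10^(dB / 20). A minimal sketch of that mapping (illustrative helper,
// not the project's DecibelScaling class):
#include <cmath>

static float decibelsToGain (float decibels)
{
    // e.g. 0 dB -> 1.0, -6 dB -> ~0.5, -20 dB -> 0.1
    return std::pow (10.0f, decibels / 20.0f);
}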
void Mcfx_delayAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // compute read position
    _buf_read_pos = _buf_write_pos - _delay_smpls;

    if (_buf_read_pos < 0)
        _buf_read_pos = _buf_size + _buf_read_pos - 1;

    // std::cout << "size : " << _buf_size << " read pos: " << _buf_read_pos << std::endl;

    // resize buffer if necessary
    if (_delay_buffer.getNumChannels() < buffer.getNumChannels() || _delay_buffer.getNumSamples() < _buf_size)
    {
        // resize buffer
        _delay_buffer.setSize(buffer.getNumChannels(), _buf_size, true, true, false);
    }

    // write to the buffer
    if (_buf_write_pos + buffer.getNumSamples() < _buf_size)
    {
        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy straight into buffer
            _delay_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, buffer.getNumSamples());
        }
        // update write position
        _buf_write_pos += buffer.getNumSamples();
    }
    else // if buffer reaches end
    {
        int samples_to_write1 = _buf_size - _buf_write_pos;
        int samples_to_write2 = buffer.getNumSamples() - samples_to_write1;

        // std::cout << "spl_write1: " << samples_to_write1 << " spl_write2: " << samples_to_write2 << std::endl;

        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy until end
            _delay_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, samples_to_write1);

            // start copy to front
            _delay_buffer.copyFrom(ch, 0, buffer, ch, samples_to_write1, samples_to_write2);
        }
        // update write position
        _buf_write_pos = samples_to_write2;
    }

    // read from buffer
    if (_buf_read_pos + buffer.getNumSamples() < _buf_size)
    {
        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            buffer.copyFrom(ch, 0, _delay_buffer, ch, _buf_read_pos, buffer.getNumSamples());
        }
        // update read position
        _buf_read_pos += buffer.getNumSamples();
    }
    else
    {
        int samples_to_read1 = _buf_size - _buf_read_pos;
        int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy until end
            buffer.copyFrom(ch, 0, _delay_buffer, ch, _buf_read_pos, samples_to_read1);

            // start copy from front
            buffer.copyFrom(ch, samples_to_read1, _delay_buffer, ch, 0, samples_to_read2);
        }
        // update read position
        _buf_read_pos = samples_to_read2;
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
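// The read cursor above is placed _delay_smpls behind the write cursor, with a
// manual fix-up when the subtraction goes negative. The same offset can also be
// expressed with modular arithmetic; a minimal sketch of that form, assuming
// delaySamples < bufferSize (illustrative, not the project's own code):
static int delayedReadPosition (int writePosition, int delaySamples, int bufferSize)
{
    // e.g. writePosition 100, delaySamples 300, bufferSize 1024 -> 824
    return (writePosition - delaySamples + bufferSize) % bufferSize;
}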
void SynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    MidiBuffer Midi;
    int time;
    MidiMessage m;

    for (MidiBuffer::Iterator i(midiMessages); i.getNextEvent(m, time);)
    {
        // handle monophonic on/off of notes
        if (m.isNoteOn())
        {
            noteOn++;
        }
        if (m.isNoteOff())
        {
            noteOn--;
        }

        if (noteOn > 0)
        {
            monoNoteOn = 1.0f;
            env.reset();

            // handle the pitch of the note
            noteVal = m.getNoteNumber();
            osc.setF(m.getMidiNoteInHertz(noteVal));
        }
        else
        {
            monoNoteOn = 0.0f;
        }
    }

    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    for (int channel = 0; channel < totalNumOutputChannels; ++channel)
    {
        // just do the synth stuff on one channel.
        if (channel == 0)
        {
            for (int sample = 0; sample < buffer.getNumSamples(); ++sample)
            {
                // do this stuff here. it's terribly inefficient..
                freqValScaled  = 20000.0f * pow(freqP->get(), 3.0f);
                envValScaled   = 10000.0f * pow(envP->get(), 3.0f);
                speedValScaled = pow((1.0f - speedP->get()), 2.0f);
                oscValScaled   = (oscP->get() - 0.5f) * 70.0f;
                detValScaled   = (detP->get() - 0.5f) * 24.0f;

                filter.setFc(freqSmoothing.process(freqValScaled + (envValScaled * pow(env.process(), 3.0f))) / UPSAMPLING);
                env.setSpeed(speedValScaled);
                filter.setQ(qP->get());

                // note numbers (semitones) are converted to Hz below via exp(x / 17.31),
                // which is roughly 2^(x / 12) since 12 / ln(2) ~= 17.31
                float frequency = noteVal + 24.0f + oscValScaled + modOsc.process(0)
                                  + (driftSmoothing.process(random.nextFloat() - 0.5f) * 20.0f);
                float frequency2 = exp((frequency + detValScaled + (driftSmoothing2.process(random.nextFloat() - 0.5f) * 10.0f)) / 17.31f) / UPSAMPLING;
                frequency = exp(frequency / 17.31f) / UPSAMPLING;

                osc.setF(frequency);
                osc2.setF(frequency2);

                float monoNoteOn2 = ampSmoothing.process(monoNoteOn);

                float data = 0.0f;
                for (int i = 0; i < UPSAMPLING; i++)
                {
                    data = 20.0f * filter.process(0.1f * osc.process() + ampP->get() * 0.1f * osc2.process());
                }
                data *= monoNoteOn2;

                buffer.setSample(0, sample, data);
                buffer.setSample(1, sample, data);
            }
        }
    }
}
void InstanceProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    for(int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
    {
        buffer.clear(i, 0, buffer.getNumSamples());
    }

    bool infos = false;
    AudioPlayHead* playhead = getPlayHead();
    if(playhead && m_patch_tie)
    {
        infos = playhead->getCurrentPosition(m_playinfos);
    }

    lock();
    {
        m_midi.clear();
        if(infos)
        {
            m_playing_list.setFloat(0, m_playinfos.isPlaying);
            m_playing_list.setFloat(1, m_playinfos.timeInSeconds);
            sendMessageAnything(m_patch_tie, s_playing, m_playing_list);
            m_measure_list.setFloat(0, m_playinfos.bpm);
            m_measure_list.setFloat(1, m_playinfos.timeSigNumerator);
            m_measure_list.setFloat(2, m_playinfos.timeSigDenominator);
            m_measure_list.setFloat(3, m_playinfos.ppqPosition);
            m_measure_list.setFloat(4, m_playinfos.ppqPositionOfLastBarStart);
            sendMessageAnything(m_patch_tie, s_measure, m_measure_list);
        }
        for(size_t i = 0; i < m_parameters.size() && m_parameters[i].isValid(); ++i)
        {
            sendMessageFloat(m_parameters[i].getTie(), m_parameters[i].getValueNonNormalized());
        }

        MidiMessage message;
        MidiBuffer::Iterator it(midiMessages);
        int position = midiMessages.getFirstEventTime();
        while(it.getNextEvent(message, position))
        {
            if(message.isNoteOnOrOff())
            {
                sendMidiNote(message.getChannel(), message.getNoteNumber(), message.getVelocity());
            }
            else if(message.isController())
            {
                sendMidiControlChange(message.getChannel(), message.getControllerNumber(), message.getControllerValue());
            }
            else if(message.isPitchWheel())
            {
                sendMidiPitchBend(message.getChannel(), message.getPitchWheelValue());
            }
            else if(message.isChannelPressure())
            {
                sendMidiAfterTouch(message.getChannel(), message.getChannelPressureValue());
            }
            else if(message.isAftertouch())
            {
                sendMidiPolyAfterTouch(message.getChannel(), message.getNoteNumber(), message.getAfterTouchValue());
            }
            else if(message.isProgramChange())
            {
                sendMidiProgramChange(message.getChannel(), message.getProgramChangeNumber());
            }
        }
    }
    midiMessages.clear();
    performDsp(buffer.getNumSamples(),
               getTotalNumInputChannels(), buffer.getArrayOfReadPointers(),
               getTotalNumOutputChannels(), buffer.getArrayOfWritePointers());
    midiMessages.swapWith(m_midi);
    unlock();
}
void AudioProcessorValueTreeStateDemoAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // I've added this to avoid people getting screaming feedback
    // when they first compile the plugin, but obviously you don't need to keep
    // this code if your algorithm already fills all the output channels.
    for (int i = getTotalNumInputChannels(); i < getTotalNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...

    // get parameters...
    // fParam1 = (float)0.8;  //clip
    // fParam2 = (float)0.50; //bits
    // fParam3 = (float)0.65; //rate
    // fParam4 = (float)0.9;  //postfilt
    // fParam5 = (float)0.58; //non-lin
    // fParam6 = (float)0.5;  //level
    const float clip     = *parameters.getRawParameterValue(PARAM_ID_CLIP);
    const float bits     = *parameters.getRawParameterValue(PARAM_ID_BITS);
    const float rate     = *parameters.getRawParameterValue(PARAM_ID_SAMPLERATE);
    const float postfilt = *parameters.getRawParameterValue(PARAM_ID_POSTFILT);
    const float nonlin   = *parameters.getRawParameterValue(PARAM_ID_NONLIN);
    const long nonLinearityMode = *parameters.getRawParameterValue(PARAM_ID_NONLIN_MODE);
    const float level    = *parameters.getRawParameterValue(PARAM_ID_LEVEL);
    const long sampleRateMode = (long) *parameters.getRawParameterValue(PARAM_ID_POSTFILT_MODE);

    long tn = (long) (getSampleRate() / rate);
    if (tn < 1)
        tn = 1;

    float mode;
    if (sampleRateMode == Parameters::kSampleRateMode_SampleAndHold)
        mode = 1.0f;
    else
        mode = 0.0f;

    // tcount = 1; // XXX when does this need to happen?

    float clp = powf(10.0f, clip / 20.0f);
    float fo2 = filterFreq(postfilt);
    float fi2 = (1.0f - fo2);
    fi2 = fi2 * fi2;
    fi2 = fi2 * fi2;

    float gi = powf(2.0f, (long)bits - 2); // XXX why is this 2 - 14 instead of 4 - 16?
    float go = 1.0f / gi;

    if (sampleRateMode == Parameters::kSampleRateMode_SampleAndHold)
        gi = -gi / (float)tn;
    else
        gi = -gi;

    float ga = powf(10.0f, level / 20.0f);

    float lin, lin2;
    lin = powf(10.0f, nonlin * -0.003f);
    if (nonLinearityMode == Parameters::kNonLinearityMode_Even)
        lin2 = lin;
    else
        lin2 = 1.0f;

    float b0 = buf0;
    float b1 = buf1, b2 = buf2, b3 = buf3, b4 = buf4, b5 = buf5;
    float b6 = buf6, b7 = buf7, b8 = buf8, b9 = buf9;
    long t = tcount;

    const float* input = buffer.getReadPointer(0);
    float* output  = buffer.getWritePointer(0);
    float* output2 = nullptr;
    if (buffer.getNumChannels() > 1)
        output2 = buffer.getWritePointer(1);

    for (int samp = 0; samp < buffer.getNumSamples(); samp++)
    {
        b0 = input[samp] + mode * b0;

        if (t >= tn)
        {
            t = 0;
            b5 = (float)(go * (long)(b0 * gi));
            if (b5 > 0.0f)
            {
                b5 = powf(b5, lin2);
                if (b5 > clp)
                    b5 = clp;
            }
            else
            {
                b5 = -powf(-b5, lin);
                if (b5 < -clp)
                    b5 = -clp;
            }
            b0 = 0.0f;
        }
        t++;

        b1 = fi2 * (b5 * ga) + fo2 * b1;
        b2 = b1 + fo2 * b2;
        b3 = b2 + fo2 * b3;
        b4 = b3 + fo2 * b4;
        b6 = fi2 * b4 + fo2 * b6;
        b7 = b6 + fo2 * b7;
        b8 = b7 + fo2 * b8;
        b9 = b8 + fo2 * b9;

        output[samp] = b9;
        if (output2 != nullptr)
            output2[samp] = b9;
    }

    if (fabsf(b1) < 1.0e-10f)
    {
        buf1 = 0.0f; buf2 = 0.0f; buf3 = 0.0f; buf4 = 0.0f;
        buf6 = 0.0f; buf7 = 0.0f; buf8 = 0.0f; buf9 = 0.0f;
        buf0 = 0.0f; buf5 = 0.0f;
    }
    else
    {
        buf1 = b1; buf2 = b2; buf3 = b3; buf4 = b4;
        buf6 = b6; buf7 = b7; buf8 = b8; buf9 = b9;
        buf0 = b0; buf5 = b5;
        tcount = t;
    }
}
void Ambix_wideningAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // backup old coefficients
    memcpy(_cos_coeffs, cos_coeffs, sizeof(float)*AMBI_ORDER*(BESSEL_APPR+1));
    memcpy(_sin_coeffs, sin_coeffs, sizeof(float)*AMBI_ORDER*(BESSEL_APPR+1));

    // calc new coefficients
    calcParams();

    //////////////////////
    // write to the buffer
    if (_buf_write_pos + buffer.getNumSamples() < _buf_size)
    {
        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy straight into buffer
            ring_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, buffer.getNumSamples());
        }
        // update write position
        _buf_write_pos += buffer.getNumSamples();
    }
    else // if buffer reaches end
    {
        int samples_to_write1 = _buf_size - _buf_write_pos;
        int samples_to_write2 = buffer.getNumSamples() - samples_to_write1;

        // std::cout << "spl_write1: " << samples_to_write1 << " spl_write2: " << samples_to_write2 << std::endl;

        for (int ch = 0; ch < buffer.getNumChannels(); ch++)
        {
            // copy until end
            ring_buffer.copyFrom(ch, _buf_write_pos, buffer, ch, 0, samples_to_write1);

            // start copy to front
            ring_buffer.copyFrom(ch, 0, buffer, ch, samples_to_write1, samples_to_write2);
        }
        // update write position
        _buf_write_pos = samples_to_write2;
    }

    // String debug = String::empty;

    /////////////////////////
    // compute read positions (tap delay times)
    // Q is computed in calcParams() (SampleRate dependent)
    for (int i = 0; i < BESSEL_APPR*2+1; i++)
    {
        _buf_read_pos[i] = _buf_write_pos - i*Q - buffer.getNumSamples();

        if (_buf_read_pos[i] < 0)
            _buf_read_pos[i] = _buf_size + _buf_read_pos[i]; // -1 ?

        // debug << _buf_read_pos[i] << " ";
    }
    // std::cout << "read pos: " << debug << std::endl;

    /////////////////////////
    // do the rotation
    buffer.clear();

    int fir_length = 2*BESSEL_APPR+1;
    if (single_sided)
        fir_length = BESSEL_APPR+1;

    for (int acn_out = 0; acn_out < getTotalNumOutputChannels(); acn_out++) // iterate over output channels
    {
        int l_out = 0;
        int m_out = 0;
        ACNtoLM(acn_out, l_out, m_out);

        for (int acn_in = 0; acn_in < getTotalNumInputChannels(); acn_in++) // iterate over input channels
        {
            int l_in = 0; // degree 0, 1, 2, 3, 4, ...
            int m_in = 0; // order ..., -2, -1, 0, 1, 2, ...
            ACNtoLM(acn_in, l_in, m_in);

            if (abs(m_out) == abs(m_in) && l_in == l_out)
            {
                // if degree and order match do something
                int pos_index = 0; // index of _buf_read_pos

                ///////////////
                // pass through terms (z symmetric, m=0)
                if (m_out == 0 && m_in == 0)
                {
                    if (!single_sided)
                        pos_index = BESSEL_APPR;

                    // read from buffer
                    if (_buf_read_pos[pos_index] + buffer.getNumSamples() < _buf_size)
                    {
                        buffer.copyFrom(acn_out, 0, ring_buffer, acn_out, _buf_read_pos[pos_index], buffer.getNumSamples());
                    }
                    else
                    {
                        int samples_to_read1 = _buf_size - _buf_read_pos[pos_index];
                        int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

                        // copy until end
                        buffer.copyFrom(acn_out, 0, ring_buffer, acn_out, _buf_read_pos[pos_index], samples_to_read1);

                        // start copy from front
                        buffer.copyFrom(acn_out, samples_to_read1, ring_buffer, acn_out, 0, samples_to_read2);
                    }
                }
                ///////////////
                // cosine terms
                else if (m_in < 0 && m_out < 0) // cosine
                {
                    for (int i = 0; i < fir_length; i++)
                    { // ITERATE BESSEL APPR
                        int j = abs(i - BESSEL_APPR);
                        if (single_sided)
                            j = i;

                        float _coeff = _cos_coeffs[l_in-1][j];
                        float coeff  = cos_coeffs[l_in-1][j];

                        if (coeff != 0.f)
                        { // skip zero coefficients
                            // read from buffer
                            if (_buf_read_pos[i] + buffer.getNumSamples() < _buf_size)
                            {
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), buffer.getNumSamples(), _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], buffer.getNumSamples(), coeff);
                            }
                            else
                            {
                                int samples_to_read1 = _buf_size - _buf_read_pos[i];
                                int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

                                // copy until end
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), samples_to_read1, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], samples_to_read1, coeff);

                                // start copy from front
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, samples_to_read1, ring_buffer.getReadPointer(acn_in, 0), samples_to_read2, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, samples_to_read1, ring_buffer, acn_in, 0, samples_to_read2, coeff);
                            }
                        }
                    } // end iterate BESSEL_APPR
                }
                ///////////////
                // -sine terms
                else if (m_in < 0 && m_out > 0) // -sine
                {
                    for (int i = 0; i < fir_length; i++)
                    { // ITERATE BESSEL APPR
                        int j = abs(i - BESSEL_APPR);
                        if (single_sided)
                            j = i;

                        float _coeff = -_sin_coeffs[l_in-1][j];
                        float coeff  = -sin_coeffs[l_in-1][j];

                        if (coeff != 0.f)
                        { // skip zero coefficients
                            // read from buffer
                            if (_buf_read_pos[i] + buffer.getNumSamples() < _buf_size)
                            {
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), buffer.getNumSamples(), _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], buffer.getNumSamples(), coeff);
                            }
                            else
                            {
                                int samples_to_read1 = _buf_size - _buf_read_pos[i];
                                int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

                                // copy until end
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), samples_to_read1, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], samples_to_read1, coeff);

                                // start copy from front
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, samples_to_read1, ring_buffer.getReadPointer(acn_in, 0), samples_to_read2, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, samples_to_read1, ring_buffer, acn_in, 0, samples_to_read2, coeff);
                            }
                        }
                    } // end iterate BESSEL_APPR
                }
                ///////////////
                // cosine terms
                else if (m_in > 0 && m_out > 0) // cosine
                {
                    for (int i = 0; i < fir_length; i++)
                    { // ITERATE BESSEL APPR
                        int j = abs(i - BESSEL_APPR);
                        if (single_sided)
                            j = i;

                        float _coeff = _cos_coeffs[l_in-1][j];
                        float coeff  = cos_coeffs[l_in-1][j];

                        if (coeff != 0.f)
                        { // skip zero coefficients
                            // read from buffer
                            if (_buf_read_pos[i] + buffer.getNumSamples() < _buf_size)
                            {
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), buffer.getNumSamples(), _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], buffer.getNumSamples(), coeff);
                            }
                            else
                            {
                                int samples_to_read1 = _buf_size - _buf_read_pos[i];
                                int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

                                // copy until end
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), samples_to_read1, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], samples_to_read1, coeff);

                                // start copy from front
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, samples_to_read1, ring_buffer.getReadPointer(acn_in, 0), samples_to_read2, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, samples_to_read1, ring_buffer, acn_in, 0, samples_to_read2, coeff);
                            }
                        }
                    } // end iterate BESSEL_APPR
                }
                ///////////////
                // sine terms
                else if (m_in > 0 && m_out < 0) // sine
                {
                    for (int i = 0; i < fir_length; i++)
                    { // ITERATE BESSEL APPR
                        int j = abs(i - BESSEL_APPR);
                        if (single_sided)
                            j = i;

                        float _coeff = _sin_coeffs[l_in-1][j];
                        float coeff  = sin_coeffs[l_in-1][j];

                        if (coeff != 0.f)
                        { // skip zero coefficients
                            // read from buffer
                            if (_buf_read_pos[i] + buffer.getNumSamples() < _buf_size)
                            {
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), buffer.getNumSamples(), _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], buffer.getNumSamples(), coeff);
                            }
                            else
                            {
                                int samples_to_read1 = _buf_size - _buf_read_pos[i];
                                int samples_to_read2 = buffer.getNumSamples() - samples_to_read1;

                                // copy until end
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, 0, ring_buffer.getReadPointer(acn_in, _buf_read_pos[i]), samples_to_read1, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, 0, ring_buffer, acn_in, _buf_read_pos[i], samples_to_read1, coeff);

                                // start copy from front
                                if (_coeff != coeff) // interpolate?
                                    buffer.addFromWithRamp(acn_out, samples_to_read1, ring_buffer.getReadPointer(acn_in, 0), samples_to_read2, _coeff, coeff);
                                else
                                    buffer.addFrom(acn_out, samples_to_read1, ring_buffer, acn_in, 0, samples_to_read2, coeff);
                            }
                        }
                    } // end iterate BESSEL_APPR
                }
            }
        }
    }
}