// Renders one audio block of the reverb: updates all filter coefficients from
// the (smoothed) parameter set, then runs the wet path through pre-delay,
// early-reflection tap delays, a parallel comb bank, serial allpasses and
// shelving EQ, and finally mixes wet/dry back into the host buffer.
// Only the stereo (2-in/2-out) case is processed; other layouts pass through.
// midiMessages is unused (pure audio effect).
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // advance parameter smoothing before reading any values below
    smoothParameters();
    const int numInputChannels = getNumInputChannels();
    int numSamples = buffer.getNumSamples();

    // set up the parameters to be used
    float preDelay = (float)params[PREDELAY].getSmoothedValue();
    float earlyDecay = (float)params[EARLYDECAY].getSmoothedNormalisedValue();
    earlyDecay = sqrt(sqrt(earlyDecay));                    // 4th root: perceptual curve for the 0..1 control
    float late = (float)params[EARLYLATEMIX].getSmoothedNormalisedValue();
    float early = 1.0f - late;                              // early/late mix is complementary
    float fbCoeff = (float)params[FBCOEFF].getSmoothedNormalisedValue();
    fbCoeff = -sqrt(sqrt(fbCoeff));                         // negative feedback coefficient, 4th-root curve
    float delayTime = (float)params[DELTIME].getSmoothedValue();
    float filterCf = (float)params[FILTERCF].getSmoothedValue();
    float allpassCoeff = (float)params[DIFFUSION].getSmoothedNormalisedValue();
    float spread1 = (float)params[SPREAD].getSmoothedNormalisedValue();
    float spread2 = 1.0f - spread1;
    float lowEQGain = (float)decibelsToAbsolute(params[LOWEQ].getSmoothedValue());
    float highEQGain = (float)decibelsToAbsolute(params[HIGHEQ].getSmoothedValue());
    float wet = (float)params[WETDRYMIX].getSmoothedNormalisedValue();
    float dry = 1.0f - wet;
    // width offsets the right-channel delay times relative to the left to widen the image
    float width = (spread1-0.5f) * 0.1f * delayTime;

    // we can only deal with 2-in, 2-out at the moment
    if (numInputChannels == 2)
    {
        // pre-delay section
        preDelayFilterL.setDelayTime(currentSampleRate, preDelay);
        preDelayFilterR.setDelayTime(currentSampleRate, preDelay);

        // early reflections section
        int roomShape = roundFloatToInt(params[ROOMSHAPE].getValue());
        float delayCoeff = 0.08f * delayTime;
        // rebuild the tap set only when the room shape actually changes
        if (roomShape != prevRoomShape)
        {
            delayLineL.removeAllTaps();
            delayLineR.removeAllTaps();
            // NOTE(review): indexing earlyReflectionCoeffs[roomShape-3] implies ROOMSHAPE
            // parameter values start at 3 — confirm against the parameter's declared range
            for(int i = 0; i < 5; i++)
            {
                delayLineL.addTapAtTime(earlyReflectionCoeffs[roomShape-3][i], currentSampleRate);
                delayLineR.addTapAtTime(earlyReflectionCoeffs[roomShape-3][i], currentSampleRate);
            }
            // we have to set this here incase the delay time has not changed
            delayLineL.setTapSpacingExplicitly(delayCoeff);
            delayLineR.setTapSpacingExplicitly(delayCoeff + spread1);
            prevRoomShape = roomShape;
        }
        // keep tap spacing tracking the (possibly smoothed) delay time each block;
        // right channel is offset by spread1 for stereo decorrelation
        delayLineL.setTapSpacing(delayCoeff);
        delayLineL.scaleFeedbacks(earlyDecay);
        delayLineR.setTapSpacing(delayCoeff + spread1);
        delayLineR.scaleFeedbacks(earlyDecay);

        // comb filter section
        // delayTime is mutated cumulatively through this loop (and the allpass loop
        // below) so each stage gets a progressively scaled delay; the small random
        // term de-tunes the delays slightly each block to reduce metallic resonances.
        // NOTE(review): this makes the output non-deterministic per block — presumably
        // intentional, but worth confirming.
        for (int i = 0; i < 8; ++i)
        {
            delayTime *= filterMultCoeffs[i];
            delayTime += Random::getSystemRandom().nextInt(100)*0.0001;
            setupFilter(combFilterL[i], fbCoeff, delayTime, filterCf);
            setupFilter(combFilterR[i], fbCoeff, delayTime + width, filterCf);
        }

        // allpass section (continues scaling the same delayTime value)
        for (int i = 0; i < 4; ++i)
        {
            delayTime *= allpassMultCoeffs[i];
            delayTime -= Random::getSystemRandom().nextInt(100)*0.0001;
            allpassFilterL[i].setGain(allpassCoeff);
            allpassFilterL[i].setDelayTime(currentSampleRate, delayTime);
            allpassFilterR[i].setGain(allpassCoeff);
            allpassFilterR[i].setDelayTime(currentSampleRate, delayTime + width);
        }

        // final EQ section: fixed shelf corner frequencies of 500 Hz / 3 kHz, Q = 1
        lowEQL.makeLowShelf(currentSampleRate, 500, 1, lowEQGain);
        lowEQR.makeLowShelf(currentSampleRate, 500, 1, lowEQGain);
        highEQL.makeHighShelf(currentSampleRate, 3000, 1, highEQGain);
        highEQR.makeHighShelf(currentSampleRate, 3000, 1, highEQGain);

        //========================================================================
        //	 Processing
        //========================================================================
        int noSamples = buffer.getNumSamples();
        int noChannels = buffer.getNumChannels();

        // create a copy of the input buffer so we can apply a wet/dry mix later
        AudioSampleBuffer wetBuffer(noChannels, noSamples);
        wetBuffer.copyFrom(0, 0, buffer, 0, 0, noSamples);
        wetBuffer.copyFrom(1, 0, buffer, 1, 0, noSamples);

        // mono mix wet buffer (used for stereo spread later)
        float *pfWetL = wetBuffer.getSampleData(0);
        float *pfWetR = wetBuffer.getSampleData(1);
        while (--numSamples >= 0)
        {
            *pfWetL = *pfWetR = (0.5f * (*pfWetL + *pfWetR));
            pfWetL++;
            pfWetR++;
        }
        numSamples = buffer.getNumSamples();    // restore the counter consumed by the loop above

        // apply the pre-delay to the wet buffer
        preDelayFilterL.processSamples(wetBuffer.getSampleData(0), noSamples);
        preDelayFilterR.processSamples(wetBuffer.getSampleData(1), noSamples);

        // create a buffer to hold the early reflections
        AudioSampleBuffer earlyReflections(noChannels, noSamples);
        earlyReflections.copyFrom(0, 0, wetBuffer, 0, 0, noSamples);
        earlyReflections.copyFrom(1, 0, wetBuffer, 1, 0, noSamples);

        // and process the early reflections
        delayLineL.processSamples(earlyReflections.getSampleData(0), noSamples);
        delayLineR.processSamples(earlyReflections.getSampleData(1), noSamples);

        // create a buffer to hold the late reverb
        AudioSampleBuffer lateReverb(noChannels, noSamples);
        lateReverb.clear();

        float *pfLateL = lateReverb.getSampleData(0);
        float *pfLateR = lateReverb.getSampleData(1);
        // re-point the wet pointers at the start of the buffer (they were advanced
        // by the mono-mix loop above)
        pfWetL = wetBuffer.getSampleData(0);
        pfWetR = wetBuffer.getSampleData(1);

        // comb filter section: the 8 combs run in parallel, each adding its
        // output into the (initially cleared) late-reverb buffer
        for (int i = 0; i < 8; ++i)
        {
            combFilterL[i].processSamplesAdding(pfWetL, pfLateL, noSamples);
            combFilterR[i].processSamplesAdding(pfWetR, pfLateR, noSamples);
        }

        // allpass filter section: run in series over the late reverb for diffusion
        for (int i = 0; i < 4; ++i)
        {
            allpassFilterL[i].processSamples(lateReverb.getSampleData(0), noSamples);
            allpassFilterR[i].processSamples(lateReverb.getSampleData(1), noSamples);
        }

        // clear wet buffer
        wetBuffer.clear();

        // add early reflections to wet buffer
        wetBuffer.addFrom(0, 0, earlyReflections, 0, 0, noSamples, early);
        wetBuffer.addFrom(1, 0, earlyReflections, 1, 0, noSamples, early);

        // add late reverb to wet buffer (0.1 compensates for the summed comb gain)
        lateReverb.applyGain(0, noSamples, 0.1f);
        wetBuffer.addFrom(0, 0, lateReverb, 0, 0, noSamples, late);
        wetBuffer.addFrom(1, 0, lateReverb, 1, 0, noSamples, late);

        // final EQ (pfWetL/pfWetR still point at the start of the wet buffer)
        lowEQL.processSamples(pfWetL, noSamples);
        lowEQR.processSamples(pfWetR, noSamples);
        highEQL.processSamples(pfWetL, noSamples);
        highEQR.processSamples(pfWetR, noSamples);

        // create stereo spread: cross-blend L and R by spread1/spread2
        while (--numSamples >= 0)
        {
            float fLeft = *pfWetL;
            float fRight = *pfWetR;
            *pfWetL = (fLeft * spread1) + (fRight * spread2);
            *pfWetR = (fRight * spread1) + (fLeft * spread2);
            pfWetL++;
            pfWetR++;
        }
        numSamples = buffer.getNumSamples();

        // apply wet/dry mix gains (applyGain with no channel index affects all channels)
        wetBuffer.applyGain(0, noSamples, wet);
        buffer.applyGain(0, noSamples, dry);

        // add wet buffer to output buffer
        buffer.addFrom(0, 0, wetBuffer, 0, 0, noSamples);
        buffer.addFrom(1, 0, wetBuffer, 1, 0, noSamples);
    }
    //========================================================================

    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages) { smoothParameters(); const int numInputChannels = getNumInputChannels(); int numSamples = buffer.getNumSamples(); // set up the parameters to be used float inGain = decibelsToAbsolute(params[INGAIN].getSmoothedValue()); float outGain = decibelsToAbsolute(params[OUTGAIN].getSmoothedValue()); buffer.applyGain(0, buffer.getNumSamples(), inGain); if (numInputChannels == 2) { // get sample pointers float* channelL = buffer.getSampleData(0); float* channelR = buffer.getSampleData(1); // pre-filter inFilterL->processSamples(buffer.getSampleData(0), numSamples); inFilterR->processSamples(buffer.getSampleData(1), numSamples); while (--numSamples >= 0) { float sampleL = *channelL; float sampleR = *channelR; // clip samples sampleL = jlimit(-1.0f, 1.0f, sampleL); sampleR = jlimit(-1.0f, 1.0f, sampleR); if (sampleL < 0.0f) { sampleL *= -1.0f; sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax); sampleL *= -1.0f; } else { sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax); } if (sampleR < 0.0f) { sampleR *= -1.0f; sampleR = linearInterpolate(distortionBuffer, distortionBufferSize, sampleR*distortionBufferMax); sampleR *= -1.0f; } else { sampleR = linearInterpolate(distortionBuffer, distortionBufferSize, sampleR*distortionBufferMax); } *channelL++ = sampleL; *channelR++ = sampleR; } // post-filter outFilterL->processSamples(buffer.getSampleData(0), buffer.getNumSamples()); outFilterR->processSamples(buffer.getSampleData(1), buffer.getNumSamples()); buffer.applyGain(0, buffer.getNumSamples(), outGain); } else if (numInputChannels == 1) { // get sample pointers float* channelL = buffer.getSampleData(0); // pre-filter inFilterL->processSamples(buffer.getSampleData(0), numSamples); while (--numSamples >= 0) { float sampleL = *channelL; // clip samples sampleL = jlimit(-1.0f, 1.0f, sampleL); if (sampleL < 
0.0f) { sampleL *= -1.0f; sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax); sampleL *= -1.0f; } else { sampleL = linearInterpolate(distortionBuffer, distortionBufferSize, sampleL*distortionBufferMax); } *channelL++ = sampleL; } // post-filter outFilterL->processSamples(buffer.getSampleData(0), buffer.getNumSamples()); buffer.applyGain(0, buffer.getNumSamples(), outGain); } //======================================================================== // in case we have more outputs than inputs, we'll clear any output // channels that didn't contain input data, (because these aren't // guaranteed to be empty - they may contain garbage). for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) { buffer.clear (i, 0, buffer.getNumSamples()); } }