void processAudio(AudioBuffer &buffer){
  double rate = getSampleRate();
  unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
  sampleDelay = min(sampleDelay, bufferSize);
  float feedback = getRampedParameterValue(PARAMETER_B);
  float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
  float dryWetMix = getRampedParameterValue(PARAMETER_D);
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; ++i){
      float delaySample = circularBuffer[writeIdx];
      float v = buf[i] + delaySample * feedback;
      v = applyBias(v, bias);
      circularBuffer[writeIdx] = min(1.0f, max(-1.0f, v)); // Guard: hard range limits.
      buf[i] = linearBlend(buf[i], delaySample, dryWetMix);
      writeIdx = (writeIdx + 1) % sampleDelay; // (++writeIdx) % n modifies writeIdx twice and is undefined behaviour
    }
  }
}
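// applyBias() and linearBlend() are not shown in this listing. A minimal
// sketch of plausible implementations (hypothetical helpers, assuming the
// bias is a power-law exponent and the blend is a linear crossfade):
float applyBias(float v, float bias){
  return powf(fabsf(v), bias) * (v < 0 ? -1.0f : 1.0f); // shape magnitude, keep sign
}
float linearBlend(float dry, float wet, float mix){
  return dry * (1 - mix) + wet * mix; // equal-sum crossfade, mix in 0..1
}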
void processAudio(AudioBuffer &buffer){
  double rate = getSampleRate();
  float p1 = getRampedParameterValue(PARAMETER_A);
  float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
  double step1 = freq1 / rate;
  float amt1 = getRampedParameterValue(PARAMETER_B);
  float p2 = getRampedParameterValue(PARAMETER_C);
  float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
  float amt2 = getRampedParameterValue(PARAMETER_D);
  double step2 = freq2 / rate;
  int size = buffer.getSize();
  double startPhase1 = phase1, startPhase2 = phase2; // restart per channel so channels stay in phase
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    phase1 = startPhase1;
    phase2 = startPhase2;
    for(int i = 0; i < size; ++i){
      float mod1 = sin(2 * M_PI * phase1) / 2 + .5; // 0..1
      float mod2 = sin(2 * M_PI * phase2) / 2 + .5; // 0..1
      float gain1 = (amt1 * mod1) + (1 - amt1); // depth-controlled tremolo gain
      float gain2 = (amt2 * mod2) + (1 - amt2);
      buf[i] = (gain1 * gain2) * buf[i];
      phase1 += step1;
      phase2 += step2;
    }
  }
  // wrap phases to [0, 1) to avoid precision loss over long run times
  phase1 = fmod(phase1, 1.0);
  phase2 = fmod(phase2, 1.0);
}
void processAudio(AudioBuffer &buffer){
  setCoeffs(getLpFreq(), 0.8f);
  float delayTime = getParameterValue(PARAMETER_A); // get delay time value
  float feedback = getParameterValue(PARAMETER_B);  // get feedback value
  float wetDry = getParameterValue(PARAMETER_D);    // get wet/dry balance value
  float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    process(size, buf, outBuf); // low pass filter for delay buffer
    for(int i = 0; i < size; i++){
      outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
      buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i]; // crossfade for wet/dry balance
      delayBuffer.write(buf[i]);
    }
  }
}
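// delayBuffer.read() takes a fractional delay in samples. A minimal sketch of
// what such a read could look like, written as a free function with linear
// interpolation (hypothetical; the actual CircularBuffer implementation may
// differ):
float delayRead(const float* data, int size, int writeIndex, float delaySamples){
  float pos = writeIndex - delaySamples;
  while(pos < 0)
    pos += size;             // wrap into the buffer
  int idx = (int)pos;        // integer part
  float frac = pos - idx;    // fractional part
  float a = data[idx % size];
  float b = data[(idx + 1) % size];
  return a + frac * (b - a); // linear interpolation between neighbours
}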
void mixAudioBufferChannelsLogrithmicDRC(AudioBuffer &audioBuffer, std::vector<float> &channelLevels, AudioBuffer &mixBuffer, float threshold){
  if(audioBuffer.getChannels() == 0)
    return;
  AudioFormat format = audioBuffer.getFormat();
  unsigned int samples = audioBuffer.getSamples();
  switch(format){
  case AudioFormat::UInt8:
  case AudioFormat::UInt8P:
    mixChannelsLogrithmicDRC<uint8_t>((uint8_t *)audioBuffer.getBuffer(), channelLevels, (uint8_t *)mixBuffer.getBuffer(), samples, threshold);
    break;
  case AudioFormat::Int16:
  case AudioFormat::Int16P:
    mixChannelsLogrithmicDRC<int16_t>((int16_t *)audioBuffer.getBuffer(), channelLevels, (int16_t *)mixBuffer.getBuffer(), samples, threshold);
    break;
  case AudioFormat::Int32:
  case AudioFormat::Int32P:
    mixChannelsLogrithmicDRC<int32_t>((int32_t *)audioBuffer.getBuffer(), channelLevels, (int32_t *)mixBuffer.getBuffer(), samples, threshold);
    break;
  case AudioFormat::Float:
  case AudioFormat::FloatP:
    mixChannelsLogrithmicDRC<float>((float *)audioBuffer.getBuffer(), channelLevels, (float *)mixBuffer.getBuffer(), samples, threshold);
    break;
  case AudioFormat::Double:
  case AudioFormat::DoubleP:
    mixChannelsLogrithmicDRC<double>((double *)audioBuffer.getBuffer(), channelLevels, (double *)mixBuffer.getBuffer(), samples, threshold);
    break;
  }
}
void processAudio(AudioBuffer &buffer){
  // Reasonably assume we will not have more than 32 channels
  float* ins[32];
  float* outs[32];
  int n = buffer.getChannels();
  if((fDSP.getNumInputs() < 32) && (fDSP.getNumOutputs() < 32)){
    // create the table of input channels
    for(int ch = 0; ch < fDSP.getNumInputs(); ++ch){
      ins[ch] = buffer.getSamples(ch % n);
    }
    // create the table of output channels
    for(int ch = 0; ch < fDSP.getNumOutputs(); ++ch){
      outs[ch] = buffer.getSamples(ch % n);
    }
    // read OWL parameters and update the corresponding Faust widget zones
    fUI.update();
    // process the audio samples
    fDSP.compute(buffer.getSize(), ins, outs);
  }
}
void PatchController::process(AudioBuffer& buffer){
  if(activeSlot == GREEN && green.index != settings.patch_green){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // green must be active slot when patch constructor is called
    green.setPatch(settings.patch_green);
    codec.softMute(false);
    debugClear();
    return;
  }else if(activeSlot == RED && red.index != settings.patch_red){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // red must be active slot when constructor is called
    red.setPatch(settings.patch_red);
    codec.softMute(false);
    debugClear();
    return;
  }
  switch(mode){
  case SINGLE_MODE:
  case DUAL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    break;
  case DUAL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    red.patch->processAudio(buffer);
    break;
  case SERIES_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case SERIES_RED_MODE:
    red.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case PARALLEL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  case PARALLEL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  }
}
void processAudio(AudioBuffer &buffer){
  prepare();
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; ++i)
      buf[i] = processSample(buf[i]);
  }
}
void processAudio(AudioBuffer &buffer){
  float gain = getParameterValue(PARAMETER_A)*2;
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; ++i)
      buf[i] = gain*buf[i];
  }
}
void processAudio(AudioBuffer &owlbuf){
  float *in1;
  float *in2;
  float *out1;
  float *out2;
  float a, b, e, f, g, i;
  float e1=env1, e2=env2, e3=env3, e4=env4, y=dry;
  float a1=att1, a2=att2, r12=rel12, a34=att34, r3=rel3, r4=rel4;
  float fi=fili, fo=filo, fx=filx, fb1=fbuf1, fb2=fbuf2;
  int sampleFrames = owlbuf.getSize();
  if(owlbuf.getChannels() < 2){ // mono: feed the same channel to both sides
    in1 = owlbuf.getSamples(0);  // L
    in2 = owlbuf.getSamples(0);  // R
    out1 = owlbuf.getSamples(0); // L
    out2 = owlbuf.getSamples(0); // R
  }else{
    in1 = owlbuf.getSamples(0);  // L
    in2 = owlbuf.getSamples(1);  // R
    out1 = owlbuf.getSamples(0); // L
    out2 = owlbuf.getSamples(1); // R
  }
  setParameters();
  // legacy VST-style loop: pre-decrement the pointers, then pre-increment per sample
  --in1; --in2; --out1; --out2;
  while(--sampleFrames >= 0){
    a = *++in1;
    b = *++in2;
    // filter processing
    fb1 = fo*fb1 + fi*a;
    fb2 = fo*fb2 + fi*b;
    e = fb1 + fx*a;
    f = fb2 + fx*b;
    i = a + b;
    i = (i>0) ? i : -i; // stereo sum; fabs()
    // four envelope followers with separate attack/release
    e1 = (i>e1) ? e1 + a1 * (i-e1) : e1 * r12;
    e2 = (i>e2) ? e2 + a2 * (i-e2) : e2 * r12;
    e3 = (i>e3) ? e3 + a34 * (i-e3) : e3 * r3;
    e4 = (i>e4) ? e4 + a34 * (i-e4) : e4 * r4;
    g = (e1 - e2 + e3 - e4);
    *++out1 = y * (a + e * g);
    *++out2 = y * (b + f * g);
  }
  if(e1 < 1.0e-10){ // flush denormals to zero
    env1=0.f; env2=0.f; env3=0.f; env4=0.f;
    fbuf1=0.f; fbuf2=0.f;
  }else{
    env1=e1; env2=e2; env3=e3; env4=e4;
    fbuf1=fb1; fbuf2=fb2;
  }
}
void processAudio(AudioBuffer &buffer){
  assert_param(buffer.getChannels() > 1);
  float gainL = getParameterValue(PARAMETER_A)*2;
  float gainR = getParameterValue(PARAMETER_B)*2;
  int size = buffer.getSize();
  float* left = buffer.getSamples(0);
  float* right = buffer.getSamples(1);
  for(int i = 0; i < size; ++i){
    left[i] = gainL*left[i];
    right[i] = gainR*right[i];
  }
}
void processAudio(AudioBuffer &buffer){
  int size = buffer.getSize();
  unsigned int delaySamples;
  rate = getParameterValue(PARAMETER_A) * 0.000005f; // flanger needs slow rate
  depth = getParameterValue(PARAMETER_B);
  feedback = getParameterValue(PARAMETER_C) * 0.707; // so we keep a -3dB summation of the delayed signal
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; i++){
      delaySamples = (depth * modulate(rate)) * (delayBuffer.getSize()-1); // compute delay according to rate and depth
      buf[i] += feedback * delayBuffer.read(delaySamples); // add scaled delayed signal to dry signal
      delayBuffer.write(buf[i]); // update delay buffer
    }
  }
}
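// modulate() is not defined in this listing. A minimal sketch of a sine LFO
// returning a 0..1 value, advanced once per call (hypothetical; the state
// would be a member variable in practice, shown here as a static local):
float modulate(float rate){
  static float lfoPhase = 0.0f; // hypothetical LFO state
  lfoPhase += rate;             // rate is already scaled very small above
  if(lfoPhase >= 1.0f)
    lfoPhase -= 1.0f;           // wrap normalised phase
  return sinf(2 * M_PI * lfoPhase) * 0.5f + 0.5f; // map -1..1 to 0..1
}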
void processAudio(AudioBuffer &buffer){
  // assert_param(buffer.getChannels() > 1);
  float gainLL = getParameterValue(PARAMETER_A);
  float gainLR = getParameterValue(PARAMETER_B);
  float gainRL = getParameterValue(PARAMETER_C);
  float gainRR = getParameterValue(PARAMETER_D);
  int size = buffer.getSize();
  float* left = buffer.getSamples(0);
  float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
  float l, r;
  for(int i = 0; i < size; ++i){
    l = gainLL*left[i] + gainLR*right[i];
    r = gainRL*left[i] + gainRR*right[i];
    left[i] = l;
    right[i] = r;
  }
}
void processAudio(AudioBuffer &buffer){
  double rate = getSampleRate();
  if(circularBuffer == NULL){ // lazy initialisation on the first block
    bufferSize = MAX_DELAY * rate;
    circularBuffer = new float[bufferSize];
    memset(circularBuffer, 0, bufferSize*sizeof(float));
    writeIdx = 0;
  }
  float p1 = getRampedParameterValue(PARAMETER_A);
  float p2 = getRampedParameterValue(PARAMETER_B);
  // float p3 = getRampedParameterValue(PARAMETER_C);
  float p4 = getRampedParameterValue(PARAMETER_D);
  unsigned int maxSampleDelay = rate * (MIN_DELAY + p1*p1 * (MAX_DELAY-MIN_DELAY));
  float bias = MIN_BIAS + p2*p2 * (MAX_BIAS-MIN_BIAS);
  // float cutoff = p3;
  float dryWetMix = p4;
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    Random r;
    for(int i = 0; i < size; ++i){
      // bias the random read offset towards shorter or longer delays
      int offset = floor(maxSampleDelay * pow(r.nextFloat(), bias) + 0.5);
      int readIdx = writeIdx - offset;
      while(readIdx < 0)
        readIdx += bufferSize;
      circularBuffer[writeIdx] = buf[i];
      buf[i] = circularBuffer[readIdx] * dryWetMix + buf[i] * (1 - dryWetMix);
      writeIdx = (writeIdx + 1) % bufferSize; // (++writeIdx) % n is undefined behaviour
    }
  }
}
void processAudio(AudioBuffer &buffer){
  const int size = buffer.getSize();
  const float coarsePitch = getRampedParameterValue(PARAMETER_A);
  const float finePitch = getRampedParameterValue(PARAMETER_B);
  const float decay = getRampedParameterValue(PARAMETER_C);
  const float mix = getRampedParameterValue(PARAMETER_D);
  if(coarsePitch != mPrevCoarsePitch || finePitch != mPrevFinePitch || decay != mPrevDecay){
    // use the new coarse pitch, not the stale mPrevCoarsePitch
    const float freq = midi2CPS(MIN_PITCH + floor(coarsePitch * PITCH_RANGE) + finePitch);
    for(int c = 0; c < NUM_COMBS; c++){
      mCombs[c].setFreqCPS(freq * FREQ_RATIOS[c]);
      mCombs[c].setDecayTimeMs(MIN_DECAY + (decay * DECAY_RANGE));
    }
    mPrevCoarsePitch = coarsePitch;
    mPrevFinePitch = finePitch;
    mPrevDecay = decay;
  }
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; i++){
      float ips = buf[i];
      float ops = 0.;
      const float smoothMix = mMixSmoother.process(mix);
      for(int c = 0; c < NUM_COMBS; c++){
        ops += mCombs[c].process(ips);
      }
      buf[i] = mDCBlocker.process(((ops * 0.1) * smoothMix) + (ips * (1.-smoothMix)));
    }
  }
}
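// midi2CPS() converts a MIDI note number to cycles per second. A minimal
// sketch of the standard conversion (A4 = MIDI note 69 = 440 Hz), assuming
// that is what the helper implements:
float midi2CPS(float pitch){
  return 440.0f * powf(2.0f, (pitch - 69.0f) / 12.0f); // equal temperament
}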
void processAudio(AudioBuffer &buffer){
  int size = buffer.getSize();
  float z; // implement with fewer arrays?
  setCoeffs(getLpFreq(), 0.8f);
  rate = 0.01f;
  depth = 0.3f;
  float delayTime = getParameterValue(PARAMETER_A); // get delay time value
  float feedback = getParameterValue(PARAMETER_B);  // get feedback value
  float wetDry = getParameterValue(PARAMETER_D);    // get wet/dry balance value
  float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    process(size, buf, outBuf); // low pass filter for delay buffer
    // sweep the all-pass delay once per block
    float d = _dmin + (_dmax-_dmin) * ((sin(_lfoPhase) + 1.f)/2.f);
    _lfoPhase += rate;
    if(_lfoPhase >= M_PI * 2.f)
      _lfoPhase -= M_PI * 2.f;
    // update filter coeffs
    for(int i = 0; i < 6; i++)
      _alps[i].Delay(d);
    for(int i = 0; i < size; i++){
      outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
      buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i]; // crossfade for wet/dry balance
      delayBuffer.write(buf[i]);
      // calculate output: six cascaded all-pass stages with light feedback
      z = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(_alps[5].Update(buf[i] + _zm1 * (feedback*0.1)))))));
      _zm1 = z;
      buf[i] = buf[i] + z * depth;
    }
  }
}
void processAudio(AudioBuffer &buffer){
  data.numSamps = getParameterValue(PARAMETER_A) * (KP_NUM_SAMPLES-8)+8;
  data.amp = getParameterValue(PARAMETER_B);
  data.g = getParameterValue(PARAMETER_C)*(0.5-0.48)+0.48;
  data.duration = getParameterValue(PARAMETER_D) * KP_NUM_BUFFER;
  if(isButtonPressed(PUSHBUTTON) && !data.noteOn){
    data.noteOn = true;
    pressButton(RED_BUTTON);
  }
  int size = buffer.getSize();
  float* left = buffer.getSamples(0);
  float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
  for(int i = 0; i < size; ++i){
    if(data.noteOn){
      if(data.phase > (data.numSamps + 1)){
        // if we have filled up our delay line, y(n) = g * (y(n-N) + y(n-(N+1)))
        data.pluck[data.phase] = data.g * (data.pluck[data.phase-data.numSamps] + data.pluck[data.phase - (data.numSamps + 1)]);
      }else{
        // computing the first N samples, y(n) = x(n)
        if(data.noiseType == KP_NOISETYPE_GAUSSIAN)
          data.pluck[data.phase] = data.noise[data.phase]; // use gaussian white noise
        if(data.noiseType == KP_NOISETYPE_RANDOM)
          data.pluck[data.phase] = rand()%100000/100000.; // use random noise
      }
      left[i] = data.amp * data.pluck[data.phase];  // left channel
      right[i] = data.amp * data.pluck[data.phase]; // right channel
      if(data.phase >= data.duration){ // if we have reached the end of our duration
        data.phase = 0;
        data.noteOn = false;
        pressButton(GREEN_BUTTON);
      }else{
        data.phase++;
      }
    }else{
      left[i] = right[i] = 0;
    }
  }
}
void processAudio(AudioBuffer &buffer){
  float drive = getParameterValue(PARAMETER_A);  // get input drive value
  float offset = getParameterValue(PARAMETER_B); // get offset value
  float gain = getParameterValue(PARAMETER_D);   // get output gain value
  offset /= 10;
  drive += 0.03;
  drive *= 40;
  gain /= 2;
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){ // for each channel
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; ++i){ // process each sample
      buf[i] = gain*nonLinear((buf[i]+offset)*drive);
    }
  }
}
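// nonLinear() is the clipping curve and is not shown in this listing. A
// minimal sketch using a tanh-style soft saturator (hypothetical; the actual
// curve may well differ):
float nonLinear(float x){
  return tanhf(x); // smooth saturation, output bounded to -1..1
}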
void processAudio(AudioBuffer &buffer){
  // apply filter
  float level = knobs[PARAMETER_D];
  int size = buffer.getSize();
  for(int i = 0; i < size; i++){
    if(parametersChanged()){
      // update filter factors if knobs were moved
      updateFactors();
    }
    // modulate resonance
    float q = r;
    if(knobs[PARAMETER_C] > 0.0f){
      phase += PSYF_MOD_SPEED * knobs[PARAMETER_C] / (float)size;
      if(phase >= 1.0f){
        phase -= 1.0f;
      }
      q += q * PSYF_MOD_LEVEL * sinf(phase * PSYF_TWOPI);
    }
    for(int ch = 0; ch < buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      // apply filter
      x = buf[i] - q * buf3[ch];
      buf0 = x * p + oldX * p - k * buf0;
      buf1 = buf0 * p + oldBuf0[ch] * p - k * buf1;
      buf2 = buf1 * p + oldBuf1[ch] * p - k * buf2;
      buf3[ch] = buf2 * p + oldBuf2[ch] * p - k * buf3[ch];
      buf3[ch] -= (buf3[ch] * buf3[ch] * buf3[ch]) / 6.0f; // cubic soft-clip term
      oldX = x;
      oldBuf0[ch] = buf0;
      oldBuf1[ch] = buf1;
      oldBuf2[ch] = buf2;
      buf[i] = (1.0f - level) * buf[i] + level * buf3[ch] * PSYF_WET_DRY_COMPENSATION;
    }
  }
}
void processAudio(AudioBuffer &buffer){
  float bias = getBias(1 - getRampedParameterValue(PARAMETER_A));
  float dryWetMix = getRampedParameterValue(PARAMETER_D);
  int size = buffer.getSize();
  for(int ch = 0; ch < buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(ch);
    for(int i = 0; i < size; ++i){
      float v = powf(fabs(buf[i]), bias) * // bias
                (buf[i] < 0 ? -1 : 1);     // sign
      buf[i] = v * dryWetMix + buf[i] * (1 - dryWetMix);
    }
  }
}
void processAudio(AudioBuffer &buffer){
  float *in1;
  float *in2;
  float *out1;
  float *out2;
  float a, b, c, d, g, l=fb3, m, h, s, sl=slev;
  float f1i=fi1, f1o=fo1, f2i=fi2, f2o=fo2, b1=fb1, b2=fb2;
  float g1, d1=driv1, t1=trim1;
  float g2, d2=driv2, t2=trim2;
  float g3, d3=driv3, t3=trim3;
  int v=valve;
  int sampleFrames = buffer.getSize();
  if(buffer.getChannels() < 2){ // Mono check
    in1 = buffer.getSamples(0);  // L
    in2 = buffer.getSamples(0);  // R
    out1 = buffer.getSamples(0); // L
    out2 = buffer.getSamples(0); // R
  }else{
    in1 = buffer.getSamples(0);  // L
    in2 = buffer.getSamples(1);  // R
    out1 = buffer.getSamples(0); // L
    out2 = buffer.getSamples(1); // R
  }
  getParameters();
  // legacy VST-style loop: pre-decrement the pointers, then pre-increment per sample
  --in1; --in2; --out1; --out2;
  while(--sampleFrames >= 0){
    a = *++in1;
    b = *++in2;
    // process from here...
    s = (a - b) * sl; // keep stereo component for later
    a += (float)(b + 0.00002); // dope filter at low level
    b2 = (f2i * a) + (f2o * b2); // crossovers
    b1 = (f1i * b2) + (f1o * b1);
    l = (f1i * b1) + (f1o * l);
    m = b2 - l;
    h = a - b2;
    g = (l>0) ? l : -l;
    g = (float)(1.0 / (1.0 + d1 * g)); // distort
    g1 = g;
    g = (m>0) ? m : -m;
    g = (float)(1.0 / (1.0 + d2 * g));
    g2 = g;
    g = (h>0) ? h : -h;
    g = (float)(1.0 / (1.0 + d3 * g));
    g3 = g;
    if(v){ // valve mode: leave the positive half of each band unclipped
      if(l>0) g1 = 1.0;
      if(m>0) g2 = 1.0;
      if(h>0) g3 = 1.0;
    }
    a = (l*g1*t1) + (m*g2*t2) + (h*g3*t3);
    c = a + s; // output
    d = a - s;
    *++out1 = c * (mainGain * 2.0);
    *++out2 = d * (mainGain * 2.0);
  }
  fb1 = b1;
  fb2 = b2;
  fb3 = l;
}