Example #1
    void processAudio(AudioBuffer &buffer) {
        double rate = getSampleRate();

        float p1 = getRampedParameterValue(PARAMETER_A);
        float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        double step1 = freq1 / rate;
        float amt1 = getRampedParameterValue(PARAMETER_B);

        float p2 = getRampedParameterValue(PARAMETER_C);
        float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        float amt2 = getRampedParameterValue(PARAMETER_D);
        double step2 = freq2 / rate;

        int size = buffer.getSize();

        for(int ch = 0; ch<buffer.getChannels(); ++ch)
        {
            float* buf = buffer.getSamples(ch);
            // Use per-channel copies so every channel sees the same LFO sweep
            // and the stored phases advance only once per block.
            double ph1 = phase1;
            double ph2 = phase2;

            for (int i=0; i<size; ++i)
            {
                float mod1 = sin(2 * M_PI * ph1) / 2 + .5; // 0..1
                float mod2 = sin(2 * M_PI * ph2) / 2 + .5; // 0..1
                float gain1 = (amt1 * mod1) + (1 - amt1);
                float gain2 = (amt2 * mod2) + (1 - amt2);
                buf[i] = (gain1 * gain2) * buf[i];
                ph1 += step1;
                ph2 += step2;
            }
        }
        // Advance and wrap the LFO phases once per block to avoid precision drift.
        phase1 = fmod(phase1 + step1 * size, 1.0);
        phase2 = fmod(phase2 + step2 * size, 1.0);

    }
Example #2
    void processAudio(AudioBuffer &buffer){
      double rate = getSampleRate();

      unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
      sampleDelay = min(sampleDelay, bufferSize);
      float feedback = getRampedParameterValue(PARAMETER_B);
      float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
      float dryWetMix = getRampedParameterValue(PARAMETER_D);

      int size = buffer.getSize();

      for(int ch = 0; ch<buffer.getChannels(); ++ch){
        float* buf = buffer.getSamples(ch);

        for(int i=0; i<size; ++i){
          float delaySample = circularBuffer[writeIdx];
          float v = buf[i] + circularBuffer[writeIdx] * feedback;
          v = applyBias(v, bias);
          circularBuffer[writeIdx] = min(1.0f, max(-1.0f, v)); // Guard: hard range limits.
          buf[i] = linearBlend(buf[i], delaySample, dryWetMix);
          // Avoid modifying writeIdx twice in one expression (undefined behaviour).
          writeIdx = (writeIdx + 1) % sampleDelay;
        }
      }
    }
Example #3
 void processAudio(AudioBuffer& buf){
    float minf = getParameterValue(PARAMETER_A)*0.1 + 0.001;
    float maxf = min(0.4, minf + getParameterValue(PARAMETER_B)*0.2);
    // range should be exponentially related to minf
    //    int tones = getParameterValue(PARAMETER_C)*(TONES-1) + 1;
    int tones = 12;
    float spread = getParameterValue(PARAMETER_C) + 1.0;
    float rate = 1.0 + (getParameterValue(PARAMETER_D) - 0.5)*0.00002;
    int size = buf.getSize();
    FloatArray out = buf.getSamples(LEFT_CHANNEL);
    float amp;
    // Space the oscillators geometrically above the base frequency.
    for(int t=1; t<tones; ++t)
      inc[t] = inc[t-1]*spread;
    for(int i=0; i<size; ++i){
      for(int t=0; t<tones; ++t){
        amp = getAmplitude((inc[t]-minf)/(maxf-minf));
        out[i] += amp * getWave(acc[t]);
        acc[t] += inc[t];
        if(acc[t] > 1.0)
          acc[t] -= 1.0;
        else if(acc[t] < 0.0)
          acc[t] += 1.0;
        inc[t] *= rate;
      }
    }
    // Wrap the base frequency when it drifts out of range.
    if(inc[0] > maxf)
      inc[0] = minf;
      // while(inc[0] > minf)
      //   inc[0] *= 0.5;
    else if(inc[0] < minf)
      inc[0] = maxf;
      // while(inc[0] < maxf)
      //   inc[0] *= 2.0;
 }
Example #4
 void processAudio(AudioBuffer &buffer){
   if(isButtonPressed(PUSHBUTTON))
     reset();
   dt = getParameterValue(PARAMETER_A)*getParameterValue(PARAMETER_A)*0.0250;
   float rotateX = getParameterValue(PARAMETER_B)*M_PI;
   float rotateY = getParameterValue(PARAMETER_C)*M_PI;
   float rotateZ = getParameterValue(PARAMETER_E)*M_PI;
   float gainL, gainR;
   gainL = gainR = getParameterValue(PARAMETER_D)*2/25.0;
   int size = buffer.getSize();
   float* left = buffer.getSamples(0);
   float* right = buffer.getSamples(1);
   float dx, dy, dz;
   updateMatrix(rotateX, rotateY, rotateZ);
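   // Euler-integrate the Lorenz system (dx = a*(y-x), dy = x*(c-z)-y, dz = x*y-b*z),
   // then rotate the point and scale it for the stereo outputs.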
   for(int i=0;i<size;i++){
     dx = a*(y - x);
     dy = (x * (c - z) - y);
     dz = (x*y - b * z);
     x += dx*dt;
     y += dy*dt;
     z += dz*dt;
     P[0] = x;
     P[1] = y;
     P[2] = z-25; // centre on z axis
     rotateP();
     left[i] = Pprime[0] * gainL;
     right[i] = Pprime[1] * gainR;
   }
   // debugMessage("x/y/z", x, y, z);
 }
Example #5
  void processAudio(AudioBuffer &buffer) {
        
    int size  = buffer.getSize();
    float y;
        
    rate      = Rate(getParameterValue(PARAMETER_A));
    depth     = getParameterValue(PARAMETER_B);
    feedback  = getParameterValue(PARAMETER_C);
        
    //calculate and update phaser sweep lfo...
    float d  = _dmin + (_dmax-_dmin) * ((sin( _lfoPhase ) + 1.f)/2.f);
        
    _lfoPhase += rate;
    if( _lfoPhase >= M_PI * 2.f )
      _lfoPhase -= M_PI * 2.f;
        
    //update filter coeffs
    for( int i=0; i<6; i++ )
      _alps[i].Delay( d );

    // for (int ch = 0; ch<buffer.getChannels(); ++ch) {
    float* buf  = buffer.getSamples(0);
    for (int i = 0; i < size; i++) {
      //calculate output
      y = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(
                            _alps[5].Update( buf[i] + _zm1 * feedback ))))));
      _zm1 = y;

      buf[i] = buf[i] + y * depth;
    }
    // }
  }
Example #6
  void processAudio(AudioBuffer &buffer){
    float y[getBlockSize()];
    setCoeffs(getLpFreq(), 0.8f);
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value

    if(abs(time - delayTime) < 0.01)
      delayTime = time;
    else
      time = delayTime;
        
    float delaySamples = delayTime * (delayBuffer.getSize()-1);        
    int size = buffer.getSize();
    float* x = buffer.getSamples(0);
    process(size, x, y);     // low pass filter for delay buffer
    for(int n = 0; n < size; n++){
        
      //linear interpolation for delayBuffer index
      dSamples = olddelaySamples + (delaySamples - olddelaySamples) * n / size;
        
      y[n] = y[n] + feedback * delayBuffer.read(dSamples);
      x[n] = (1.f - wetDry) * x[n] + wetDry * y[n];  //crossfade for wet/dry balance
      delayBuffer.write(x[n]);
    }
    olddelaySamples = delaySamples;
  }
Example #7
 void processAudio(AudioBuffer &buffer)
 {
     // Reasonably assume we will not have more than 32 channels
     float*  ins[32];
     float*  outs[32];
     int     n = buffer.getChannels();
     
     if ( (fDSP.getNumInputs() < 32) && (fDSP.getNumOutputs() < 32) ) {
         
         // create the table of input channels
         for(int ch=0; ch<fDSP.getNumInputs(); ++ch) {
             ins[ch] = buffer.getSamples(ch%n);
         }
         
         // create the table of output channels
         for(int ch=0; ch<fDSP.getNumOutputs(); ++ch) {
             outs[ch] = buffer.getSamples(ch%n);
         }
         
         // read OWL parameters and update the corresponding Faust widget zones
         fUI.update(); 
         
         // Process the audio samples
         fDSP.compute(buffer.getSize(), ins, outs);
     }
 }
Example #8
  void processAudio(AudioBuffer &buffer) {
    float delayTime, feedback, wetDry;
    delayTime = getParameterValue(PARAMETER_A);
    feedback  = getParameterValue(PARAMETER_B);
    wetDry    = getParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    int32_t newDelay;
    if(abs(time - delayTime) > 0.01){
      newDelay = delayTime * (delayBuffer.getSize()-1);
      time = delayTime;
    }else{
      newDelay = delay;
    }
    float* x = buffer.getSamples(0);
    float y;
    for (int n = 0; n < size; n++){
//       y = buf[i] + feedback * delayBuffer.read(delay);
//       buf[i] = wetDry * y + (1.f - wetDry) * buf[i];
//       delayBuffer.write(buf[i]);
      if(newDelay - delay > 4){
        y = getDelayAverage(delay-5, 5);
        delay -= 5;
      }else if(delay - newDelay > 4){
        y = getDelayAverage(delay+5, 5);
        delay += 5;
      }else{
        y = delayBuffer.read(delay);
      }
      x[n] = wetDry * y + (1.f - wetDry) * x[n];  // crossfade for wet/dry balance
      delayBuffer.write(feedback * x[n]);
    }
  }
Example #9
  void processAudio(AudioBuffer &buffer){
    
    setCoeffs(getLpFreq(), 0.8f);
        
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value
        
    float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
        
    int size = buffer.getSize();
      
    for(int ch = 0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      process(size, buf, outBuf);     // low pass filter for delay buffer

      for(int i = 0; i < size; i++){
        outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
        buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i];  //crossfade for wet/dry balance
        delayBuffer.write(buf[i]);
      }
    }
  }
Example #10
void PatchController::process(AudioBuffer& buffer){
  if(activeSlot == GREEN && green.index != settings.patch_green){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // green must be active slot when patch constructor is called
    green.setPatch(settings.patch_green);
    codec.softMute(false);
    debugClear();
    return;
  }else if(activeSlot == RED && red.index != settings.patch_red){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // red must be active slot when constructor is called
    red.setPatch(settings.patch_red);
    codec.softMute(false);
    debugClear();
    return;
  }
  switch(mode){
  case SINGLE_MODE:
  case DUAL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    break;
  case DUAL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    red.patch->processAudio(buffer);
    break;
  case SERIES_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case SERIES_RED_MODE:
    red.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case PARALLEL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  case PARALLEL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  }
}
Example #11
 FixedDelayPatch() {
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Feedback");
   registerParameter(PARAMETER_B, "Mix");
   registerParameter(PARAMETER_C, "");    
   registerParameter(PARAMETER_D, "");    
 }
Example #12
 SimpleDriveDelayPatch() : delay(0) {
   registerParameter(PARAMETER_A, "Delay");
   registerParameter(PARAMETER_B, "Feedback");
   registerParameter(PARAMETER_C, "Drive");
   registerParameter(PARAMETER_D, "Wet/Dry ");
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
 }
Example #13
  void processAudio(AudioBuffer &buffer){
    float gain = getParameterValue(PARAMETER_A)*2;
    int size = buffer.getSize();
    for(int ch=0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      for(int i=0; i<size; ++i)
        buf[i] = gain*buf[i];
    }
  }
Example #14
 LpfDelayPatch() : x1(0.0f), x2(0.0f), y1(0.0f), y2(0.0f), olddelaySamples(0.0f) {
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Delay", "Delay time");
   registerParameter(PARAMETER_B, "Feedback", "Delay loop feedback");
   registerParameter(PARAMETER_C, "Fc", "Filter cutoff frequency");
   registerParameter(PARAMETER_D, "Dry/Wet", "Dry/wet mix");
   setCoeffs(getLpFreq()/getSampleRate(), 0.6f);
 }    
Example #15
  void processAudio(AudioBuffer &buffer)
  {

    int size = buffer.getSize();

    float samp_float = getParameterValue(PARAMETER_A);
    int samp_freq = ceil(samp_float*63+0.1);

    float mayhem_rate = getParameterValue(PARAMETER_B);
    mayhem_rate *= 0.03;
    float mayhem = 1;

    if(abs(getParameterValue(PARAMETER_C)*2+1-prev_freq)>0.01){ // if the knob was turned
      mayhem_freq = getParameterValue(PARAMETER_C); // update center frequency
      mayhem_freq *= 2;
      mayhem_freq += 1;          // mayhem_freq range = 1 to 3 --> 375 -- 1125 Hz
      prev_freq = mayhem_freq;   // store value to compare next time
    }

    float mayhem_depth = getParameterValue(PARAMETER_D);
    mayhem_depth *= depth;

    // for(int ch=0; ch<buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(0);
    for(int i=0; i<size; ++i){
      if(i%samp_freq==0){
        buf[i] = buf[i]*((1-mayhem)+mayhem*abs(cos(2*M_PI*mayhem_freq*(i+update_freq_cnt*size)/size)));
        samp = buf[i];
      }else{
        buf[i] = samp;
        // buf[i] = samp*(1-mayhem)+buf[i]*mayhem*abs(cos(2*M_PI*mayhem_freq*(i+update_freq_cnt*size)/size));
      }
    }
    // update_freq_cnt++;
    // if(update_freq_cnt == 10)
    {
      update_freq_cnt = 0;
      if(mayhem_freq>=prev_freq+mayhem_depth || mayhem_freq>=3) inc_flag = 0; // sets maximum freq 3*fs/size = 1125 Hz
      if(mayhem_freq<=prev_freq-mayhem_depth || mayhem_freq<=1) inc_flag = 1; // minimum freq that can be achieved in 128 samples is 375 Hz
      if(inc_flag == 0){
        mayhem_freq /= 1+mayhem_rate*mayhem_depth/depth;
        // freq = floor(fs/size*mayhem_freq); // only integer frequencies
      }
      if(inc_flag == 1){
        mayhem_freq *= 1+mayhem_rate*mayhem_depth/depth;
        // freq = ceil(fs/size*mayhem_freq); // only integer frequencies
      }
      // mayhem_freq = freq*size/fs; // only integer frequencies
    }
  }
Example #16
 void processAudio(AudioBuffer &buffer){
   prepare();
   int size = buffer.getSize();
     
   for (int ch = 0; ch<buffer.getChannels(); ++ch) {
       float* buf = buffer.getSamples(ch);
       for(int i = 0; i < size; ++i) buf[i] = processSample(buf[i]);
   }
 }
Example #17
 FlangerPatch(){
   AudioBuffer* buffer = createMemoryBuffer(1, FLANGER_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Rate");
   registerParameter(PARAMETER_B, "Depth");
   registerParameter(PARAMETER_C, "Feedback");
   registerParameter(PARAMETER_D, "");    
   phase = 0;
 }
Example #18
 SimpleDelayPatch() : delay(0), alpha(0.04), dryWet(0.f)
 {
   registerParameter(PARAMETER_A, "Delay");
   registerParameter(PARAMETER_B, "Feedback");
   registerParameter(PARAMETER_C, "");
   registerParameter(PARAMETER_D, "Dry/Wet");
   AudioBuffer* buffer = createMemoryBuffer(1, SIMPLE_DELAY_REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
 }
Example #19
 void processAudio(AudioBuffer &buffer) {
   float* x = buffer.getSamples(0);
   float feedback = getParameterValue(PARAMETER_A);
   float mix = getParameterValue(PARAMETER_B);
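   // Crossfade the delay line output with the input, then write the result
   // back into the delay line scaled by feedback.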
   for(int n = 0; n < buffer.getSize(); n++){
     x[n] = delayBuffer.tail()*mix + x[n]*(1.0f-mix);
     delayBuffer.write(feedback * x[n]);
   }
 }
Example #20
  BiasedDelayPatch() : MIN_DELAY(0.01), MAX_DELAY(4), MIN_BIAS(0.5), MED_BIAS(1), MAX_BIAS(3), ramp(0.1), circularBuffer(NULL) {
    registerParameter(PARAMETER_A, "Delay");
    registerParameter(PARAMETER_B, "Feedback");
    registerParameter(PARAMETER_C, "Bias");
    registerParameter(PARAMETER_D, "Dry/Wet");
    memset(oldVal, 0, sizeof(oldVal));

    AudioBuffer* buffer = createMemoryBuffer(1, MAX_DELAY * getSampleRate());
    bufferSize = buffer->getSize();    
    circularBuffer = buffer->getSamples(0);
  }
Example #21
  MultiTapDelayPatch() {
    registerParameter(PARAMETER_A, "Delay");
    registerParameter(PARAMETER_B, "Feedback");
    registerParameter(PARAMETER_C, "Dry/Wet");
    registerParameter(PARAMETER_D, "Tap selector");
	
	
    AudioBuffer* audioBuffer = createMemoryBuffer(1, (int) (MAX_DELAY_MS / 1000) * getSampleRate());
    buffer = audioBuffer->getSamples(0);
    memset(buffer, 0, audioBuffer->getSize() *sizeof(float));

    bufferSize = audioBuffer->getSize();
	
    writeIndex = 0;
    currentDelay = 0;
    currentSelection = 0;
    delayInSamples = 0;

    computeVariables();
  }
Example #22
    void processAudio(AudioBuffer &owlbuf)
    {
	float *in1;
	float *in2;
	float *out1;
	float *out2;
	float a, b, e, f, g, i;
	float e1=env1, e2=env2, e3=env3, e4=env4, y=dry;
	float a1=att1, a2=att2, r12=rel12, a34=att34, r3=rel3, r4=rel4;
	float fi=fili, fo=filo, fx=filx, fb1=fbuf1, fb2=fbuf2;
	int sampleFrames = owlbuf.getSize();

	if (owlbuf.getChannels() < 2) {  // Mono check
	    in1 = owlbuf.getSamples(0); // L
	    in2 = owlbuf.getSamples(0); // R
	    out1 = owlbuf.getSamples(0); // L
	    out2 = owlbuf.getSamples(0); // R
	} else {
	    in1 = owlbuf.getSamples(0); // L
	    in2 = owlbuf.getSamples(1); // R
	    out1 = owlbuf.getSamples(0); // L
	    out2 = owlbuf.getSamples(1); // R
	}
	setParameters();

	--in1;
	--in2;
	--out1;
	--out2;

	while(--sampleFrames >= 0)
	{
	    a = *++in1;
	    b = *++in2;

	    // Filter processing
	    fb1 = fo*fb1 + fi*a;
	    fb2 = fo*fb2 + fi*b;
	    e = fb1 + fx*a;
	    f = fb2 + fx*b;

	    i = a + b; i = (i>0)? i : -i; // stereo sum ; fabs()
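	    // Four envelope followers with separate attack and release times;
	    // their difference g scales the filtered signal added back to the dry path.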
	    e1 = (i>e1)? e1 + a1 * (i-e1) : e1 * r12;
	    e2 = (i>e2)? e2 + a2 * (i-e2) : e2 * r12;
	    e3 = (i>e3)? e3 + a34 * (i-e3) : e3 * r3;
	    e4 = (i>e4)? e4 + a34 * (i-e4) : e4 * r4;
	    g = (e1 - e2 + e3 - e4);

	    *++out1 = y * (a + e * g);
	    *++out2 = y * (b + f * g);
	}
	if(e1<1.0e-10) { env1=0.f; env2=0.f; env3=0.f; env4=0.f; fbuf1=0.f; fbuf2=0.f; }
	else { env1=e1;  env2=e2;  env3=e3;  env4=e4;  fbuf1=fb1; fbuf2=fb2; }
    }
Example #23
  void processAudio(AudioBuffer &buffer){
    assert_param(buffer.getChannels() > 1);
    float gainL = getParameterValue(PARAMETER_A)*2;
    float gainR = getParameterValue(PARAMETER_B)*2;
    int size = buffer.getSize();
    float* left = buffer.getSamples(0);
    float* right = buffer.getSamples(1);
    for(int i=0; i<size; ++i){
      left[i] = gainL*left[i];
      right[i] = gainR*right[i];
    }
  }
Example #24
  void processAudio(AudioBuffer &buffer) {
    float frequency = getParameterValue(PARAMETER_A) * 10000;
    float amplitude = getParameterValue(PARAMETER_B);
    float* left = buffer.getSamples(LEFT_CHANNEL);
    float linc = frequency/getSampleRate();
    int size = buffer.getSize();
    for(int n = 0; n<size; n++){
      left[n] = sinf(2*M_PI*pos) * amplitude;
      if((pos += linc) > 1.0f)
        pos -= 1.0f;
    }
  }
Example #25
  void processAudio(AudioBuffer &buffer)
  {
    int size = buffer.getSize();

    float reverb_scale = getParameterValue(PARAMETER_A);   // get reverb length from knob
    if(reverb_scale<0.1) reverb_scale=0.1;                  // apply lower limit to reverb length
    reverb_time = round(reverb_scale*reverb_buf_length/2);  // apply scaling factor to the window size to obtain reverb_time (in samples)
    int mod = reverb_time%size;  // ensure that reverb_time is an even multiple of audio buffer size
    reverb_time -= mod;
    if(reverse_cnt>reverb_time) reverse_cnt = 0;

    float wet = getParameterValue(PARAMETER_B);    // get wet/dry mix from knob

    float level = getParameterValue(PARAMETER_C);  // get output level from knob
    level *= 2;

    // for(int ch=0; ch<buffer.getChannels(); ch++)
    {
      float* buf = buffer.getSamples(0);

      for(int i=0; i<size; i++){
        reverb_buffer[reverb_buf_length-1-i-reverb_index*size] = reverb_buffer[i+reverb_index*size]; // load reverse into end of buffer
        reverb_buffer[i+reverb_index*size] = buf[i]; // load number of samples into the reverse buffer equal to size of audio buffer

        if(reverse_flag == 1){
          buf[i] = level*((1-wet)*buf[i]+wet*reverb_buffer[reverb_buf_length-1-reverse_cnt]*abs(reverse_cnt-reverb_time)*reverse_cnt/reverb_time/200);
          reverse_cnt++;
          if(reverse_cnt==reverb_time){
            reverse_cnt=0;
            reverse_flag=0;
            // reverb_index=0;
          }
        }else{
          buf[i] = level*buf[i];
        }
      }

      reverb_index++;  // increment the window index

      if(reverse_flag==0){
        reverb_index=0;   // reset the window index to 0
        reverse_flag = 1; // set flag to trigger reverse
      }
    }
  }
Example #26
  void processAudio(AudioBuffer &buffer) {
    FloatArray left = buffer.getSamples(LEFT_CHANNEL);
    FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
    for(int i = 0; i<buffer.getSize(); i++){
      if(abs(last-target) < 0.001){
        last = target;
        target = noise->getNextSample()*range;
      }
      left[i] = last;
      last += getIncrement();
      right[i] = hz.voltsToSample(quantize(hz.sampleToVolts(right[i])));
    }
  }
Example #27
  void processAudio(AudioBuffer &buffer){
    float a = getDbGain(PARAMETER_A);
    float b = getDbGain(PARAMETER_B);
    float c = getDbGain(PARAMETER_C);
    float d = getDbGain(PARAMETER_D);
    eqL.setCoeffs(a, b, c, d);
    eqR.copyCoeffs(eqL);
 
    // process
    int numSamples = buffer.getSize();
    float* bufL = buffer.getSamples(0);
    float* bufR = buffer.getSamples(1);
    eqL.process(numSamples, bufL);
    eqR.process(numSamples, bufR);
  }
Example #28
 void processAudio(AudioBuffer &buffer){
   float gain = getParameterValue(PARAMETER_A);
   gain = gain*gain*2.0;
   float iterations = getParameterValue(PARAMETER_B);
   float r = getParameterValue(PARAMETER_C)*(maxR-minR) + minR;
   iterations = iterations*iterations*maxI;
   int size = buffer.getSize();
   FloatArray left = buffer.getSamples(LEFT_CHANNEL);
   FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
   float wet = getParameterValue(PARAMETER_D);
   for(int i=0; i<size; i++){
     left[i] = processSample(gain*left[i], iterations, r) * wet + left[i]*(1-wet);
     right[i] = processSample(gain*right[i], iterations, r) * wet + right[i]*(1-wet);
   }
 }
Example #29
 void processAudio(AudioBuffer &buffer){
   // update filter coefficients
   float fn = getFrequency()/getSampleRate();
   float Q = getQ();
   float g = getDbGain();
   peqL.setCoeffsPEQ(fn, Q, g);
   peqR.setCoeffsPEQ(fn, Q, g);
     
   // process
   int size = buffer.getSize();
   float* left = buffer.getSamples(0);
   peqL.process(size, left);
   float* right = buffer.getSamples(1);
   peqR.process(size, right);
 }
Example #30
 void processAudio(AudioBuffer &buffer){
   FloatArray l1 = buffer.getSamples(LEFT_CHANNEL);
   FloatArray r1 = buffer.getSamples(RIGHT_CHANNEL);
   FloatArray l2 = buf->getSamples(LEFT_CHANNEL);
   FloatArray r2 = buf->getSamples(RIGHT_CHANNEL);
   float morph = getParameterValue(MORPH_PARAMETER);
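   // Duplicate the input, run the green patch on the copy and the red patch in place,
   // then crossfade the two results by morph.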
   l2.copyFrom(l1);
   r2.copyFrom(r1);
   green.processAudio(*buf);
   red.processAudio(buffer);
   int size = buffer.getSize();
   for(int i=0; i<size; ++i){
     l1[i] = l1[i]*(1-morph) + l2[i]*morph;
     r1[i] = r1[i]*(1-morph) + r2[i]*morph;
   }
 }