Example no. 1
  void processAudio(AudioBuffer &buffer) {
    float delayTime, feedback, wetDry;
    delayTime = getParameterValue(PARAMETER_A);
    feedback  = getParameterValue(PARAMETER_B);
    wetDry    = getParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    int32_t newDelay;
    if(abs(time - delayTime) > 0.01){
      newDelay = delayTime * (delayBuffer.getSize()-1);
      time = delayTime;
    }else{
      newDelay = delay;
    }
    float* x = buffer.getSamples(0);
    float y;
    for (int n = 0; n < size; n++){
//       y = buf[i] + feedback * delayBuffer.read(delay);
//       buf[i] = wetDry * y + (1.f - wetDry) * buf[i];
//       delayBuffer.write(buf[i]);
      if(newDelay - delay > 4){
        y = getDelayAverage(delay-5, 5);
        delay -= 5;
      }else if(delay - newDelay > 4){
        y = getDelayAverage(delay+5, 5);
        delay += 5;
      }else{
        y = delayBuffer.read(delay);
      }
      x[n] = wetDry * y + (1.f - wetDry) * x[n];  // crossfade for wet/dry balance
      delayBuffer.write(feedback * x[n]);
    }
  }
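getDelayAverage() is called above but not defined in this excerpt. A minimal sketch of what such a helper could look like, assuming delayBuffer.read(int) returns a single tap; the averaging itself is an assumption, not the original implementation:

  // Assumed helper: average `count` consecutive taps starting at `fromDelay`,
  // so that jumping the read position by several samples does not click.
  float getDelayAverage(int fromDelay, int count){
    float sum = 0.0f;
    for(int k = 0; k < count; ++k)
      sum += delayBuffer.read(fromDelay + k);
    return sum / count;
  }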
void mixAudioBufferChannelsLogrithmicDRC(AudioBuffer &audioBuffer, std::vector<float> &channelLevels, AudioBuffer &mixBuffer, float threshold)
{
	if(audioBuffer.getChannels() == 0)
		return;

	AudioFormat format=audioBuffer.getFormat();
	unsigned int samples=audioBuffer.getSamples();

	switch(format)
	{
	case AudioFormat::UInt8:
	case AudioFormat::UInt8P:
		mixChannelsLogrithmicDRC<uint8_t>((uint8_t *)audioBuffer.getBuffer(), channelLevels, (uint8_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Int16:
	case AudioFormat::Int16P:
		mixChannelsLogrithmicDRC<int16_t>((int16_t *)audioBuffer.getBuffer(), channelLevels, (int16_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Int32:
	case AudioFormat::Int32P:
		mixChannelsLogrithmicDRC<int32_t>((int32_t *)audioBuffer.getBuffer(), channelLevels, (int32_t *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Float:
	case AudioFormat::FloatP:
		mixChannelsLogrithmicDRC<float>((float *)audioBuffer.getBuffer(), channelLevels, (float *)mixBuffer.getBuffer(), samples, threshold);
		break;
	case AudioFormat::Double:
	case AudioFormat::DoubleP:
		mixChannelsLogrithmicDRC<double>((double *)audioBuffer.getBuffer(), channelLevels, (double *)mixBuffer.getBuffer(), samples, threshold);
		break;
	}
}
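The templated mixChannelsLogrithmicDRC<T>() that this dispatcher calls is not shown. A rough sketch of the shape it might take, assuming interleaved source samples, one gain per channel taken from channelLevels, and a logarithmic soft limit above threshold; integer formats would additionally need offset and scaling, which is omitted here. This is an assumption, not the actual implementation:

template<typename T>
static void mixChannelsLogrithmicDRC(T *src, std::vector<float> &channelLevels, T *dst,
                                     unsigned int samples, float threshold)
{
	unsigned int channels = channelLevels.size();

	for(unsigned int s = 0; s < samples; s++)
	{
		float mix = 0.0f;

		for(unsigned int ch = 0; ch < channels; ch++)
			mix += (float)src[s*channels + ch] * channelLevels[ch];

		// assumed DRC curve: logarithmic soft limit above the threshold
		float mag = fabsf(mix);
		if(mag > threshold)
			mix = (mix < 0.0f ? -1.0f : 1.0f) * (threshold + logf(1.0f + mag - threshold));

		dst[s] = (T)mix;
	}
}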
  void processAudio(AudioBuffer& buf){
    float minf = getParameterValue(PARAMETER_A)*0.1 + 0.001;
    float maxf = min(0.4, minf + getParameterValue(PARAMETER_B)*0.2);
    // range should be exponentially related to minf
    //    int tones = getParameterValue(PARAMETER_C)*(TONES-1) + 1;
    int tones = 12;
    float spread = getParameterValue(PARAMETER_C) + 1.0;
    float rate = 1.0 + (getParameterValue(PARAMETER_D) - 0.5)*0.00002;
    int size = buf.getSize();
    FloatArray out = buf.getSamples(LEFT_CHANNEL);
    float amp;
    for(int t=1; t<tones; ++t)
      inc[t] = inc[t-1]*spread;
    for(int i=0; i<size; ++i){
      for(int t=0; t<tones; ++t){
        amp = getAmplitude((inc[t]-minf)/(maxf-minf));
        out[i] += amp * getWave(acc[t]);
        acc[t] += inc[t];
        if(acc[t] > 1.0)
          acc[t] -= 1.0;
        else if(acc[t] < 0.0)
          acc[t] += 1.0;
        inc[t] *= rate;
      }
    }
    if(inc[0] > maxf)
      inc[0] = minf;  // alternative: while(inc[0] > minf) inc[0] *= 0.5;
    else if(inc[0] < minf)
      inc[0] = maxf;  // alternative: while(inc[0] < maxf) inc[0] *= 2.0;
  }
Example no. 4
  void processAudio(AudioBuffer &buffer){
    double rate = getSampleRate();

    unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
    sampleDelay = min(sampleDelay, bufferSize);
    float feedback = getRampedParameterValue(PARAMETER_B);
    float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
    float dryWetMix = getRampedParameterValue(PARAMETER_D);

    int size = buffer.getSize();

    for(int ch = 0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);

      for(int i=0; i<size; ++i){
        float delaySample = circularBuffer[writeIdx];
        float v = buf[i] + circularBuffer[writeIdx] * feedback;
        v = applyBias(v, bias);
        circularBuffer[writeIdx] = min(1, max(-1, v)); // Guard: hard range limits.
        buf[i] = linearBlend(buf[i], delaySample, dryWetMix);

        writeIdx = (writeIdx + 1) % sampleDelay; // advance write position without modifying writeIdx twice in one expression
      }
    }
  }
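applyBias() and getBiasExponent() are patch helpers that are not included in this excerpt. One common way to write this kind of bias shaping is sketched below, assuming the MIN_BIAS/MED_BIAS/MAX_BIAS constants from the BiasedDelayPatch constructor (Example no. 29) bound the exponent; the real code may differ:

  // Assumed: map a 0..1 control to an exponent, with MED_BIAS at the midpoint
  float getBiasExponent(float p){
    return p < 0.5f ? MIN_BIAS + (MED_BIAS - MIN_BIAS) * (p * 2.0f)
                    : MED_BIAS + (MAX_BIAS - MED_BIAS) * ((p - 0.5f) * 2.0f);
  }
  // Assumed: sign-preserving power curve applied to the delayed signal
  float applyBias(float v, float bias){
    return v < 0.0f ? -powf(-v, bias) : powf(v, bias);
  }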
 void processAudio(AudioBuffer &buffer){
   FloatArray l1 = buffer.getSamples(LEFT_CHANNEL);
   FloatArray r1 = buffer.getSamples(RIGHT_CHANNEL);
   FloatArray l2 = buf->getSamples(LEFT_CHANNEL);
   FloatArray r2 = buf->getSamples(RIGHT_CHANNEL);
   float morph = getParameterValue(MORPH_PARAMETER);
   l2.copyFrom(l1);
   r2.copyFrom(r1);
   green.processAudio(*buf);
   red.processAudio(buffer);
   int size = buffer.getSize();
   for(int i=0; i<size; ++i){
     l1[i] = l1[i]*(1-morph) + l2[i]*morph;
     r1[i] = r1[i]*(1-morph) + r2[i]*morph;
   }
 }
    void processAudio(AudioBuffer &buffer) {
        double rate = getSampleRate();

        float p1 = getRampedParameterValue(PARAMETER_A);
        float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        double step1 = freq1 / rate;
        float amt1 = getRampedParameterValue(PARAMETER_B);

        float p2 = getRampedParameterValue(PARAMETER_C);
        float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        float amt2 = getRampedParameterValue(PARAMETER_D);
        double step2 = freq2 / rate;

        int size = buffer.getSize();

        for(int ch = 0; ch<buffer.getChannels(); ++ch)
        {
            float* buf = buffer.getSamples(ch);

            for (int i=0; i<size; ++i)
            {
                float mod1 = sin(2 * M_PI * phase1) / 2 + .5; // 0..1
                float mod2 = sin(2 * M_PI * phase2) / 2 + .5; // 0..1
                float gain1 = (amt1 * mod1) + (1 - amt1);
                float gain2 = (amt2 * mod2) + (1 - amt2);
                buf[i] = (gain1 * gain2) * buf[i];
                phase1 += step1;
                phase2 += step2;
                // keep the LFO phases in [0,1) so sin() does not lose precision over long runs
                if(phase1 >= 1.0) phase1 -= 1.0;
                if(phase2 >= 1.0) phase2 -= 1.0;
            }
        }

    }
Example no. 7
  void processAudio(AudioBuffer &buffer){
    float y[getBlockSize()];
    setCoeffs(getLpFreq(), 0.8f);
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value

    if(abs(time - delayTime) < 0.01)
      delayTime = time;
    else
      time = delayTime;
        
    float delaySamples = delayTime * (delayBuffer.getSize()-1);        
    int size = buffer.getSize();
    float* x = buffer.getSamples(0);
    process(size, x, y);     // low pass filter for delay buffer
    for(int n = 0; n < size; n++){
        
      //linear interpolation for delayBuffer index
      float dSamples = olddelaySamples + (delaySamples - olddelaySamples) * n / size;
        
      y[n] = y[n] + feedback * delayBuffer.read(dSamples);
      x[n] = (1.f - wetDry) * x[n] + wetDry * y[n];  //crossfade for wet/dry balance
      delayBuffer.write(x[n]);
    }
    olddelaySamples = delaySamples;
  }
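dSamples is a fractional index, so delayBuffer.read() has to interpolate between taps. A minimal sketch of a linearly interpolated read on a plain circular buffer, assuming a write index that points at the most recently written sample and a delay smaller than the buffer size; the CircularBuffer class used by these patches may implement this differently:

  // Assumed layout: `writeIndex` is the newest sample; read `fractionalDelay`
  // samples behind it and blend the two nearest taps.
  float interpolatedRead(const float* buffer, int size, int writeIndex, float fractionalDelay){
    int whole = (int)fractionalDelay;            // assumes fractionalDelay < size
    float frac = fractionalDelay - whole;
    int i0 = (writeIndex - whole + size) % size; // nearer tap
    int i1 = (i0 - 1 + size) % size;             // one sample older
    return buffer[i0] * (1.0f - frac) + buffer[i1] * frac;
  }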
Example no. 8
  void processAudio(AudioBuffer &buffer){
    
    setCoeffs(getLpFreq(), 0.8f);
        
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value
        
    float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
        
    int size = buffer.getSize();
      
      for (int ch = 0; ch<buffer.getChannels(); ++ch) {
          
          float* buf = buffer.getSamples(ch);
          process(size, buf, outBuf);     // low pass filter for delay buffer
          
          for(int i = 0; i < size; i++){

              outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
              buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i];  //crossfade for wet/dry balance
              delayBuffer.write(buf[i]);
          }
      }
  }
Example no. 9
  void processAudio(AudioBuffer &buffer) {
        
    int size  = buffer.getSize();
    float y;
        
    rate      = Rate(getParameterValue(PARAMETER_A));
    depth     = getParameterValue(PARAMETER_B);
    feedback  = getParameterValue(PARAMETER_C);
        
    //calculate and update phaser sweep lfo...
    float d  = _dmin + (_dmax-_dmin) * ((sin( _lfoPhase ) + 1.f)/2.f);
        
    _lfoPhase += rate;
    if( _lfoPhase >= M_PI * 2.f )
      _lfoPhase -= M_PI * 2.f;
        
    //update filter coeffs
    for( int i=0; i<6; i++ )
      _alps[i].Delay( d );
      
      
    // for (int ch = 0; ch<buffer.getChannels(); ++ch) {
    float* buf = buffer.getSamples(0);
    for (int i = 0; i < size; i++) {
      // calculate output
      y = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(
            _alps[5].Update( buf[i] + _zm1 * feedback ))))));
      _zm1 = y;

      buf[i] = buf[i] + y * depth;
    }
    // } // end of commented-out per-channel loop
  }
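The _alps[] stages are not defined in this excerpt. The classic first-order allpass stage used in this well-known phaser recipe looks like the sketch below; the patch's actual class may differ in detail, but Delay() sets the coefficient and Update() processes one sample:

  class AllpassDelay {
  public:
    AllpassDelay() : _a1(0.f), _zm1(0.f) {}
    void Delay(float delay){            // expected range roughly 0..1
      _a1 = (1.f - delay) / (1.f + delay);
    }
    float Update(float in){             // flat magnitude response, frequency-dependent phase
      float y = in * -_a1 + _zm1;
      _zm1 = y * _a1 + in;
      return y;
    }
  private:
    float _a1, _zm1;
  };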
Example no. 10
  void processAudio(AudioBuffer &buffer){
//     assert_param(buffer.getChannels() > 1);
    float gainLL = getParameterValue(PARAMETER_A);
    float gainLR = getParameterValue(PARAMETER_B);
    float gainRL = getParameterValue(PARAMETER_C);
    float gainRR = getParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    float* left = buffer.getSamples(0);
    float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
    float l, r;
    for(int i=0; i<size; ++i){
      l = gainLL*left[i] + gainLR*right[i];
      r = gainRL*left[i] + gainRR*right[i];
      left[i] = l;
      right[i] = r;
    }
  }
Example no. 11
 FixedDelayPatch() {
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Feedback");
   registerParameter(PARAMETER_B, "Mix");
   registerParameter(PARAMETER_C, "");    
   registerParameter(PARAMETER_D, "");    
 }
Example no. 12
 void processAudio(AudioBuffer &buffer) {
   float fundamental = getParameterValue(PARAMETER_A)*5.0 - 1.0;
   FloatArray left = buffer.getSamples(LEFT_CHANNEL);
   hz.setTune(fundamental);
   float freq = hz.getFrequency(0);
   algo.setFrequency(freq);
   algo.getSamples(left);
 }
Example no. 13
void PatchController::process(AudioBuffer& buffer){
  if(activeSlot == GREEN && green.index != settings.patch_green){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // green must be active slot when patch constructor is called
    green.setPatch(settings.patch_green);
    codec.softMute(false);
    debugClear();
    return;
  }else if(activeSlot == RED && red.index != settings.patch_red){
    memset(buffer.getSamples(0), 0, buffer.getChannels()*buffer.getSize()*sizeof(float));
    // red must be active slot when constructor is called
    red.setPatch(settings.patch_red);
    codec.softMute(false);
    debugClear();
    return;
  }
  switch(mode){
  case SINGLE_MODE:
  case DUAL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    break;
  case DUAL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    red.patch->processAudio(buffer);
    break;
  case SERIES_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case SERIES_RED_MODE:
    red.setParameterValues(getAnalogValues());
    green.patch->processAudio(buffer);
    red.patch->processAudio(buffer);
    break;
  case PARALLEL_GREEN_MODE:
    green.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  case PARALLEL_RED_MODE:
    red.setParameterValues(getAnalogValues());
    processParallel(buffer);
    break;
  }
}
Example no. 14
 void processAudio(AudioBuffer &buffer) {
   float* x = buffer.getSamples(0);
   float feedback = getParameterValue(PARAMETER_A);
   float mix = getParameterValue(PARAMETER_B);
   for(int n = 0; n < buffer.getSize(); n++){
     x[n] = delayBuffer.tail()*mix + x[n]*(1.0f-mix);
     delayBuffer.write(feedback * x[n]);
   }
 }
Example no. 15
 FlangerPatch(){
   AudioBuffer* buffer = createMemoryBuffer(1, FLANGER_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Rate");
   registerParameter(PARAMETER_B, "Depth");
   registerParameter(PARAMETER_C, "Feedback");
   registerParameter(PARAMETER_D, "");    
   phase = 0;
 }
Example no. 16
    DubDelayPatch() {
        registerParameter(PARAMETER_A, "Time");
        registerParameter(PARAMETER_B, "Feedback");
        registerParameter(PARAMETER_C, "Tone");
        registerParameter(PARAMETER_D, "Wet");

        AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
        delayBuffer.initialise(buffer->getSamples(0), REQUEST_BUFFER_SIZE);
    }
Example no. 17
 void processAudio(AudioBuffer& buffer){
   float tone = 120*powf(2, getParameterValue(PARAMETER_A)*4);
   float decay = getParameterValue(PARAMETER_B);
   float accent = getParameterValue(PARAMETER_E);
   hat->setFrequency(tone);
   hat->setFilter(getParameterValue(PARAMETER_A)*0.3 + 0.5);
   hat->setDecay(decay);
   hat->setAccent(accent);
   tone = 20*powf(2, getParameterValue(PARAMETER_C)*4);
   decay = getParameterValue(PARAMETER_D);
   kick->setFrequency(tone);
   kick->setDecay(decay);
   kick->setAccent(accent);
   FloatArray left = buffer.getSamples(LEFT_CHANNEL);
   FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
   drum[0]->getSamples(left);
   drum[1]->getSamples(right);
 }
Example no. 18
 LpfDelayPatch() : x1(0.0f), x2(0.0f), y1(0.0f), y2(0.0f), olddelaySamples(0.0f) {
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Delay", "Delay time");
   registerParameter(PARAMETER_B, "Feedback", "Delay loop feedback");
   registerParameter(PARAMETER_C, "Fc", "Filter cutoff frequency");
   registerParameter(PARAMETER_D, "Dry/Wet", "Dry/wet mix");
   setCoeffs(getLpFreq()/getSampleRate(), 0.6f);
 }    
 SimpleDelayPatch() : delay(0), alpha(0.04), dryWet(0.f)
 {
   registerParameter(PARAMETER_A, "Delay");
   registerParameter(PARAMETER_B, "Feedback");
   registerParameter(PARAMETER_C, "");
   registerParameter(PARAMETER_D, "Dry/Wet");
   AudioBuffer* buffer = createMemoryBuffer(1, SIMPLE_DELAY_REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
 }
Example no. 20
 void processAudio(AudioBuffer &buffer){
   prepare();
   int size = buffer.getSize();
     
   for (int ch = 0; ch<buffer.getChannels(); ++ch) {
       float* buf = buffer.getSamples(ch);
       for(int i = 0; i < size; ++i) buf[i] = processSample(buf[i]);
   }
 }
Example no. 21
 void processAudio(AudioBuffer &buffer) {
   float paramA = getParameterValue(PARAMETER_A);
   float paramB = getParameterValue(PARAMETER_B);
   float paramC = getParameterValue(PARAMETER_C);
   float paramD = getParameterValue(PARAMETER_D);
   float paramE = getParameterValue(PARAMETER_E);    
   // Note: The third parameter is the timestamp at which to execute the message,
   // but in this case it simply means to execute it immediately. "f" says that
   // the message contains one element and its type is float. paramA is then the
   // value.
   hv_vscheduleMessageForReceiver(context, "Channel-A", 0.0, "f", paramA);
   hv_vscheduleMessageForReceiver(context, "Channel-B", 0.0, "f", paramB);
   hv_vscheduleMessageForReceiver(context, "Channel-C", 0.0, "f", paramC);
   hv_vscheduleMessageForReceiver(context, "Channel-D", 0.0, "f", paramD);
   hv_vscheduleMessageForReceiver(context, "Channel-E", 0.0, "f", paramE);
   float* outputs[] = {buffer.getSamples(0), buffer.getSamples(1) };    
   hv_owl_process(context, outputs, outputs, getBlockSize());		     
 }
Example no. 22
  void processAudio(AudioBuffer &buffer){
    float gain = getParameterValue(PARAMETER_A)*2;
    int size = buffer.getSize();
    for(int ch=0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      for(int i=0; i<size; ++i)
	buf[i] = gain*buf[i];
    }
  }
  void processAudio(AudioBuffer &buffer)
  {
    int size = buffer.getSize();

    float samp_float = getParameterValue(PARAMETER_A);
    int samp_freq = ceil(samp_float*63+0.1);

    float mayhem_rate = getParameterValue(PARAMETER_B);
    mayhem_rate *= 0.03;
    float mayhem = 1;

    if(abs(getParameterValue(PARAMETER_C)*2+1-prev_freq) > 0.01){ // if the knob was turned
      mayhem_freq = getParameterValue(PARAMETER_C); // update center frequency
      mayhem_freq *= 2;
      mayhem_freq += 1;          // mayhem_freq range = 1 to 3 --> 375 to 1125 Hz
      prev_freq = mayhem_freq;   // store value to compare next time
    }

    float mayhem_depth = getParameterValue(PARAMETER_D);
    mayhem_depth *= depth;

    // for(int ch=0; ch<buffer.getChannels(); ++ch){
    float* buf = buffer.getSamples(0);
    for(int i=0; i<size; ++i){
      if(i%samp_freq==0){
        buf[i] = buf[i]*((1-mayhem)+mayhem*abs(cos(2*M_PI*mayhem_freq*(i+update_freq_cnt*size)/size)));
        samp = buf[i];
      }else{
        buf[i] = samp;
        // buf[i] = samp*(1-mayhem)+buf[i]*mayhem*abs(cos(2*M_PI*mayhem_freq*(i+update_freq_cnt*size)/size));
      }
    }
    // update_freq_cnt++;
    // if(update_freq_cnt == 10)
    {
      update_freq_cnt = 0;
      if(mayhem_freq >= prev_freq+mayhem_depth || mayhem_freq >= 3) inc_flag = 0; // sets maximum freq 3*fs/size = 1125 Hz
      if(mayhem_freq <= prev_freq-mayhem_depth || mayhem_freq <= 1) inc_flag = 1; // minimum freq that can be achieved in 128 samples is 375 Hz
      if(inc_flag == 0){
        mayhem_freq /= 1+mayhem_rate*mayhem_depth/depth;
        // freq = floor(fs/size*mayhem_freq);  // only integer frequencies
      }
      if(inc_flag == 1){
        mayhem_freq *= 1+mayhem_rate*mayhem_depth/depth;
        // freq = ceil(fs/size*mayhem_freq);   // only integer frequencies
      }
      // mayhem_freq = freq*size/fs;  // only integer frequencies
    }
  }
 SimpleDriveDelayPatch() : delay(0)
   {
   registerParameter(PARAMETER_A, "Delay");
   registerParameter(PARAMETER_B, "Feedback");
   registerParameter(PARAMETER_C, "Drive");
   registerParameter(PARAMETER_D, "Wet/Dry ");
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
     }
Example no. 25
  void processAudio(AudioBuffer& _buf){
    uint32_t sample_count = _buf.getSize();
    float s_rate = getSampleRate();
    bits = getParameterValue(PARAMETER_A)*23 - 1;
    fs = getParameterValue(PARAMETER_B)*0.999 - 0.001;
    input = _buf.getSamples(0);
    output = _buf.getSamples(0);
    DecimatorPatch* plugin_data = this;

    unsigned long pos;
    float step, stepr, delta, ratio;
    double dummy;

    if (bits >= 31.0f || bits < 1.0f) {
      step = 0.0f;
      stepr = 1.0f;
    } else {
      step = pow(0.5f, bits - 0.999f);
      stepr = 1/step;
    }

    if (fs >= sample_rate) {
      ratio = 1.0f;
    } else {
      ratio = fs/sample_rate;
    }

    for (pos = 0; pos < sample_count; pos++) {
      count += ratio;

      if (count >= 1.0f) {
        count -= 1.0f;
        delta = modf((input[pos] + (input[pos]<0?-1.0:1.0)*step*0.5) * stepr, &dummy) * step;
        last_out = input[pos] - delta;
        buffer_write(output[pos], last_out);
      } else {
        buffer_write(output[pos], last_out);
      }
    }

    plugin_data->last_out = last_out;
    plugin_data->count = count;
  }
Example no. 26
 void processAudio(AudioBuffer &buffer){
   float rate = getParameterValue(PARAMETER_A);
   rate = rate*rate*0.005;
   float a = getParameterValue(PARAMETER_B) - 0.5;
   if(!triggered && isButtonPressed(PUSHBUTTON)){
     x = -6+getParameterValue(PARAMETER_C)*6;
     triggered = true;
   }else if(!isButtonPressed(PUSHBUTTON)){
     triggered = false; // re-arm only once the button is released, so a held button triggers once
   }
   int size = buffer.getSize();
   FloatArray left = buffer.getSamples(LEFT_CHANNEL);
   FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
   for(int i=0; i<size; ++i){
     left[i] = agnesi(x, a);
     right[i] = serpentine(x, a);
     x += rate;
   }
 }
  void processAudio(AudioBuffer &buffer) {
    float tune = getParameterValue(PARAMETER_A)*2.0 - 1.0;
    int octave = round(getParameterValue(PARAMETER_B)*8)-6;
    float gain = getParameterValue(PARAMETER_D);
    gain = gain*gain*0.8;
    FloatArray left = buffer.getSamples(LEFT_CHANNEL);
    FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
    hz.setTune(tune+octave);
    for(int n = 0; n<buffer.getSize(); n++){
      float frequency = hz.getFrequency(left[n]);
      float amp = hz.sampleToVolts(right[n]);
      amp = gain + amp*amp*0.5;
      left[n] = amp*wave(pos);
      right[n] = left[n];
      pos += frequency*mul;
      if(pos > 1.0)
	pos -= 2.0;
    }
  }
  void processAudio(AudioBuffer &buffer){
    data.numSamps = getParameterValue(PARAMETER_A) * (KP_NUM_SAMPLES-8)+8;
    data.amp = getParameterValue(PARAMETER_B);
    data.g = getParameterValue(PARAMETER_C)*(0.5-0.48)+0.48;
    data.duration = getParameterValue(PARAMETER_D) * KP_NUM_BUFFER;

    if(isButtonPressed(PUSHBUTTON) && !data.noteOn){
      data.noteOn = true;
      pressButton(RED_BUTTON);
    }

    int size = buffer.getSize();
    float* left = buffer.getSamples(0);
    float* right = buffer.getChannels() > 1 ? buffer.getSamples(1) : left;
    for(int i=0; i<size; ++i){
      if(data.noteOn){
	if(data.phase > (data.numSamps +  1)){
	  // if we have filled up our delay line, y(n) = g * (y(n-N) + y( n-(N+1) ))
	  data.pluck[data.phase] = data.g * ( data.pluck[data.phase-data.numSamps]
					      + data.pluck[data.phase - (data.numSamps + 1)] );
	}else{
	  // computing the first N samples, y(n) = x(n)
	  if(data.noiseType == KP_NOISETYPE_GAUSSIAN)
	    data.pluck[data.phase] = data.noise[data.phase]; // use gaussian white noise
	  if(data.noiseType == KP_NOISETYPE_RANDOM)
	    data.pluck[data.phase] = rand()%100000/100000.;  // use random noise
	}
	left[i] = data.amp * data.pluck[data.phase];  // left channel
	right[i] = data.amp * data.pluck[data.phase];  // right channel
	if(data.phase >= data.duration){
	  // if we have reached the end of our duration
	  data.phase = 0;
	  data.noteOn = false;
	  pressButton(GREEN_BUTTON);
	}else{
	  data.phase++;
	}
      }else{
	left[i] = right[i] = 0;
      }
    }
  }
Example no. 29
  BiasedDelayPatch() : MIN_DELAY(0.01), MAX_DELAY(4), MIN_BIAS(0.5), MED_BIAS(1), MAX_BIAS(3), ramp(0.1), circularBuffer(NULL) {
    registerParameter(PARAMETER_A, "Delay");
    registerParameter(PARAMETER_B, "Feedback");
    registerParameter(PARAMETER_C, "Bias");
    registerParameter(PARAMETER_D, "Dry/Wet");
    memset(oldVal, 0, sizeof(oldVal));

    AudioBuffer* buffer = createMemoryBuffer(1, MAX_DELAY * getSampleRate());
    bufferSize = buffer->getSize();    
    circularBuffer = buffer->getSamples(0);
  }
Example no. 30
  void processAudio(AudioBuffer &buffer) {
    float frequency = getParameterValue(PARAMETER_A) * 10000;
    float amplitude = getParameterValue(PARAMETER_B);
    float* left = buffer.getSamples(LEFT_CHANNEL);
    float linc = frequency/getSampleRate();
    int size = buffer.getSize();
    for(int n = 0; n<size; n++){
      left[n] = sinf(2*M_PI*pos) * amplitude;
      if((pos += linc) > 1.0f)
	pos -= 1.0f;
    }
  }