void processAudio(AudioBuffer& buf){
    float minf = getParameterValue(PARAMETER_A)*0.1 + 0.001;
    float maxf = min(0.4, minf + getParameterValue(PARAMETER_B)*0.2);
    // range should be exponentially related to minf
    //    int tones = getParameterValue(PARAMETER_C)*(TONES-1) + 1;
    int tones = 12;
    float spread = getParameterValue(PARAMETER_C) + 1.0;
    float rate = 1.0 + (getParameterValue(PARAMETER_D) - 0.5)*0.00002;
    int size = buf.getSize();
    FloatArray out = buf.getSamples(LEFT_CHANNEL);
    float amp;
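    // inc[] holds each tone's per-sample phase increment and acc[] its phase
    // accumulator; both are presumably member arrays of the patch.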
    // geometric spread: each successive tone's increment is the previous one times spread
    for(int t=1; t<tones; ++t)
      inc[t] = inc[t-1]*spread;
    for(int i=0; i<size; ++i){
      for(int t=0; t<tones; ++t){
        amp = getAmplitude((inc[t]-minf)/(maxf-minf));
        out[i] += amp * getWave(acc[t]);
        acc[t] += inc[t];
        if(acc[t] > 1.0)      // wrap the phase accumulator into [0, 1)
          acc[t] -= 1.0;
        else if(acc[t] < 0.0)
          acc[t] += 1.0;
        inc[t] *= rate;       // drift all increments slowly up or down
      }
    }
    // once the base increment drifts out of [minf, maxf], wrap it to the other end
    if(inc[0] > maxf)
      inc[0] = minf;
    else if(inc[0] < minf)
      inc[0] = maxf;
    // alternative: step back into range by octaves
    // while(inc[0] > minf)
    //   inc[0] *= 0.5;
    // while(inc[0] < maxf)
    //   inc[0] *= 2.0;
  }
Example #2
void
JackLayer::read(AudioBuffer &buffer)
{
    for (unsigned i = 0; i < in_ringbuffers_.size(); ++i) {

        const size_t incomingSamples = jack_ringbuffer_read_space(in_ringbuffers_[i]) / sizeof(captureFloatBuffer_[0]);
        if (!incomingSamples)
            continue;

        captureFloatBuffer_.resize(incomingSamples);
        buffer.resize(incomingSamples);

        // read from the ringbuffer (never more than is actually available)
        const size_t from_ringbuffer = jack_ringbuffer_read_space(in_ringbuffers_[i]);
        const size_t expected_bytes = std::min(incomingSamples * sizeof(captureFloatBuffer_[0]), from_ringbuffer);
        // FIXME: while we have samples to write AND while we have space to write them
        const size_t read_bytes = jack_ringbuffer_read(in_ringbuffers_[i],
                (char *) captureFloatBuffer_.data(), expected_bytes);
        if (read_bytes < expected_bytes) {
            RING_WARN("Dropped %zu bytes", expected_bytes - read_bytes);
            break;
        }

        /* Write the data one frame at a time.  This is
         * inefficient, but makes things simpler. */
        // FIXME: this is braindead, we should write blocks of samples at a time
        // convert the captured float samples into this channel of the destination buffer
        convertFromFloat(captureFloatBuffer_, *buffer.getChannel(i));
    }
}
Example #3
void AudioBuffer::convert_channels(AudioBuffer &_dest, unsigned _frames_count)
{
	AudioSpec destspec{m_spec.format, _dest.channels(), m_spec.rate};
	if(_dest.spec() != destspec) {
		throw std::logic_error("unsupported format");
	}

	_frames_count = std::min(frames(), _frames_count); // clamp to the frames actually available
	if(m_spec.channels == destspec.channels) {
		_dest.add_frames(*this,_frames_count);
		return;
	}

	switch(m_spec.format) {
		case AUDIO_FORMAT_U8:
			convert_channels<uint8_t>(*this,_dest,_frames_count);
			break;
		case AUDIO_FORMAT_S16:
			convert_channels<int16_t>(*this,_dest,_frames_count);
			break;
		case AUDIO_FORMAT_F32:
			convert_channels<float>(*this,_dest,_frames_count);
			break;
		default:
			throw std::logic_error("unsupported format");
	}
}
Example #4
  void processAudio(AudioBuffer &buffer){
    float y[getBlockSize()];
    setCoeffs(getLpFreq(), 0.8f);
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value

    // snap tiny parameter changes to the current value to avoid zipper noise;
    // fabs() is needed here, since integer abs() would truncate the difference to zero
    if(fabs(time - delayTime) < 0.01)
      delayTime = time;
    else
      time = delayTime;
        
    float delaySamples = delayTime * (delayBuffer.getSize()-1);        
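    // delaySamples is the target fractional delay; the loop below glides from
    // olddelaySamples to this value across the block to avoid zipper noise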
    int size = buffer.getSize();
    float* x = buffer.getSamples(0);
    process(size, x, y);     // low pass filter for delay buffer
    for(int n = 0; n < size; n++){
        
      //linear interpolation for delayBuffer index
      dSamples = olddelaySamples + (delaySamples - olddelaySamples) * n / size;
        
      y[n] = y[n] + feedback * delayBuffer.read(dSamples);
      x[n] = (1.f - wetDry) * x[n] + wetDry * y[n];  //crossfade for wet/dry balance
      delayBuffer.write(x[n]);
    }
    olddelaySamples = delaySamples;
  }
Example #5
  DroneBoxPatch()
  : mRamp(0.1)
  , mPrevCoarsePitch(-1.)
  , mPrevFinePitch(-1.)
  , mPrevDecay(-1.)
  {
    registerParameter(PARAMETER_A, "Coarse Pitch", "Coarse Pitch");
    registerParameter(PARAMETER_B, "Fine Pitch", "Fine Pitch");
    registerParameter(PARAMETER_C, "Decay", "Decay");
    registerParameter(PARAMETER_D, "Mix", "Mix");

    mOldValues[0] = 0.f; 
    mOldValues[1] = 0.f;
    mOldValues[2] = 0.f;
    mOldValues[3] = 0.f;
    
    for (int c=0;c<NUM_COMBS;c++)
    {
      AudioBuffer* buffer = createMemoryBuffer(2, BUF_SIZE);
      mCombs[c].setBuffer(buffer->getSamples(0), buffer->getSamples(1));
      mCombs[c].setSampleRate(getSampleRate());
      mCombs[c].clearBuffer();
    }
    
    mDCBlockerL.setSampleRate(getSampleRate());
    mDCBlockerR.setSampleRate(getSampleRate());
  }
Example #6
int lua_AudioBuffer_addRef(lua_State* state)
{
    // Get the number of parameters.
    int paramCount = lua_gettop(state);

    // Attempt to match the parameters to a valid binding.
    switch (paramCount)
    {
        case 1:
        {
            if ((lua_type(state, 1) == LUA_TUSERDATA))
            {
                AudioBuffer* instance = getInstance(state);
                instance->addRef();
                
                return 0;
            }

            lua_pushstring(state, "lua_AudioBuffer_addRef - Failed to match the given parameters to a valid function signature.");
            lua_error(state);
            break;
        }
        default:
        {
            lua_pushstring(state, "Invalid number of parameters (expected 1).");
            lua_error(state);
            break;
        }
    }
    return 0;
}
Example #7
  void processAudio(AudioBuffer &buffer){
    
    setCoeffs(getLpFreq(), 0.8f);
        
    float delayTime = getParameterValue(PARAMETER_A); // get delay time value    
    float feedback  = getParameterValue(PARAMETER_B); // get feedback value
    float wetDry    = getParameterValue(PARAMETER_D); // get gain value
        
    float delaySamples = delayTime * (DELAY_BUFFER_LENGTH-1);
        
    int size = buffer.getSize();
      
    for (int ch = 0; ch < buffer.getChannels(); ++ch) {
      float* buf = buffer.getSamples(ch);
      process(size, buf, outBuf);     // low pass filter for delay buffer

      for(int i = 0; i < size; i++){
        outBuf[i] = outBuf[i] + feedback * delayBuffer.read(delaySamples);
        buf[i] = (1.f - wetDry) * buf[i] + wetDry * outBuf[i];  // crossfade for wet/dry balance
        delayBuffer.write(buf[i]);
      }
    }
  }
Example #8
// This one puts some data inside the ring buffer.
void RingBuffer::put(AudioBuffer& buf)
{
    std::lock_guard<std::mutex> l(lock_);
    const size_t sample_num = buf.frames();
    const size_t buffer_size = buffer_.frames();
    if (buffer_size == 0)
        return;

    size_t len = putLength();
    if (buffer_size - len < sample_num)
        discard(sample_num);
    size_t toCopy = sample_num;

    // Add more channels if the input buffer holds more channels than the ring.
    if (buffer_.channels() < buf.channels())
        buffer_.setChannelNum(buf.channels());

    size_t in_pos = 0;
    size_t pos = endPos_;

    while (toCopy) {
        size_t block = toCopy;

        if (block > buffer_size - pos) // Wrap block around ring ?
            block = buffer_size - pos; // Fill in to the end of the buffer

        buffer_.copy(buf, block, in_pos, pos);
        in_pos += block;
        pos = (pos + block) % buffer_size;
        toCopy -= block;
    }

    endPos_ = pos;
    not_empty_.notify_all();
}
Example #9
void SampleCollector::append(const AudioBuffer & buf)
{
    uint32_t count = buf.get_count();
    uint32_t last = (m_first + m_count) % m_length;
    uint32_t firstHalf = std::min(count, m_length - last);
    uint32_t secondHalf = count - firstHalf;

    // Copy first half.
    std::memcpy(m_samples + last, buf.get_buffer(), firstHalf * sizeof(float));

    // Copy wrapped.
    if (secondHalf)
    {
        std::memcpy(m_samples, buf.get_buffer() + firstHalf, secondHalf * sizeof(float));
    }

    uint32_t newLast = (last + count) % m_length;
    if (m_count >= m_length && newLast > m_first)
    {
        m_first = newLast;
    }

    if (m_count < m_length)
    {
        m_count = std::min(m_count + count, m_length);
    }
}
Example #10
void JuceDemoPluginAudioProcessor::process (AudioBuffer<FloatType>& buffer,
                                            MidiBuffer& midiMessages,
                                            AudioBuffer<FloatType>& delayBuffer)
{
    const int numSamples = buffer.getNumSamples();

    // apply our gain-change to the incoming data..
    applyGain (buffer, delayBuffer);

    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // and now get our synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Apply our delay effect to the new output..
    applyDelay (buffer, delayBuffer);

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, numSamples);

    // Now ask the host for the current time so we can store it to be displayed later...
    updateCurrentTimeInfoFromHost();
}
Example #11
    void processAudio(AudioBuffer &buffer){
      double rate = getSampleRate();

      unsigned int sampleDelay = getSampleDelay(getRampedParameterValue(PARAMETER_A), rate);
      sampleDelay = min(sampleDelay, bufferSize);
      float feedback = getRampedParameterValue(PARAMETER_B);
      float bias = getBiasExponent(1 - getRampedParameterValue(PARAMETER_C));
      float dryWetMix = getRampedParameterValue(PARAMETER_D);

      int size = buffer.getSize();

      for(int ch = 0; ch < buffer.getChannels(); ++ch)
      {
        float* buf = buffer.getSamples(ch);

        for (int i = 0; i < size; ++i)
        {
          float delaySample = circularBuffer[writeIdx];
          float v = buf[i] + circularBuffer[writeIdx] * feedback;
          v = applyBias(v, bias);
          circularBuffer[writeIdx] = min(1.0f, max(-1.0f, v)); // Guard: hard range limits.
          buf[i] = linearBlend(buf[i], delaySample, dryWetMix);

          writeIdx = (writeIdx + 1) % sampleDelay; // advance and wrap the write index
        }
      }
    }
Example #12
void JuceDemoPluginAudioProcessor::applyDelay (AudioBuffer<FloatType>& buffer, AudioBuffer<FloatType>& delayBuffer)
{
    const int numSamples = buffer.getNumSamples();
    const float delayLevel = *delayParam;

    int delayPos = 0;

    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        FloatType* const channelData = buffer.getWritePointer (channel);
        FloatType* const delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
        delayPos = delayPosition;

        for (int i = 0; i < numSamples; ++i)
        {
            const FloatType in = channelData[i];
            channelData[i] += delayData[delayPos];
            delayData[delayPos] = (delayData[delayPos] + in) * delayLevel;

            if (++delayPos >= delayBuffer.getNumSamples())
                delayPos = 0;
        }
    }

    delayPosition = delayPos;
}
Example #13
    void processAudio(AudioBuffer &buffer) {
        double rate = getSampleRate();

        float p1 = getRampedParameterValue(PARAMETER_A);
        float freq1 = p1*p1 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        double step1 = freq1 / rate;
        float amt1 = getRampedParameterValue(PARAMETER_B);

        float p2 = getRampedParameterValue(PARAMETER_C);
        float freq2 = p2*p2 * (MAX_FREQ-MIN_FREQ) + MIN_FREQ;
        float amt2 = getRampedParameterValue(PARAMETER_D);
        double step2 = freq2 / rate;

        int size = buffer.getSize();

        for(int ch = 0; ch<buffer.getChannels(); ++ch)
        {
            float* buf = buffer.getSamples(ch);

            for (int i=0; i<size; ++i)
            {
                float mod1 = sin(2 * M_PI * phase1) / 2 + .5; // 0..1
                float mod2 = sin(2 * M_PI * phase2) / 2 + .5; // 0..1
                float gain1 = (amt1 * mod1) + (1 - amt1);
                float gain2 = (amt2 * mod2) + (1 - amt2);
                buf[i] = (gain1 * gain2) * buf[i];
                phase1 += step1;
                phase2 += step2;
            }
        }

    }
Example #14
int lua_AudioBuffer_getRefCount(lua_State* state)
{
    // Get the number of parameters.
    int paramCount = lua_gettop(state);

    // Attempt to match the parameters to a valid binding.
    switch (paramCount)
    {
        case 1:
        {
            if ((lua_type(state, 1) == LUA_TUSERDATA))
            {
                AudioBuffer* instance = getInstance(state);
                unsigned int result = instance->getRefCount();

                // Push the return value onto the stack.
                lua_pushunsigned(state, result);

                return 1;
            }

            lua_pushstring(state, "lua_AudioBuffer_getRefCount - Failed to match the given parameters to a valid function signature.");
            lua_error(state);
            break;
        }
        default:
        {
            lua_pushstring(state, "Invalid number of parameters (expected 1).");
            lua_error(state);
            break;
        }
    }
    return 0;
}
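Example #15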
 void processAudio(AudioBuffer &buffer)
 {
     // Reasonably assume we will not have more than 32 channels
     float*  ins[32];
     float*  outs[32];
     int     n = buffer.getChannels();
     
     if ( (fDSP.getNumInputs() < 32) && (fDSP.getNumOutputs() < 32) ) {
         
         // create the table of input channels
         for(int ch=0; ch<fDSP.getNumInputs(); ++ch) {
             ins[ch] = buffer.getSamples(ch%n);
         }
         
         // create the table of output channels
         for(int ch=0; ch<fDSP.getNumOutputs(); ++ch) {
             outs[ch] = buffer.getSamples(ch%n);
         }
         
         // read OWL parameters and update the corresponding Faust widget zones
         fUI.update(); 
         
         // Process the audio samples
         fDSP.compute(buffer.getSize(), ins, outs);
     }
 }
Example #16
  void processAudio(AudioBuffer &buffer) {
    float delayTime, feedback, wetDry;
    delayTime = getParameterValue(PARAMETER_A);
    feedback  = getParameterValue(PARAMETER_B);
    wetDry    = getParameterValue(PARAMETER_D);
    int size = buffer.getSize();
    int32_t newDelay;
    if(fabs(time - delayTime) > 0.01){
      newDelay = delayTime * (delayBuffer.getSize()-1);
      time = delayTime;
    }else{
      newDelay = delay;
    }
    float* x = buffer.getSamples(0);
    float y;
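    // Slew the delay length in 5-sample steps, averaging across the jump,
    // so that large delay-time changes don't click.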
    for (int n = 0; n < size; n++){
//       y = buf[i] + feedback * delayBuffer.read(delay);
//       buf[i] = wetDry * y + (1.f - wetDry) * buf[i];
//       delayBuffer.write(buf[i]);
      if(newDelay - delay > 4){
        y = getDelayAverage(delay-5, 5);
        delay -= 5;
      }else if(delay - newDelay > 4){
        y = getDelayAverage(delay+5, 5);
        delay += 5;
      }else{
        y = delayBuffer.read(delay);
      }
      x[n] = wetDry * y + (1.f - wetDry) * x[n];  // crossfade for wet/dry balance
      delayBuffer.write(feedback * x[n]);
    }
  }
Example #17
  void processAudio(AudioBuffer &buffer) {
    float paramA = getParameterValue(PARAMETER_A);
    float paramB = getParameterValue(PARAMETER_B);
    float paramC = getParameterValue(PARAMETER_C);
    float paramD = getParameterValue(PARAMETER_D);
    
    // Note: The 0.0 parameter is the timestamp at which to execute the message,
    // but in this case it simply means to execute it immediately. "f" says that
    // the message contains one element and its type is float. paramA is then the
    // value.
    hv_vscheduleMessageForReceiver(context, "Channel-A", 0.0, "f", paramA);
    hv_vscheduleMessageForReceiver(context, "Channel-B", 0.0, "f", paramB);
    hv_vscheduleMessageForReceiver(context, "Channel-C", 0.0, "f", paramC);
    hv_vscheduleMessageForReceiver(context, "Channel-D", 0.0, "f", paramD);

    // int nbSples = buffer.getSize()*buffer.getChannels();
    // int nbSples = buffer.getSize()*HEAVY_CHANNELS;
    // float* inputCopy = (float*)malloc(nbSples*sizeof(float));
    // memcpy(inputCopy, buffer.getSamples(0), nbSples*sizeof(float));

    // float** inputs = { &inputCopy, &inputCopy+getBlockSize()};
    float* outputs[] = {buffer.getSamples(0), buffer.getSamples(1) };
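    // Heavy runs in place here: the same channel pointers are passed as both
    // the input and the output arrays.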
    
    hv_owl_process(context, outputs, outputs, getBlockSize());		     
  }
Example #18
	value lime_audio_load (value data) {
		
		AudioBuffer audioBuffer;
		Resource resource;
		
		if (val_is_string (data)) {
			
			resource = Resource (val_string (data));
			
		} else {
			
			Bytes bytes (data);
			resource = Resource (&bytes);
			
		}
		
		if (WAV::Decode (&resource, &audioBuffer)) {
			
			return audioBuffer.Value ();
			
		}
		
		#ifdef LIME_OGG
		if (OGG::Decode (&resource, &audioBuffer)) {
			
			return audioBuffer.Value ();
			
		}
		#endif
		
		return alloc_null ();
		
	}
Example #19
size_t
RingBufferPool::getData(AudioBuffer& buffer, const std::string& call_id)
{
    std::lock_guard<std::recursive_mutex> lk(stateLock_);

    const auto bindings = getReadBindings(call_id);
    if (not bindings)
        return 0;

    // No mixing
    if (bindings->size() == 1)
        return (*bindings->cbegin())->get(buffer, call_id);

    buffer.reset();
    buffer.setFormat(internalAudioFormat_);

    size_t size = 0;
    AudioBuffer mixBuffer(buffer);

    for (const auto& rbuf : *bindings) {
        // XXX: is it normal to only return the last positive size?
        size = rbuf->get(mixBuffer, call_id);
        if (size > 0)
            buffer.mix(mixBuffer);
    }

    return size;
}
Example #20
  void processAudio(AudioBuffer &buffer) {
        
    int size  = buffer.getSize();
    float y;
        
    rate      = Rate(getParameterValue(PARAMETER_A));
    depth     = getParameterValue(PARAMETER_B);
    feedback  = getParameterValue(PARAMETER_C);
        
    //calculate and update phaser sweep lfo...
    float d  = _dmin + (_dmax-_dmin) * ((sin( _lfoPhase ) + 1.f)/2.f);
        
    _lfoPhase += rate;
    if( _lfoPhase >= M_PI * 2.f )
      _lfoPhase -= M_PI * 2.f;
        
    //update filter coeffs
    for( int i=0; i<6; i++ )
      _alps[i].Delay( d );
      
      
//     for (int ch = 0; ch<buffer.getChannels(); ++ch) {
    float* buf  = buffer.getSamples(0);
    for (int i = 0; i < size; i++) {
      // calculate output: six cascaded all-pass stages with global feedback
      y = _alps[0].Update(_alps[1].Update(_alps[2].Update(_alps[3].Update(_alps[4].Update(
              _alps[5].Update( buf[i] + _zm1 * feedback ))))));
      _zm1 = y;

      buf[i] = buf[i] + y * depth;
    }
//     }
  }
Example #21
 void processAudio(AudioBuffer &buffer){
   if(isButtonPressed(PUSHBUTTON))
     reset();
   dt = getParameterValue(PARAMETER_A)*getParameterValue(PARAMETER_A)*0.0250;
   float rotateX = getParameterValue(PARAMETER_B)*M_PI;
   float rotateY = getParameterValue(PARAMETER_C)*M_PI;
   float rotateZ = getParameterValue(PARAMETER_E)*M_PI;
   float gainL, gainR;
   gainL = gainR = getParameterValue(PARAMETER_D)*2/25.0;
   int size = buffer.getSize();
   float* left = buffer.getSamples(0);
   float* right = buffer.getSamples(1);
   float dx, dy, dz;
   updateMatrix(rotateX, rotateY, rotateZ);
   for(int i=0;i<size;i++){
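     // Lorenz attractor equations: a, b, c correspond to the classic sigma, beta, rho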
     dx = a*(y - x);
     dy = (x * (c - z) - y);
     dz = (x*y - b * z);
     x += dx*dt;
     y += dy*dt;
     z += dz*dt;
     P[0] = x;
     P[1] = y;
     P[2] = z-25; // centre on z axis
     rotateP();
     left[i] = Pprime[0] * gainL;
     right[i] = Pprime[1] * gainR;
   }
   // debugMessage("x/y/z", x, y, z);
 }
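Example #22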
 void processAudio(AudioBuffer &buffer){
   FloatArray fa=buffer.getSamples(0);
   float mix=getParameterValue(PARAMETER_A);
   zcc.setHighPassCutoff(getParameterValue(PARAMETER_B)*15000+150);
   zcc.setLowPassCutoff(getParameterValue(PARAMETER_C)*500+50);
   zcc.process(fa);
   float frequency=zcc.getFrequency();
   float envelope=fa.getRms();
   fa.multiply(1-mix);
   for(int n=0;n<fa.getSize(); n++){
     static float phase=0;
     static float pastEnvelope=0;
     phase += 2.0 * M_PI * frequency/getSampleRate();
     if(phase > 2.0 * M_PI)
       phase -= 2.0 * M_PI;
     if(phase > 4.0*M_PI)
       phase=0;
     envelope=0.1*envelope + pastEnvelope*0.9;
     pastEnvelope=envelope;
     fa[n]+=sin(phase)*mix*envelope;
   }
   fa.multiply(getParameterValue(PARAMETER_D)*10);
   fa.copyTo(buffer.getSamples(1));
   debugMessage("frequency/envelope: ", frequency, envelope);
   // float *coeffs=zcc.getFilter()->getFilterStage(0).getCoefficients();
   // debugMessage("coeffs: ", coeffs[3], coeffs[4], coeffs[2] );
 }
Example #23
void JuceDemoPluginAudioProcessor::applyGain (AudioBuffer<FloatType>& buffer, AudioBuffer<FloatType>& delayBuffer)
{
    ignoreUnused (delayBuffer);
    const float gainLevel = *gainParam;

    for (int channel = 0; channel < getNumInputChannels(); ++channel)
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gainLevel);
}
Example #24
void
AudioSender::cleanup()
{
    audioEncoder_.reset();
    muxContext_.reset();
    micData_.clear();
    resampledData_.clear();
}
Example #25
 FixedDelayPatch() {
   AudioBuffer* buffer = createMemoryBuffer(1, REQUEST_BUFFER_SIZE);
   delayBuffer.initialise(buffer->getSamples(0), buffer->getSize());
   registerParameter(PARAMETER_A, "Feedback");
   registerParameter(PARAMETER_B, "Mix");
   registerParameter(PARAMETER_C, "");    
   registerParameter(PARAMETER_D, "");    
 }
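Example #26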
void DaalDelAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages)
{
    ScopedNoDenormals noDenormals;
    
    auto totalNumInputChannels  = getTotalNumInputChannels();
    auto totalNumOutputChannels = getTotalNumOutputChannels();

    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // This is here to avoid people getting screaming feedback
    // when they first compile a plugin, but obviously you don't need to keep
    // this code if your algorithm always overwrites all the output channels.
    for (auto i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    
    // ====
    // Lengths for circular buffer
    const int bufferLength = buffer.getNumSamples();
    const int delayBufferLength = _delayBuffer.getNumSamples();
    
    
    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    // Make sure to reset the state if your inner loop is processing
    // the samples and the outer loop is handling the channels.
    // Alternatively, you can process the samples with the channels
    // interleaved by keeping the same state.
    for (int channel = 0; channel < totalNumInputChannels; ++channel)
    {
        //auto* channelData = buffer.getWritePointer (channel);

        // ..do something to the data...
        
        // Set up circular buffer
        const float* bufferData = buffer.getReadPointer(channel);
        const float* delayBufferData = _delayBuffer.getReadPointer(channel);
        float* dryBuffer = buffer.getWritePointer(channel);
        
        // Apply gains (now do this before getting from delay)
        applyDryWetToBuffer(buffer, channel, bufferLength, dryBuffer);
        
        // Copy data from main to delay buffer
        fillDelayBuffer(channel, bufferLength, delayBufferLength, bufferData, delayBufferData);
        
        // Copy data from delay buffer to output buffer
        getFromDelayBuffer(buffer, channel, bufferLength, delayBufferLength, bufferData, delayBufferData);
        
        // Feedback
        feedbackDelay(channel, bufferLength, delayBufferLength, dryBuffer);
    }
    
    _writePosition += bufferLength; // Increment
    _writePosition %= delayBufferLength; // Wrap around position index
    
    // Update values from tree
    updateTreeParams();
}
Example #27
  void processAudio(AudioBuffer &buffer){
    float gain = getParameterValue(PARAMETER_A)*2;
    int size = buffer.getSize();
    for(int ch=0; ch<buffer.getChannels(); ++ch){
      float* buf = buffer.getSamples(ch);
      for(int i=0; i<size; ++i)
        buf[i] = gain*buf[i];
    }
  }
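Example #28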
 void processAudio(AudioBuffer &buffer) {
   float tune = getParameterValue(PARAMETER_A)*10.0 - 6.0;
   float fc = getParameterValue(PARAMETER_B)*10.0 - 4.0;
   float q = getParameterValue(PARAMETER_C)*3+0.75;
   float shape = getParameterValue(PARAMETER_E)*2;
   float pw = 0.5;
   if(shape > 1.0){
     pw += 0.49*(shape-1.0); // pw 0.5 to 0.99
     shape = 1.0; // square wave
   }
   float df = getParameterValue(PARAMETER_D)*4;
   int di = (int)df;
   float gain = 0.0f;
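   // df in [0,4) selects one of four attack/release shapes below; the
   // fractional part morphs within the selected segment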
   switch(di){
     // a/d
   case 0: // l/s
     env.setAttack(1.0-df);
     env.setRelease(0.0);
     break;
   case 1: // s/s
     env.setAttack(0.0);
     env.setRelease(df-1);
     break;
   case 2: // s/l
     env.setAttack(df-2);
     env.setRelease(1.0);
     break;
   case 3: // l/l
     env.setAttack(1.0);
     env.setRelease(1.0);
     gain = df-3;
     break;
   }
   env.trigger(isButtonPressed(PUSHBUTTON), getSamplesSinceButtonPressed(PUSHBUTTON));
   FloatArray left = buffer.getSamples(LEFT_CHANNEL);
   FloatArray right = buffer.getSamples(RIGHT_CHANNEL);
   // vco
   hz.setTune(tune);
   float lfreq = hz.getFrequency(left[0]);
   osc.setFrequency(lfreq);
   osc.setShape(shape);
   osc.setPulseWidth(pw);
   osc.getSamples(left);
   // vcf
   hz.setTune(fc);
   fc = hz.getFrequency(right[0]);
   fc = min(0.999, max(0.01, fc/(getSampleRate()*2))); // normalised and bounded
   filter->setLowPass(fc, q);
   right.copyFrom(left);
   filter->process(right);
   right.multiply(0.8-q*0.2); // gain compensation for high q
   // vca
   env.getEnvelope(envelope);
   envelope.add(gain);
   left.multiply(envelope);
   right.multiply(envelope);
 }
Example #29
void VAOscillator::postFilter(AudioBuffer<float> buffer)
{
  float* const data = buffer.getWritePointer(0);
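  // Note: postFilterState is overwritten each sample before it is used, so as
  // written it carries no state between iterations.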
  for(int sampleIndex = 0; sampleIndex < buffer.getNumSamples(); sampleIndex++)
  {
    postFilterState = (1/0.65) * data[sampleIndex];
    data[sampleIndex] -= (float)((0.35/0.65) * postFilterState);
  }
}
Example #30
 void processAudio(AudioBuffer &buffer){
   prepare();
   int size = buffer.getSize();
     
   for (int ch = 0; ch<buffer.getChannels(); ++ch) {
       float* buf = buffer.getSamples(ch);
       for(int i = 0; i < size; ++i) buf[i] = processSample(buf[i]);
   }
 }