Code Example #1
File: PluginProcessor.cpp Project: OpenDAWN/mcfx
void Mcfx_convolverAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    
    // std::cout << "in: " << getNumInputChannels() << " out: " << getNumOutputChannels() << std::endl;
    
    if (_configLoaded)
    {
        
        _isProcessing = true;
        
#ifdef USE_ZITA_CONVOLVER
        
        for (int i=0; i < jmin(conv_data.getNumInputChannels(), getNumInputChannels()) ; i++)
        {
            float* indata = zita_conv.inpdata(i)+_ConvBufferPos;
            memcpy(indata, buffer.getReadPointer(i), getBlockSize()*sizeof(float));
        }
        
        _ConvBufferPos += getBlockSize();
        
        if (_ConvBufferPos >= _ConvBufferSize) {
            zita_conv.process(THREAD_SYNC_MODE);
            _ConvBufferPos = 0;
        }
        
        
        
        for (int i=0; i < jmin(conv_data.getNumOutputChannels(), getNumOutputChannels()) ; i++)
        {
            float* outdata = zita_conv.outdata(i)+_ConvBufferPos;
            memcpy(buffer.getWritePointer(i), outdata, getBlockSize()*sizeof(float));
        }
        
#else
        mtxconv_.processBlock(buffer, buffer);
        
#endif
        
        _isProcessing = false;
        
    } else { // no config loaded
        
        // clear output in case no config is loaded!
        buffer.clear();
    }
    

}
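The zita-convolver path above shows a common pattern: the host block can be smaller than the convolver's partition, so input is accumulated at _ConvBufferPos and zita_conv.process() only runs once a full partition is ready (Example #17 accordingly reports _ConvBufferSize - _BufferSize as latency). A minimal sketch of that accumulate-then-process pattern; the names here are illustrative, not from the plugin:

#include <vector>

// Sketch: gather arbitrarily sized host blocks into fixed-size partitions.
// 'runPartition' stands in for a call like zita_conv.process().
struct BlockAccumulator
{
    explicit BlockAccumulator (int partitionSize)
        : buffer ((size_t) partitionSize, 0.0f) {}

    template <typename Fn>
    void push (const float* in, int numSamples, Fn runPartition)
    {
        for (int i = 0; i < numSamples; ++i)
        {
            buffer[(size_t) pos++] = in[i];
            if (pos == (int) buffer.size())   // a full partition is ready
            {
                runPartition (buffer.data(), pos);
                pos = 0;                      // start filling the next one
            }
        }
    }

    std::vector<float> buffer;
    int pos = 0;
};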
Code Example #2
void DetunerPlugin::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
	
	int blockSamples = buffer.getNumSamples();
	detune(inputBuffer, outputBuffer, blockSamples);
	
	setMagnus(outputBuffer->getMagnitude(0, buffer.getNumSamples()));


    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    // if any midi messages come in, use them to update the keyboard state object. This
    // object sends notification to the UI component about key up/down changes
    keyboardState.processNextMidiBuffer (midiMessages,
                                         0, buffer.getNumSamples(),
                                         true);

    // have a go at getting the current time from the host, and if it's changed, tell
    // our UI to update itself.
    AudioPlayHead::CurrentPositionInfo pos;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
        {
            lastPosInfo = pos;
            sendChangeMessage (this);
        }
    }
    else
    {
        zeromem (&lastPosInfo, sizeof (lastPosInfo));
        lastPosInfo.timeSigNumerator = 4;
        lastPosInfo.timeSigDenominator = 4;
        lastPosInfo.bpm = 120;
    }

}
Code Example #3
void BlankenhainAudioProcessor::initializing(AudioSampleBuffer& buffer)
{
	//For Metering, reset "current" Values
	for (unsigned int i = 0; i < meterValues.size(); i++)
	{
		meterValues[i] = 0.f;
	}

	//Set lastKnownSampleRate and lastKnownBlockSize
	this->setLastKnownSampleRate(this->getSampleRate());
	this->setLastKnownBlockSize(this->getBlockSize());

	// In case we have more outputs than inputs, this code clears any output
	// channels that didn't contain input data.
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
		buffer.clear(i, 0, buffer.getNumSamples());
	}
}
Code Example #4
File: PluginProcessor.cpp Project: OpenDAWN/ambix
void Ambix_rotator_zAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
    
    // resize buffer if necessary
    output_buffer.setSize((std::max)(getNumOutputChannels(), getNumInputChannels()), samplesPerBlock);
    
    /*
    int l=0;
    int m=0;
    for (int i=0; i <= 25; i++)
    {
        ACNtoLM(i, l, m);
        std::cout << "ACN: " << i << " l: " << l << " m: " << m << std::endl;
    }
     */
    
}
Code Example #5
    void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
    {
        const int numSamples = buffer.getNumSamples();

        if (initialised && plugin != nullptr && handle != nullptr)
        {
            for (int i = 0; i < inputs.size(); ++i)
                plugin->connect_port (handle, inputs[i],
                                      i < buffer.getNumChannels() ? buffer.getWritePointer (i) : nullptr);

            if (plugin->run != nullptr)
            {
                for (int i = 0; i < outputs.size(); ++i)
                    plugin->connect_port (handle, outputs.getUnchecked(i),
                                          i < buffer.getNumChannels() ? buffer.getWritePointer (i) : nullptr);

                plugin->run (handle, numSamples);
                return;
            }

            if (plugin->run_adding != nullptr)
            {
                tempBuffer.setSize (outputs.size(), numSamples);
                tempBuffer.clear();

                for (int i = 0; i < outputs.size(); ++i)
                    plugin->connect_port (handle, outputs.getUnchecked(i), tempBuffer.getWritePointer (i));

                plugin->run_adding (handle, numSamples);

                for (int i = 0; i < outputs.size(); ++i)
                    if (i < buffer.getNumChannels())
                        buffer.copyFrom (i, 0, tempBuffer, i, 0, numSamples);

                return;
            }

            jassertfalse; // no callback to use?
        }

        for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
            buffer.clear (i, 0, numSamples);
    }
Code Example #6
void DemoJuceFilter::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{	
	assert(mpDSP->getNumInputs()<= buffer.getNumChannels());
	assert(mpDSP->getNumOutputs()<= buffer.getNumChannels());
	if(mpDSP	&& mpDSP->getNumInputs()<= buffer.getNumChannels() 
						&& mpDSP->getNumOutputs() <= buffer.getNumChannels() )
	{
		mpDSP->compute(buffer.getNumSamples(), 
									 buffer.getArrayOfChannels(), 
									 buffer.getArrayOfChannels());
	}
	
	// in case we have more outputs than inputs, we'll clear any output
	// channels that didn't contain input data, (because these aren't
	// guaranteed to be empty - they may contain garbage).
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
	{
		buffer.clear (i, 0, buffer.getNumSamples());
	}
}
Code Example #7
void HoaToolsAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    int i;
    int numins = getNumInputChannels();
    int numouts = getNumOutputChannels();
    int nharmo = NHARMO;
    int vectorsize = buffer.getNumSamples();

    for(i = 0; i < numins; i++)
    {
        cblas_scopy(vectorsize, buffer.getReadPointer(i), 1, m_input_vector+i, numins);
        m_lines->setRadius(i, m_sources->sourceGetRadius(i));
        m_lines->setAzimuth(i, m_sources->sourceGetAzimuth(i));
        if(m_sources->sourceGetExistence(i))
            m_map->setMute(i, 0);
        else
            m_map->setMute(i, 1);
    }
    for(; i < 16; i++)
    {
        m_map->setMute(i, 1);
    }
    for(i = 0; i < vectorsize; i++)
    {
        m_lines->process(m_lines_vector);
        for(int j = 0; j < numins; j++)
            m_map->setRadius(j, m_lines_vector[j]);
        for(int j = 0; j < numins; j++)
            m_map->setAzimuth(j, m_lines_vector[j+numins]);

        m_map->process(m_input_vector+ numins * i, m_harmo_vector + nharmo * i);
        m_optim->process(m_harmo_vector + nharmo * i, m_harmo_vector + nharmo * i);
        m_decoder->process(m_harmo_vector + nharmo * i, m_output_vector + numouts * i);
        m_meter->process(m_output_vector + numouts * i);
    }

    for(i = 0; i < numouts; i++)
    {
        cblas_scopy(vectorsize, m_output_vector+i, numouts, buffer.getWritePointer(i), 1);
    }
}
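Example #7 leans on cblas_scopy's stride arguments to move between JUCE's planar channel layout and the interleaved vectors the HOA objects expect. A minimal sketch of that trick with free-standing buffers (the function names are illustrative):

#include <cblas.h>

// Planar -> interleaved: copy a contiguous channel into every
// numChannels-th slot of 'interleaved', starting at offset ch.
void interleaveChannel (const float* planar, float* interleaved,
                        int numSamples, int numChannels, int ch)
{
    cblas_scopy (numSamples, planar, 1, interleaved + ch, numChannels);
}

// Interleaved -> planar is the same call with the strides swapped,
// which is exactly what the output loop above does.
void deinterleaveChannel (const float* interleaved, float* planar,
                          int numSamples, int numChannels, int ch)
{
    cblas_scopy (numSamples, interleaved + ch, numChannels, planar, 1);
}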
Code Example #8
File: PluginProcessor.cpp Project: EQ4/StereoChorus
void StereoChorusAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    
    if (getNumInputChannels() == 1) buffer.copyFrom(1, 0, buffer, 0, 0, buffer.getNumSamples());
    
    //float* channelData = buffer.getSampleData (0);
    for (int i = 0; i < buffer.getNumSamples(); i++) {
        
        
        float leftMod = (leftOsc.nextSample()+1.01) * getParameter(modParam) * 100;
        float rightMod = (rightOsc.nextSample()+1.01) * getParameter(modParam) * 100; // was leftOsc.nextSample() again, which stepped the left LFO twice per sample; assumes the matching rightOsc member
        
        leftDelayTime = (getParameter(delayParam) * 200) + leftMod + .002;
        rightDelayTime = (getParameter(delayParam) * 220) + rightMod + .0015;
        
        float l_xn = buffer.getReadPointer(0)[i];
        float r_xn = buffer.getReadPointer(1)[i];
        
        float l_combined;
        float r_combined;
        float l_yn;
        float r_yn;
        

        l_yn = leftBuffer.getSample(leftDelayTime);
        r_yn = rightBuffer.getSample(rightDelayTime);
            
        l_combined = l_xn + r_yn * getParameter(feedbackParam);
        r_combined = r_xn + l_yn * getParameter(feedbackParam);
      
        
        leftBuffer.addSample(l_combined);
        rightBuffer.addSample(r_combined);
        
        buffer.getWritePointer(0)[i] = (l_xn * (1-getParameter(mixParam)) + l_yn * getParameter(mixParam));
        buffer.getWritePointer(1)[i] = (r_xn * (1-getParameter(mixParam)) + r_yn * getParameter(mixParam));
        
    }
}
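The delay line behind addSample()/getSample() is not shown, but getSample() is called with a non-integer time, so it presumably interpolates. A minimal fractional-delay sketch with linear interpolation; the names and the assumption that delay is measured in samples are mine, not the plugin's:

#include <vector>

// Sketch: circular delay line with a fractional read tap.
struct FractionalDelay
{
    explicit FractionalDelay (int maxSamples) : data ((size_t) maxSamples, 0.0f) {}

    void addSample (float x)                      // write head advances once per sample
    {
        data[(size_t) writePos] = x;
        writePos = (writePos + 1) % (int) data.size();
    }

    float getSample (float delaySamples) const    // read 'delaySamples' behind the write head
    {
        float readPos = (float) writePos - delaySamples;
        while (readPos < 0.0f)
            readPos += (float) data.size();
        const int i0 = (int) readPos;
        const int i1 = (i0 + 1) % (int) data.size();
        const float frac = readPos - (float) i0;
        return data[(size_t) i0] * (1.0f - frac) + data[(size_t) i1] * frac; // linear blend
    }

    std::vector<float> data;
    int writePos = 0;
};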
Code Example #9
File: PluginProcessor.cpp Project: JayaWei/ambix
void Ambix_vmicAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    _Sh_transf = Sh_transf; // buffer old values
    
    calcParams(); // calc new transformation matrix
    
    int NumSamples = buffer.getNumSamples();
    
    output_buffer.setSize(buffer.getNumChannels(), NumSamples);
    output_buffer.clear();
    
    
    for (int out = 0; out < std::min(NUM_FILTERS_VMIC,getNumOutputChannels()); out++)
    {
        for (int in = 0; in < std::min(AMBI_CHANNELS,getNumInputChannels()); in++)
        {
            if (_Sh_transf(out, in) != 0.f || Sh_transf(out, in) != 0.f)
            {
                if (_Sh_transf(out, in) == Sh_transf(out, in))
                {
                    output_buffer.addFrom(out, 0, buffer, in, 0, NumSamples, (float)Sh_transf(out, in));
                } else {
                    output_buffer.addFromWithRamp(out, 0, buffer.getReadPointer(in), NumSamples, (float)_Sh_transf(out, in), (float)Sh_transf(out, in));
                }
                
            }
        }
    }
    
    // clear unused channels
    for (int out = std::min(NUM_FILTERS_VMIC,getNumOutputChannels()); out < output_buffer.getNumChannels(); out++)
    {
        output_buffer.clear(out, 0, NumSamples);
    }
    
    
    buffer = output_buffer;
}
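The addFrom/addFromWithRamp split above is the standard defence against zipper noise: when a matrix coefficient changed since the last block, the gain is ramped linearly from the old to the new value across the block instead of jumping. A sketch of the per-sample operation addFromWithRamp performs:

// Sketch: mix 'src' into 'dst' while ramping the gain from g0 to g1.
void addWithRamp (float* dst, const float* src, int n, float g0, float g1)
{
    const float inc = (g1 - g0) / (float) n;  // per-sample gain increment
    float g = g0;
    for (int i = 0; i < n; ++i)
    {
        dst[i] += src[i] * g;
        g += inc;
    }
}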
Code Example #10
void LyrebirdAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    
    // Do Midi things
    buffer.clear();
    
    int time;
    MidiMessage m;
    
    for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);)
    {
        if (m.isNoteOn()) // retune only on note events; other message types carry no valid note number
        {
            sawGenerator->setWavelength(currentSampleRate, m.getMidiNoteInHertz(m.getNoteNumber()));
        }
        else if (m.isNoteOff())
        {
            sawGenerator->setWavelength(currentSampleRate, 0);
        }
    }
    
    
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // I've added this to avoid people getting screaming feedback
    // when they first compile the plugin, but obviously you don't need to
    // this code if your algorithm already fills all the output channels.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());


    float* leftData = buffer.getWritePointer (0);
    float* rightData = buffer.getWritePointer(1);
    for (int sample = 0; sample < buffer.getNumSamples(); sample++)
    {
        leftData[sample] = sawGenerator->getCurrentAmplitude();
        rightData[sample] = sawGenerator->getCurrentAmplitude();
        sawGenerator->incrementSaw();
    }
}
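sawGenerator's internals are not shown; from the calls (setWavelength, getCurrentAmplitude, incrementSaw) it is presumably a phase-accumulator saw. A plausible sketch, offered as an assumption rather than the project's actual class:

// Sketch: naive phase-accumulator sawtooth.
struct SawSketch
{
    void setWavelength (double sampleRate, double freqHz)
    {
        increment = (freqHz <= 0.0) ? 0.0 : freqHz / sampleRate; // cycles per sample
    }

    float getCurrentAmplitude() const
    {
        return (float) (2.0 * phase - 1.0);   // map phase [0,1) to [-1,1)
    }

    void incrementSaw()
    {
        phase += increment;
        if (phase >= 1.0)
            phase -= 1.0;                     // wrap once per cycle
    }

    double phase = 0.0;
    double increment = 0.0;
};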
Code Example #11
File: ChannelStripProcessor.cpp Project: Bk8/mordaw
/*
This method is where user chosen values are applied to the audio buffer.
If the track is not muted then gain is applied to both the left and right channels
based on both the current gain value as well as taking into consideration the current
panning value.
@param &buffer the buffer to be processed
*/
void ChannelStripProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer&)
{
	//Check to see if the track is un-muted
	if (!_muted)
	{
		//Apply the gain value to each channel relative to the panning value
		float leftGain_ = _gain * (1.0f - _panning);
		float rightGain_ = _gain * _panning; // was duplicated as (1.0f - _panning) and left unused; corrected to match the right-channel gain below
		buffer.applyGain(0, 0, buffer.getNumSamples(), leftGain_);
		//Apply the gain value to the right channel relative to the panning value
		buffer.applyGain(1, 0, buffer.getNumSamples(), rightGain_);
	}
	//Check to see if the track is muted
	if (_muted)
	{
		//Apply a gain value of 0 to the track
		buffer.applyGain(_muteGain);
	}
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
		buffer.clear(i, 0, buffer.getNumSamples());
}
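The pan law above is linear, which dips the summed level around the centre. A constant-power law is the usual refinement; this sketch is an alternative, not what ChannelStripProcessor does:

#include <cmath>

// Sketch: constant-power pan. pan in [0, 1], 0.5 = centre (-3 dB per side).
void constantPowerGains (float pan, float& leftGain, float& rightGain)
{
    const float theta = pan * 1.57079632679f; // map [0,1] onto [0, pi/2]
    leftGain  = std::cos (theta);
    rightGain = std::sin (theta);
}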
Code Example #12
File: DRowAudioFilter.cpp Project: harry-g/drowaudio
//==============================================================================
void DRowAudioFilter::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    currentSampleRate = sampleRate;
    oneOverCurrentSampleRate = 1.0f/currentSampleRate;

    // set up wave buffer and fill with triangle data
    iLookupTableSize = 8192;
    iLookupTableSizeMask =  iLookupTableSize-1;

    pfLookupTable = new float[iLookupTableSize];
    float fPhaseStep = (2 * double_Pi) / iLookupTableSize;
    for(int i = 0; i < iLookupTableSize; i++) {
        if(i < iLookupTableSize * 0.5)
            pfLookupTable[i] = -1.0f + (2.0/double_Pi)*i*fPhaseStep;
        else
            pfLookupTable[i] = 3.0f - (2.0/double_Pi)*i*fPhaseStep;
    }
    iLookupTablePos = 0;
    iSamplesProcessed = 0;


    // set up circular buffers
    iBufferSize = (int)sampleRate;
    pfCircularBufferL = new float[iBufferSize];
    for (int i = 0; i < iBufferSize; i++)
        pfCircularBufferL[i] = 0;

    if (getNumInputChannels() == 2) {
        pfCircularBufferR = new float[iBufferSize];
        for (int i = 0; i < iBufferSize; i++)
            pfCircularBufferR[i] = 0;
    }
    iBufferWritePos = 0;

    updateFilters();
}
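iLookupTableSizeMask is only valid because the table length is a power of two: wrapping the phase index then reduces to a single AND. A sketch of the corresponding table read (a hypothetical helper, not shown in the source):

// Sketch: step through a power-of-two wavetable with a masked index.
// (pos & sizeMask) wraps the index without a modulo or branch.
inline float nextTableSample (const float* table, int sizeMask, int& pos, int step)
{
    const float sample = table[pos & sizeMask];
    pos += step;                 // phase accumulator measured in table slots
    return sample;
}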
Code Example #13
File: PluginProcessor.cpp Project: gtbts/MIDItoOSC
void MiditoOscAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    static float cv[8], shiftcv[8];
    static bool _calibMode;
    
    MidiBuffer processedMidi;
    MidiMessage m;
    int time;
    
    char oscBuffer[IP_MTU_SIZE];
    osc::OutboundPacketStream p(oscBuffer, IP_MTU_SIZE);
    
    if (calibMode) // Calibration Mode A440Hz(MIDI number 69)
    {
        p << osc::BeginBundleImmediate
        << osc::BeginMessage( "/fader1" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader2" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader3" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader4" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader5" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader6" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader7" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/fader8" )
        << calibMap[69] << osc::EndMessage
        << osc::BeginMessage( "/gate1" )
        << 1 << osc::EndMessage
        << osc::BeginMessage( "/gate2" )
        << 1 << osc::EndMessage
        << osc::EndBundle;
        
        sendOSCData(p);
        
        _calibMode = true;
        
        return;
        
    } else {
        
        if (_calibMode)
        {
            p << osc::BeginBundleImmediate
            << osc::BeginMessage( "/gate1" )
            << 0 << osc::EndMessage
            << osc::BeginMessage( "/gate2" )
            << 0 << osc::EndMessage
            << osc::EndBundle;
            
            sendOSCData(p);
            
            _calibMode = false;
            
        }
    }
    
    for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);)
    {
        p.Clear();
        usleep(30);
        
        if (m.isNoteOn())
        {
            if (monoMode) // mono Mode
            {
                uint32_t midiCh = m.getChannel();
                
                if (midiCh == 0 || midiCh > 7)
                {
                    midiCh = 1;
                }
                
                cv[midiCh - 1] = calibMap[m.getNoteNumber()];
                
                switch (midiCh)
                {
                    case 1:
                        p << osc::BeginMessage("/fader1")
                        << cv[0] << osc::EndMessage;
                        break;
                        
                    case 2:
                        p << osc::BeginMessage("/fader2")
                        << cv[1] << osc::EndMessage;
                        break;
                        
                    case 3:
                        p << osc::BeginMessage("/fader3")
                        << cv[2] << osc::EndMessage;
                        break;
                        
                    case 4:
                        p << osc::BeginMessage("/fader4")
                        << cv[3] << osc::EndMessage;
                        break;
                        
                    case 5:
                        p << osc::BeginMessage("/fader5")
                        << cv[4] << osc::EndMessage;
                        break;
                        
                    case 6:
                        p << osc::BeginMessage("/fader6")
                        << cv[5] << osc::EndMessage;
                        break;
                        
                    case 7:
                        p << osc::BeginMessage("/fader7")
                        << cv[6] << osc::EndMessage;
                        break;
                        
                    case 8:
                        p << osc::BeginMessage("/fader8")
                        << cv[7] << osc::EndMessage;
                        break;
                        
                    default:
                        break;
                }
                
                sendOSCData(p);
                
            } else if (shiftMode) { // shift Mode
                
                cv[0] = calibMap[m.getNoteNumber()];
                
                for (int i = 7; i > 0; i--)
                {
                    shiftcv[i] = shiftcv[i-1];
                }
                
                p << osc::BeginBundleImmediate
                << osc::BeginMessage( "/fader1" )
                << cv[0] << osc::EndMessage
                << osc::BeginMessage( "/fader2" )
                << shiftcv[1] << osc::EndMessage
                << osc::BeginMessage( "/fader3" )
                << shiftcv[2] << osc::EndMessage
                << osc::BeginMessage( "/fader4" )
                << shiftcv[3] << osc::EndMessage
                << osc::BeginMessage( "/fader5" )
                << shiftcv[4] << osc::EndMessage
                << osc::BeginMessage( "/fader6" )
                << shiftcv[5] << osc::EndMessage
                << osc::BeginMessage( "/fader7" )
                << shiftcv[6] << osc::EndMessage
                << osc::BeginMessage( "/fader8" )
                << shiftcv[7] << osc::EndMessage
                << osc::BeginMessage( "/gate1" )
                << 1 << osc::EndMessage
                << osc::BeginMessage( "/gate2" )
                << 1 << osc::EndMessage
                << osc::EndBundle;
                
                sendOSCData(p);
                
                shiftcv[0] = cv[0];
                
            } else { // poly Mode
                
                cv[ch] = calibMap[m.getNoteNumber()];
                
                if (currentMaxPoly == 1)
                {
                    cv[1] = cv[0];
                }
                
                p << osc::BeginBundleImmediate
                << osc::BeginMessage( "/fader1" )
                << cv[0] << osc::EndMessage
                << osc::BeginMessage( "/fader2" )
                << cv[1] << osc::EndMessage
                << osc::BeginMessage( "/fader3" )
                << cv[2] << osc::EndMessage
                << osc::BeginMessage( "/fader4" )
                << cv[3] << osc::EndMessage
                << osc::BeginMessage( "/fader5" )
                << cv[4] << osc::EndMessage
                << osc::BeginMessage( "/fader6" )
                << cv[5] << osc::EndMessage
                << osc::BeginMessage( "/fader7" )
                << m.getFloatVelocity() << osc::EndMessage
                << osc::BeginMessage( "/gate1" )
                << 1 << osc::EndMessage
                << osc::BeginMessage( "/gate2" )
                << 1 << osc::EndMessage
                << osc::EndBundle;
                
                sendOSCData(p);
                
                ch++;
                gateCount++;
                
                if (ch >= currentMaxPoly)
                {
                    ch = 0;
                }
                
            }
            
        } else if (m.isNoteOff()) {
            
            if (monoMode)
            {
                switch (m.getChannel())
                {
                    case 1:
                        p << osc::BeginMessage( "/gate1" )
                        << 0 << osc::EndMessage;
                        break;
                        
                    case 2:
                        p << osc::BeginMessage( "/gate2" )
                        << 0 << osc::EndMessage;
                        break;
                        
                    case 3:
                        p << osc::BeginMessage( "/gate3" )
                        << 0 << osc::EndMessage;
                        break;
                        
                    case 4:
                        p << osc::BeginMessage( "/gate4" )
                        << 0 << osc::EndMessage;
                        break;
                        
                    default:
                        break;
                }
                
                sendOSCData(p);
                
            } else if (shiftMode) {
                
                p << osc::BeginBundleImmediate
                << osc::BeginMessage( "/gate1" )
                << 0 << osc::EndMessage
                << osc::BeginMessage( "/gate2" )
                << 0 << osc::EndMessage
                << osc::EndBundle;
                
                sendOSCData(p);
                
            } else {
                
                gateCount --;
                
                if (gateCount <= 0)
                {
                    p << osc::BeginBundleImmediate
                    << osc::BeginMessage( "/gate1" )
                    << 0 << osc::EndMessage
                    << osc::BeginMessage( "/gate2" )
                    << 0 << osc::EndMessage
                    << osc::EndBundle;
                    
                    sendOSCData(p);
                    
                    gateCount = 0;
                }
                
                ch--;
                
                if (ch == -1)
                {
                    ch = 0;
                }
                
            }
            
        } else if (m.isControllerOfType(1)) { // Modulation Wheel
            
            float modulation = m.getControllerValue();
            
            if (!monoMode && !shiftMode)
            {
                p << osc::BeginMessage("/fader8")
                << (modulation / 127) << osc::EndMessage;
                
                sendOSCData(p);
            }
            
        }
        
        processedMidi.addEvent (m, time);
    }
    
    midiMessages.swapWith (processedMidi);
    
    buffer.clear();
    
    // audio output stays silent; this plugin only emits MIDI and OSC
}
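The eight-case switch on midiCh (and its siblings for the gates) can collapse into a single message whose address embeds the channel number, removing the copy-paste surface. A sketch against the same oscpack stream API, assuming the surrounding p, cv and sendOSCData from the example:

// Sketch: derive "/faderN" from the MIDI channel instead of switching on it.
// (snprintf is from <cstdio>.)
char address[16];
snprintf (address, sizeof (address), "/fader%u", midiCh);

p << osc::BeginMessage (address) << cv[midiCh - 1] << osc::EndMessage;
sendOSCData (p);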
Code Example #14
File: vstSynthProcessor.cpp Project: rsenn/vstsynth
void vstSynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // number of samples in current buffer
	const int numSamples = buffer.getNumSamples();

	
    /* 
     Checks to see if delay size has changed since the last block. If it has,
     the delay buffer is resized and cleared (to prevent garbage in the output)
     The read and write pointers are also reset to their starting positions and
     the saved filter states are removed to reduce transients.
     */
    
    // delayTimeParam controlled by vstSynthEditor::delayTimeSlider
    if (delayBuffer.getNumSamples() != getParameter(delayTimeParam) + numSamples)
	{
		delayBuffer.setSize(1, getParameter(delayTimeParam) + numSamples);
		delayBuffer.clear();
		delayWritePtr =  delayBuffer.getSampleData(0) + (int) getParameter(delayTimeParam);
		delayReadPtr = delayBuffer.getSampleData(0);
		//hpeqFilter.reset();
	}

	// Receives MIDI data from host
    keyboardState.processNextMidiBuffer(midiMessages, 0, numSamples, true);
	
    // Call to vstSynthVoice::renderNextBlock where buffer is filled with raw oscillator data
    vstSynth.renderNextBlock(buffer, midiMessages, 0, numSamples);

    // Pointer to beginning of buffer
	float* bufferPtr = buffer.getSampleData(0, 0);

    // Performs tremolo (AM) if enabled, overdrive and delay operation
	for (int currentSample = 0; currentSample < numSamples; currentSample++)
	{
		// Apply tremolo if enabled
        if (getParameter(lfoDestParam) == 2) // Controlled by vstSynthEditor::lfoDestComboBox
		{
			tremolo.setVibratoRate(getParameter(lfoFreqParam)); // Controlled by vstSynthEditor::lfoFreqSlider
			tremolo.setVibratoGain(getParameter(lfoDevParam)/10); // Controlled by vstSynthEditor::lfoDevSlider
			*bufferPtr *= (float) (1+tremolo.tick()); // Modulate amplitude with tremolo output
		}

        // Push signal through tanh to introduce nonlinear distortion
		*bufferPtr = tanhf(getParameter(driveParam) * *bufferPtr); // Controlled by vstSynthEditor::driveSlider

        // Process delay if enabled
		if (getParameter(delayTimeParam) > 0) // Controlled by vstSynthEditor::delayTimeSlider
		{
            // Add existing delay data into buffer
			*bufferPtr += getParameter(delayFeedbackParam) * *delayReadPtr; // Controlled by vstSynthEditor::delayFBSlider

            // Save current output data into delay buffer
			*delayWritePtr = *bufferPtr;
			
            // Increment pointers
            delayWritePtr++;
			delayReadPtr++;

            // Circular buffer implementation: reset pointers to beginning of buffers when end is reached
			if (delayReadPtr >= delayBuffer.getSampleData(0) + delayBuffer.getNumSamples()) // >= so the pointer wraps before walking one past the buffer
			{
				delayReadPtr = delayBuffer.getSampleData(0);
			}

			if (delayWritePtr >= delayBuffer.getSampleData(0) + delayBuffer.getNumSamples()) // >= so the pointer wraps before walking one past the buffer
			{
				delayWritePtr = delayBuffer.getSampleData(0);
			}
		}
        
        // Increment pointer
		bufferPtr++;
	}
    
    // Send buffer to vstSynthFilter where it is replaced with filtered data
	hpeqFilter.processSamples(buffer.getSampleData(0, 0), numSamples);

    // All processing happens in only one channel for speed; the other channel is filled here.
	buffer.addFrom(1, 0, buffer, 0, 0, numSamples);

    // Apply overall output gain to buffer before playback
	buffer.applyGain(0, numSamples, 10 * getParameter(outputGainParam)); // Controlled by vstSynthEditor::outputGainSlider

	// In case we have more outputs than inputs, we'll clear any output
	// channels that didn't contain input data, (because these aren't
	// guaranteed to be empty - they may contain garbage).
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
	{
		buffer.clear (i, 0, buffer.getNumSamples());
	}
}
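Raw-pointer circular buffers invite exactly the off-by-one wrap fixed above. An index-based formulation keeps the wrap arithmetic in one place (a sketch, not the plugin's code):

// Sketch: one sample of a feedback delay using indices instead of pointers.
float delayTick (float in, float feedback, float* delayBuf, int bufLen,
                 int& writeIdx, int delaySamples)
{
    const int readIdx = (writeIdx - delaySamples + bufLen) % bufLen; // safe wrap
    const float out = in + feedback * delayBuf[readIdx];
    delayBuf[writeIdx] = out;                // store the post-feedback signal, as above
    writeIdx = (writeIdx + 1) % bufLen;
    return out;
}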
Code Example #15
void ChorusAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    // I've added this to avoid people getting screaming feedback
    // when they first compile the plugin, but obviously you don't need to
    // this code if your algorithm already fills all the output channels.
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

	// If the sample rate has changed resize the buffers
	if (FS != getSampleRate())resizeBuffers(depthOSC, delayOSC, delayBufferL, delayBufferR);

	// If the modulation rate or the depth have been changed recreate LFO
	if (freq != modRate || Amp != depth)LFOBuffer();

	// get current write pointers
	depthOSCwp = depthOSC.getWritePointer(0);
	delayOSCwp = delayOSC.getWritePointer(0);
	delayBufferLwp = delayBufferL.getWritePointer(0);
	delayBufferRwp = delayBufferR.getWritePointer(0);

	// get current read pointers
	depthOSCrp = depthOSC.getReadPointer(0);
	delayOSCrp = delayOSC.getReadPointer(0);
	delayBufferLrp = delayBufferL.getReadPointer(0);
	delayBufferRrp = delayBufferR.getReadPointer(0);
		
	// Calculate delay in samples
	int delay = (int)(delayTime*FS);


	// create input and output channels to read from and write to
	const float *inL = buffer.getReadPointer(0);
	const float *inR = buffer.getReadPointer(1);
	float *outL = buffer.getWritePointer(0);
	float *outR = buffer.getWritePointer(1);

	// Step through each sample in the current buffer
	for (int i = 0; i < buffer.getNumSamples(); i++)
	{

		// Calculate the delay point, and write to delay buffer
		double dSamp = i + bidx - delay;

		// Current sample for the delay buffer
		int ridx = i + bidx;
		
		// Wrap ridx if it is larger than the size of the delay buffer (equal to the sample rate)
		if (ridx >= FS)ridx -= FS;

		// Calculate delay after modulation has been applied
		double modDelay = (delay * 0.3 * depthOSCrp[ridx]); 
		double dSampMod = dSamp - modDelay;

		// If the delay time exceeds or is under the buffer length wrap around
		if ((int)dSampMod < 0)dSampMod += FS;
		if ((int)dSampMod >= FS)dSampMod -= FS;

		double delaySampleL = 0;
		double delaySampleR = 0;

		// write current input data to delayBuffer
		delayBufferRwp[ridx] = inR[i];
		delayBufferLwp[ridx] = inL[i];

		// Check whether the delay is fractional
		double dSampModround = round(dSampMod);
		double check = dSampMod - dSampModround;
		int inc = 0;

		// If the fractional content of the delay is > 0 then interpolate against the next sample
		// If the fractional content of the delay is < 0 then interpolate against the previous sample
		// Else take the current sample without interpolation
		if (check > 0)
		{
			inc = (int)dSampModround+1;
			
			if (inc >= FS)
			{
				delaySampleL = interpolateLinear(delayBufferLrp[int(dSampMod)], delayBufferLrp[inc-FS], dSampMod, round(dSampMod));
				delaySampleR = interpolateLinear(delayBufferRrp[int(dSampMod)], delayBufferRrp[inc - FS], dSampMod, round(dSampMod));
			}
			else
			{
				delaySampleL = interpolateLinear(delayBufferLrp[int(dSampMod)], delayBufferLrp[inc], dSampMod, round(dSampMod));
				delaySampleR = interpolateLinear(delayBufferRrp[int(dSampMod)], delayBufferRrp[inc], dSampMod, round(dSampMod));
			}
		}
		else if (check < 0)
		{
			inc = (int)dSampModround-1;
			
			if (inc < 0)
			{
				delaySampleL = interpolateLinear(delayBufferLrp[inc + FS], delayBufferLrp[int(dSampMod)], dSampMod, inc);
				delaySampleR = interpolateLinear(delayBufferRrp[inc + FS], delayBufferRrp[int(dSampMod)], dSampMod, inc);
			}
			else
			{
				delaySampleL = interpolateLinear(delayBufferLrp[inc], delayBufferLrp[int(dSampMod)], dSampMod, inc);
				delaySampleR = interpolateLinear(delayBufferRrp[inc], delayBufferRrp[int(dSampMod)], dSampMod, inc);
			}
		}
		else
		{
			delaySampleL = delayBufferLrp[int(dSampMod)];
			delaySampleR = delayBufferRrp[int(dSampMod)];
		}

		// Add the delayed sample to the current sample
		// Weight each sample against how much of the wet/dry mix has been select
		outL[i] = ((1 - wetDry)*inL[i] + wetDry*delaySampleL)*0.5;
		outR[i] = ((1 - wetDry)*inR[i] + wetDry*delaySampleR)*0.5;

	}

	// Increment index in relation to where the current buffer will be placed within the delay buffer
	// wrap bidx if greater than sample rate
	bidx += buffer.getNumSamples();
	if (bidx >= FS)bidx -= FS;

	// store current freqency and modulation rates
	freq = modRate;
	Amp = depth;
}
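interpolateLinear() is not shown; from the call sites it blends the sample at the integer read position toward its neighbour by the fractional part of the delay. A plausible implementation consistent with those calls (the exact signature is an assumption):

// Sketch: linear interpolation. y0 sits at integer position x0;
// x is the fractional read position between x0 and the neighbour y1.
double interpolateLinear (double y0, double y1, double x, double x0)
{
    const double frac = x - x0;        // fractional distance from y0 toward y1
    return y0 + frac * (y1 - y0);
}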
Code Example #16
void MLPluginProcessor::prepareToPlay (double sr, int maxFramesPerBlock)
{
	MLProc::err prepareErr;
	MLProc::err r = preflight();
	
	if (!mpPluginDoc.get()) return;
	
	if (r == MLProc::OK)
	{
		// get the Juce process lock  // TODO ???
		const ScopedLock sl (getCallbackLock());

		unsigned inChans = getNumInputChannels();
		unsigned outChans = getNumOutputChannels();
		mEngine.setInputChannels(inChans);
		mEngine.setOutputChannels(outChans);

		unsigned bufSize = 0;
		unsigned chunkSize = 0;

		// choose new buffer size and vector size.
		{
			// bufSize is the smallest power of two greater than maxFramesPerBlock.
			int maxFramesBits = bitsToContain(maxFramesPerBlock);
			bufSize = 1 << maxFramesBits;
			
			// vector size is desired processing block size, set this to default size of signal.
			chunkSize = min((int)bufSize, (int)kMLProcessChunkSize);
		}	
		
		// dsp engine has one chunkSize of latency in order to run constant block size.
		setLatencySamples(chunkSize);
		
		// debug() << "MLPluginProcessor: prepareToPlay: rate " << sr << ", buffer size " << bufSize << ", vector size " << vecSize << ". \n";	
		
		// build: turn XML description into graph of processors
		if (mEngine.getGraphStatus() != MLProc::OK)
		{
			bool makeSignalInputs = inChans > 0;
			r = mEngine.buildGraphAndInputs(&*mpPluginDoc, makeSignalInputs, wantsMIDI()); 
			// debug() << getNumParameters() << " parameters in description.\n";
		}
		else
		{
			// debug() << "MLPluginProcessor graph OK.\n";
		}

#ifdef DEBUG
		theSymbolTable().audit();
		//theSymbolTable().dump();
#endif

		// compile: schedule graph of processors , setup connections, allocate buffers
		if (mEngine.getCompileStatus() != MLProc::OK)
		{
			mEngine.compileEngine();
		}
		else
		{
			debug() << "compile OK.\n";
		}

		// prepare to play: resize and clear processors
		prepareErr = mEngine.prepareEngine(sr, bufSize, chunkSize);
		if (prepareErr != MLProc::OK)
		{
			debug() << "MLPluginProcessor: prepareToPlay error: \n";
		}
		
		// mEngine.dump();
			
		// after prepare to play, set state from saved blob if one exists
		const unsigned blobSize = mSavedParamBlob.getSize();
		if (blobSize > 0)
		{
			setStateFromBlob (mSavedParamBlob.getData(), blobSize);
			mSavedParamBlob.setSize(0);
		}
		else 
		{
			mEngine.clear();
			if (!mHasParametersSet)
			{
				loadDefaultPreset();
			}
		}		
		
		if(!mInitialized)
		{					
			initializeProcessor();
			mInitialized = true;
		}		
		
		mEngine.setEnabled(prepareErr == MLProc::OK);
	}
}
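bitsToContain() is not shown; the comment says bufSize becomes the smallest power of two that holds maxFramesPerBlock, so the helper presumably returns the bit count needed for that. A sketch consistent with that reading:

// Sketch: number of bits b such that (1 << b) >= n.
// Then bufSize = 1 << bitsToContain(maxFramesPerBlock) is the
// smallest power of two able to hold a host block.
int bitsToContain (int n)
{
    int bits = 0;
    while ((1 << bits) < n)
        ++bits;
    return bits;
}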
Code Example #17
File: PluginProcessor.cpp Project: OpenDAWN/mcfx
void Mcfx_convolverAudioProcessor::LoadConfiguration(File configFile)
{
    if (!configFile.existsAsFile())
    {
        
        String debug;
        debug << "Configuration file does not exist!" << configFile.getFullPathName() << "\n\n";
        
        //std::cout << debug << std::endl;
        DebugPrint(debug);
        
        return;
    }
    
    // unload first....
    if (_configLoaded) {
        
        while (_isProcessing) {
            Sleep(1);
        }
        
        std::cout << "Unloading Config..." << std::endl;
        UnloadConfiguration();
        _DebugText = String(); // clear debug window
        std::cout << "Config Unloaded..." << std::endl;
    }
    
    if (_ConvBufferSize < _BufferSize)
        _ConvBufferSize = _BufferSize;
    
    _ConvBufferSize = nextPowerOfTwo(_ConvBufferSize);
    
    String debug;
    debug << "\ntrying to load " << configFile.getFullPathName() << "\n\n";
    
    DebugPrint(debug);
    
    // debug print samplerate and buffer size
    debug = "Samplerate: ";
    debug << _SampleRate;
    debug << " Host Buffer Size: ";
    debug << (int)_BufferSize;
    debug << " Internal Buffer Size: ";
    debug << (int)_ConvBufferSize;
    DebugPrint(debug);
    
    activePreset = configFile.getFileName(); // store filename only, on restart search preset folder for it!
    box_preset_str = configFile.getFileNameWithoutExtension();
    
    StringArray myLines;
    
    configFile.readLines(myLines);
    
    // global settings
    
    String directory("");
    
    AudioSampleBuffer TempAudioBuffer(1,256);
    
    conv_data.setSampleRate(_SampleRate);
    
    // iterate over all lines
    for (int currentLine = 0; currentLine < myLines.size(); currentLine++)
    {
        // get the line and remove spaces from start and end
        String line (myLines[currentLine].trim());
        
        if (line.startsWith("#"))
        {
            
            // ignore these lines
            
        } else if (line.contains("/cd")) {
            
            line = line.trimCharactersAtStart("/cd").trim();
            directory = line;
            
            std::cout << "Dir: " << directory << std::endl;
        
        } else if (line.contains("/convolver/new")) {
            int t_in_ch = 0;
            int t_out_ch = 0;
            
            line = line.trimCharactersAtStart("/convolver/new").trim();
            String::CharPointerType lineChar = line.getCharPointer();
            
            sscanf(lineChar, "%i%i", &t_in_ch, &t_out_ch);
            
            
        } else if (line.contains("/impulse/read"))
        {
            int in_ch = 0;
            int out_ch = 0;
            float gain = 1.f;
            int delay = 0;
            int offset = 0;
            int length = 0;
            int channel = 0;
            char filename[100];
            
            line = line.trimCharactersAtStart("/impulse/read").trim();
            
            String::CharPointerType lineChar = line.getCharPointer();
            
            
            sscanf(lineChar, "%i%i%f%i%i%i%i%99s", &in_ch, &out_ch, &gain, &delay, &offset, &length, &channel, filename); // %99s bounds the filename read to its buffer
            
            // printf("load ir: %i %i %f %i %i %i %i %s \n", in_ch, out_ch, gain, delay, offset, length, channel, filename);
            
            File IrFilename;
            
            
            // check if /cd is defined in config
            if (directory.isEmpty()) {
                IrFilename = configFile.getParentDirectory().getChildFile(String(filename));
                
            } else { // /cd is defined
                if (directory.startsWith("/"))
                {
                    // absolute path is defined
                    File path(directory);
                    
                    IrFilename = path.getChildFile(String(filename));
                } else {
                    
                    // relative path to the config file is defined
                    IrFilename = configFile.getParentDirectory().getChildFile(directory).getChildFile(String(filename));
                }
            }
            
            if ( ( in_ch < 1 ) || ( in_ch > getNumInputChannels() ) || ( out_ch < 1 ) || ( out_ch > getNumOutputChannels() ) )
            {
                
                String debug;
                debug << "ERROR: channel assignment not feasible: In: " << in_ch << " Out: " << out_ch;
                DebugPrint(debug << "\n");
                
            } else {
                
                double src_samplerate;
                if (loadIr(&TempAudioBuffer, IrFilename, channel-1, src_samplerate, gain, offset, length))
                {
                    // std::cout << "Length: " <<  TempAudioBuffer.getNumSamples() << " Channels: " << TempAudioBuffer.getNumChannels() << " MaxLevel: " << TempAudioBuffer.getRMSLevel(0, 0, 2048) << std::endl;
                    
                    // add IR to my convolution data - offset and length are already done while reading file
                    conv_data.addIR(in_ch-1, out_ch-1, 0, delay, 0, &TempAudioBuffer, src_samplerate);
                    
                    String debug;
                    debug << "conv # " << conv_data.getNumIRs() << " " << IrFilename.getFullPathName() << " loaded";
                    DebugPrint(debug << "\n");
                    
                } else {
                    String debug;
                    debug << "ERROR: not loaded: " << IrFilename.getFullPathName();
                    DebugPrint(debug << "\n");
                }
                
             
            } // end check channel assignment
            
            
        } // end "/impulse/read" line
        else if (line.contains("/impulse/densewav"))
        {
            // TODO!
        } // end "/impulse/densewav" line
        
        
    } // iterate over lines
    
    // initiate convolution
    
#ifdef USE_ZITA_CONVOLVER
    int err=0;
    
    unsigned int   options = 0;
    
    options |= Convproc::OPT_FFTW_MEASURE;
    options |= Convproc::OPT_VECTOR_MODE;
    
    zita_conv.set_options (options);
    zita_conv.set_density(0.5);
    
    printf("max length: %lli \n", conv_data.getMaxLength());
    
    err = zita_conv.configure(conv_data.getNumInputChannels(), conv_data.getNumOutputChannels(), (unsigned int)conv_data.getMaxLength(), _ConvBufferSize, _ConvBufferSize, Convproc::MAXPART);
    
    for (int i=0; i < conv_data.getNumIRs(); i++)
    {

        err = zita_conv.impdata_create(conv_data.getInCh(i), conv_data.getOutCh(i), 1, (float *)conv_data.getIR(i)->getReadPointer(0), (unsigned int)conv_data.getDelay(i), (unsigned int)conv_data.getLength(i));
        
    }
    
    zita_conv.print();
    zita_conv.start_process(CONVPROC_SCHEDULER_PRIORITY, CONVPROC_SCHEDULER_CLASS);
    
#else
    mtxconv_.Configure(conv_data.getNumInputChannels(), conv_data.getNumOutputChannels(), _ConvBufferSize, conv_data.getMaxLength(), 8192);
    
    for (int i=0; i < conv_data.getNumIRs(); i++)
    {
        
        mtxconv_.AddFilter(conv_data.getInCh(i), conv_data.getOutCh(i), *conv_data.getIR(i));
        // no delay and length yet!
        
    }
    
    mtxconv_.StartProc();
    
#endif
    
    _configLoaded = true;
    
    setLatencySamples(_ConvBufferSize-_BufferSize);
    
    
    _min_in_ch = conv_data.getNumInputChannels();
    _min_out_ch = conv_data.getNumOutputChannels();
    _num_conv = conv_data.getNumIRs();
    
    _configFile = configFile;
    
    sendChangeMessage(); // notify editor
}
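The parser above follows a jconvolver-style configuration: /cd sets a base directory, /convolver/new declares the matrix size, and /impulse/read binds one IR file to an input/output pair with gain, delay, offset, length and channel fields. A minimal sketch of extracting the /impulse/read fields with the same sscanf approach:

#include <cstdio>

// Sketch: parse the arguments of one "/impulse/read" line.
// Field order: in_ch out_ch gain delay offset length channel filename
bool parseImpulseRead (const char* args, int& inCh, int& outCh, float& gain,
                       int& delay, int& offset, int& length, int& channel,
                       char (&filename)[100])
{
    // %99s bounds the copy to the buffer, leaving room for the terminator
    return std::sscanf (args, "%i%i%f%i%i%i%i%99s", &inCh, &outCh, &gain,
                        &delay, &offset, &length, &channel, filename) == 8;
}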
Code Example #18
void PitchTuneAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    AudioPlayHead::CurrentPositionInfo posInfo;
    bool isHostGoing_ = false;
    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(posInfo)) {
        isHostGoing_ = posInfo.isPlaying;//(posInfo.isPlaying || posInfo.isRecording);
        updateBpm(posInfo.bpm, posInfo.timeSigDenominator);
    }
    
    PitchTuneAudioProcessorEditor *e = (PitchTuneAudioProcessorEditor*)getActiveEditor();
    if (e) {
        if (e->isVisible()) {
            e->ppq = posInfo.ppqPosition;
            e->type = 1;
            e->triggerAsyncUpdate();
        }
    }

    if (isRecording) {
        if (isHostGoing_) {
            //record when host is playing
            if (sampleBeingRecorded && sampleBeingRecorded->getCursor() == 0) {
                // first time recording, store ppq
                sampleBeingRecorded->startPpq = posInfo.ppqPosition;
                //set recording flag
                sampleBeingRecorded->startRecording();
            }
            float* channelData = buffer.getSampleData (0);
            sampleBeingRecorded->record(channelData, buffer.getNumSamples());
        }
        else {
            if (sampleBeingRecorded && sampleBeingRecorded->isRecording) {
                //store stop ppq
                sampleBeingRecorded->stopPpq = posInfo.ppqPosition;
                //daw has stopped
                stopTransferring();
                //process pitch
                processPitch();
            }
        }
    }
    else {
        //playback the processed
        float* channelData = buffer.getSampleData (0);
        if (isHostGoing_) {
            int nClips = (int)samples.size();
            for (int i = 0; i < nClips; ++i) {
                Sample *curSample = samples[i];
                if (curSample->startPpq >= posInfo.ppqPosition && !curSample->isPlaying) {
                    //reach the start ppq
                    curSample->startPlay();
                }
                
                if (posInfo.ppqPosition >= curSample->stopPpq && curSample->isPlaying) {
                    //reach the end ppq
                    curSample->stopPlay();
                }
                
                if (curSample->isPlaying) {
                    curSample->play(channelData, buffer.getNumSamples(), pitchProcessor->psola, (long)(posInfo.ppqPosition / Utility::numBeatsPerSample));
                }
            }
        }
        
    }

    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
    
    lastBlockPpq = posInfo.ppqPosition;
}
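All scheduling above runs in ppq (quarter-note) time taken from the host. Converting a ppq span into samples needs the tempo; a small helper for reference:

// Sketch: convert a quarter-note position to samples at a given tempo.
double ppqToSamples (double ppq, double bpm, double sampleRate)
{
    const double seconds = ppq * (60.0 / bpm); // one quarter note lasts 60/bpm seconds
    return seconds * sampleRate;
}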
Code Example #19
File: PluginProcessor.cpp Project: OpenDAWN/mcfx
void LowhighpassAudioProcessor::checkFilters()
{
    
    ///////////////////////
    // low cut parameters and filters
    if (_lc_freq_param != _lc_freq_param_)
    {
        _IIR_LC_Coeff = _IIR_LC_Coeff.makeHighPass(getSampleRate(), param2freq(_lc_freq_param));
        
        for (int i = 0; i < _LC_IIR_1.size(); i++) {
            _LC_IIR_1.getUnchecked(i)->setCoefficients(_IIR_LC_Coeff);
            _LC_IIR_2.getUnchecked(i)->setCoefficients(_IIR_LC_Coeff);
        }
        _lc_freq_param_ = _lc_freq_param; // set old value
    }
    // add filters if necessary
    if (_LC_IIR_1.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _LC_IIR_1.size() > 0) {
            _LC_IIR_1.add(new IIRFilter());
            _LC_IIR_1.getLast()->setCoefficients(_IIR_LC_Coeff);
            _LC_IIR_2.add(new IIRFilter());
            _LC_IIR_2.getLast()->setCoefficients(_IIR_LC_Coeff);
        }
    }
    
    ///////////////////////
    // high cut parameters and filters
    if (_hc_freq_param != _hc_freq_param_)
    {
        _IIR_HC_Coeff = _IIR_HC_Coeff.makeLowPass(getSampleRate(), param2freq(_hc_freq_param));
        
        for (int i = 0; i < _HC_IIR_1.size(); i++) {
            _HC_IIR_1.getUnchecked(i)->setCoefficients(_IIR_HC_Coeff);
            _HC_IIR_2.getUnchecked(i)->setCoefficients(_IIR_HC_Coeff);
        }
        _hc_freq_param_ = _hc_freq_param; // set old value
    }
    // add filters if necessary
    if (_HC_IIR_1.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _HC_IIR_1.size() > 0) {
            _HC_IIR_1.add(new IIRFilter());
            _HC_IIR_1.getLast()->setCoefficients(_IIR_HC_Coeff);
            _HC_IIR_2.add(new IIRFilter());
            _HC_IIR_2.getLast()->setCoefficients(_IIR_HC_Coeff);
        }
    }
    
    ///////////////////
    // Peak Filter 1
    if (_pf1_freq_param != _pf1_freq_param_ || _pf1_gain_param != _pf1_gain_param_ || _pf1_q_param != _pf1_q_param_)
    {
        // get new coefficients
        _IIR_PF_Coeff_1 = _IIR_PF_Coeff_1.makePeakFilter(getSampleRate(), param2freq(_pf1_freq_param), param2q(_pf1_q_param), param2gain(_pf1_gain_param));
       // update coefficients for filters
        for (int i = 0; i < _PF_IIR_1.size(); i++) {
            _PF_IIR_1.getUnchecked(i)->setCoefficients(_IIR_PF_Coeff_1);
        }
        
        // save "old" values
        _pf1_freq_param_ = _pf1_freq_param;
        _pf1_gain_param_ = _pf1_gain_param;
        _pf1_q_param_ = _pf1_q_param;
        
    }
    // add filters if necessary
    if (_PF_IIR_1.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _PF_IIR_1.size() > 0) {
            // one filter per missing channel (the original added two per pass,
            // apparently copy-pasted from the two-stage low/high-cut loops above)
            _PF_IIR_1.add(new IIRFilter());
            _PF_IIR_1.getLast()->setCoefficients(_IIR_PF_Coeff_1);
        }
    }
    
    ///////////////////
    // Peak Filter 2
    if (_pf2_freq_param != _pf2_freq_param_ || _pf2_gain_param != _pf2_gain_param_ || _pf2_q_param != _pf2_q_param_)
    {
        // get new coefficients
        _IIR_PF_Coeff_2 = _IIR_PF_Coeff_2.makePeakFilter(getSampleRate(), param2freq(_pf2_freq_param), param2q(_pf2_q_param), param2gain(_pf2_gain_param));
        // update coefficients for filters
        for (int i = 0; i < _PF_IIR_2.size(); i++) {
            _PF_IIR_2.getUnchecked(i)->setCoefficients(_IIR_PF_Coeff_2);
        }
        
        // save "old" values
        _pf2_freq_param_ = _pf2_freq_param;
        _pf2_gain_param_ = _pf2_gain_param;
        _pf2_q_param_ = _pf2_q_param;
        
    }
    // add filters if necessary
    if (_PF_IIR_2.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _PF_IIR_2.size() > 0) {
            // one filter per missing channel (the original added two per pass)
            _PF_IIR_2.add(new IIRFilter());
            _PF_IIR_2.getLast()->setCoefficients(_IIR_PF_Coeff_2);
        }
    }
  
    ///////////////////
    // Low Shelf Filter
    if (_ls_freq_param != _ls_freq_param_ || _ls_gain_param != _ls_gain_param_ || _ls_q_param != _ls_q_param_)
    {
        // get new coefficients
        _IIR_LS_Coeff = _IIR_LS_Coeff.makeLowShelf(getSampleRate(), param2freq(_ls_freq_param), param2q(_ls_q_param), param2gain(_ls_gain_param));
        // update coefficients for filters
        for (int i = 0; i < _LS_IIR.size(); i++) {
            _LS_IIR.getUnchecked(i)->setCoefficients(_IIR_LS_Coeff);
        }
        
        // save "old" values
        _ls_freq_param_ = _ls_freq_param;
        _ls_gain_param_ = _ls_gain_param;
        _ls_q_param_ = _ls_q_param;
        
    }
    // add filters if necessary
    if (_LS_IIR.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _LS_IIR.size() > 0) {
            // one filter per missing channel (the original added two per pass)
            _LS_IIR.add(new IIRFilter());
            _LS_IIR.getLast()->setCoefficients(_IIR_LS_Coeff);
        }
    }
    
    ///////////////////
    // High Shelf Filter
    if (_hs_freq_param != _hs_freq_param_ || _hs_gain_param != _hs_gain_param_ || _hs_q_param != _hs_q_param_)
    {
        // get new coefficients
        _IIR_HS_Coeff = _IIR_HS_Coeff.makeHighShelf(getSampleRate(), param2freq(_hs_freq_param), param2q(_hs_q_param), param2gain(_hs_gain_param));
        // update coefficients for filters
        for (int i = 0; i < _HS_IIR.size(); i++) {
            _HS_IIR.getUnchecked(i)->setCoefficients(_IIR_HS_Coeff);
        }
        
        // save "old" values
        _hs_freq_param_ = _hs_freq_param;
        _hs_gain_param_ = _hs_gain_param;
        _hs_q_param_ = _hs_q_param;
        
    }
    // add filters if necessary
    if (_HS_IIR.size() < getNumInputChannels()) {
        while (getNumInputChannels() - _HS_IIR.size() > 0) {
            // one filter per missing channel (the original added two per pass)
            _HS_IIR.add(new IIRFilter());
            _HS_IIR.getLast()->setCoefficients(_IIR_HS_Coeff);
        }
    }
    
}
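Every section of checkFilters() repeats one pattern: compare a parameter with its cached copy, rebuild coefficients only on change, then grow the per-channel filter array on demand. Factoring it out removes the copy-paste surface; a sketch using the same JUCE types, with the coefficient recipe passed in (only one trigger parameter shown):

// Sketch: shared update logic for one bank of per-channel IIR filters.
template <typename MakeCoeffs>
void updateFilterBank (OwnedArray<IIRFilter>& bank, IIRCoefficients& coeffs,
                       float param, float& cachedParam, int numChannels,
                       MakeCoeffs makeCoeffs)
{
    if (param != cachedParam)                 // rebuild only when the parameter moved
    {
        coeffs = makeCoeffs();
        for (int i = 0; i < bank.size(); ++i)
            bank.getUnchecked (i)->setCoefficients (coeffs);
        cachedParam = param;                  // remember the value we built for
    }

    while (bank.size() < numChannels)         // add filters if necessary
    {
        bank.add (new IIRFilter());
        bank.getLast()->setCoefficients (coeffs);
    }
}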
Code Example #20
File: PluginProcessor.cpp Project: BrainDamage/ambix
//==============================================================================
void Ambix_converterAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
    output_buffer.setSize(std::max(getNumOutputChannels(), getNumInputChannels()), samplesPerBlock);
}
Code Example #21
void TheFunctionAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
// This is the place where you'd normally do the guts of your plugin's
    // audio processing...

        int numberOfSamples = buffer.getNumSamples();

        float* channelDataL = buffer.getWritePointer (0);
        float* channelDataR = buffer.getWritePointer (1);

        tmpBuffer.copyFrom(0, 0, channelDataL, numberOfSamples);
        tmpBuffer.copyFrom(1, 0, channelDataR, numberOfSamples);

        float* inputDataL = tmpBuffer.getWritePointer (0);
        float* inputDataR = tmpBuffer.getWritePointer (1);

		float LinLout; // Left IN Left OUT - Gain
		float LinRout; // Left IN Right OUT - Gain

		float RinLout; // Right IN Left OUT - Gain
		float RinRout; // Right IN Right OUT - Gain


	// Work out L+R channel pan positions
		if (panL < 0.5)
		{
			LinLout = 1;
			LinRout = panL * 2;
		}
		else
		{
			LinLout = ((panL *2) -2) *-1;
		    LinRout = 1;
		}

		if (panR < 0.5)
		{
			RinLout = 1;
			RinRout = panR * 2;
		}
		else
		{
			RinLout = ((panR *2) -2) *-1;
		    RinRout = 1;
		}
	//******************

	// Apply individual channel phase, pan and gain
		float peakLevelL = 0;
		float peakLevelR = 0;
		float RMSLevelL = 0;
		float RMSLevelR = 0;
		for (int i = 0; i < numberOfSamples; ++i)
		{
		// Phase
			if (phaseL >= 0.5)
				inputDataL[i] *= -1;

			if (phaseR >= 0.5)
				inputDataR[i] *= -1;

		// Pan
			channelDataR[i] = (inputDataR[i] * RinRout) + (inputDataL[i] * LinRout);
			channelDataL[i] = (inputDataL[i] * LinLout) + (inputDataR[i] * RinLout);

		// Gain
			channelDataL[i] *= gainL;
			channelDataR[i] *= gainR;

			if (channelDataL[i] > peakLevelL)
				peakLevelL = channelDataL[i];
			if (channelDataR[i] > peakLevelR)
				peakLevelR = channelDataR[i];

			RMSLevelL += std::abs(channelDataL[i]);
			RMSLevelR += std::abs(channelDataR[i]);

		}
	//******************


	// Master Gain
		buffer.applyGain (0, numberOfSamples, gain);


    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
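The two pan branches implement the same piecewise law for each input. As a reusable helper it reads (same behaviour as above, just factored out):

// Sketch: the piecewise pan law from the example. pan in [0, 1];
// the dominant side stays at unity while the other fades linearly,
// and both sides reach unity at centre (0.5).
void panGains (float pan, float& toLeft, float& toRight)
{
    if (pan < 0.5f)
    {
        toLeft  = 1.0f;
        toRight = pan * 2.0f;
    }
    else
    {
        toLeft  = (pan * 2.0f - 2.0f) * -1.0f; // equivalent to 2 - 2*pan
        toRight = 1.0f;
    }
}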
Code Example #22
File: Processor.cpp Project: 0x4d52/KlangFalter
void Processor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{ 
  const int numInputChannels = getNumInputChannels();
  const int numOutputChannels = getNumOutputChannels();
  const size_t samplesToProcess = buffer.getNumSamples();

  // Determine channel data
  float* channelData0 = nullptr;
  float* channelData1 = nullptr;
  if (numInputChannels == 1)
  {    
    channelData0 = buffer.getSampleData(0);
    channelData1 = buffer.getSampleData(0);
  }
  else if (numInputChannels == 2)
  {
    channelData0 = buffer.getSampleData(0);
    channelData1 = buffer.getSampleData(1);
  }

  // Convolution
  _wetBuffer.clear();
  if (numInputChannels > 0 && numOutputChannels > 0)
  {
    float autoGain = 1.0f;
    if (getParameter(Parameters::AutoGainOn))
    {
      autoGain = DecibelScaling::Db2Gain(getParameter(Parameters::AutoGainDecibels));
    }

    // Convolve
    IRAgent* irAgent00 = getAgent(0, 0);
    if (irAgent00 && irAgent00->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 1)
    {
      irAgent00->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent01 = getAgent(0, 1);
    if (irAgent01 && irAgent01->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 2)
    {
      irAgent01->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent10 = getAgent(1, 0);
    if (irAgent10 && irAgent10->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 1)
    {      
      irAgent10->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent11 = getAgent(1, 1);
    if (irAgent11 && irAgent11->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 2)
    {
      irAgent11->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }
  }

  // Stereo width
  if (numOutputChannels >= 2)
  {
    _stereoWidth.updateWidth(getParameter(Parameters::StereoWidth));
    _stereoWidth.process(_wetBuffer.getSampleData(0), _wetBuffer.getSampleData(1), samplesToProcess);
  }

  // Dry/wet gain
  {
    float dryGain0, dryGain1;
    _dryGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::DryDecibels)));
    _dryGain.getSmoothValues(samplesToProcess, dryGain0, dryGain1);
    buffer.applyGainRamp(0, samplesToProcess, dryGain0, dryGain1);
  }
  {
    float wetGain0, wetGain1;
    _wetGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::WetDecibels)));
    _wetGain.getSmoothValues(samplesToProcess, wetGain0, wetGain1);
    _wetBuffer.applyGainRamp(0, samplesToProcess, wetGain0, wetGain1);
  }

  // Level measurement (dry)
  if (numInputChannels == 1)
  {    
    _levelMeasurementsDry[0].process(samplesToProcess, buffer.getSampleData(0));
    _levelMeasurementsDry[1].reset();
  }
  else if (numInputChannels == 2)
  {
    _levelMeasurementsDry[0].process(samplesToProcess, buffer.getSampleData(0));
    _levelMeasurementsDry[1].process(samplesToProcess, buffer.getSampleData(1));
  }

  // Sum wet to dry signal
  {
    float dryOnGain0, dryOnGain1;
    _dryOn.updateValue(getParameter(Parameters::DryOn) ? 1.0f : 0.0f);
    _dryOn.getSmoothValues(samplesToProcess, dryOnGain0, dryOnGain1);
    buffer.applyGainRamp(0, samplesToProcess, dryOnGain0, dryOnGain1);
  }
  {
    float wetOnGain0, wetOnGain1;
    _wetOn.updateValue(getParameter(Parameters::WetOn) ? 1.0f : 0.0f);
    _wetOn.getSmoothValues(samplesToProcess, wetOnGain0, wetOnGain1);
    if (numOutputChannels > 0)
    {
      buffer.addFromWithRamp(0, 0, _wetBuffer.getSampleData(0), samplesToProcess, wetOnGain0, wetOnGain1);
    }
    if (numOutputChannels > 1)
    {
      buffer.addFromWithRamp(1, 0, _wetBuffer.getSampleData(1), samplesToProcess, wetOnGain0, wetOnGain1);
    }
  }

  // Level measurement (wet/out)
  if (numOutputChannels == 1)
  {
    _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getSampleData(0));
    _levelMeasurementsWet[1].reset();
    _levelMeasurementsOut[0].process(samplesToProcess, buffer.getSampleData(0));
    _levelMeasurementsOut[1].reset();
  }
  else if (numOutputChannels == 2)
  {
    _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getSampleData(0));
    _levelMeasurementsWet[1].process(samplesToProcess, _wetBuffer.getSampleData(1));
    _levelMeasurementsOut[0].process(samplesToProcess, buffer.getSampleData(0));
    _levelMeasurementsOut[1].process(samplesToProcess, buffer.getSampleData(1));
  }

  // In case we have more outputs than inputs, we'll clear any output
  // channels that didn't contain input data, (because these aren't
  // guaranteed to be empty - they may contain garbage).
  for (int i=numInputChannels; i<numOutputChannels; ++i)
  {
    buffer.clear(i, 0, buffer.getNumSamples());
  }

  // Update beats per minute info
  float beatsPerMinute = 0.0f;
  juce::AudioPlayHead* playHead = getPlayHead();
  if (playHead)
  {
    juce::AudioPlayHead::CurrentPositionInfo currentPositionInfo;
    if (playHead->getCurrentPosition(currentPositionInfo))
    {
      beatsPerMinute = static_cast<float>(currentPositionInfo.bpm);
    }
  }
  if (::fabs(_beatsPerMinute.exchange(beatsPerMinute)-beatsPerMinute) > 0.001f)
  {
    notifyAboutChange();
  }
}
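
A detail worth noting in the KlangFalter example: every gain change goes through getSmoothValues() and applyGainRamp(), i.e. gains are interpolated across the block instead of jumping, which avoids zipper noise when parameters move. A minimal sketch of that idea, using a hypothetical RampedGain helper rather than KlangFalter's actual smoother:

#include <cstddef>

// Linear gain ramp over one audio block: interpolate from the gain the
// previous block ended on to the new target, then latch the target so
// float rounding cannot accumulate drift across blocks.
struct RampedGain
{
    float current = 1.0f;

    void processBlock(float* data, size_t numSamples, float target)
    {
        const float step = (target - current) / (float) numSamples;
        for (size_t i = 0; i < numSamples; ++i)
        {
            data[i] *= current;
            current += step;
        }
        current = target;
    }
};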
Code example #23
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer,
									MidiBuffer& midiMessages)
{
	smoothParameters();
	const int numInputChannels = getNumInputChannels();
	int numSamples = buffer.getNumSamples();

	// set up the parameters to be used
	float preDelay = (float)params[PREDELAY].getSmoothedValue();
	float earlyDecay = (float)params[EARLYDECAY].getSmoothedNormalisedValue();
	earlyDecay = sqrt(sqrt(earlyDecay));
	float late = (float)params[EARLYLATEMIX].getSmoothedNormalisedValue();
	float early = 1.0f - late;
	float fbCoeff = (float)params[FBCOEFF].getSmoothedNormalisedValue();
	fbCoeff = -sqrt(sqrt(fbCoeff));
	float delayTime = (float)params[DELTIME].getSmoothedValue();
	float filterCf = (float)params[FILTERCF].getSmoothedValue();
	float allpassCoeff = (float)params[DIFFUSION].getSmoothedNormalisedValue();
	float spread1 = (float)params[SPREAD].getSmoothedNormalisedValue();
	float spread2 = 1.0f - spread1;
	float lowEQGain = (float)decibelsToAbsolute(params[LOWEQ].getSmoothedValue());
	float highEQGain = (float)decibelsToAbsolute(params[HIGHEQ].getSmoothedValue());
	float wet = (float)params[WETDRYMIX].getSmoothedNormalisedValue();
	float dry = 1.0f - wet;

	float width = (spread1-0.5f) * 0.1f * delayTime;


	// we can only deal with 2-in, 2-out at the moment
	if (numInputChannels == 2)
	{
		// pre-delay section
		preDelayFilterL.setDelayTime(currentSampleRate, preDelay);
		preDelayFilterR.setDelayTime(currentSampleRate, preDelay);


		// early reflections section
		int roomShape = roundFloatToInt(params[ROOMSHAPE].getValue());
		float delayCoeff = 0.08f * delayTime;
		if (roomShape != prevRoomShape)
		{
			delayLineL.removeAllTaps();
			delayLineR.removeAllTaps();

			for(int i = 0; i < 5; i++) {
				delayLineL.addTapAtTime(earlyReflectionCoeffs[roomShape-3][i], currentSampleRate);
				delayLineR.addTapAtTime(earlyReflectionCoeffs[roomShape-3][i], currentSampleRate);
			}

			// we have to set this here in case the delay time has not changed
			delayLineL.setTapSpacingExplicitly(delayCoeff);
			delayLineR.setTapSpacingExplicitly(delayCoeff + spread1);

			prevRoomShape = roomShape;
		}

		delayLineL.setTapSpacing(delayCoeff);
		delayLineL.scaleFeedbacks(earlyDecay);
		delayLineR.setTapSpacing(delayCoeff + spread1);
		delayLineR.scaleFeedbacks(earlyDecay);


		// comb filter section
		for (int i = 0; i < 8; ++i)
		{
			delayTime *= filterMultCoeffs[i];
			delayTime += Random::getSystemRandom().nextInt(100)*0.0001;

			setupFilter(combFilterL[i], fbCoeff, delayTime, filterCf);
			setupFilter(combFilterR[i], fbCoeff, delayTime + width, filterCf);
		}

		// allpass section
		for (int i = 0; i < 4; ++i)
		{
			delayTime *= allpassMultCoeffs[i];
			delayTime -= Random::getSystemRandom().nextInt(100)*0.0001;

			allpassFilterL[i].setGain(allpassCoeff);
			allpassFilterL[i].setDelayTime(currentSampleRate, delayTime);

			allpassFilterR[i].setGain(allpassCoeff);
			allpassFilterR[i].setDelayTime(currentSampleRate, delayTime + width);
		}

		// final EQ section
		lowEQL.makeLowShelf(currentSampleRate, 500, 1, lowEQGain);
		lowEQR.makeLowShelf(currentSampleRate, 500, 1, lowEQGain);
		highEQL.makeHighShelf(currentSampleRate, 3000, 1, highEQGain);
		highEQR.makeHighShelf(currentSampleRate, 3000, 1, highEQGain);


		//========================================================================
		//	Processing
		//========================================================================
		int noSamples = buffer.getNumSamples();
		int noChannels = buffer.getNumChannels();

		// create a copy of the input buffer so we can apply a wet/dry mix later
		AudioSampleBuffer wetBuffer(noChannels, noSamples);
		wetBuffer.copyFrom(0, 0, buffer, 0, 0, noSamples);
		wetBuffer.copyFrom(1, 0, buffer, 1, 0, noSamples);

		// mono mix wet buffer (used for stereo spread later)
		float *pfWetL = wetBuffer.getSampleData(0);
		float *pfWetR = wetBuffer.getSampleData(1);
		while (--numSamples >= 0)
		{
			*pfWetL = *pfWetR = (0.5f * (*pfWetL + *pfWetR));
			pfWetL++;
			pfWetR++;
		}
		numSamples = buffer.getNumSamples();

		// apply the pre-delay to the wet buffer
		preDelayFilterL.processSamples(wetBuffer.getSampleData(0), noSamples);
		preDelayFilterR.processSamples(wetBuffer.getSampleData(1), noSamples);


		// create a buffer to hold the early reflections
		AudioSampleBuffer earlyReflections(noChannels, noSamples);
		earlyReflections.copyFrom(0, 0, wetBuffer, 0, 0, noSamples);
		earlyReflections.copyFrom(1, 0, wetBuffer, 1, 0, noSamples);

		// and process the early reflections
		delayLineL.processSamples(earlyReflections.getSampleData(0), noSamples);
		delayLineR.processSamples(earlyReflections.getSampleData(1), noSamples);


		// create a buffer to hold the late reverb
		AudioSampleBuffer lateReverb(noChannels, noSamples);
		lateReverb.clear();

		float *pfLateL = lateReverb.getSampleData(0);
		float *pfLateR = lateReverb.getSampleData(1);
		pfWetL = wetBuffer.getSampleData(0);
		pfWetR = wetBuffer.getSampleData(1);

		// comb filter section
		for (int i = 0; i < 8; ++i)
		{
			combFilterL[i].processSamplesAdding(pfWetL, pfLateL, noSamples);
			combFilterR[i].processSamplesAdding(pfWetR, pfLateR, noSamples);
		}

		// allpass filter section
		for (int i = 0; i < 4; ++i)
		{
			allpassFilterL[i].processSamples(lateReverb.getSampleData(0), noSamples);
			allpassFilterR[i].processSamples(lateReverb.getSampleData(1), noSamples);
		}


		// clear wet buffer
		wetBuffer.clear();
		// add early reflections to wet buffer
		wetBuffer.addFrom(0, 0, earlyReflections, 0, 0, noSamples, early);
		wetBuffer.addFrom(1, 0, earlyReflections, 1, 0, noSamples, early);
		// add late reverb to wet buffer
		lateReverb.applyGain(0, noSamples, 0.1f);
		wetBuffer.addFrom(0, 0, lateReverb, 0, 0, noSamples, late);
		wetBuffer.addFrom(1, 0, lateReverb, 1, 0, noSamples, late);

		// final EQ
		lowEQL.processSamples(pfWetL, noSamples);
		lowEQR.processSamples(pfWetR, noSamples);

		highEQL.processSamples(pfWetL, noSamples);
		highEQR.processSamples(pfWetR, noSamples);

		// create stereo spread
		while (--numSamples >= 0)
		{
			float fLeft = *pfWetL;
			float fRight = *pfWetR;
			*pfWetL = (fLeft * spread1) + (fRight * spread2);
			*pfWetR = (fRight * spread1) + (fLeft * spread2);
			pfWetL++;
			pfWetR++;
		}
		numSamples = buffer.getNumSamples();

		// apply wet/dry mix gains
		wetBuffer.applyGain(0, noSamples, wet);
		buffer.applyGain(0, noSamples, dry);

		// add wet buffer to output buffer
		buffer.addFrom(0, 0, wetBuffer, 0, 0, noSamples);
		buffer.addFrom(1, 0, wetBuffer, 1, 0, noSamples);
	}
	//========================================================================


    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
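
The comb filter section above sums eight feedback combs into the late-reverb buffer, the classic Schroeder/Freeverb structure. A self-contained sketch of one such comb follows, assuming a fixed integer delay and no damping filter in the feedback path (which the plugin's real filters may well have):

#include <vector>

// Feedback comb filter: y[n] = x[n - D] + g * y[n - D].
// processAdding() accumulates into the output, matching how the combs
// above are summed into the late-reverb buffer.
class FeedbackComb
{
public:
    FeedbackComb(int delaySamples, float feedback)
        : buffer((size_t) delaySamples, 0.0f), g(feedback) {}

    void processAdding(const float* in, float* out, int numSamples)
    {
        for (int i = 0; i < numSamples; ++i)
        {
            const float delayed = buffer[(size_t) pos];
            buffer[(size_t) pos] = in[i] + g * delayed; // feed back into the line
            if (++pos == (int) buffer.size())
                pos = 0;
            out[i] += delayed; // accumulate into the mix
        }
    }

private:
    std::vector<float> buffer;
    int pos = 0;
    float g;
};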
Code example #24
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
	smoothParameters();
	
	const int numInputChannels = getNumInputChannels();


	// create parameters to use
	fPreCf = params[PRE].getSmoothedValue();
	fPostCf = params[POST].getSmoothedValue();
	float fInGain = params[INGAIN].getSmoothedValue();
	float fOutGain = params[OUTGAIN].getSmoothedValue();
	float fColour = 100 * params[COLOUR].getSmoothedNormalisedValue();

	// set up array of pointers to samples
	// (fixed-size array: a variable-length array is a compiler extension,
	// and only mono and stereo are handled below anyway)
	int numSamples = buffer.getNumSamples();
	int samplesLeft = numSamples;
	float* pfSample[2] = { nullptr, nullptr };
	for (int channel = 0; channel < jmin (numInputChannels, 2); channel++)
		pfSample[channel] = buffer.getSampleData(channel);
		
		
	if (numInputChannels == 2)
	{
		// input filter the samples
		inFilterL.processSamples(pfSample[0], numSamples);
		inFilterR.processSamples(pfSample[1], numSamples);
		
		//========================================================================
		while (--samplesLeft >= 0)
		{
			// distort
			*pfSample[0] *= fInGain;
			*pfSample[1] *= fInGain;
		
			// shape (the limit of tanh is ±1, so no explicit clipping is required)
			*pfSample[0] = tanh(*pfSample[0] * fColour);
			*pfSample[1] = tanh(*pfSample[1] * fColour);

			// apply output gain
			*pfSample[0] *= fOutGain;
			*pfSample[1] *= fOutGain;
		
			// increment sample pointers
			pfSample[0]++;
			pfSample[1]++;
		}
		//========================================================================
		
		// output filter the samples
		outFilterL.processSamples(buffer.getSampleData(0), numSamples);
		outFilterR.processSamples(buffer.getSampleData(1), numSamples);
	}
	else if (numInputChannels == 1)
	{
		// input filter the samples
		inFilterL.processSamples(pfSample[0], numSamples);
		
		//========================================================================
		while (--samplesLeft >= 0)
		{
			// distort
			*pfSample[0] *= fInGain;
			
			// shape (the limit of tanh is ±1, so no explicit clipping is required)
			*pfSample[0] = tanh(*pfSample[0] * fColour);
			
			// apply output gain
			*pfSample[0] *= fOutGain;
			
			// increment sample pointers
			pfSample[0]++;
		}
		//========================================================================
		
		// output filter the samples
		outFilterL.processSamples(buffer.getSampleData(0), numSamples);
	}
	
		
    // clear any output channels that didn't contain input data
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
}
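
The distortion core in isolation is just input gain, a tanh waveshaper, and output gain; since tanh saturates smoothly toward ±1, no separate clipping stage is needed. A standalone sketch (the parameter names are illustrative, not the plugin's):

#include <cmath>

// Soft-clip a block of samples: drive into tanh, then scale the output.
// Mathematically equivalent to the per-sample loop above.
void tanhDistort(float* samples, int numSamples,
                 float inGain, float colour, float outGain)
{
    for (int i = 0; i < numSamples; ++i)
        samples[i] = std::tanh(samples[i] * inGain * colour) * outGain;
}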
Code example #25
File: PluginProcessor.cpp  Project: BrainDamage/ambix
void Ambix_converterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // do the audio processing....
    // defines are until 10th order 3d (121 channels)!
    
    // resize output buffer if necessary
    int NumSamples = buffer.getNumSamples();
    
    output_buffer.setSize(std::max(getNumOutputChannels(), getNumInputChannels()), NumSamples);
  
    output_buffer.clear(); // in case of 2d where we might throw away some channels
    
    // std::cout << "NumInputChannels: " << getNumInputChannels() << " Buffersize: " << buffer.getNumChannels() << std::endl;
    
    for (int i = 0; i < getNumInputChannels(); i++) // iterate over acn channel numbering
    {
        
        int l = 0; // sometimes called n
        int m = 0; // -l...l
        
        ACNtoLM(i, l, m);
        
        
        int _in_ch_seq = in_ch_seq[i];
        int _out_ch_seq = out_ch_seq[i];
        
        if (in_2d)
        {
            _in_ch_seq = in_2d_ch_seq[ACN3DtoACN2D(i)];
            if (_in_ch_seq > getNumInputChannels())
                _in_ch_seq = -1;
        }
        
        if (out_2d)
        {
            _out_ch_seq = out_2d_ch_seq[ACN3DtoACN2D(i)];
            if (_out_ch_seq > getNumOutputChannels())
                _out_ch_seq = -1;
        }
        
        
        // std::cout << "InputCh: " << i << " IN_CHANNEL: " << _in_ch_seq << " OUT_CHANNEL: " << _out_ch_seq << std::endl;
        
        if (_in_ch_seq != -1 && _out_ch_seq != -1 && _in_ch_seq < getNumInputChannels())
        {
            // copy input channels to output channels!
            output_buffer.copyFrom(_out_ch_seq, 0, buffer, _in_ch_seq, 0, NumSamples);
            
            //  apply normalization conversion gain if different input/output scheme
            if (!ch_norm_flat) {
                output_buffer.applyGain(_out_ch_seq, 0, NumSamples, in_ch_norm[i]);
            }
            
            // do the inversions...
            if (flip_cs_phase || flip_param || flop_param || flap_param)
            {
                
                
                signed int flip, flop, flap, total;
                flip = flop = flap = total = 1;
                
                // sign flips taken from the paper "Symmetries of Spherical Harmonics" by Michael Chapman (Ambisonics Symposium 2009)
                
                // mirror left right
                if ( flip_param && (m < 0) ) // m < 0 -> invert
                    flip = -1;
                
                // mirror front back
                if ( flop_param && ( ((m < 0) && !(m % 2)) || ((m >= 0) && (m % 2)) ) ) // m < 0 and even || m >= 0 and odd
                    flop = -1;
                
                // mirror top bottom
                if ( flap_param && ( (l + m) % 2 ) ) // l+m odd   ( (odd, even) or (even, odd) )
                    flap = -1;
                
                // compute the total sign multiplier
                if (flip_cs_phase)
                    total = acn_cs[i] * flip * flop * flap;
                else
                    total = flip * flop * flap;
                
                output_buffer.applyGain(_out_ch_seq, 0, NumSamples, (float)total);
            }
        } // index not -1
        
    } // iterate over all input channels
    
    
    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    /*
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        output_buffer.clear (i, 0, buffer.getNumSamples());
    }
    */
    
    buffer = output_buffer;
}
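
The ACNtoLM() call above maps an Ambisonic Channel Number to the spherical-harmonic degree l and order m. Under the standard ACN convention, ACN = l(l+1) + m, which inverts as shown below; this is a sketch of the likely implementation, not the plugin's actual code.

#include <cmath>

// Invert ACN = l * (l + 1) + m:
//   l = floor(sqrt(ACN)),  m = ACN - l * (l + 1)   (m runs from -l to l)
void acnToLM(int acn, int& l, int& m)
{
    l = (int) std::floor(std::sqrt((double) acn));
    m = acn - l * (l + 1);
}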
Code example #26
void AdmvAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	double cp[2];
	
	int channelCount = 0;
	size_t sampleRate = getSampleRate();

	for (int channel = 0; channel < (getNumInputChannels() - 1); channel += 2)
	{
		// No need to process signal if editor is closed
		if (getActiveEditor() == NULL)
		{
			break;
		}

		// TODO: investigate how to get the number of input channels actually connected to the plugin.
		// It seems that getNumInputChannels() always returns the maximum defined by JucePlugin_MaxNumInputChannels.
		// This solution is bad because it iterates through all of the input buffers.
		if (!isBlockInformative(buffer, channel / 2))
		{
			mGonioSegments[channel / 2] = GonioPoints<double>();
			mSpectroSegments[channel / 2] = tomatl::dsp::SpectrumBlock();

			continue;
		}
		
		channelCount += 2;

		float* l = buffer.getWritePointer(channel + 0);
		float* r = buffer.getWritePointer(channel + 1);

		for (int i = 0; i < buffer.getNumSamples(); ++i)
		{
			std::pair<double, double>* res = mGonioCalcs[channel / 2]->handlePoint(l[i], r[i], sampleRate);

			cp[0] = l[i];
			cp[1] = r[i];

			mSpectroCalcs[channel / 2]->checkSampleRate(getSampleRate());
			tomatl::dsp::SpectrumBlock spectroResult = mSpectroCalcs[channel / 2]->process((double*)&cp);

			if (res != NULL)
			{
				mGonioSegments[channel / 2] = GonioPoints<double>(res, mGonioCalcs[channel / 2]->getSegmentLength(), channel / 2, sampleRate);
				mLastGonioScale = mGonioCalcs[channel / 2]->getCurrentScaleValue();
			}

			if (spectroResult.mLength > 0)
			{
				mSpectroSegments[channel / 2] = spectroResult;
			}
		}
	}
	
	mCurrentInputCount = channelCount;

	if (getState().mOutputMode == AdmvPluginState::outputMute)
	{
		buffer.clear();
	}
	else
	{
		// In case we have more outputs than inputs, we'll clear any output
		// channels that didn't contain input data, (because these aren't
		// guaranteed to be empty - they may contain garbage).
		for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
		{
			buffer.clear(i, 0, buffer.getNumSamples());
		}
	}
}
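
The per-sample handlePoint() call above converts each L/R pair into a point for the goniometer display. A common way to compute such a point (an assumption here — the plugin's own math isn't shown) is to rotate the (L, R) pair by 45 degrees so mono content maps to the vertical axis:

#include <utility>

// Rotate an (L, R) sample pair by 45 degrees: mono (L == R) lands on the
// vertical axis, out-of-phase material on the horizontal axis.
// Hypothetical helper; the real handlePoint() also handles scaling and
// segment buffering.
std::pair<double, double> gonioPoint(double l, double r)
{
    const double k = 0.7071067811865476; // 1 / sqrt(2)
    return std::make_pair((r - l) * k,   // x: side signal
                          (l + r) * k);  // y: mid signal
}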
Code example #27
File: PluginProcessor.cpp  Project: OpenDAWN/mcfx
void LowhighpassAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    checkFilters();
    
    // std::cout << "Processing channels: " << getNumInputChannels() << std::endl;
    // std::cout << "Buffer channels: " << buffer.getNumChannels() << std::endl;
    
    int numSamples = buffer.getNumSamples();
    
    for (int channel = 0; channel < getNumInputChannels(); channel++)
    {
        // LOW CUT
        if (_lc_on_param > 0.5f) {
            
            _LC_IIR_1.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
            // second stage -> 4th order butterworth
            if (_lc_order_param > 0.5f)
                _LC_IIR_2.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
        }
        
        
        // HIGH CUT
        if (_hc_on_param > 0.5f) {
            
            _HC_IIR_1.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
            // second stage -> 4th order butterworth
            if (_hc_order_param > 0.5f)
                _HC_IIR_2.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
        }
        
        // PF1
        if (_pf1_gain_param != 0.5f) {
            
            _PF_IIR_1.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
        }
        
        // PF2
        if (_pf2_gain_param != 0.5f) {
            
            _PF_IIR_2.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
        }
        
        // LS
        if (_ls_gain_param != 0.5f) {
            
            _LS_IIR.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
        }
        
        // HS
        if (_hs_gain_param != 0.5f) {
            
            _HS_IIR.getUnchecked(channel)->processSamples(buffer.getWritePointer(channel), numSamples);
            
        }
        
    }
    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
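
The low-cut and high-cut branches above run a second IIR stage to reach a 4th-order response. One point that is easy to get wrong: a true 4th-order Butterworth is not two identical 2nd-order Butterworth sections (that cascade gives a Linkwitz-Riley response); the two sections need staggered Q values. The sketch below shows the idea with a hypothetical Biquad type and RBJ-cookbook high-pass coefficients; the plugin's actual coefficients are not visible here.

#include <cmath>

// Direct Form I biquad with RBJ-cookbook high-pass coefficients
// (hypothetical stand-in for the plugin's IIR filter objects).
struct Biquad
{
    double b0 = 1, b1 = 0, b2 = 0, a1 = 0, a2 = 0; // normalised by a0
    double x1 = 0, x2 = 0, y1 = 0, y2 = 0;         // filter state

    void makeHighPass(double fs, double fc, double q)
    {
        const double pi    = 3.141592653589793;
        const double w0    = 2.0 * pi * fc / fs;
        const double alpha = std::sin(w0) / (2.0 * q);
        const double cw    = std::cos(w0);
        const double a0    = 1.0 + alpha;
        b0 = (1.0 + cw) * 0.5 / a0;
        b1 = -(1.0 + cw) / a0;
        b2 = b0;
        a1 = -2.0 * cw / a0;
        a2 = (1.0 - alpha) / a0;
    }

    void process(float* data, int n)
    {
        for (int i = 0; i < n; ++i)
        {
            const double x = data[i];
            const double y = b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
            x2 = x1; x1 = x;
            y2 = y1; y1 = y;
            data[i] = (float) y;
        }
    }
};

// A 4th-order Butterworth high-pass needs two sections with staggered Qs:
// Q1 = 1 / (2 cos(pi/8)) ~= 0.5412 and Q2 = 1 / (2 cos(3 pi/8)) ~= 1.3066.
void butterworthHighPass4(Biquad& stage1, Biquad& stage2,
                          float* data, int n, double fs, double fc)
{
    stage1.makeHighPass(fs, fc, 0.54119610);
    stage2.makeHighPass(fs, fc, 1.30656296);
    stage1.process(data, n);
    stage2.process(data, n);
}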
Code example #28
void PitchedDelayAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{
	ScopedSSECSR csr;

	const int numSamples = buffer.getNumSamples();
	float* chL = buffer.getSampleData(0);
	float* chR = buffer.getSampleData(buffer.getNumChannels()-1);


	float* dspProcL = osBufferL[0];
	float* dspProcR = osBufferR[0];

	{
		for (int i=0; i< numSamples; ++i)
		{
			dspProcL[i] = chL[i];
			dspProcR[i] = chR[i];
		}

		int blockSize = numSamples;

		for (int i=0; i<downSamplers.size(); ++i)
		{
			blockSize /= 2;
			DownSampler2x* down = downSamplers[i];
			jassert(down != nullptr);
			down->processBlock(dspProcL, dspProcR, osBufferL[i+1], osBufferR[i+1], blockSize);
			dspProcL = osBufferL[i+1];
			dspProcR = osBufferR[i+1];
		}

		for (int i=0; i<NUMDELAYTABS; ++i)
		{
			DelayTabDsp* dsp = delays[i];
			dsp->processBlock(dspProcL, dspProcR, blockSize);
		}

		for (int n=0; n<blockSize; ++n)
		{
			dspProcL[n] = 0;
			dspProcR[n] = 0;
		}

		for (int i=0; i<NUMDELAYTABS; ++i)
		{
			DelayTabDsp* dsp = delays[i];

			if (! dsp->isEnabled())
				continue;

			const float* procL = dsp->getLeftData();
			const float* procR = dsp->getRightData();

			for (int n=0; n<blockSize; ++n)
			{
				dspProcL[n] += procL[n];
				dspProcR[n] += procR[n];
			}
		}

		for (int i=upSamplers.size()-1; i >= 0; --i)
		{
			UpSampler2x* up = upSamplers[i];
			jassert(up != nullptr);
			up->processBlock(dspProcL, dspProcR, osBufferL[i], osBufferR[i], blockSize);
			blockSize *= 2;
			dspProcL = osBufferL[i];
			dspProcR = osBufferR[i];
		}
	}

	for (int i=0; i<numSamples; ++i)
	{
		chL[i] *= params[kDryVolume] * 3.981f; // params[kDryVolume] == 1 -> +12 dB
		chR[i] *= params[kDryVolume] * 3.981f; // params[kDryVolume] == 1 -> +12 dB
	}

	latencyCompensation.processBlock(chL, chR, numSamples);

	for (int i=0; i<numSamples; ++i)
	{
		chL[i] = (chL[i] + dspProcL[i]) * params[kMasterVolume] * 3.981f; // params[kMasterVolume] == 1 -> +12 dB
		chR[i] = (chR[i] + dspProcR[i]) * params[kMasterVolume] * 3.981f; // params[kMasterVolume] == 1 -> +12 dB
	}


  for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
  {
      buffer.clear (i, 0, buffer.getNumSamples());
  }
}
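
The constant 3.981f used for the dry and master volume above is 10^(12/20), i.e. the linear gain corresponding to +12 dB when the normalised parameter equals 1. The general conversion:

#include <cmath>

// Decibels to linear gain: gain = 10^(dB / 20).
// dbToGain(12.0f) ~= 3.9811f, the constant used above.
inline float dbToGain(float db)
{
    return std::pow(10.0f, db / 20.0f);
}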
Code example #29
 bool isOutputChannelStereoPair (int index) const   { return isPositiveAndBelow (index, getNumOutputChannels()); }
Code example #30
void DRowAudioFilter::processBlock (AudioSampleBuffer& buffer,
									MidiBuffer& midiMessages)
{
	waveformDisplayPre->processBlock(buffer.getSampleData(0), buffer.getNumSamples());
	
	smoothParameters();
	
	const int numInputChannels = getNumInputChannels();
	const float oneOverNumInputChannels = 1.0f / numInputChannels;
	int numSamples = buffer.getNumSamples();


	// create parameters to use
	float fThresh = params[THRESH].getNormalisedValue();
	float fClosedLevel = params[REDUCTION].getSmoothedNormalisedValue();
	
	float fAttack	= params[ATTACK].getSmoothedValue();
	float fHold	= params[HOLD].getSmoothedValue();
	float fRelease	= params[RELEASE].getSmoothedValue();
	
	float fMonitor = params[MONITOR].getNormalisedValue();
	float fFilter = params[FILTER].getNormalisedValue();
	
	// stereo
	if (numInputChannels == 2)
	{
		// set up array of pointers to samples
		float* pfSample[2];
		pfSample[0] = buffer.getSampleData(0);
		pfSample[1] = buffer.getSampleData(1);
		
		
		// set up the mono mix buffer and zero it
		// (the AudioSampleBuffer constructor does not guarantee cleared memory)
		AudioSampleBuffer mixedBuffer(1, buffer.getNumSamples());
		mixedBuffer.clear();
		float* pfMixedSample = mixedBuffer.getSampleData(0);

		for(int i = 0; i < mixedBuffer.getNumSamples(); i++)
		{
			*pfMixedSample += oneOverNumInputChannels * (*pfSample[0]);
			pfSample[0]++;
			*pfMixedSample += oneOverNumInputChannels * (*pfSample[1]);
			pfSample[1]++;
			pfMixedSample++;
		}
		
		// filter mixed buffer
		if(fFilter > 0.5f)
			bandpassFilter.processSamples(mixedBuffer.getSampleData(0), mixedBuffer.getNumSamples());
		
		
		// reset buffer pointers
		pfSample[0] = buffer.getSampleData(0);
		pfSample[1] = buffer.getSampleData(1);
		pfMixedSample = mixedBuffer.getSampleData(0);
		
				
		//========================================================================
		while (--numSamples >= 0)
		{
			float fMix = *pfMixedSample;
			float fAbsMix = fabsf(fMix);
			pfMixedSample++;
			
			if (fAbsMix > fMax)
				fMax = fAbsMix;
			
			if (iMeasuredItems >= iMeasureLength)
			{
				if ( (fMax >= fThresh) && (currentState != attack) )		// opening gate
				{
					DBG("Attack");
					currentState = attack;
					noStageSamples = fAttack * currentSampleRate * 0.001;
					fOutMultIncriment = (1.0 - fOutMultCurrent) / noStageSamples;
					currentStageSample = 0;
					changingState = true;
				}
				else if ( (fMax < fThresh) && ((currentState == attack) || (currentState == open)) ) // closing gate, hold
				{
					DBG("Hold");
					currentState = hold;
					noStageSamples = fHold * currentSampleRate * 0.001;
					fOutMultIncriment = 0;
					currentStageSample = 0;
					changingState = true;
				}
				
				fMax = 0;
				iMeasuredItems = 0;
			}
			iMeasuredItems++;
			
			
			// apply appropriate gains to output
			if (fMonitor > 0.5f)	{
				*pfSample[0] = fMix;
				*pfSample[1] = fMix;
			}
			else	{
				*pfSample[0] *= fOutMultCurrent;
				*pfSample[1] *= fOutMultCurrent;
			}

			// increment sample pointers
			pfSample[0]++;			
			pfSample[1]++;			
			

			fOutMultCurrent += fOutMultIncriment;
			currentStageSample++;
			if ( (currentStageSample == noStageSamples) && changingState)
			{
				DBG("Stage finished");
				fOutMultIncriment = 0.0;
				currentStageSample = 1;
				noStageSamples = 0;
				
				// if ended hold do release stage
				if (currentState == hold)
				{
					DBG("Release");
					currentState = release;
					noStageSamples = fRelease * currentSampleRate * 0.001;
					fOutMultIncriment = -((fOutMultCurrent - fClosedLevel) / noStageSamples);
					currentStageSample = 0;
					changingState = true;
				}
				else if (currentState == release)	{
					DBG("Closed");
					currentState = closed;
					changingState = false;
				}
				else if (currentState == attack)	{
					DBG("Open");
					currentState = open;
					changingState = false;
				}
			}
		}
		//========================================================================	
		
		// update the sample to use in the meter display
//		RMSLeft = buffer.getRMSLevel(0, 0, buffer.getNumSamples());
//		peakLeft = buffer.getMagnitude(0, 0, buffer.getNumSamples());
//		RMSRight = buffer.getRMSLevel(1 & (numInputChannels-1), 0, buffer.getNumSamples());
//		peakRight = buffer.getMagnitude(1 & (numInputChannels-1), 0, buffer.getNumSamples());
		
		waveformDisplayPost->processBlock(buffer.getSampleData(0), buffer.getNumSamples());

		RMSLeft = fOutMultIncriment;
		RMSRight = fOutMultCurrent;

	}

		
    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

}
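
Stripped of the buffer plumbing, the gate above is a four-state envelope machine: attack ramps the gain up once the detector crosses the threshold, hold keeps the gate open for a while after the signal drops, release ramps down to the closed level, and closed pins the gain at the floor. A compact standalone sketch of that state machine (hypothetical names, exponential-style ramps rather than the plugin's linear increments):

#include <algorithm>

class GateEnvelope
{
public:
    void setup(double sampleRate, float attackMs, float holdMs,
               float releaseMs, float thresh, float closedGain)
    {
        attackSamples  = std::max(1, (int) (attackMs  * 0.001 * sampleRate));
        holdSamples    = std::max(1, (int) (holdMs    * 0.001 * sampleRate));
        releaseSamples = std::max(1, (int) (releaseMs * 0.001 * sampleRate));
        threshold = thresh;
        floorGain = closedGain;
    }

    // Returns the gain for one sample, given the detector level for it.
    float tick(float level)
    {
        if (level >= threshold)
        {
            state = Open;              // (re)open on any threshold crossing
        }
        else if (state == Open)
        {
            state = Hold;              // signal dropped: start the hold timer
            counter = 0;
        }

        switch (state)
        {
            case Open:                 // approach fully open over ~attack time
                gain += (1.0f - gain) / (float) attackSamples;
                break;
            case Hold:                 // keep the current gain until hold expires
                if (++counter >= holdSamples) { state = Closing; counter = 0; }
                break;
            case Closing:              // decay toward the closed level
                gain -= (gain - floorGain) / (float) releaseSamples;
                if (++counter >= releaseSamples) state = Closed;
                break;
            case Closed:
                gain = floorGain;
                break;
        }
        return gain;
    }

private:
    enum State { Closed, Open, Hold, Closing };
    State state = Closed;
    int counter = 0;
    int attackSamples = 1, holdSamples = 1, releaseSamples = 1;
    float threshold = 0.1f, floorGain = 0.0f, gain = 0.0f;
};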