//==============================================================================
void PluginAudioProcessor::prepareToPlay (double sRate, int samplesPerBlock)
{
    synth.setCurrentPlaybackSampleRate(sRate);
    synth.clearVoices();

    // add eight voices for polyphony
    for (int i = 0; i < 8; ++i)
    {
        synth.addVoice(new Voice(*this, samplesPerBlock));
    }
    synth.clearSounds();
    synth.addSound(new Sound());

    delay.init(getNumOutputChannels(), sRate);
    chorus.init(getNumOutputChannels(), sRate);
}
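The Sound class handed to addSound above isn't shown in the snippet; a minimal sketch of what it plausibly looks like, assuming the standard JUCE SynthesiserSound boilerplate that accepts every note and channel:

// Minimal sketch of the Sound class used above (an assumption, not the
// project's actual code): a SynthesiserSound that matches all notes/channels.
struct Sound : public SynthesiserSound
{
    bool appliesToNote (int /*midiNoteNumber*/) override    { return true; }
    bool appliesToChannel (int /*midiChannel*/) override    { return true; }
};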
void Pfm2AudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    handleIncomingMidiBuffer(midiMessages, buffer.getNumSamples());

    // Clear sound
    for (int i = 0; i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    // dispatch realtime events to non realtime observer
    parameterSet.processRealtimeEvents();
    midiMessageCollector.removeNextBlockOfMessages(midiMessages, buffer.getNumSamples());

    if (parametersToUpdate.size() > 0) {
        // Hand the changed-parameter set to the editor without blocking the
        // audio thread: if the mutex is contended, skip and retry next block.
        if (parametersToUpdateMutex.try_lock()) {
            std::unordered_set<const char*> newSet;
            newSet.swap(parametersToUpdate);
            parametersToUpdateMutex.unlock();
            if (pfm2Editor) {
                pfm2Editor->updateUIWith(newSet);
            }
        }
    }
}
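The try_lock/swap idiom above drains parameter names without ever blocking the audio callback. For context, a sketch of the producer side, assuming the same parametersToUpdate / parametersToUpdateMutex members and that <mutex> and <unordered_set> are included:

// Message-thread side of the handoff (a sketch): enqueue a changed parameter
// name; processBlock() swaps the whole set out under try_lock.
void enqueueParameterUpdate (const char* paramName)
{
    std::lock_guard<std::mutex> lock (parametersToUpdateMutex);
    parametersToUpdate.insert (paramName);
}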
void PitchestimatorpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // In case we have more outputs than inputs, this code clears any output
    // channels that didn't contain input data
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
 
    int bufsize = buffer.getNumSamples();
    
    // main process loop
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        fft->processForward(channelData, fftData, bufsize, nFFT);
        buffer.applyGain (channel, 0, bufsize, gain);
    }
    for (int i=0; i<bufsize; i++) {
        X[i] = fft->cartopolRadius(fftData[i][0], fftData[i][1]);
    }
    
    HS->generateCost(X, f0Area, numberOfHarmonics, bufsize, f0AreaSize, getSampleRate(), nFFT);
    pitchEstimate = HS->estimatePitch(f0Area, f0AreaSize);
    
    pitchText = String (pitchEstimate, 1);

}
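cartopolRadius is simply the magnitude of each complex FFT bin; an equivalent spelled out with <cmath>, assuming fftData[i][0] / fftData[i][1] hold the real and imaginary parts:

// Equivalent per-bin magnitude (sketch): |X[i]| = sqrt(re^2 + im^2)
X[i] = std::sqrt (fftData[i][0] * fftData[i][0]
                + fftData[i][1] * fftData[i][1]);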
Example #4
void AudioFilterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);

        for (int i = 0; i < buffer.getNumSamples(); i++)
        {
            // hard-clip the positive half of the waveform at the cutoff level
            if (channelData[i] > cutoff)
                channelData[i] = cutoff;
        }

        // ..do something to the data...
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
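Note that the loop above clips only the positive half of the waveform. A symmetric variant using JUCE's jlimit, kept as a sketch with the same cutoff member:

// Symmetric hard clip (sketch): constrain each sample to [-cutoff, cutoff].
for (int i = 0; i < buffer.getNumSamples(); i++)
    channelData[i] = jlimit (-cutoff, cutoff, channelData[i]);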
Example #5
void Ambix_directional_loudnessAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    _Sh_transf = Sh_transf; // buffer old values
    
    calcParams(); // calc new transformation matrix
    
    int NumSamples = buffer.getNumSamples();
    
    output_buffer.setSize(buffer.getNumChannels(), NumSamples);
    output_buffer.clear();
    
    
    for (int out = 0; out < std::min(AMBI_CHANNELS,getNumOutputChannels()); out++)
    {
        for (int in = 0; in < std::min(AMBI_CHANNELS,getNumInputChannels()); in++)
        {
            if (_Sh_transf(in, out) != 0.f || Sh_transf(in, out) != 0.f)
            {
                if (_Sh_transf(in, out) == Sh_transf(in, out))
                {
                    output_buffer.addFrom(out, 0, buffer, in, 0, NumSamples, (float)Sh_transf(in, out));
                } else {
                    output_buffer.addFromWithRamp(out, 0, buffer.getReadPointer(in), NumSamples, (float)_Sh_transf(in, out), (float)Sh_transf(in, out));
                }
                
            }
        }
    }
    
    
    buffer = output_buffer;
}
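The addFromWithRamp branch is what avoids zipper noise when a matrix coefficient changes between blocks. Spelled out as a sketch (dest, src, g0 and g1 are illustrative names), it interpolates the gain linearly across the block:

// What the ramped mix above amounts to (sketch): slide the gain from the
// old coefficient g0 to the new one g1 over the block.
for (int j = 0; j < NumSamples; ++j)
{
    const float g = g0 + (g1 - g0) * (float) j / (float) NumSamples;
    dest[j] += src[j] * g;
}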
Example #6
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // output buffers will initially be garbage, must be cleared:
    for (int i = 0; i < getNumOutputChannels(); ++i) {
        buffer.clear (i, 0, numSamples);
    }
    
    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
    
    // and now get the synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // ask the host for the current time so we can display it...
    AudioPlayHead::CurrentPositionInfo newTime;
    
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
    {
        // Successfully got the current time from the host..
        lastPosInfo = newTime;
    }
    else
    {
        // If the host fails to fill-in the current time, we'll just clear it to a default..
        lastPosInfo.resetToDefault();
    }

}
void PluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	if (customPlayHead != 0) customPlayHead->processBlock (buffer, midiMessages);
	
	// Record the current time
	AudioPlayHead::CurrentPositionInfo newTime;
	if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime)) {
		lastPosInfo = newTime;
	} else {
		lastPosInfo.resetToDefault();
	}
	
	// Run the sequencer
	if (sequencer != 0) sequencer->processBlock (buffer, midiMessages);
	
	buffer.clear();
	
    // Fill every output channel with a small constant level (0.001f).
    for (int i = 0; i < getNumOutputChannels(); ++i)
    {
        float* samples = buffer.getWritePointer (i);
        for (int j = 0; j < buffer.getNumSamples(); j++) {
            samples[j] = 0.001f;
        }
    }
	
}
Example #8
template <typename FloatType>
void JuceDemoPluginAudioProcessor::process (AudioBuffer<FloatType>& buffer,
                                            MidiBuffer& midiMessages,
                                            AudioBuffer<FloatType>& delayBuffer)
{
    const int numSamples = buffer.getNumSamples();

    // apply our gain-change to the incoming data..
    applyGain (buffer, delayBuffer);

    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // and now get our synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Apply our delay effect to the new output..
    applyDelay (buffer, delayBuffer);

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, numSamples);

    // Now ask the host for the current time so we can store it to be displayed later...
    updateCurrentTimeInfoFromHost();
}
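This templated process() is typically invoked from thin float and double processBlock overrides; a sketch of that dispatch, modeled on the JUCE plugin demo (the delayBufferFloat / delayBufferDouble members are assumptions):

// Sketch: route the host's float or double callback into the template above.
void processBlock (AudioBuffer<float>& buffer, MidiBuffer& midi) override
{
    jassert (! isUsingDoublePrecision());
    process (buffer, midi, delayBufferFloat);
}

void processBlock (AudioBuffer<double>& buffer, MidiBuffer& midi) override
{
    jassert (isUsingDoublePrecision());
    process (buffer, midi, delayBufferDouble);
}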
Example #9
void BeepBoxAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    AudioPlayHead::CurrentPositionInfo posInfo;
    posInfo.resetToDefault();

    // some hosts provide no play head, so guard against a null pointer
    if (AudioPlayHead* playHead = getPlayHead())
        playHead->getCurrentPosition(posInfo);

    synthChannels->onClockStep(posInfo.isPlaying, posInfo.ppqPosition);

	synthChannels->processBlock(buffer, getNumInputChannels(), getNumOutputChannels());

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
Example #10
void Csc344filterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    int channel, dp = 0;
    
    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    
    for (channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        float* delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
        dp = delayPosition;
        
        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];
            channelData[i] += delayData[dp];
            delayData[dp] = (delayData[dp] + in) * delay;
            if (++dp >= delayBuffer.getNumSamples())
                dp = 0;
        }
    }

    // carry the delay position over to the next block; without this the
    // delay line restarts from the same offset in every callback
    delayPosition = dp;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
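In this recirculating delay, delay acts as the feedback gain: each pass writes (delayData[dp] + in) * delay back into the line, so a value at or above 1.0 grows without bound. A clamped setter is a reasonable guard (sketch; setDelayFeedback is a hypothetical helper):

// Sketch: keep the feedback gain strictly below 1.0 so the loop stays stable.
void setDelayFeedback (float newGain)
{
    delay = jlimit (0.0f, 0.99f, newGain);
}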
void C74GenAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	assureBufferSize(buffer.getNumSamples());
	
	// fill input buffers
	for (int i = 0; i < C74_GENPLUGIN::num_inputs(); i++) {
		if (i < getNumInputChannels()) {
			for (int j = 0; j < m_CurrentBufferSize; j++) {
				m_InputBuffers[i][j] = buffer.getReadPointer(i)[j];
			}
		} else {
			memset(m_InputBuffers[i], 0, m_CurrentBufferSize *  sizeof(double));
		}
	}
	
	// process audio
	C74_GENPLUGIN::perform(m_C74PluginState,
								  m_InputBuffers,
								  C74_GENPLUGIN::num_inputs(),
								  m_OutputBuffers,
								  C74_GENPLUGIN::num_outputs(),
								  buffer.getNumSamples());

	// fill output buffers
	for (int i = 0; i < getNumOutputChannels(); i++) {
		if (i < C74_GENPLUGIN::num_outputs()) {
			for (int j = 0; j < buffer.getNumSamples(); j++) {
				buffer.getWritePointer(i)[j] = m_OutputBuffers[i][j];
			}
		} else {
			buffer.clear (i, 0, buffer.getNumSamples());
		}
	}
}
Example #12
void Plugin::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // bail out unless we have stereo in and stereo out
    if (getNumInputChannels() != 2 || getNumOutputChannels() != 2) {
        return;
    }
    float* chan1 = buffer.getWritePointer(0);
    float* chan2 = buffer.getWritePointer(1);
    int sampleframes = buffer.getNumSamples();
    int blocks = sampleframes / kInternalBlocksize;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(pos)) {
        if (pos.bpm == 0.0) {
            parameters->setQuantizationDisabled();
            parameters->setParameter(kDelayQuant, 0.0f, false);
            parameters->setParameter(kIotQuant, 0.0f, false);
            parameters->setParameter(kDurQuant, 0.0f, false);
        }
        else
            parameters->time_quantizer->setPositionInfo(&pos);
    } else {
        parameters->setQuantizationDisabled();
    }

    block_sample_pos = 0;
    for (int i = 0; i < blocks; i++) {
        granulator->processInternalBlock(chan1, chan2, kInternalBlocksize);
        chan1 += kInternalBlocksize;
        chan2 += kInternalBlocksize;
        parameters->time_quantizer->incrementPositionInfo();
    }
    int samples_remaining = sampleframes % kInternalBlocksize;
    if (samples_remaining) {
        granulator->processInternalBlock(chan1, chan2, samples_remaining);
    }
}
Example #13
void AudioPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    synth.clearSounds();
    synth.addSound(getSound());
    
    const int numSamples = buffer.getNumSamples();
    int channel, dp = 0;
    
    // Go through the incoming data, and apply our gain to it...
    for (channel = 0; channel < getNumInputChannels(); ++channel)
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);
    
    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
    
    // and now get the synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
    
    // Apply our delay effect to the new output..
    for (channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        float* delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
        dp = delayPosition;
        
        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];
            channelData[i] += delayData[dp];
            delayData[dp] = (delayData[dp] + in) * delay;
            if (++dp >= delayBuffer.getNumSamples())
                dp = 0;
        }
    }
    
    delayPosition = dp;
    
    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    
    // ask the host for the current time so we can display it...
    AudioPlayHead::CurrentPositionInfo newTime;
    
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
    {
        // Successfully got the current time from the host..
        lastPosInfo = newTime;
    }
    else
    {
        // If the host fails to fill-in the current time, we'll just clear it to a default..
        lastPosInfo.resetToDefault();
    }
}
Example #14
void MLPluginProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
 	if (mEngine.isEnabled() && !isSuspended())
	{
		unsigned samples = buffer.getNumSamples();
		
		// get current time from host.
		// should refer to the start of the current block.
		AudioPlayHead::CurrentPositionInfo newTime;
		if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime))
		{
			lastPosInfo = newTime;
		}
		else
		{
			lastPosInfo.resetToDefault();
		}

		// set host phasor 
		double bpm = lastPosInfo.isPlaying ? lastPosInfo.bpm : 0.;
		double ppqPosition = lastPosInfo.ppqPosition;
		double secsPosition = lastPosInfo.timeInSeconds;
		int64 samplesPosition = lastPosInfo.timeInSamples;
		bool isPlaying = lastPosInfo.isPlaying;
		
			
		// set Engine I/O.  done here each time because JUCE may change pointers on us.  possibly.
		MLDSPEngine::ClientIOMap ioMap;
		for (int i=0; i<getNumInputChannels(); ++i)
		{
			ioMap.inputs[i] = buffer.getReadPointer(i);
		}		
		for (int i=0; i<getNumOutputChannels(); ++i)
		{
			ioMap.outputs[i] = buffer.getWritePointer(i);
		}
		mEngine.setIOBuffers(ioMap);
        
        if(acceptsMidi())
        {
            convertMIDIToEvents(midiMessages, mControlEvents);
            midiMessages.clear(); // otherwise messages will be passed back to the host
        }
        mEngine.processBlock(samples, mControlEvents, samplesPosition, secsPosition, ppqPosition, bpm, isPlaying);
    }
	else
	{
		buffer.clear();
	}
}
Example #15
void JuceDemoPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);


    for (int channel = 0; channel < getNumInputChannels(); channel++)
    {
        float* channelData = buffer.getWritePointer (channel);
        std::deque<Particle> &particles = m_particles[channel];
        for(int sample = 0; sample < numSamples; sample++) {
            for(Particle& p : particles) {
                p.velocity() += p.acceleration() * 0.1 * 0.5;
                p.position() += p.velocity() * 0.1;
            }

            Particle* pFix = m_fixedParticle[channel];
            Particle* pOut = m_outputParticle[channel];
            Particle* pIn = m_inputParticle[channel];

            for(Particle& p : particles) {
                p.acceleration() = Vector3D(0.0, 0.0, 0.0);
            }

            pFix->position() = Vector3D(0.0, 0.0, 0.0);
            pIn->position() = Vector3D(channelData[sample] + m_particleCount + offset->getValue(), 0.0, 0.0);

            for(Spring& spring : m_springs[channel]) {
                Particle* pa = spring.from;
                Particle* pb = spring.to;

                double diff = pb->position().x - pa->position().x;
                double r = fabs(diff);

                double d = spring.d;
                double k = spring.k*springConstant->getValue();

                Vector3D force = Vector3D(k*(r-d), 0.0, 0.0);
                pa->acceleration() += force;
                pb->acceleration() -= force;
            }

            for(Particle& p : particles) {
                p.velocity() *= velocityFactor->getValue();
                p.velocity() += p.acceleration() * 0.1 * 0.5;
            }

            channelData[sample] = pOut->position().x - 1.0;
        }
    }

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
}
Example #16
void MidiplugAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // flush audio outputs
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());

    midiMessages.clear();
    _midiMessages.swapWith(midiMessages);
}
Example #17
//==============================================================================
void GenieAudioProcessor::prepareToPlay (double sampleRate, int samplesPerBlock)
{
    // Use this method as the place to do any pre-playback
    // initialisation that you need..
    std::cout<<"samplerate : " <<sampleRate<<"\n";
    std::cout<<getNumOutputChannels()<<" outs \n";
    
   
    mixerAudioSource.prepareToPlay(samplesPerBlock, sampleRate);
}
Example #18
File: CpuRam.cpp Project: Amcut/pizmidi
void CpuRam::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

	midiMessages.clear();
}
Example #19
void Tunefish4AudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    eU32 messageOffset = 0;

    eU32 requestedLen = buffer.getNumSamples();

    eU32 sampleRate = (eU32)getSampleRate();
    if (sampleRate > 0)
        synth->sampleRate = sampleRate;

    for (int i = 0; i < getNumOutputChannels(); ++i)
    {
        buffer.clear(i, 0, buffer.getNumSamples());
    }

    if (buffer.getNumChannels() == 2)
    {
        eU32 len = requestedLen;
        eF32 **signal = buffer.getArrayOfWritePointers();
        eF32 *destL = signal[0];
        eF32 *destR = signal[1];

        while(len)
        {
            if (!adapterDataAvailable)
            {
                csSynth.enter();
                eMemSet(adapterBuffer[0], 0, TF_BUFFERSIZE * sizeof(eF32));
                eMemSet(adapterBuffer[1], 0, TF_BUFFERSIZE * sizeof(eF32));
                processEvents(midiMessages, messageOffset, TF_BUFFERSIZE);
                eTfInstrumentProcess(*synth, *tf, adapterBuffer, TF_BUFFERSIZE);
                messageOffset += TF_BUFFERSIZE;
                adapterDataAvailable = TF_BUFFERSIZE;
                csSynth.exit();
            }

            eF32 *srcL = &adapterBuffer[0][TF_BUFFERSIZE - adapterDataAvailable];
            eF32 *srcR = &adapterBuffer[1][TF_BUFFERSIZE - adapterDataAvailable];

            while (len && adapterDataAvailable)
            {
                *destL++ += *srcL++;
                *destR++ += *srcR++;

                len--;
                adapterDataAvailable--;
            }
        }
    }

    processEvents(midiMessages, messageOffset, requestedLen);
}
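The adapter buffer above bridges a synth that renders fixed TF_BUFFERSIZE chunks to arbitrary host block sizes: render a chunk whenever the adapter is empty, then drain it until either side runs out. The same idiom in generic form (renderChunk, chunkBuf, chunkAvail and CHUNK are illustrative names, not Tunefish API):

// Generic fixed-chunk adapter (sketch): produce CHUNK samples at a time,
// consume them into host blocks of any size.
void renderAdapted (float* dest, int samplesWanted)
{
    while (samplesWanted > 0)
    {
        if (chunkAvail == 0)               // adapter empty: render the next chunk
        {
            renderChunk (chunkBuf, CHUNK); // hypothetical fixed-size renderer
            chunkAvail = CHUNK;
        }
        const int n = std::min (samplesWanted, chunkAvail);
        std::copy_n (chunkBuf + (CHUNK - chunkAvail), n, dest);
        dest          += n;
        chunkAvail    -= n;
        samplesWanted -= n;
    }
}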
Example #20
void JbcfilterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int samples = buffer.getNumSamples();
    const int delayBufferSamples = delayBuffer.getNumSamples();

    int dp = delayPosition;

    // This is the place where you'd normally do the guts of your plugin's
    // audio processing...
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getWritePointer (channel);
        float* delayData = delayBuffer.getWritePointer (channel);
        dp = delayPosition;

        for (int i = 0; i < samples; ++i)
        {
            const float in = channelData[i];
            channelData[i] += delayData[dp];

            if(distortionEnabledFlag) {
                channelData[i] = channelData[i] * distortion;

                if(channelData[i] > 0.03) {
                    channelData[i]  = 0.03;
                }
            }

            delayData[dp] = (delayData[dp] + in) * delay;
            dp += 1;
            if (dp >= delayBufferSamples)
                dp = 0;
        }
    }

    delayPosition = dp;

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
void BitcrushAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	this->initializing(buffer);

	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
		buffer.clear(i, 0, buffer.getNumSamples());
	}

	float crush = bitcrush->getValue();
	float wet_ = wet->getValue();
	int groupedSamples = std::max(1.f, downsample->getValue() * 100);
	float bitdepth = 12. * (1. - crush) + 1. * crush;
	int steps = exp2(bitdepth);

	// This is the place where you'd normally do the guts of your plugin's
	// audio processing...
	for (int channel = 0; channel < getNumInputChannels(); channel++) 
	{
		for (int sample = 0; sample + groupedSamples <= buffer.getNumSamples(); sample += groupedSamples) {
			float averagedSample = 0.;
			for (int i = 0; i < groupedSamples; i++) {
				averagedSample += buffer.getSample(channel, i + sample) / groupedSamples;
			}

			int discretizedSample = averagedSample * steps;
			float crushed = float(discretizedSample) / steps;

			for (int i = 0; i < groupedSamples; i++) {
				float sampleValue = buffer.getSample(channel, i + sample);
				buffer.setSample(channel, i + sample, sampleValue * (1. - wet_) + crushed * wet_);
			}
		}

		// Handle the tail samples that don't fill a complete group, guarding
		// against division by zero when the block divides evenly.
		const int remainder = buffer.getNumSamples() % groupedSamples;
		if (remainder > 0) {
			const int tailStart = buffer.getNumSamples() - remainder;

			float averagedSample = 0.;
			for (int i = tailStart; i < buffer.getNumSamples(); i++) {
				averagedSample += buffer.getSample(channel, i) / remainder;
			}

			int discretizedSample = averagedSample * steps;
			float crushed = float(discretizedSample) / steps;

			for (int i = tailStart; i < buffer.getNumSamples(); i++) {
				float sampleValue = buffer.getSample(channel, i);
				buffer.setSample(channel, i, sampleValue * (1. - wet_) + crushed * wet_);
			}
		}
	}

	this->meteringBuffer(buffer);
	this->finalizing(buffer);
}
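The crushing itself reduces to quantizing each sample onto 2^bitdepth levels. The kernel isolated as a sketch (note the snippet truncates via an int cast, while this version rounds):

// Quantization kernel of the bitcrusher (sketch): map onto 2^bits levels.
float crushSample (float x, float bits)
{
    const float steps = std::exp2 (bits);   // number of quantization levels
    return std::round (x * steps) / steps;  // snap to the nearest level
}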
Example #22
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{

	if(editorIsReady)
		newNotesFromAnalyser->sendActionMessage("N");


	if (analyseNewFile) {

		analyseNewFile = false;

		// loadNewWaveFile must run on the message thread, not in the audio callback
		MessageManager* mm = MessageManager::getInstance();
		mm->callFunctionOnMessageThread(loadNewWaveFile, this);
	}

	// (no per-channel audio processing happens here; the editor below
	// supplies the samples to play)

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

	AudioPlayHead::CurrentPositionInfo info;
	info.resetToDefault();

	// guard against hosts that provide no play head
	if (AudioPlayHead* playHead = getPlayHead())
		playHead->getCurrentPosition(info);

	if (soundEditor != 0 && !loadingNewComponent)
		soundEditor->getAudioSamplesToPlay(buffer, info.ppqPositionOfLastBarStart, getSampleRate(), currentSamplesPerBlock);
}
Example #23
void UGenPlugin::clearExtraChannels(AudioSampleBuffer& buffer)
{
	// in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
	// in addition, the output UGen might have fewer channels than the number of 
	// outputs the plug-in has
	
	const int numOutputChannels = jmin(getNumOutputChannels(), outputUGen.getNumChannels());
	for (int i = getNumInputChannels(); i < numOutputChannels; ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
}
Example #24
 void fillInPluginDescription (PluginDescription& desc) const
 {
     desc.name = getName();
     desc.fileOrIdentifier = module->file.getFullPathName();
     desc.uid = getUID();
     desc.lastFileModTime = module->file.getLastModificationTime();
     desc.pluginFormatName = "LADSPA";
     desc.category = getCategory();
     desc.manufacturerName = plugin != nullptr ? String (plugin->Maker) : String::empty;
     desc.version = getVersion();
     desc.numInputChannels  = getNumInputChannels();
     desc.numOutputChannels = getNumOutputChannels();
     desc.isInstrument = false;
 }
Example #25
void Fyp_samplerPrototype2AudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    
    const int numSamples = buffer.getNumSamples();
    
    // TODO: merge MIDI recorded by the UI into midiMessages here, e.g.
    //   midiMessages.addEvents (uiMidiBuffer, 0, numSamples, 0);
    // (uiMidiBuffer stands in for the UI-side MidiBuffer)
    
    synth.renderNextBlock(buffer, midiMessages, 0, numSamples);
    
}
Example #26
UGen UGenPlugin::constructGraph(UGen const& input)
{    
	UGen wet = getMappedParameterControl(UGenInterface::Parameters::Wet).lag();
	UGen dry = getMappedParameterControl(UGenInterface::Parameters::Dry).lag();

    plug = Plug::AR(UGen::emptyChannels(getNumOutputChannels()));
    
    if(irBuffer.isNull())
        plug.setSource(inputUGen);
    else
        plug.setSource(getConv());
    
    return (plug * wet.dbamp() + input * dry.dbamp());
}
Example #27
//=============================================================================
void KawaMidSideAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    //=========================================================================
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    //=========================================================================
    if ( getNumInputChannels() == 2 )
    {
        if ( isMidMode() )
            _processMid ( buffer );
        else
            _processSide( buffer );
    }
    //=========================================================================
}
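_processMid and _processSide aren't shown in the snippet; the standard mid/side decomposition they presumably implement is mid = (L + R) / 2 and side = (L - R) / 2. A sketch of the mid-only case (the actual method bodies are assumptions):

// Mid/side sketch: derive mid and side from a stereo pair, then keep
// only the component this mode wants (here: mid on both outputs).
float* left  = buffer.getWritePointer (0);
float* right = buffer.getWritePointer (1);
for (int i = 0; i < buffer.getNumSamples(); ++i)
{
    const float mid  = 0.5f * (left[i] + right[i]);
    const float side = 0.5f * (left[i] - right[i]); // used by the side mode
    left[i]  = mid;
    right[i] = mid;
}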
Example #28
void Mcfx_convolverAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    
    // std::cout << "in: " << getNumInputChannels() << " out: " << getNumOutputChannels() << std::endl;
    
    if (_configLoaded)
    {
        
        _isProcessing = true;
        
#ifdef USE_ZITA_CONVOLVER
        
        for (int i=0; i < jmin(conv_data.getNumInputChannels(), getNumInputChannels()) ; i++)
        {
            float* indata = zita_conv.inpdata(i)+_ConvBufferPos;
            memcpy(indata, buffer.getReadPointer(i), getBlockSize()*sizeof(float));
        }
        
        _ConvBufferPos += getBlockSize();
        
        if (_ConvBufferPos >= _ConvBufferSize) {
            zita_conv.process(THREAD_SYNC_MODE);
            _ConvBufferPos = 0;
        }
        
        
        
        for (int i=0; i < jmin(conv_data.getNumOutputChannels(), getNumOutputChannels()) ; i++)
        {
            float* outdata = zita_conv.outdata(i)+_ConvBufferPos;
            memcpy(buffer.getWritePointer(i), outdata, getBlockSize()*sizeof(float));
        }
        
#else
        mtxconv_.processBlock(buffer, buffer);
        
#endif
        
        _isProcessing = false;
        
    } else { // no config loaded
        
        // clear output in case no config is loaded!
        buffer.clear();
    }
    

}
void DetunerPlugin::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
	
	int blockSamples = buffer.getNumSamples();
	detune(inputBuffer, outputBuffer, blockSamples);

	setMagnus(outputBuffer->getMagnitude(0, blockSamples));

    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    // if any midi messages come in, use them to update the keyboard state object. This
    // object sends notification to the UI component about key up/down changes
    keyboardState.processNextMidiBuffer (midiMessages,
                                         0, buffer.getNumSamples(),
                                         true);

    // have a go at getting the current time from the host, and if it's changed, tell
    // our UI to update itself.
    AudioPlayHead::CurrentPositionInfo pos;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
        {
            lastPosInfo = pos;
            sendChangeMessage (this);
        }
    }
    else
    {
        zeromem (&lastPosInfo, sizeof (lastPosInfo));
        lastPosInfo.timeSigNumerator = 4;
        lastPosInfo.timeSigDenominator = 4;
        lastPosInfo.bpm = 120;
    }

}
void BlankenhainAudioProcessor::initializing(AudioSampleBuffer& buffer)
{
	//For Metering, reset "current" Values
	for (unsigned int i = 0; i < meterValues.size(); i++)
	{
		meterValues[i] = 0.f;
	}

	//Set lastKnownSampleRate and lastKnownBlockSize
	this->setLastKnownSampleRate(this->getSampleRate());
	this->setLastKnownBlockSize(this->getBlockSize());

	// In case we have more outputs than inputs, this code clears any output
	// channels that didn't contain input data.
	for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i) {
		buffer.clear(i, 0, buffer.getNumSamples());
	}
}