Ejemplo n.º 1
0
void Plugin::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // This processor is strictly stereo: the code below unconditionally
    // dereferences write pointers for channels 0 and 1.
    // BUG FIX: the original test used &&, which only bailed out when BOTH
    // channel counts differed from 2 — e.g. a mono-in/stereo-out layout
    // would fall through into the stereo-only code.
    if (getNumInputChannels() != 2 || getNumOutputChannels() != 2) {
        return;
    }
    float* chan1 = buffer.getWritePointer(0);
    float* chan2 = buffer.getWritePointer(1);
    int sampleframes = buffer.getNumSamples();
    int blocks = sampleframes / kInternalBlocksize;

    // Sync quantization with the host transport. A bpm of 0 (or no play
    // head at all) means tempo information is unusable, so disable all
    // tempo-derived quantization parameters.
    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(pos)) {
        if (pos.bpm == 0.0f) {
            parameters->setQuantizationDisabled();
            parameters->setParameter(kDelayQuant, 0.0f, false);
            parameters->setParameter(kIotQuant, 0.0f, false);
            parameters->setParameter(kDurQuant, 0.0f, false);
        }
        else
            parameters->time_quantizer->setPositionInfo(&pos);
    } else {
        parameters->setQuantizationDisabled();
    }

    // Process audio in fixed-size internal blocks, advancing the quantizer
    // position once per internal block, then handle the partial remainder.
    block_sample_pos = 0;
    for (int i = 0; i < blocks; i++) {
        granulator->processInternalBlock(chan1, chan2, kInternalBlocksize);
        chan1 += kInternalBlocksize;
        chan2 += kInternalBlocksize;
        parameters->time_quantizer->incrementPositionInfo();
    }
    int samples_remaining = sampleframes % kInternalBlocksize;
    if (samples_remaining) {
        granulator->processInternalBlock(chan1, chan2, samples_remaining);
    }
}
Ejemplo n.º 2
0
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // Output channels start out with undefined contents; wipe them first.
    const int numOutputs = getNumOutputChannels();
    for (int ch = 0; ch < numOutputs; ++ch)
        buffer.clear (ch, 0, numSamples);

    // Merge on-screen keyboard clicks into the incoming MIDI stream.
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // Render the synth's audio for this block from the merged MIDI events.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Query the host transport so the UI can display the current position.
    AudioPlayHead::CurrentPositionInfo newTime;

    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
        lastPosInfo = newTime;          // host supplied a valid position
    else
        lastPosInfo.resetToDefault();   // no transport info available

}
// Drives the custom play head and sequencer, records the host transport
// position, then overwrites the audio output with a constant level.
void PluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	if (customPlayHead != 0) customPlayHead->processBlock (buffer, midiMessages);
	
	// Record the current time
	AudioPlayHead::CurrentPositionInfo newTime;
	if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime)) {
		lastPosInfo = newTime;
	} else {
		lastPosInfo.resetToDefault();
	}
	
	// Run the sequencer
	if (sequencer != 0) sequencer->processBlock (buffer, midiMessages);
	
	// NOTE(review): this clears whatever the sequencer just rendered into
	// the buffer — confirm the sequencer only emits MIDI here, not audio.
	buffer.clear();
	
    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
	// NOTE(review): every output sample is set to a constant 0.001 instead
	// of being cleared to silence — this looks like leftover debug/test
	// output; confirm it is intentional.
	for (int i = 0; i < getNumOutputChannels(); ++i)
    {
        //buffer.clear (i, 0, buffer.getNumSamples());
		float* samples = buffer.getSampleData (i);
		for (int j = 0; j < buffer.getNumSamples(); j++) {
			samples[j] = 0.001;
		}
    }
	
}
// Broadcasts the host's transport state (ppq position, playing flag, bpm)
// to external listeners: a protobuf message over UDP and/or an OSC bundle.
// Does nothing when no play head is available or the position query fails.
void SignalProcessorAudioProcessor::sendTimeinfoMsg() {
    AudioPlayHead::CurrentPositionInfo currentTime;
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (currentTime))
    {
        // Update the variable used to display the latest time in the GUI
        lastPosInfo = currentTime;
        
        // Successfully got the current time from the host, set the pulses-per-quarter-note value inside the timeInfo message
        if (sendBinaryUDP) {
            timeInfo.set_position((float)currentTime.ppqPosition);
            timeInfo.set_isplaying(currentTime.isPlaying);
            timeInfo.set_tempo((float)currentTime.bpm);
            timeInfo.SerializeToArray(dataArrayTimeInfo, timeInfo.GetCachedSize());
            udpClientTimeInfo.send(dataArrayTimeInfo, timeInfo.GetCachedSize());
        }
        if (sendOSC) {
            // Build one OSC bundle carrying both the TIME and BPM messages,
            // then transmit it in a single datagram.
            oscOutputStream->Clear();
            *oscOutputStream << osc::BeginBundleImmediate
            << osc::BeginMessage( "TIME" )
            << ((float)currentTime.ppqPosition) << osc::EndMessage
            << osc::BeginMessage( "BPM" )
            << ((float)currentTime.bpm) << osc::EndMessage
            << osc::EndBundle;
            oscTransmissionSocket.Send( oscOutputStream->Data(), oscOutputStream->Size() );
        }
    }
}
Ejemplo n.º 5
0
// Feeds the host's transport position and the JUCE I/O buffers into the
// MLDSPEngine, converts incoming MIDI into engine control events, and runs
// one engine block. Outputs silence when the engine is disabled/suspended.
void MLPluginProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	if (mEngine.isEnabled() && !isSuspended())
	{
		unsigned samples = buffer.getNumSamples();
		
		// Get current time from host; it should refer to the start of the
		// current block. Fall back to defaults when unavailable.
		AudioPlayHead::CurrentPositionInfo newTime;
		if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime))
		{
			lastPosInfo = newTime;
		}
		else
		{
			lastPosInfo.resetToDefault();
		}

		// Set host phasor. A bpm of 0 tells the engine "not playing".
		double bpm = lastPosInfo.isPlaying ? lastPosInfo.bpm : 0.;
		double ppqPosition = lastPosInfo.ppqPosition;
		double secsPosition = lastPosInfo.timeInSeconds;
		int64 samplesPosition = lastPosInfo.timeInSamples;
		bool isPlaying = lastPosInfo.isPlaying;

		// (Removed an `if(0)`-disabled debug print block here — it was
		// unreachable dead code.)

		// Set Engine I/O. Done here each block because JUCE may change the
		// buffer pointers between callbacks.
		MLDSPEngine::ClientIOMap ioMap;
		for (int i=0; i<getNumInputChannels(); ++i)
		{
			ioMap.inputs[i] = buffer.getReadPointer(i);
		}
		for (int i=0; i<getNumOutputChannels(); ++i)
		{
			ioMap.outputs[i] = buffer.getWritePointer(i);
		}
		mEngine.setIOBuffers(ioMap);

		if(acceptsMidi())
		{
			convertMIDIToEvents(midiMessages, mControlEvents);
			midiMessages.clear(); // otherwise messages will be passed back to the host
		}
		mEngine.processBlock(samples, mControlEvents, samplesPosition, secsPosition, ppqPosition, bpm, isPlaying);
	}
	else
	{
		// Engine disabled or plugin suspended: output silence.
		buffer.clear();
	}
}
Ejemplo n.º 6
0
// Applies input gain, renders the synth from incoming + on-screen MIDI,
// runs a feedback delay over the result, clears surplus output channels,
// and records the host transport position for the UI.
void AudioPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Rebuild the synth's sound each block from the current sound setting.
    synth.clearSounds();
    synth.addSound(getSound());
    
    const int numSamples = buffer.getNumSamples();
    int channel, dp = 0;
    
    // Go through the incoming data, and apply our gain to it...
    for (channel = 0; channel < getNumInputChannels(); ++channel)
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);
    
    // Now pass any incoming midi messages to our keyboard state object, and let it
    // add messages to the buffer if the user is clicking on the on-screen keys
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);
    
    // and now get the synth to process these midi events and generate its output.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
    
    // Apply our delay effect to the new output..
    // NOTE(review): the loop runs over INPUT channels; output-only channels
    // never receive the delay — confirm that is intended.
    for (channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* channelData = buffer.getSampleData (channel);
        float* delayData = delayBuffer.getSampleData (jmin (channel, delayBuffer.getNumChannels() - 1));
        dp = delayPosition;
        
        // Feedback delay line: mix the delayed sample into the output, then
        // write (delayed + dry) back into the line scaled by `delay`.
        for (int i = 0; i < numSamples; ++i)
        {
            const float in = channelData[i];
            channelData[i] += delayData[dp];
            delayData[dp] = (delayData[dp] + in) * delay;
            if (++dp >= delayBuffer.getNumSamples())
                dp = 0; // wrap the circular delay buffer
        }
    }
    
    // Remember where the next block continues reading the delay line.
    delayPosition = dp;
    
    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    
    // ask the host for the current time so we can display it...
    AudioPlayHead::CurrentPositionInfo newTime;
    
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
    {
        // Successfully got the current time from the host..
        lastPosInfo = newTime;
    }
    else
    {
        // If the host fails to fill-in the current time, we'll just clear it to a default..
        lastPosInfo.resetToDefault();
    }
}
Ejemplo n.º 7
0
// When synced to the host, derives the current beat from the transport's
// ppq position and time signature, advances all active patterns on each
// beat change, flushes pending MIDI to the output buffer, and notifies
// the UI. Falls back to 4/4 at 120 BPM when no transport info is available.
void DemoJuceFilter::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
	if (isSyncedToHost)
	{
		AudioPlayHead::CurrentPositionInfo pos;

		if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
		{
			// NOTE(review): memcmp over the whole struct also compares any
			// padding bytes, so this may report a change when none of the
			// fields actually changed — confirm acceptable here.
	        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
			{
	            lastPosInfo = pos;

				// Beat position within the current bar, scaled to 16ths
				// (beats * 4), truncated to an integer beat index.
				const int ppqPerBar		= (pos.timeSigNumerator * 4 / pos.timeSigDenominator);
				const double beats		= (fmod (pos.ppqPosition, ppqPerBar) / ppqPerBar) * pos.timeSigNumerator;
				const double position	= beats*4;
				const int beat			= (int)position;
				
				currentBpm	= (int)pos.bpm;

				// Only act when the integer beat index actually changed.
				if (_p != beat)
				{
					for (int x=0; x<64; x++)
					{
						if (activePatterns[x])
						{
							patterns[x]->forward(beat+1);
						}
					}

					currentBeat = currentPatternPtr->getCurrentPosition();

					if (currentBeat > 16)
						currentBeat = currentBeat - 16;

					/* process midi events to their devices */
					midiMessages.addEvents (midiManager.getVstMidiEvents(),0,-1,0);

					/* clean the buffers */
					midiManager.clear();

					sendChangeMessage (this);
				}
				
				_p = beat;
			}
		}
		else
		{
			// No usable transport: default to 4/4 at 120 BPM.
	        zeromem (&lastPosInfo, sizeof (lastPosInfo));
			lastPosInfo.timeSigNumerator = 4;
	        lastPosInfo.timeSigDenominator = 4;
			lastPosInfo.bpm = 120;
	   }	
	}
}
Ejemplo n.º 8
0
// Analyses the incoming audio level, recalibrates the gain parameter so the
// chosen measurement (peak / max RMS / average RMS) hits the target gain,
// and applies the resulting gain to the buffer.
void ZenAutoTrimAudioProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	if (isEnabled())
	{
		// BUG FIX: getPlayHead() can return null (e.g. standalone use) and
		// getCurrentPosition() can fail; the original dereferenced the play
		// head unchecked and could leave posInfo uninitialized.
		AudioPlayHead::CurrentPositionInfo posInfo;
		aPlayHead = getPlayHead();
		if (aPlayHead == nullptr || !aPlayHead->getCurrentPosition(posInfo))
			posInfo.resetToDefault();

		// Track host sample-rate changes so the analyser stays calibrated.
		if (prevSampleRate != this->getSampleRate())
		{
			prevSampleRate = this->getSampleRate();
			levelAnalysisManager.sampleRateChanged(prevSampleRate);
		}

		// Don't analyse if all samples are 0 or if the autogain button is off.
		if (buffer.getMagnitude(0, buffer.getNumSamples()) > 0.0f && autoGainEnableParam->isOn())
			levelAnalysisManager.processSamples(&buffer, posInfo);

		// Calibrate gain param based on which measurement is the target.
		double peakToHit;
		int targetType = targetTypeParam->getValueAsInt();
		if (targetType == Peak)
		{
			peakToHit = levelAnalysisManager.getMaxChannelPeak();
		}
		else if (targetType == MaxRMS)
		{
			peakToHit = levelAnalysisManager.getMaxChannelRMS();
		}
		else if (targetType == AverageRMS)
		{
			peakToHit = levelAnalysisManager.getMaxCurrentRunningRMS();
		}
		else
		{
			peakToHit = levelAnalysisManager.getMaxChannelPeak();
			jassertfalse; // unknown target type — fall back to peak
		}

		// BUG FIX: guard against division by zero/infinite gain when nothing
		// has been measured yet (peakToHit == 0 on pure silence).
		if (peakToHit > 0.0)
		{
			// Division in log domain is equivalent to subtraction in linear.
			double gainValueToAdd = targetGainParam->getValueInGain() / peakToHit;

			if (!almostEqual(gainValueToAdd, gainParam->getValueInGain())) // gain value changed
			{
				gainParam->setValueFromGain(gainValueToAdd);
				// UI update flag is set inside setValueFromGain.
			}
		}

		// In gain domain, multiplication is equivalent to addition in log.
		buffer.applyGain(gainParam->getValueInGain());
	}
}
Ejemplo n.º 9
0
// SliderListener methods
void TransportComponent::sliderValueChanged (Slider* slider)
{
	if (slider != bpmSlider->getSlider())
		return;

	// Forward the new tempo straight to our custom play head.
	getPlayHead()->setBPM (slider->getValue());
}
Ejemplo n.º 10
0
// Timer methods
void TransportComponent::timerCallback()
{
    Slider* slider = bpmSlider->getSlider();
    float bpm = getPlayHead()->getBPM();
    if (slider->getValue() != bpm) {
        slider->setValue (bpm);
    }
}
Ejemplo n.º 11
0
void DetunerPlugin::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
	const int blockSamples = buffer.getNumSamples();

	// Run the detune effect, then publish the output level magnitude.
	detune(inputBuffer, outputBuffer, blockSamples);
	setMagnus(outputBuffer->getMagnitude(0, blockSamples));

	// Clear any surplus output channels that carried no input data —
	// they aren't guaranteed to be empty and may contain garbage.
	for (int ch = getNumInputChannels(); ch < getNumOutputChannels(); ++ch)
		buffer.clear (ch, 0, blockSamples);

	// Feed incoming MIDI to the keyboard state so the UI component is
	// notified about key up/down changes.
	keyboardState.processNextMidiBuffer (midiMessages, 0, blockSamples, true);

	// Poll the host transport; tell the UI to update only when something
	// actually changed since the previous block.
	AudioPlayHead::CurrentPositionInfo pos;
	if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
	{
		if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
		{
			lastPosInfo = pos;
			sendChangeMessage (this);
		}
	}
	else
	{
		// No transport info available: fall back to 4/4 at 120 BPM.
		zeromem (&lastPosInfo, sizeof (lastPosInfo));
		lastPosInfo.timeSigNumerator = 4;
		lastPosInfo.timeSigDenominator = 4;
		lastPosInfo.bpm = 120;
	}
}
Ejemplo n.º 12
0
// Routes incoming MIDI to the panels (with transport info attached), pulls
// queued outgoing MIDI from the collector into the buffer, and logs every
// outgoing event.
void CtrlrProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // BUG FIX: `info` was passed to processPanels() even when no play head
    // exists or the position query fails, i.e. with uninitialized contents.
    // Reset it to defaults first so panels never see garbage.
    AudioPlayHead::CurrentPositionInfo info;
    info.resetToDefault();
    if (getPlayHead())
    {
        getPlayHead()->getCurrentPosition(info);
    }

	if (midiMessages.getNumEvents() > 0)
	{
		processPanels(midiMessages, info);
	}

	// Append any pending outgoing MIDI scheduled for this block.
	midiCollector.removeNextBlockOfMessages (midiMessages, buffer.getNumSamples());

	// Log every event leaving the plugin.
	MidiBuffer::Iterator i(midiMessages);
	while (i.getNextEvent(logResult, logSamplePos))
		_MOUT("VST OUTPUT", logResult, logSamplePos);
}
Ejemplo n.º 13
0
void Key_valueAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Samples in the current host block.
    const int blockSizeInSamples = buffer.getNumSamples();

    // Ask the host where we are so the size of our measure can be derived.
    AudioPlayHead::CurrentPositionInfo newTime;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime))
    {
        // Only aggregate MIDI while the host transport is actually running.
        if (newTime.isPlaying)
        {
            midiAggregater.addMidiBuffer(midiMessages, newTime, getSampleRate());

            // In performance mode, also play the aggregated MIDI back out.
            if (midiAggregater.getMode() == "Performance")
                midiAggregater.getMidiBuffer(midiMessages, blockSizeInSamples);
        }
    }
}
Ejemplo n.º 14
0
void PluginAudioProcessor::updateHostInfo()
{
    // Pull the host transport state (used for the bpm) into the audio-side
    // slot, then publish the GUI index; fall back to defaults on failure.
    AudioPlayHead* pHead = getPlayHead();

    if (pHead != nullptr && pHead->getCurrentPosition (positionInfo[getAudioIndex()]))
    {
        positionIndex.exchange(getGUIIndex());
        return;
    }

    // No play head, or the query failed.
    positionInfo[getAudioIndex()].resetToDefault();
}
Ejemplo n.º 15
0
// Returns the host's current tempo in BPM, or kDefaultTempo when the host
// provides no play head or the position query fails.
double BeatCounterAudioProcessor::getHostTempo() const {
    double result = kDefaultTempo;

    AudioPlayHead *playHead = getPlayHead();
    if(playHead != NULL) {
        AudioPlayHead::CurrentPositionInfo currentPosition;
        // BUG FIX: the original ignored getCurrentPosition()'s return value
        // and read currentPosition.bpm even on failure — i.e. it returned
        // uninitialized memory. Only use bpm when the query succeeds.
        if (playHead->getCurrentPosition(currentPosition)) {
            result = currentPosition.bpm;
        }
    }

    return result;
}
Ejemplo n.º 16
0
// ButtonListener methods
void TransportComponent::buttonClicked (Button* button)
{
	if (button != playButton)
		return;

	// Toggle the transport and relabel the button to show the next action.
	CustomPlayHead* playHead = getPlayHead();
	if (playHead->isPlaying()) {
		playHead->stop();
		playButton->setButtonText ("Play");
	} else {
		playHead->play();
		playButton->setButtonText ("Stop");
	}
}
Ejemplo n.º 17
0
// Notifies the editor's analyser listener, loads a newly queued file on the
// message thread, clears surplus output channels, and lets the sound editor
// fill the buffer with its playback audio.
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
	// Wake the editor's analyser listener once the editor is ready.
	if(editorIsReady)
		newNotesFromAnalyser->sendActionMessage("N");

	// A new file was queued for analysis: load it on the message thread
	// (blocking call). The flag is cleared first so it runs only once.
	if(analyseNewFile){
		analyseNewFile = false;
		MessageManager* mm = MessageManager::getInstance();
		void* dummy = this;
		mm->callFunctionOnMessageThread(loadNewWaveFile, dummy);
	}

	// (Removed an empty per-input-channel loop and two unused locals —
	// they had no effect.)

    // In case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

	// BUG FIX: getPlayHead() may return nullptr (e.g. standalone host); the
	// original dereferenced it unchecked. Also reset `info` so a failed
	// query never leaves it uninitialized.
	AudioPlayHead::CurrentPositionInfo info;
	info.resetToDefault();
	if (AudioPlayHead* playHead = getPlayHead())
		playHead->getCurrentPosition(info);

	// Let the sound editor render its playback audio into the buffer.
	if(soundEditor != 0 && !loadingNewComponent)
		soundEditor->getAudioSamplesToPlay(buffer, info.ppqPositionOfLastBarStart, getSampleRate(), currentSamplesPerBlock);
}
Ejemplo n.º 18
0
void JuceDemoPluginAudioProcessor::updateCurrentTimeInfoFromHost()
{
    // Refresh lastPosInfo from the host transport, or reset it to default
    // values when the host can't (or won't) provide a position.
    AudioPlayHead* ph = getPlayHead();

    if (ph != nullptr)
    {
        AudioPlayHead::CurrentPositionInfo newTime;

        if (ph->getCurrentPosition (newTime))
        {
            lastPosInfo = newTime; // host supplied a valid position
            return;
        }
    }

    // No play head, or the query failed.
    lastPosInfo.resetToDefault();
}
Ejemplo n.º 19
0
// ButtonListener methods
void TransportComponent::buttonClicked (Button* button)
{
	if (button == playButton) {
		// Toggle the custom play head and relabel the button accordingly.
		CustomPlayHead* playHead = getPlayHead();
		if (playHead->isPlaying()) {
			playHead->stop();
			playButton->setButtonText ("Play");
		} else {
			playHead->play();
			playButton->setButtonText ("Stop");
		}
		return;
	}

	if (button == setupButton) {
		// Pop up the standalone host's audio-settings dialog.
		StandaloneFilterWindow* window = (StandaloneFilterWindow*) getPeer()->getComponent();
		if (window != 0)
			window->showAudioSettingsDialog();
	}
}
Ejemplo n.º 20
0
// Copies the host's current transport position into posInfo and returns
// true on success; returns false when no play head exists or the query
// fails.
bool IAAEffectProcessor::updateCurrentTimeInfoFromHost (AudioPlayHead::CurrentPositionInfo &posInfo)
{
    if (AudioPlayHead* ph = getPlayHead())
    {
        AudioPlayHead::CurrentPositionInfo newTime;

        if (ph->getCurrentPosition (newTime))
        {
            posInfo = newTime;  // Successfully got the current time from the host.
            return true;
        }
    }

    // If the host fails to provide the current time, we'll just reset our copy to a default.
    // NOTE(review): on failure this resets the MEMBER lastPosInfo but leaves
    // the caller's posInfo out-param untouched — confirm that asymmetry is
    // intentional.
    lastPosInfo.resetToDefault();

    return false;
}
Ejemplo n.º 21
0
// Renders the Helm synth in chunks of at most MAX_BUFFER_PROCESS samples,
// syncing its clock/bpm to the host transport, copying its stereo output to
// every host output channel, and feeding the visualization memory.
void HelmPlugin::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midi_messages) {
  int total_samples = buffer.getNumSamples();
  int num_channels = getTotalNumOutputChannels();

  // BUG FIX: getPlayHead() may legitimately return null (no host
  // transport); the original dereferenced it unchecked and could read an
  // uninitialized position on failure. Fall back to default values instead.
  AudioPlayHead::CurrentPositionInfo position_info;
  position_info.resetToDefault();
  if (AudioPlayHead* play_head = getPlayHead())
    play_head->getCurrentPosition(position_info);
  synth_.setBpm(position_info.bpm);

  // Re-align the synth clock with the host whenever the transport is active.
  if (position_info.isPlaying || position_info.isLooping || position_info.isRecording)
    synth_.correctToTime(position_info.timeInSamples);

  for (int sample_offset = 0; sample_offset < total_samples;) {
    int num_samples = std::min<int>(total_samples - sample_offset, MAX_BUFFER_PROCESS);

    processMidi(midi_messages, sample_offset, sample_offset + num_samples);

    if (synth_.getBufferSize() != num_samples)
      synth_.setBufferSize(num_samples);
    synth_.process();

    // Copy the synth's stereo output into every output channel
    // (odd channels take right, even channels take left).
    const mopo::mopo_float* synth_output_left = synth_.output(0)->buffer;
    const mopo::mopo_float* synth_output_right = synth_.output(1)->buffer;
    for (int channel = 0; channel < num_channels; ++channel) {
      float* channelData = buffer.getWritePointer(channel, sample_offset);
      const mopo::mopo_float* synth_output = (channel % 2) ? synth_output_right : synth_output_left;

      for (int i = 0; i < num_samples; ++i)
        channelData[i] = synth_output[i];
    }

    // Downsample the summed output into the visualization memory.
    // BUG FIX: a sample rate below mopo::MEMORY_SAMPLE_RATE made the
    // increment 0, spinning this loop forever; clamp it to at least 1.
    int output_inc = synth_.getSampleRate() / mopo::MEMORY_SAMPLE_RATE;
    if (output_inc < 1)
      output_inc = 1;
    for (int i = 0; i < num_samples; i += output_inc)
      output_memory_->push(synth_output_left[i] + synth_output_right[i]);

    sample_offset += num_samples;
  }
}
// Hosts a LADSPA/DSSI plugin: forwards MIDI to the keyboard state, tracks
// the host transport for the UI, converts MIDI for the wrapped plugin,
// connects its output ports, and runs whichever entry point the plugin
// provides (run_synth / run_synth_adding / run / run_adding).
void DemoJuceFilter::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
/*
    // for each of our input channels, we'll attenuate its level by the
    // amount that our volume parameter is set to.
    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        buffer.applyGain (channel, 0, buffer.getNumSamples(), gain);
        
        // mix in opposite ratio of noise (i.e. generator)
        float* sampleData = buffer.getSampleData(channel);
         for (int sample = 0; sample < buffer.getNumSamples(); sample++)
            sampleData[sample] = (rand() / static_cast<float>(RAND_MAX)) * (1.0 - gain) + sampleData[sample];
    }

    // in case we have more outputs than inputs, we'll clear any output
    // channels that didn't contain input data, (because these aren't
    // guaranteed to be empty - they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
*/
    // if any midi messages come in, use them to update the keyboard state object. This
    // object sends notification to the UI component about key up/down changes
    keyboardState.processNextMidiBuffer (midiMessages,
                                         0, buffer.getNumSamples(),
                                         true);

    // have a go at getting the current time from the host, and if it's changed, tell
    // our UI to update itself.
    AudioPlayHead::CurrentPositionInfo pos;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        // NOTE(review): memcmp over the whole struct also compares padding
        // bytes, so this can report a change when no field changed.
        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
        {
            lastPosInfo = pos;
            sendChangeMessage (this);
        }
    }
    else
    {
        // No usable transport: default to 4/4 at 120 BPM.
        zeromem (&lastPosInfo, sizeof (lastPosInfo));
        lastPosInfo.timeSigNumerator = 4;
        lastPosInfo.timeSigDenominator = 4;
        lastPosInfo.bpm = 120;
    }
    
    // Run the wrapped LADSPA/DSSI plugin, if one is loaded.
    if (ptrPlug && ladspa)
    {
      int blockSize = buffer.getNumSamples();
      
        // convert midi messages internally
        midiManager.convertMidiMessages (midiMessages, blockSize);

        // connect ports
//        for (int i = 0; i < ins.size (); i++)
//            ladspa->connect_port (plugin, ins [i], inputBuffer->getSampleData (i));
        for (int i = 0; i < outs.size (); i++)
		{
            ladspa->connect_port (plugin, outs [i], buffer.getSampleData (i));
//			std::cerr << " connecting output " << i << std::endl;
		}

        // Prefer the DSSI synth entry points when available.
        if (ptrPlug->run_synth)
        {
            ptrPlug->run_synth (plugin,
                                blockSize,
                                midiManager.getMidiEvents (),
                                midiManager.getMidiEventsCount ());

			// now paste the data into the right channel
			// not generic, this assumes we are a mono plugin and we've claimed 2 output channels
			buffer.copyFrom(1, 0, buffer, 0, 0, blockSize);			

			return;
        }
        else if (ptrPlug->run_synth_adding)
        {
            buffer.clear ();
            ptrPlug->run_synth_adding (plugin,
                                       blockSize,
                                       midiManager.getMidiEvents (),
                                       midiManager.getMidiEventsCount ());

			// now paste the data into the right channel
			// not generic, this assumes we are a mono plugin and we've claimed 2 output channels
			buffer.copyFrom(1, 0, buffer, 0, 0, blockSize);			

			return;
        }

        // run ladspa if present as 
        if (ladspa->run)
        {
            ladspa->run (plugin, blockSize);
        }
        else if (ladspa->run_adding)
        {
            buffer.clear ();
            ladspa->run_adding (plugin, blockSize);
        }
        else
        {
            // No runnable entry point: output silence.
            buffer.clear ();
        }
		
    }
    
}
Ejemplo n.º 23
0
// Records audio from channel 0 while the host transport is running (and
// triggers pitch analysis when it stops), or plays back the processed clips
// aligned to the host's ppq position. Also forwards the ppq to the editor.
void PitchTuneAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // BUG FIX: posInfo was read further down (editor update, ppq
    // bookkeeping, lastBlockPpq) even when no play head exists or the query
    // fails — i.e. uninitialized. Reset it to defaults first.
    AudioPlayHead::CurrentPositionInfo posInfo;
    posInfo.resetToDefault();
    bool isHostGoing_ = false;
    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition(posInfo)) {
        isHostGoing_ = posInfo.isPlaying;//(posInfo.isPlaying || posInfo.isRecording);
        updateBpm(posInfo.bpm, posInfo.timeSigDenominator);
    }

    // Push the current ppq to the editor (asynchronously) if it is visible.
    PitchTuneAudioProcessorEditor *e = (PitchTuneAudioProcessorEditor*)getActiveEditor();
    if (e) {
        if (e->isVisible()) {
            e->ppq = posInfo.ppqPosition;
            e->type = 1;
            e->triggerAsyncUpdate();
        }
    }

    if (isRecording) {
        if (isHostGoing_) {
            //record when host is playing
            if (sampleBeingRecorded && sampleBeingRecorded->getCursor() == 0) {
                // first time recording, store ppq
                sampleBeingRecorded->startPpq = posInfo.ppqPosition;
                //set recording flag
                sampleBeingRecorded->startRecording();
            }
            float* channelData = buffer.getSampleData (0);
            sampleBeingRecorded->record(channelData, buffer.getNumSamples());
        }
        else {
            if (sampleBeingRecorded && sampleBeingRecorded->isRecording) {
                //store stop ppq
                sampleBeingRecorded->stopPpq = posInfo.ppqPosition;
                //daw has stopped
                stopTransferring();
                //process pitch
                processPitch();
            }
        }
    }
    else {
        //playback the processed clips
        float* channelData = buffer.getSampleData (0);
        if (isHostGoing_) {
            int nClips = (int)samples.size();
            for (int i = 0; i < nClips; ++i) {
                Sample *curSample = samples[i];
                // NOTE(review): `startPpq >= ppqPosition` starts clips whose
                // start lies at/after the playhead — confirm the comparison
                // direction is intended.
                if (curSample ->startPpq >= posInfo.ppqPosition && !curSample ->isPlaying) {
                    //reach the start ppq
                    curSample ->startPlay();
                }
                
                if (posInfo.ppqPosition >= curSample ->stopPpq && curSample ->isPlaying) {
                    //reach the end ppq
                    curSample->stopPlay();
                }
                
                if (curSample ->isPlaying) {
                    curSample ->play(channelData, buffer.getNumSamples(), pitchProcessor->psola, (long)(posInfo.ppqPosition / Utility::numBeatsPerSample));
                }
            }
        }
        
    }

    // Clear surplus output channels (they may contain garbage).
    for (int i = getNumInputChannels(); i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }
    
    lastBlockPpq = posInfo.ppqPosition;
}
Ejemplo n.º 24
0
// Stereo convolution reverb block: convolves the input through up to four
// IR agents (in->out channel matrix), applies stereo width and smoothed
// dry/wet gains, measures dry/wet/out levels, and publishes the host bpm.
void Processor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& /*midiMessages*/)
{ 
  const int numInputChannels = getTotalNumInputChannels();
  const int numOutputChannels = getTotalNumOutputChannels();
  const size_t samplesToProcess = buffer.getNumSamples();

  // Determine channel data (mono input feeds both convolution inputs)
  const float* channelData0 = nullptr;
  const float* channelData1 = nullptr;
  if (numInputChannels == 1)
  {    
    channelData0 = buffer.getReadPointer(0);
    channelData1 = buffer.getReadPointer(0);
  }
  else if (numInputChannels == 2)
  {
    channelData0 = buffer.getReadPointer(0);
    channelData1 = buffer.getReadPointer(1);
  }

  // Convolution: each in->out agent accumulates into the wet buffer,
  // optionally scaled by the auto-gain compensation.
  _wetBuffer.clear();
  if (numInputChannels > 0 && numOutputChannels > 0)
  {
    float autoGain = 1.0f;
    if (getParameter(Parameters::AutoGainOn))
    {
      autoGain = DecibelScaling::Db2Gain(getParameter(Parameters::AutoGainDecibels));
    }

    // Convolve
    IRAgent* irAgent00 = getAgent(0, 0);
    if (irAgent00 && irAgent00->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 1)
    {
      irAgent00->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent01 = getAgent(0, 1);
    if (irAgent01 && irAgent01->getConvolver() && numInputChannels >= 1 && numOutputChannels >= 2)
    {
      irAgent01->process(channelData0, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent10 = getAgent(1, 0);
    if (irAgent10 && irAgent10->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 1)
    {      
      irAgent10->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(0, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }

    IRAgent* irAgent11 = getAgent(1, 1);
    if (irAgent11 && irAgent11->getConvolver() && numInputChannels >= 2 && numOutputChannels >= 2)
    {
      irAgent11->process(channelData1, &_convolutionBuffer[0], samplesToProcess);
      _wetBuffer.addFrom(1, 0, &_convolutionBuffer[0], samplesToProcess, autoGain);
    }
  }

  // Stereo width (wet signal only)
  if (numOutputChannels >= 2)
  {
    _stereoWidth.updateWidth(getParameter(Parameters::StereoWidth));
    _stereoWidth.process(_wetBuffer.getWritePointer(0), _wetBuffer.getWritePointer(1), samplesToProcess);
  }

  // Dry/wet gain, ramped across the block to avoid zipper noise
  {
    float dryGain0, dryGain1;
    _dryGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::DryDecibels)));
    _dryGain.getSmoothValues(samplesToProcess, dryGain0, dryGain1);
    buffer.applyGainRamp(0, samplesToProcess, dryGain0, dryGain1);
  }
  {
    float wetGain0, wetGain1;
    _wetGain.updateValue(DecibelScaling::Db2Gain(getParameter(Parameters::WetDecibels)));
    _wetGain.getSmoothValues(samplesToProcess, wetGain0, wetGain1);
    _wetBuffer.applyGainRamp(0, samplesToProcess, wetGain0, wetGain1);
  }

  // Level measurement (dry)
  if (numInputChannels == 1)
  {    
    _levelMeasurementsDry[0].process(samplesToProcess, buffer.getReadPointer(0));
    _levelMeasurementsDry[1].reset();
  }
  else if (numInputChannels == 2)
  {
    _levelMeasurementsDry[0].process(samplesToProcess, buffer.getReadPointer(0));
    _levelMeasurementsDry[1].process(samplesToProcess, buffer.getReadPointer(1));
  }

  // Sum wet to dry signal (dry/wet on-switches are also ramped)
  {
    float dryOnGain0, dryOnGain1;
    _dryOn.updateValue(getParameter(Parameters::DryOn) ? 1.0f : 0.0f);
    _dryOn.getSmoothValues(samplesToProcess, dryOnGain0, dryOnGain1);
    buffer.applyGainRamp(0, samplesToProcess, dryOnGain0, dryOnGain1);
  }
  {
    float wetOnGain0, wetOnGain1;
    _wetOn.updateValue(getParameter(Parameters::WetOn) ? 1.0f : 0.0f);
    _wetOn.getSmoothValues(samplesToProcess, wetOnGain0, wetOnGain1);
    if (numOutputChannels > 0)
    {
      buffer.addFromWithRamp(0, 0, _wetBuffer.getReadPointer(0), samplesToProcess, wetOnGain0, wetOnGain1);
    }
    if (numOutputChannels > 1)
    {
      buffer.addFromWithRamp(1, 0, _wetBuffer.getReadPointer(1), samplesToProcess, wetOnGain0, wetOnGain1);
    }
  }

  // Level measurement (wet/out)
  if (numOutputChannels == 1)
  {
    _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getReadPointer(0));
    _levelMeasurementsWet[1].reset();
    _levelMeasurementsOut[0].process(samplesToProcess, buffer.getReadPointer(0));
    _levelMeasurementsOut[1].reset();
  }
  else if (numOutputChannels == 2)
  {
    _levelMeasurementsWet[0].process(samplesToProcess, _wetBuffer.getReadPointer(0));
    _levelMeasurementsWet[1].process(samplesToProcess, _wetBuffer.getReadPointer(1));
    _levelMeasurementsOut[0].process(samplesToProcess, buffer.getReadPointer(0));
    _levelMeasurementsOut[1].process(samplesToProcess, buffer.getReadPointer(1));
  }

  // In case we have more outputs than inputs, we'll clear any output
  // channels that didn't contain input data, (because these aren't
  // guaranteed to be empty - they may contain garbage).
  for (int i=numInputChannels; i<numOutputChannels; ++i)
  {
    buffer.clear(i, 0, buffer.getNumSamples());
  }

  // Update beats per minute info (0 when no transport is available);
  // notify listeners only when the value actually changed.
  float beatsPerMinute = 0.0f;
  juce::AudioPlayHead* playHead = getPlayHead();
  if (playHead)
  {
    juce::AudioPlayHead::CurrentPositionInfo currentPositionInfo;
    if (playHead->getCurrentPosition(currentPositionInfo))
    {
      beatsPerMinute = static_cast<float>(currentPositionInfo.bpm);
    }
  }
  if (::fabs(_beatsPerMinute.exchange(beatsPerMinute)-beatsPerMinute) > 0.001f)
  {
    notifyAboutChange();
  }
}
Ejemplo n.º 25
0
// Generates MIDI Time Code (MTC) quarter-frame messages (status 0xF1) from the
// host transport position and emits them into the outgoing MIDI buffer.
// NOTE(review): this example is truncated — the body is cut off mid-scope and
// the closing braces are not visible here; do not assume the tail is complete.
void MidiOutFilter::processBlock (AudioSampleBuffer& buffer,
                                   MidiBuffer& midiMessages)
{
    // This filter produces no audio: silence every output channel.
    for (int i = 0; i < getNumOutputChannels(); ++i)
    {
        buffer.clear (i, 0, buffer.getNumSamples());
    }

    const double SR=getSampleRate();
	const double iSR=1.0/SR;  // inverse sample rate; unused in the visible portion of this function
    AudioPlayHead::CurrentPositionInfo pos;
    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        // Only do work when the transport position actually changed.
        // NOTE(review): memcmp over a struct also compares padding bytes — relies on
        // the host zero-filling CurrentPositionInfo consistently; verify.
        if (memcmp (&pos, &lastPosInfo, sizeof (pos)) != 0)
        {
            if(param[kMTC]>=0.5f) {
                double frameRate=24.0;   // hard-coded 24 fps in this example
                int mtcFrameRate=0;      // MTC rate code 0 == 24 fps (goes into quarter-frame piece 7)

                const double samplesPerPpq=60.0*SR/pos.bpm;
                // One quarter-frame message is sent 4 times per frame.
                const double samplesPerClock = SR/(4.0*frameRate);
                const long double seconds = (long double)(pos.ppqPosition*60.0f/pos.bpm) /*+ smpteOffset*/;
                const long double absSecs = fabs (seconds);
                const bool neg  = seconds < 0.0;

                int hours, mins, secs, frames;
                if (frameRate==29.97) {
                    // SMPTE drop-frame compensation: skip two frame numbers per minute
                    // except every tenth minute (17982 frames per 10-minute cycle).
                    int64 frameNumber = int64(absSecs*29.97);
                    frameNumber +=  18*(frameNumber/17982) + 2*(((frameNumber%17982) - 2) / 1798);

                    hours  = int((((frameNumber / 30) / 60) / 60) % 24);
                    mins   = int(((frameNumber / 30) / 60) % 60);
                    secs   = int((frameNumber / 30) % 60);
                    frames = int(frameNumber % 30);
                }
                else {
                    // Non-drop rates: straightforward conversion of absolute seconds.
                    hours  = (int) (absSecs / (60.0 * 60.0));
                    mins   = ((int) (absSecs / 60.0)) % 60;
                    secs   = ((int) absSecs) % 60;
                    frames = (int)(int64(absSecs*frameRate) % (int)frameRate);
                }
                if (pos.isPlaying)
                {
                    double i=0.0;  // quarter-frame index offset within this audio block
                    // Fractional position inside the current quarter-frame interval.
                    const double clockppq = fmod(absSecs*frameRate*4.0,(long double)1.0);
                    samplesToNextMTC = (int)(samplesPerClock * (clockppq+i));
                    i+=1.0;
                    if (!wasPlaying) {
                        //this is so the song position pointer will be sent before any
                        //other data at the beginning of the song
                        MidiBuffer temp = midiMessages;
                        midiMessages.clear();

                        if (samplesToNextMTC<buffer.getNumSamples()) {
                            int mtcData;
                            // MTC quarter-frame pieces 0..7: low/high nibbles of
                            // frames, seconds, minutes, hours (piece 7 also carries
                            // the frame-rate code OR'd with the hours high bit).
                            switch (mtcNumber)
                            {
                            case 0: mtcData=frames&0x0f; break;
                            case 1: mtcData=(frames&0xf0)>>4; break;
                            case 2: mtcData=secs&0x0f; break;
                            case 3: mtcData=(secs&0xf0)>>4; break;
                            case 4: mtcData=mins&0x0f; break;
                            case 5: mtcData=(mins&0xf0)>>4; break;
                            case 6: mtcData=hours&0x0f; break;
                            case 7: mtcData=(hours&0x10)>>4 | mtcFrameRate; break;
                            }
                            // 0xF1 = MTC quarter-frame status; data byte is piece
                            // number (high nibble) plus the 4-bit payload.
                            MidiMessage midiclock(0xf1,(mtcNumber<<4)|(mtcData));
                            ++mtcNumber;
                            mtcNumber&=0x07;  // wrap after the 8th piece
                            midiMessages.addEvent(midiclock,samplesToNextMTC);
                            samplesToNextMTC = (int)(samplesPerClock * (clockppq+i));
                            i+=1.0;
                            startMTCAt=-999.0;  // -999.0 is the "no pending start" sentinel
                            sendmtc=true;
                        }

                        // Re-append the original events after the MTC message.
                        midiMessages.addEvents(temp,0,buffer.getNumSamples(),0);
                    }
                    // A deferred MTC start was scheduled and falls inside this block.
                    if (startMTCAt >-999.0 && (int)(samplesPerPpq*(startMTCAt-pos.ppqPosition))<buffer.getNumSamples()) {
                            samplesToNextMTC = (int)(samplesPerPpq*(startMTCAt-pos.ppqPosition));
                            int mtcData;
                            // Same piece encoding as above.
                            switch (mtcNumber)
                            {
                            case 0: mtcData=frames&0x0f; break;
                            case 1: mtcData=(frames&0xf0)>>4; break;
                            case 2: mtcData=secs&0x0f; break;
                            case 3: mtcData=(secs&0xf0)>>4; break;
                            case 4: mtcData=mins&0x0f; break;
                            case 5: mtcData=(mins&0xf0)>>4; break;
                            case 6: mtcData=hours&0x0f; break;
                            case 7: mtcData=(hours&0x10)>>4 | mtcFrameRate; break;
                            }
                            MidiMessage midiclock(0xf1,(mtcNumber<<4)|(mtcData));
                            ++mtcNumber;
                            mtcNumber&=0x07;
                            midiMessages.addEvent(midiclock,samplesToNextMTC);
                            samplesToNextMTC = (int)(samplesPerClock * (clockppq+i));
                            i+=1.0;
                            startMTCAt=-999.0;
                            sendmtc=true;
                    }
Ejemplo n.º 26
0
// Audio/MIDI callback: forwards host transport info, parameter values and
// incoming MIDI to the patch, runs the DSP graph, then returns any MIDI the
// patch generated during this block.
void InstanceProcessor::processBlock(AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Output channels without a matching input may contain garbage; silence them.
    for(int channel = getTotalNumInputChannels(); channel < getTotalNumOutputChannels(); ++channel)
    {
        buffer.clear(channel, 0, buffer.getNumSamples());
    }
    
    // Query the host transport, but only when the patch can actually receive it.
    bool hasTransportInfo = false;
    AudioPlayHead* host = getPlayHead();
    if(host && m_patch_tie)
    {
        hasTransportInfo = host->getCurrentPosition(m_playinfos);
    }
    lock();
    {
        m_midi.clear();
        if(hasTransportInfo)
        {
            // Play state and absolute time.
            m_playing_list.setFloat(0, m_playinfos.isPlaying);
            m_playing_list.setFloat(1, m_playinfos.timeInSeconds);
            sendMessageAnything(m_patch_tie, s_playing, m_playing_list);
            // Tempo, meter and musical position.
            m_measure_list.setFloat(0, m_playinfos.bpm);
            m_measure_list.setFloat(1, m_playinfos.timeSigNumerator);
            m_measure_list.setFloat(2, m_playinfos.timeSigDenominator);
            m_measure_list.setFloat(3, m_playinfos.ppqPosition);
            m_measure_list.setFloat(4, m_playinfos.ppqPositionOfLastBarStart);
            sendMessageAnything(m_patch_tie, s_measure, m_measure_list);
        }
        // Push every valid parameter value into the patch (stops at the first invalid one).
        for(size_t index = 0; index < m_parameters.size() && m_parameters[index].isValid(); ++index)
        {
            sendMessageFloat(m_parameters[index].getTie(), m_parameters[index].getValueNonNormalized());
        }
        
        // Translate the host's incoming MIDI events into patch messages.
        MidiMessage event;
        MidiBuffer::Iterator iterator(midiMessages);
        int samplePosition = midiMessages.getFirstEventTime();
        while(iterator.getNextEvent(event, samplePosition))
        {
            if(event.isNoteOnOrOff())
                sendMidiNote(event.getChannel(), event.getNoteNumber(), event.getVelocity());
            else if(event.isController())
                sendMidiControlChange(event.getChannel(), event.getControllerNumber(), event.getControllerValue());
            else if(event.isPitchWheel())
                sendMidiPitchBend(event.getChannel(), event.getPitchWheelValue());
            else if(event.isChannelPressure())
                sendMidiAfterTouch(event.getChannel(), event.getChannelPressureValue());
            else if(event.isAftertouch())
                sendMidiPolyAfterTouch(event.getChannel(), event.getNoteNumber(), event.getAfterTouchValue());
            else if(event.isProgramChange())
                sendMidiProgramChange(event.getChannel(), event.getProgramChangeNumber());
        }
    }
    midiMessages.clear();
    // Run the DSP graph in place on the host buffer.
    performDsp(buffer.getNumSamples(),
               getTotalNumInputChannels(), buffer.getArrayOfReadPointers(),
               getTotalNumOutputChannels(), buffer.getArrayOfWritePointers());
    // Hand back whatever MIDI the patch produced during this block.
    midiMessages.swapWith(m_midi);
    unlock();
}
Ejemplo n.º 27
0
// Stereo flanger callback: an LFO (sine or triangle, selected by button1/button2)
// modulates a four-point delay per channel; the result is band-limited by a
// high-pass/low-pass biquad pair. Only channels 0 and 1 are processed.
void MumuAudioFlangerAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Track the host transport so BPM changes can be observed.
    AudioPlayHead::CurrentPositionInfo hostPosition;
    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (hostPosition))
    {
        if (hostPosition.bpm != lastPosInfo.bpm)
        {
            //std::cout << hostPosition.bpm << std::endl;
        }
        lastPosInfo = hostPosition;
    }

    const int totalSamples = buffer.getNumSamples();

    // Output channels beyond the input count may hold garbage — clear them so
    // the plugin never emits noise on unused outputs.
    for (int extra = getNumInputChannels(); extra < getNumOutputChannels(); ++extra)
        buffer.clear (extra, 0, totalSamples);

    for (int ch = 0; ch < getNumInputChannels(); ++ch)
    {
        float* samples = buffer.getWritePointer (ch);
        m_bufferSize = totalSamples;

        if (ch == 0)
        {
            // Left channel: modulate the delay time, delay, then band-limit.
            for (int n = 0; n < totalSamples; ++n)
            {
                float lfo = 0;
                if (button1 == 1)
                    lfo = SineLFOL.calcSineLFO (centerValue, m_fModDepth);
                if (button2 == 1)
                    lfo = TriLFOL.calcTriLFO (centerValue, m_fModDepth);

                FourPDelayL.setDelayTime (m_sampleRate, lfo);
                FourPDelayL.setPlayheads();
                samples[n] = FourPDelayL.process (samples[n]);
                // Inverted high-pass summed with the low-pass output.
                samples[n] = (m_HPF_Left.doBiQuad (samples[n]) * -1) + m_LPF_Left.doBiQuad (samples[n]);
            }
        }
        else if (ch == 1)
        {
            // Right channel: independent LFO/delay/filter state mirrors the left.
            for (int n = 0; n < totalSamples; ++n)
            {
                float lfo = 0;
                if (button1 == 1)
                    lfo = SineLFOR.calcSineLFO (centerValue, m_fModDepth);
                if (button2 == 1)
                    lfo = TriLFOR.calcTriLFO (centerValue, m_fModDepth);

                FourPDelayR.setDelayTime (m_sampleRate, lfo);
                FourPDelayR.setPlayheads();
                samples[n] = FourPDelayR.process (samples[n]);
                samples[n] = (m_HPF_Right.doBiQuad (samples[n]) * -1) + m_LPF_Right.doBiQuad (samples[n]);
            }
        }
    }

    // Keep a copy of the processed block for visualisation and notify listeners.
    currentSampleBuffer = buffer;
    sendChangeMessage();
}