// Applies the biquad (direct form II) in place to every channel, keeping
// per-channel filter state in the z1/z2 arrays.
void SecondOrderIIRFilter::processBlock (AudioSampleBuffer& buffer)
{
    const int numChannels = buffer.getNumChannels();
    const int numSamples  = buffer.getNumSamples();

    for (int ch = 0; ch < numChannels; ++ch)
    {
        float* data = buffer.getSampleData (ch);

        for (int n = 0; n < numSamples; ++n)
        {
            const float input = data[n];
            const double state1 = z1[ch];
            const double state2 = z2[ch];

            // w[n] = x[n] - a1*w[n-1] - a2*w[n-2]
            const double w = input - a1 * state1 - a2 * state2;
            double out = b0 * w + b1 * state1 + b2 * state2;

            // This is copied from juce_IIRFilter.cpp, processSamples(),
            // line 101 (snap tiny values to zero to avoid denormals).
           #if JUCE_INTEL
            if (! (out < -1.0e-8 || out > 1.0e-8))
                out = 0.0;
           #endif

            z2.set (ch, state1);
            z1.set (ch, w);
            data[n] = float (out);
        }
    }
}
void CtrlrWaveform::addBlock (double sampleNumberInsource, const AudioSampleBuffer &newData, int startOffsetInBuffer, int numSamples) { int sampleToAddAt = 0; if (audioBufferCopy.getNumSamples() == 0) { /* Initialize the buffer */ audioBufferCopy.setSize (newData.getNumChannels(), newData.getNumSamples()); } else { /* it's already filled, just extend it */ sampleToAddAt = audioBufferCopy.getNumSamples(); if (newData.getNumChannels() > audioBufferCopy.getNumChannels()) { audioBufferCopy.setSize (newData.getNumChannels(), audioBufferCopy.getNumSamples() + newData.getNumSamples(), true); } else { audioBufferCopy.setSize (audioBufferCopy.getNumChannels(), audioBufferCopy.getNumSamples() + newData.getNumSamples(), true); } } for (int i=0; i<newData.getNumChannels(); i++) { audioBufferCopy.copyFrom (i, sampleToAddAt, newData, i, startOffsetInBuffer, numSamples); } audioThumbnail->addBlock (sampleNumberInsource, newData, startOffsetInBuffer, numSamples); repaint(); }
// Runs a forward FFT per channel, builds the magnitude spectrum, and feeds it
// to the harmonic-summation pitch estimator.
void PitchestimatorpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int bufsize = buffer.getNumSamples();

    // Clear any output channels that have no corresponding input data.
    for (int ch = getNumInputChannels(); ch < getNumOutputChannels(); ++ch)
        buffer.clear (ch, 0, bufsize);

    // Forward FFT of each input channel, then apply the output gain.
    for (int ch = 0; ch < getNumInputChannels(); ++ch)
    {
        float* data = buffer.getWritePointer (ch);
        fft->processForward (data, fftData, bufsize, nFFT);
        buffer.applyGain (ch, 0, bufsize, gain);
    }

    // Magnitude spectrum feeds the pitch estimator.
    for (int bin = 0; bin < bufsize; ++bin)
        X[bin] = fft->cartopolRadius (fftData[bin][0], fftData[bin][1]);

    HS->generateCost (X, f0Area, numberOfHarmonics, bufsize, f0AreaSize, getSampleRate(), nFFT);
    pitchEstimate = HS->estimatePitch (f0Area, f0AreaSize);
    pitchText = String (pitchEstimate, 1);
}
//this plugin can only handle one channel of input and has only one channel of output void SpectralDelayPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages) { // This is the place where you'd normally do the guts of your plugin's // audio processing... int numSamples = buffer.getNumSamples(); ScopedPointer<double> doubleInput = new double[numSamples]; ScopedPointer<double> doubleOutput = new double[numSamples]; ScopedPointer<float> functionOutput = new float[numSamples]; // ..do something to the data... float* channelData = buffer.getSampleData (0); //convert input data to doubles for now, ask on forums if it's possible use use doubles instead w/ AudioSampleBuffer otherwise rewrite filter's to use floats instead (maybe make a template for different numeric types) for(int i = 0; i < numSamples; ++i) { doubleInput[i] = channelData[i]; functionOutput[i] = 0.0; } for(int i = 0; i < numFilters; ++i) { FFTfilter& currentFilter = *(filterVector[i]); CircularBuffer<double>& currentDelayLine = *(delayLineVector[i]); currentFilter.filter(doubleInput, doubleOutput, numSamples); //copy filter output to the correct delay line and copy to the function's output at the same time for(int j = 0; j < numSamples; ++j) { currentDelayLine.addData(doubleOutput[j]); functionOutput[j] += float(currentDelayLine[delayAmounts[i]]); } } //clear the buffer and copy the output data to it buffer.clear(); buffer.copyFrom(0, 0, functionOutput, numSamples); }
virtual void getNextAudioBlock(const AudioSourceChannelInfo& bufferToFill) { AudioSampleBuffer* destBuffer = bufferToFill.buffer; const int len = std::min(bufferToFill.numSamples, static_cast<int>(_len-_pos)); if (destBuffer) { for (int channel=0; channel<destBuffer->getNumChannels(); ++channel) { if (channel == 0 && _buffer) { destBuffer->copyFrom(channel, bufferToFill.startSample, _buffer+_pos, len); if (len < bufferToFill.numSamples) { const int startClear = bufferToFill.startSample + len; const int lenClear = bufferToFill.numSamples - len; destBuffer->clear(startClear, lenClear); } } else { destBuffer->clear(channel, bufferToFill.startSample, len); } } } _pos += len; }
// Pulls one window of samples from the cached file data into the output
// buffer, advancing the timestamp and the cache-window index.
void FileReader::process (AudioSampleBuffer& buffer)
{
    // Source samples needed to fill this block at the file's native rate.
    const int samplesNeededPerBuffer = int (float (buffer.getNumSamples()) * (getDefaultSampleRate() / m_sysSampleRate));
    m_samplesPerBuffer.set(samplesNeededPerBuffer);
    // FIXME: needs to account for the fact that the ratio might not be an exact
    // integer value

    // if cache window id == 0, we need to read and cache BUFFER_WINDOW_CACHE_SIZE more buffer windows
    if (bufferCacheWindow == 0)
    {
        switchBuffer();
    }

    for (int i = 0; i < currentNumChannels; ++i)
    {
        // offset readBuffer index by current cache window count * buffer window size * num channels
        input->processChannelData (*readBuffer + (samplesNeededPerBuffer * currentNumChannels * bufferCacheWindow),
                                   buffer.getWritePointer (i, 0),
                                   i,
                                   samplesNeededPerBuffer);
    }

    setTimestampAndSamples(timestamp, samplesNeededPerBuffer);
    timestamp += samplesNeededPerBuffer;

    // Update the editor's playback clock; position wraps inside the
    // [startSample, stopSample) loop region.
    static_cast<FileReaderEditor*> (getEditor())->setCurrentTime(samplesToMilliseconds(startSample + timestamp % (stopSample - startSample)));

    // Advance (and wrap) the cache-window index.
    bufferCacheWindow += 1;
    bufferCacheWindow %= BUFFER_WINDOW_CACHE_SIZE;
}
// Streams a float AudioSampleBuffer out to the device as interleaved
// little-endian int16 blocks, one fixed-size hardware buffer at a time.
void writeBuffer (const AudioSampleBuffer& buffer, Thread& thread)
{
    jassert (buffer.getNumChannels() == bufferList.numChannels);
    jassert (buffer.getNumSamples() < bufferList.numSamples * bufferList.numBuffers);

    int offset = 0;
    int numSamples = buffer.getNumSamples();

    while (numSamples > 0)
    {
        // Blocks until a hardware buffer is free (or the thread must exit).
        int16* const destBuffer = bufferList.waitForFreeBuffer (thread);

        if (destBuffer == nullptr)
            break;

        for (int i = 0; i < bufferList.numChannels; ++i)
        {
            // Dest: interleaved int16, LE. Src: non-interleaved float32, native.
            typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst> DstSampleType;
            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SrcSampleType;

            DstSampleType dstData (destBuffer + i, bufferList.numChannels);
            SrcSampleType srcData (buffer.getSampleData (i, offset));

            // NOTE(review): always converts a full bufferList.numSamples, even
            // on the last chunk — presumably callers pass whole multiples of
            // the device buffer size; confirm against the asserts above.
            dstData.convertSamples (srcData, bufferList.numSamples);
        }

        check ((*playerBufferQueue)->Enqueue (playerBufferQueue, destBuffer, bufferList.getBufferSizeBytes()));
        bufferList.bufferSent();

        numSamples -= bufferList.numSamples;
        offset += bufferList.numSamples;
    }
}
// Applies a ramped gain per channel and reports each channel's magnitude to
// the registered meter listeners.
void IAAEffectProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer&)
{
    const float gain = *parameters.getRawParameterValue ("gain");
    const int numInputs  = getTotalNumInputChannels();
    const int numOutputs = getTotalNumOutputChannels();
    const int numSamples = buffer.getNumSamples();

    // Zero any output channels that have no matching input.
    for (int ch = numInputs; ch < numOutputs; ++ch)
        buffer.clear (ch, 0, numSamples);

    // Ramp from the previous gain to the new one so consecutive blocks join
    // without discontinuities, then report each channel's level.
    for (int ch = 0; ch < numInputs; ++ch)
    {
        buffer.applyGainRamp (ch, 0, numSamples, previousGain, gain);

        meterListeners.call (&IAAEffectProcessor::MeterListener::handleNewMeterValue,
                             ch,
                             buffer.getMagnitude (ch, 0, numSamples));
    }

    previousGain = gain;

    // Grab the host's transport position so it can be displayed later.
    updateCurrentTimeInfoFromHost (lastPosInfo);
}
// Applies the vibrato effect in place, or delegates to the bypassed path.
void JuceVibAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (bypassed)
    {
        processBlockBypassed (buffer, midiMessages);
        return;
    }

    const int numInputs  = getTotalNumInputChannels();
    const int numOutputs = getTotalNumOutputChannels();

    // Refresh the vibrato settings from the current parameter values.
    lfoFreq = freqParam->get();
    lfoAmp = depthParam->get();
    Vib->setFreq (lfoFreq * maxFreq);
    Vib->setDepth (lfoAmp);

    // Outputs beyond the input count aren't guaranteed to be empty — they may
    // contain garbage — so silence them to avoid screaming feedback in a
    // freshly-compiled plugin.
    for (int ch = numInputs; ch < numOutputs; ++ch)
        buffer.clear (ch, 0, buffer.getNumSamples());

    // In-place vibrato processing.
    float** channels = buffer.getArrayOfWritePointers();
    Vib->process (channels, channels, buffer.getNumSamples());
}
// Audio callback: produces silence (this processor is midi/UI only), drains
// midi, and hands pending parameter updates to the editor without blocking.
void Pfm2AudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    handleIncomingMidiBuffer (midiMessages, numSamples);

    // Silence all outputs: no audio is generated here.
    for (int channel = 0; channel < getNumOutputChannels(); ++channel)
        buffer.clear (channel, 0, numSamples);

    // dispatch realtime events to non realtime observer
    parameterSet.processRealtimeEvents();

    midiMessageCollector.removeNextBlockOfMessages (midiMessages, numSamples);

    // Pass pending parameter updates to the editor without ever blocking the
    // audio thread: if the lock is contended, just try again next block.
    if (parametersToUpdate.size() > 0)
    {
        if (parametersToUpdateMutex.try_lock())
        {
            std::unordered_set<const char*> pending;
            pending.swap (parametersToUpdate);
            parametersToUpdateMutex.unlock();

            if (pfm2Editor)
                pfm2Editor->updateUIWith (pending);
        }
    }
}
// Reads device-sized chunks from the recorder's interleaved int16 buffers,
// converting them into the non-interleaved float AudioSampleBuffer.
void readNextBlock (AudioSampleBuffer& buffer, Thread& thread)
{
    jassert (buffer.getNumChannels() == bufferList.numChannels);
    jassert (buffer.getNumSamples() < bufferList.numSamples * bufferList.numBuffers);
    // Destination must be a whole number of device buffers.
    jassert ((buffer.getNumSamples() % bufferList.numSamples) == 0);

    int offset = 0;
    int numSamples = buffer.getNumSamples();

    while (numSamples > 0)
    {
        // Blocks until the device has a filled buffer (or the thread must exit).
        int16* const srcBuffer = bufferList.waitForFreeBuffer (thread);

        if (srcBuffer == nullptr)
            break;

        for (int i = 0; i < bufferList.numChannels; ++i)
        {
            // Dest: non-interleaved float32, native. Src: interleaved int16, LE.
            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> DstSampleType;
            typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::Const> SrcSampleType;

            DstSampleType dstData (buffer.getSampleData (i, offset));
            SrcSampleType srcData (srcBuffer + i, bufferList.numChannels);
            dstData.convertSamples (srcData, bufferList.numSamples);
        }

        // Hand the emptied buffer back to the device queue.
        enqueueBuffer (srcBuffer);

        numSamples -= bufferList.numSamples;
        offset += bufferList.numSamples;
    }
}
// Simple feedback delay: mixes the delay line into each input channel and
// writes the attenuated sum back into the line.
void Csc344filterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // Outputs without matching inputs aren't guaranteed to be empty — they
    // may contain garbage — so silence them.
    for (int ch = getNumInputChannels(); ch < getNumOutputChannels(); ++ch)
        buffer.clear (ch, 0, numSamples);

    int writeIndex = 0;

    for (int ch = 0; ch < getNumInputChannels(); ++ch)
    {
        float* channelData = buffer.getSampleData (ch);
        float* delayData = delayBuffer.getSampleData (jmin (ch, delayBuffer.getNumChannels() - 1));

        // Every channel restarts from the same stored delay position.
        // NOTE(review): delayPosition is never advanced here — presumably it
        // is updated elsewhere; confirm against the rest of the class.
        writeIndex = delayPosition;

        for (int i = 0; i < numSamples; ++i)
        {
            const float dry = channelData[i];
            channelData[i] += delayData[writeIndex];
            delayData[writeIndex] = (delayData[writeIndex] + dry) * delay;

            if (++writeIndex >= delayBuffer.getNumSamples())
                writeIndex = 0;
        }
    }
}
// Burns the given AudioSource to disc as one audio track, pulling
// samplesPerBlock frames at a time and converting to interleaved 16-bit
// little-endian CD audio. Takes ownership of audioSource.
// Returns true only if every block was written and the track closed cleanly.
bool AudioCDBurner::addAudioTrack (AudioSource* audioSource, int numSamples)
{
    if (audioSource == 0)
        return false;

    // Owns the source so it's freed on every exit path.
    ScopedPointer<AudioSource> source (audioSource);

    long bytesPerBlock;
    HRESULT hr = pimpl->redbook->GetAudioBlockSize (&bytesPerBlock);

    // CD audio: 2 channels x 2 bytes per sample = 4 bytes per frame.
    const int samplesPerBlock = bytesPerBlock / 4;
    bool ok = true;

    // NOTE(review): dividing by (bytesPerBlock * 4) rather than by
    // samplesPerBlock looks suspicious for a block count — confirm against
    // the IRedbookDiscMaster::CreateAudioTrack documentation.
    hr = pimpl->redbook->CreateAudioTrack ((long) numSamples / (bytesPerBlock * 4));

    HeapBlock <byte> buffer (bytesPerBlock);
    AudioSampleBuffer sourceBuffer (2, samplesPerBlock);

    int samplesDone = 0;
    source->prepareToPlay (samplesPerBlock, 44100.0);

    while (ok)
    {
        {
            // Fill sourceBuffer with the next chunk from the source.
            AudioSourceChannelInfo info;
            info.buffer = &sourceBuffer;
            info.numSamples = samplesPerBlock;
            info.startSample = 0;
            sourceBuffer.clear();

            source->getNextAudioBlock (info);
        }

        buffer.clear (bytesPerBlock);

        // Convert float stereo to interleaved little-endian int16 (CD format):
        // left samples at even slots, right samples at odd slots.
        typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst> CDSampleFormat;
        typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SourceSampleFormat;

        CDSampleFormat left (buffer, 2);
        left.convertSamples (SourceSampleFormat (sourceBuffer.getSampleData (0)), samplesPerBlock);
        CDSampleFormat right (buffer + 2, 2);
        right.convertSamples (SourceSampleFormat (sourceBuffer.getSampleData (1)), samplesPerBlock);

        hr = pimpl->redbook->AddAudioTrackBlocks (buffer, bytesPerBlock);

        if (FAILED (hr))
            ok = false;

        samplesDone += samplesPerBlock;

        if (samplesDone >= numSamples)
            break;
    }

    hr = pimpl->redbook->CloseAudioTrack();
    return ok && hr == S_OK;
}
// Creates a quarter-second test sound: low-level noise plus a series of
// randomly spaced +/-0.99 spike pairs whose positions are stored in `spikes`.
void createTestSound()
{
    const int length = ((int) sampleRate) / 4;
    testSound.setSize (1, length);
    testSound.clear();

    float* s = testSound.getSampleData (0, 0);

    Random rand (0);
    rand.setSeedRandomly();

    // Background noise, roughly triangular-distributed around zero.
    for (int i = 0; i < length; ++i)
        s[i] = (rand.nextFloat() - rand.nextFloat() + rand.nextFloat() - rand.nextFloat()) * 0.06f;

    spikes.clear();

    int spikePos = 0;
    int spikeDelta = 50;

    // BUG FIX: loop while spikePos < length - 1 (not < length) — each spike
    // also writes s[spikePos + 1], so allowing spikePos == length - 1 wrote
    // one sample past the end of the buffer.
    while (spikePos < length - 1)
    {
        spikes.add (spikePos);

        s [spikePos] = 0.99f;
        s [spikePos + 1] = -0.99f;

        spikePos += spikeDelta;
        spikeDelta += spikeDelta / 6 + rand.nextInt (5);
    }
}
// Incorporates a block of incoming samples into the thumbnail: for each
// thumbnail cell the block touches, computes the min/max of the source
// samples it covers and commits them via setLevels().
void AudioThumbnail::addBlock (const int64 startSample, const AudioSampleBuffer& incoming,
                               int startOffsetInBuffer, int numSamples)
{
    jassert (startSample >= 0);

    // Range of thumbnail cells covered by [startSample, startSample + numSamples).
    const int firstThumbIndex = (int) (startSample / samplesPerThumbSample);
    const int lastThumbIndex  = (int) ((startSample + numSamples + (samplesPerThumbSample - 1)) / samplesPerThumbSample);
    const int numToDo = lastThumbIndex - firstThumbIndex;

    if (numToDo > 0)
    {
        const int numChans = jmin (channels.size(), incoming.getNumChannels());

        // Scratch storage: numToDo min/max pairs per channel, plus a
        // per-channel pointer table for setLevels().
        const HeapBlock<MinMaxValue> thumbData ((size_t) (numToDo * numChans));
        const HeapBlock<MinMaxValue*> thumbChannels ((size_t) numChans);

        for (int chan = 0; chan < numChans; ++chan)
        {
            const float* const sourceData = incoming.getSampleData (chan, startOffsetInBuffer);
            MinMaxValue* const dest = thumbData + numToDo * chan;
            thumbChannels [chan] = dest;

            for (int i = 0; i < numToDo; ++i)
            {
                float low, high;
                const int start = i * samplesPerThumbSample;
                // Clamp the final cell so we never read past the incoming data.
                FloatVectorOperations::findMinAndMax (sourceData + start,
                                                      jmin (samplesPerThumbSample, numSamples - start),
                                                      low, high);
                dest[i].setFloat (low, high);
            }
        }

        setLevels (thumbChannels, firstThumbIndex, numChans, numToDo);
    }
}
// Granulates the stereo input in fixed-size internal blocks, keeping the
// quantizer in sync with the host transport when one is available.
void Plugin::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Stereo-only processor.
    if (getNumInputChannels() != 2 && getNumOutputChannels() != 2)
        return;

    float* left  = buffer.getWritePointer (0);
    float* right = buffer.getWritePointer (1);
    const int sampleframes = buffer.getNumSamples();
    const int blocks = sampleframes / kInternalBlocksize;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (pos))
    {
        if (pos.bpm == 0.0f)
        {
            // No usable tempo from the host: turn all quantization off.
            parameters->setQuantizationDisabled();
            parameters->setParameter (kDelayQuant, 0.0f, false);
            parameters->setParameter (kIotQuant, 0.0f, false);
            parameters->setParameter (kDurQuant, 0.0f, false);
        }
        else
        {
            parameters->time_quantizer->setPositionInfo (&pos);
        }
    }
    else
    {
        parameters->setQuantizationDisabled();
    }

    block_sample_pos = 0;

    // Process whole internal blocks...
    for (int i = 0; i < blocks; ++i)
    {
        granulator->processInternalBlock (left, right, kInternalBlocksize);
        left  += kInternalBlocksize;
        right += kInternalBlocksize;
        parameters->time_quantizer->incrementPositionInfo();
    }

    // ...then mop up whatever partial block remains.
    const int samples_remaining = sampleframes % kInternalBlocksize;

    if (samples_remaining)
        granulator->processInternalBlock (left, right, samples_remaining);
}
// Renders the synth into the (pre-cleared) output buffer, merging on-screen
// keyboard events into the midi stream, and records the host transport time.
void NewProjectAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // Output channels start out as garbage — zero them all before rendering.
    for (int channel = 0; channel < getNumOutputChannels(); ++channel)
        buffer.clear (channel, 0, numSamples);

    // Merge any on-screen keyboard clicks into the midi stream...
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // ...then let the synth turn those midi events into audio.
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);

    // Remember the host transport position for display, falling back to the
    // defaults when the host can't provide one.
    AudioPlayHead::CurrentPositionInfo newTime;

    if (getPlayHead() != nullptr && getPlayHead()->getCurrentPosition (newTime))
        lastPosInfo = newTime;
    else
        lastPosInfo.resetToDefault();
}
// create a test sound which consists of a series of randomly-spaced audio spikes.. void createTestSound() { const int length = ((int) sampleRate) / 4; testSound.setSize (1, length); testSound.clear(); Random rand; for (int i = 0; i < length; ++i) testSound.setSample (0, i, (rand.nextFloat() - rand.nextFloat() + rand.nextFloat() - rand.nextFloat()) * 0.06f); spikePositions.clear(); int spikePos = 0; int spikeDelta = 50; while (spikePos < length - 1) { spikePositions.add (spikePos); testSound.setSample (0, spikePos, 0.99f); testSound.setSample (0, spikePos + 1, -0.99f); spikePos += spikeDelta; spikeDelta += spikeDelta / 6 + rand.nextInt (5); } }
// Renders the sampler synth from the incoming midi and tracks the block's
// peak (largest positive sample of channel 0) for metering.
void SamplerProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Snapshot the incoming midi for the synth and the transfer-function
    // selector. (operator= replaces the previous contents, so the clear()
    // calls the old code made first were redundant and have been removed.)
    midiBuffer = midiMessages;
    tfMidiBuffer = midiMessages;

    // Forward midi to the registered callback and the TF selector.
    if (midiCallback != nullptr)
    {
        if (! midiMessages.isEmpty())
        {
            midiCallback->handleMidiBuffer (midiMessages);
            tf_selector->setMidiBuffer (tfMidiBuffer);
        }
    }

    // (FIX: dropped the unused local `int x = midiBuffer.getNumEvents();`.)
    synth.renderNextBlock (buffer, midiBuffer, 0, buffer.getNumSamples());

    // Peak of channel 0; read-only access, so use the read pointer.
    peak = 0.0;
    const float* samples = buffer.getReadPointer (0);

    for (int i = 0; i < buffer.getNumSamples(); ++i)
        if (samples[i] > peak)
            peak = samples[i];
}
// Hard-limits each input channel: any sample above `cutoff` is clamped to it.
void AudioFilterAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float* data = buffer.getSampleData (channel);

        // Clamp only the positive excursions above the cutoff threshold.
        for (int i = 0; i < numSamples; ++i)
            if (data[i] > cutoff)
                data[i] = cutoff;
    }

    // Outputs without a matching input aren't guaranteed to be empty — they
    // may contain garbage — so silence them.
    for (int channel = getNumInputChannels(); channel < getNumOutputChannels(); ++channel)
        buffer.clear (channel, 0, numSamples);
}
// Writes `numSamples` frames from `source` (starting at `startSample`) through
// this writer. Float-format writers receive the float data reinterpreted as
// int pointers; integer-format writers get a real float32 -> int32 conversion.
bool AudioFormatWriter::writeFromAudioSampleBuffer (const AudioSampleBuffer& source, int startSample, int numSamples)
{
    jassert (startSample >= 0
              && startSample + numSamples <= source.getNumSamples()
              && source.getNumChannels() > 0);

    if (numSamples <= 0)
        return true;

    HeapBlock<int> tempBuffer;
    // Null-terminated array of per-channel pointers for write().
    HeapBlock<int*> chans (numChannels + 1);
    chans [numChannels] = 0;

    if (isFloatingPoint())
    {
        // No conversion needed: pass the float data through, reinterpreted as
        // int* to satisfy write()'s signature.
        for (int i = (int) numChannels; --i >= 0;)
            chans[i] = reinterpret_cast<int*> (source.getSampleData (i, startSample));
    }
    else
    {
        // Convert each channel from float32 to native int32 in a temp buffer.
        tempBuffer.malloc (numSamples * numChannels);

        for (unsigned int i = 0; i < numChannels; ++i)
        {
            typedef AudioData::Pointer <AudioData::Int32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> DestSampleType;
            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SourceSampleType;

            DestSampleType destData (chans[i] = tempBuffer + i * numSamples);
            SourceSampleType sourceData (source.getSampleData ((int) i, startSample));
            destData.convertSamples (sourceData, numSamples);
        }
    }

    return write ((const int**) chans.getData(), numSamples);
}
// Drives the synth from the incoming note events, copies its (mono) output
// into the stereo buffer, and replaces the midi stream with the re-emitted
// events.
void SoftSynthAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    MidiBuffer processedMidi;
    int time;
    MidiMessage m;

    // Re-emit each note event and update the synth's key state.
    for (MidiBuffer::Iterator i (midiMessages); i.getNextEvent (m, time);)
    {
        if (m.isNoteOn())
        {
            m = MidiMessage::noteOn (m.getChannel(), m.getNoteNumber(), m.getVelocity());
            synth.keyPressed (m.getNoteNumber(), m.getVelocity());
        }
        else if (m.isNoteOff())
        {
            m = MidiMessage::noteOff (m.getChannel(), m.getNoteNumber(), m.getVelocity());
            synth.keyReleased (m.getNoteNumber());
        }

        processedMidi.addEvent (m, time);
    }

    const int numSamples = buffer.getNumSamples();
    auto synthBuffer = synth.getNextBuffer (numSamples);

    // BUG FIX: the old code unconditionally fetched write pointers for
    // channels 0 and 1, which is undefined behaviour when the host supplies
    // fewer than two channels. Copy the synth output into however many of
    // the first two channels actually exist.
    for (int channel = 0; channel < jmin (2, buffer.getNumChannels()); ++channel)
    {
        float* dest = buffer.getWritePointer (channel);

        for (int i = 0; i < numSamples; ++i)
            dest[i] = synthBuffer[i];
    }

    midiMessages.swapWith (processedMidi);
}
// Runs every sample of every input channel through process(), and pings the
// editor once per 20 buffers.
void DemoJuceFilter::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    if (! isProcessing())
        return;

    for (int channel = 0; channel < getNumInputChannels(); ++channel)
    {
        float *p = buffer.getSampleData (channel);

        // BUG FIX: getNumSamples() is already the per-channel sample count;
        // the old code divided it by getNumChannels(), so in stereo only the
        // first half of every block was processed.
        const int size = buffer.getNumSamples();

        for (int x = 0; x < size; x++)
        {
            currentSample = *(p + x);

            /* conversion */
            *(p + x) = currentConvertedSample = process (*(p + x));

            // Notify the editor early in the block, once per 20 buffers.
            if (x == 2 && channel == 0 && bufferCycle == 0 && editor)
            {
                sendChangeMessage (0);
            }
        }
    }

    bufferCycle++;

    if (bufferCycle == 20)
        bufferCycle = 0;
}
//============================================================================== OggReader (InputStream* const inp) : AudioFormatReader (inp, TRANS (oggFormatName)), reservoir (2, 4096), reservoirStart (0), samplesInReservoir (0) { using namespace OggVorbisNamespace; sampleRate = 0; usesFloatingPointData = true; callbacks.read_func = &oggReadCallback; callbacks.seek_func = &oggSeekCallback; callbacks.close_func = &oggCloseCallback; callbacks.tell_func = &oggTellCallback; const int err = ov_open_callbacks (input, &ovFile, 0, 0, callbacks); if (err == 0) { vorbis_info* info = ov_info (&ovFile, -1); lengthInSamples = (uint32) ov_pcm_total (&ovFile, -1); numChannels = info->channels; bitsPerSample = 16; sampleRate = info->rate; reservoir.setSize (numChannels, (int) jmin (lengthInSamples, (int64) reservoir.getNumSamples())); } }
// Applies the spherical-harmonic transformation matrix to the input, ramping
// between the previous and current coefficients to avoid zipper noise.
void Ambix_warpAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    _Sh_transf = Sh_transf; // buffer old values
    calcParams(); // calc new transformation matrix

    const int numSamples = buffer.getNumSamples();

    output_buffer.setSize (buffer.getNumChannels(), numSamples);
    output_buffer.clear();

    const int numOut = std::min (AMBI_CHANNELS, getNumOutputChannels());
    const int numIn  = std::min (AMBI_CHANNELS, getNumInputChannels());

    for (int out = 0; out < numOut; out++)
    {
        for (int in = 0; in < numIn; in++)
        {
            const float oldGain = (float) _Sh_transf (out, in);
            const float newGain = (float) Sh_transf (out, in);

            // Skip coefficients that are zero now and were zero before.
            if (oldGain == 0.f && newGain == 0.f)
                continue;

            if (oldGain == newGain)
                output_buffer.addFrom (out, 0, buffer, in, 0, numSamples, newGain);
            else
                output_buffer.addFromWithRamp (out, 0, buffer.getReadPointer (in), numSamples, oldGain, newGain);
        }
    }

    buffer = output_buffer;
}
// Subtracts the selected reference channel from every channel in the buffer
// (common-reference re-referencing). Does nothing until a reference is set.
void ReferenceNode::process (AudioSampleBuffer& buffer, MidiBuffer& midiMessages, int& nSamples)
{
    // No reference channel selected yet — leave the data untouched.
    if (referenceChannel < 0)
        return;

    // Stash an inverted copy of the reference signal...
    referenceBuffer.clear (0, 0, nSamples);
    referenceBuffer.addFrom (0,                 // destChannel
                             0,                 // destStartSample
                             buffer,            // source
                             referenceChannel,  // sourceChannel
                             0,                 // sourceStartSample
                             nSamples,          // numSamples
                             -1.0f);            // gain: invert the reference

    // ...then add that negated copy to every channel (i.e. subtract it).
    for (int channel = 0; channel < buffer.getNumChannels(); ++channel)
        buffer.addFrom (channel,          // destChannel
                        0,                // destStartSample
                        referenceBuffer,  // source
                        0,                // sourceChannel
                        0,                // sourceStartSample
                        nSamples,         // numSamples
                        1.0f);            // unity gain
}
// Drives the play head and sequencer for their side effects, then fills every
// output channel with a tiny constant instead of emitting the sequencer audio.
void PluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    // Let the custom play head see the block first so transport info is fresh.
    if (customPlayHead != 0)
        customPlayHead->processBlock (buffer, midiMessages);

    // Record the current time, falling back to defaults if the host has none.
    AudioPlayHead::CurrentPositionInfo newTime;

    if (getPlayHead() != 0 && getPlayHead()->getCurrentPosition (newTime))
        lastPosInfo = newTime;
    else
        lastPosInfo.resetToDefault();

    // Run the sequencer (its audio output is discarded below).
    if (sequencer != 0)
        sequencer->processBlock (buffer, midiMessages);

    buffer.clear();

    // Fill every output channel with a small constant rather than true
    // silence. FIX: use a float literal (0.001f) — the old double literal
    // forced a double->float conversion on every sample (same value).
    for (int i = 0; i < getNumOutputChannels(); ++i)
    {
        float* samples = buffer.getSampleData (i);

        for (int j = 0; j < buffer.getNumSamples(); j++)
            samples[j] = 0.001f;
    }
}
// Renders this voice into the buffer: a wavetable oscillator with sine
// amplitude modulation, fading out exponentially after key release.
void renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples)
{
    if (playing != notPlaying)
    {
        // Overall gain for this block; when the key has been released it is
        // scaled by the tail-off value as of the start of the block.
        const double levelMult = level * (playing == keyReleased ? tailOff : 1.0);

        for (int sample = startSample; sample < startSample + numSamples; sample++)
        {
            // Modulator oscillator: produces +/-50% amplitude modulation.
            const double o1 = (sin (o1_angle * 2.0 * double_Pi));
            const double amplitude = 1.0 + (0.5 * o1);

            const float currentSampleVal = (float) (wavetable.lookup (currentAngle) * levelMult * amplitude);

            // Mix the same value into every output channel.
            for (int i = outputBuffer.getNumChannels(); --i >= 0;)
            {
                *outputBuffer.getSampleData (i, sample) += currentSampleVal;
            }

            currentAngle = angleWrap (currentAngle + angleDelta);
            o1_angle = angleWrap (o1_angle + o1_angleDelta);

            if (playing == keyReleased)
            {
                // Exponential release; stop the voice once inaudible.
                tailOff *= 0.99;

                if (tailOff <= 0.005)
                {
                    clearCurrentNote();
                    playing = notPlaying;
                    angleDelta = 0.0;
                    break;
                }
            }
        }
    }
}
// Renders this voice into the buffer with an LFO-driven tremolo; the first
// branch handles the post-release tail-off, the second sustained notes.
void renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples)
{
    if (angleDelta != 0.0)
    {
        if (tailOff > 0)
        {
            SynthesiserSound *sound = getCurrentlyPlayingSound();

            while (--numSamples >= 0)
            {
                // BUG FIX: currentSample was read uninitialized when passed
                // to calculateSample() — initialize it to silence.
                float currentSample = 0.0f;

                // LFO used to create tremolo (scaled by the release tail-off)
                const float lfoCurrentSample = (float) ((4.0 * sin (lfoCurrentAngle)) * level * tailOff) + 0.7;

                currentSample = calculateSample (sound, currentSample, lfoCurrentSample, true);

                // Mix the same value into every output channel.
                for (int i = outputBuffer.getNumChannels(); --i >= 0;)
                    *outputBuffer.getSampleData (i, startSample) += (currentSample);

                currentAngle += angleDelta;
                lfoCurrentAngle += lfoAngleDelta;
                ++startSample;

                // Exponential release; kill the voice once inaudible.
                tailOff *= 0.99;

                if (tailOff <= 0.005)
                {
                    clearCurrentNote();
                    angleDelta = 0.0;
                    lfoAngleDelta = 0.0;
                    break;
                }
            }
        }
        else
        {
            SynthesiserSound *sound = getCurrentlyPlayingSound();

            while (--numSamples >= 0)
            {
                // BUG FIX: initialize before use (same as the branch above).
                float currentSample = 0.0f;

                // LFO used to create tremolo
                const float lfoCurrentSample = (float) ((4.0 * sin (lfoCurrentAngle)) * level) + 0.7;

                currentSample = calculateSample (sound, currentSample, lfoCurrentSample, false);

                for (int i = outputBuffer.getNumChannels(); --i >= 0;)
                    *outputBuffer.getSampleData (i, startSample) += (currentSample);

                currentAngle += angleDelta;
                lfoCurrentAngle += lfoAngleDelta;
                ++startSample;
            }
        }
    }
}
// Renders the SFZ synth into a freshly-cleared buffer, folding any on-screen
// keyboard activity into the midi stream first.
void sfzero::SFZeroAudioProcessor::processBlock (AudioSampleBuffer &buffer, MidiBuffer &midiMessages)
{
    const int numSamples = buffer.getNumSamples();

    // Merge on-screen keyboard events with the incoming midi.
    keyboardState.processNextMidiBuffer (midiMessages, 0, numSamples, true);

    // Start from silence, then render the voices.
    buffer.clear();
    synth.renderNextBlock (buffer, midiMessages, 0, numSamples);
}