// Applies the bass-boost filter to one period of audio, honouring
// sample-exact automation of the gain control.
// buf    : interleaved stereo period buffer, processed in place
// frames : number of frames in this period
// Returns false when the effect is disabled or asleep; otherwise mixes
// the processed signal with the dry signal and returns the running state.
bool BassBoosterEffect::processAudioBuffer( sampleFrame* buf, const fpp_t frames )
{
	if( !isEnabled() || !isRunning () )
	{
		return( false );
	}
	// check out changed controls
	if( m_frequencyChangeNeeded || m_bbControls.m_freqModel.isValueChanged() )
	{
		changeFrequency();
		m_frequencyChangeNeeded = false;
	}
	if( m_bbControls.m_gainModel.isValueChanged() )
	{
		changeGain();
	}
	if( m_bbControls.m_ratioModel.isValueChanged() )
	{
		changeRatio();
	}

	float gain = m_bbControls.m_gainModel.value();
	ValueBuffer *gainBuffer = m_bbControls.m_gainModel.valueBuffer();
	// stride 1 walks the sample-exact buffer; stride 0 pins the scalar
	int gainInc = gainBuffer ? 1 : 0;
	float *gainPtr = gainBuffer ? &( gainBuffer->values()[ 0 ] ) : &gain;

	double outSum = 0.0;
	const float d = dryLevel();
	const float w = wetLevel();
	if( gainBuffer )
	{
		//process period using sample exact data
		for( fpp_t f = 0; f < frames; ++f )
		{
			// gain may change every frame, so push it to both channels here
			m_bbFX.leftFX().setGain( *gainPtr );
			m_bbFX.rightFX().setGain( *gainPtr );
			// accumulate input power (pre-processing) for the sleep gate
			outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1];

			sample_t s[2] = { buf[f][0], buf[f][1] };
			m_bbFX.nextSample( s[0], s[1] );
			// dry/wet mix back into the period buffer
			buf[f][0] = d * buf[f][0] + w * s[0];
			buf[f][1] = d * buf[f][1] + w * s[1];
			gainPtr += gainInc;
		}
	}
	else
	{
		//process period without sample exact data
		// gain is constant this period: set it once, outside the loop
		m_bbFX.leftFX().setGain( *gainPtr );
		m_bbFX.rightFX().setGain( *gainPtr );
		for( fpp_t f = 0; f < frames; ++f )
		{
			outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1];
			sample_t s[2] = { buf[f][0], buf[f][1] };
			m_bbFX.nextSample( s[0], s[1] );
			buf[f][0] = d * buf[f][0] + w * s[0];
			buf[f][1] = d * buf[f][1] + w * s[1];
		}
	}
	// average signal power drives the effect's auto-sleep gate
	checkGate( outSum / frames );

	return isRunning();
}
// Mixes all play-handle buffers into this port's period buffer, applies
// volume/panning (sample-exact where automation buffers are available),
// runs the effect chain and forwards the result to the FX mixer.
void AudioPort::doProcessing()
{
	if( m_mutedModel && m_mutedModel->value() )
	{
		return;
	}

	const fpp_t fpp = Engine::mixer()->framesPerPeriod();

	m_portBuffer = BufferManager::acquire(); // get buffer for processing
	Engine::mixer()->clearAudioBuffer( m_portBuffer, fpp ); // clear the audioport buffer so we can use it

	//qDebug( "Playhandles: %d", m_playHandles.size() );
	foreach( PlayHandle * ph, m_playHandles ) // now we mix all playhandle buffers into the audioport buffer
	{
		if( ph->buffer() )
		{
			if( ph->usesBuffer() )
			{
				m_bufferUsage = true;
				MixHelpers::add( m_portBuffer, ph->buffer(), fpp );
			}
			ph->releaseBuffer(); // gets rid of playhandle's buffer and sets
			// pointer to null, so if it doesn't get re-acquired we know to skip it next time
		}
	}

	if( m_bufferUsage )
	{
		// handle volume and panning
		// has both vol and pan models
		if( m_volumeModel && m_panningModel )
		{
			ValueBuffer * volBuf = m_volumeModel->valueBuffer();
			ValueBuffer * panBuf = m_panningModel->valueBuffer();

			// both vol and pan have s.ex.data:
			if( volBuf && panBuf )
			{
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					// models are percent values, hence the 0.01 scale
					float v = volBuf->values()[ f ] * 0.01f;
					float p = panBuf->values()[ f ] * 0.01f;
					// panning attenuates only the opposite channel
					m_portBuffer[f][0] *= ( p <= 0 ? 1.0f : 1.0f - p ) * v;
					m_portBuffer[f][1] *= ( p >= 0 ? 1.0f : 1.0f + p ) * v;
				}
			}

			// only vol has s.ex.data:
			else if( volBuf )
			{
				// pan is constant this period: precompute channel factors
				float p = m_panningModel->value() * 0.01f;
				float l = ( p <= 0 ? 1.0f : 1.0f - p );
				float r = ( p >= 0 ? 1.0f : 1.0f + p );
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					float v = volBuf->values()[ f ] * 0.01f;
					m_portBuffer[f][0] *= v * l;
					m_portBuffer[f][1] *= v * r;
				}
			}

			// only pan has s.ex.data:
			else if( panBuf )
			{
				float v = m_volumeModel->value() * 0.01f;
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					float p = panBuf->values()[ f ] * 0.01f;
					m_portBuffer[f][0] *= ( p <= 0 ? 1.0f : 1.0f - p ) * v;
					m_portBuffer[f][1] *= ( p >= 0 ? 1.0f : 1.0f + p ) * v;
				}
			}

			// neither has s.ex.data:
			else
			{
				float p = m_panningModel->value() * 0.01f;
				float v = m_volumeModel->value() * 0.01f;
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					m_portBuffer[f][0] *= ( p <= 0 ? 1.0f : 1.0f - p ) * v;
					m_portBuffer[f][1] *= ( p >= 0 ? 1.0f : 1.0f + p ) * v;
				}
			}
		}

		// has vol model only
		else if( m_volumeModel )
		{
			ValueBuffer * volBuf = m_volumeModel->valueBuffer();

			if( volBuf )
			{
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					float v = volBuf->values()[ f ] * 0.01f;
					m_portBuffer[f][0] *= v;
					m_portBuffer[f][1] *= v;
				}
			}
			else
			{
				float v = m_volumeModel->value() * 0.01f;
				for( f_cnt_t f = 0; f < fpp; ++f )
				{
					m_portBuffer[f][0] *= v;
					m_portBuffer[f][1] *= v;
				}
			}
		}
	}
	// as of now there's no situation where we only have panning model but no volume model
	// if we have neither, we don't have to do anything here - just pass the audio as is

	// handle effects
	const bool me = processEffects();
	if( me || m_bufferUsage )
	{
		Engine::fxMixer()->mixToChannel( m_portBuffer, m_nextFxChannel ); // send output to fx mixer
		// TODO: improve the flow here - convert to pull model
		m_bufferUsage = false;
	}

	BufferManager::release( m_portBuffer ); // release buffer, we don't need it anymore
}
bool DelayEffect::processAudioBuffer( sampleFrame* buf, const fpp_t frames ) { if( !isEnabled() || !isRunning () ) { return( false ); } double outSum = 0.0; const float sr = Engine::mixer()->processingSampleRate(); const float d = dryLevel(); const float w = wetLevel(); sample_t dryS[2]; float lPeak = 0.0; float rPeak = 0.0; float length = m_delayControls.m_delayTimeModel.value(); float amplitude = m_delayControls.m_lfoAmountModel.value() * sr; float lfoTime = 1.0 / m_delayControls.m_lfoTimeModel.value(); float feedback = m_delayControls.m_feedbackModel.value(); ValueBuffer *lengthBuffer = m_delayControls.m_delayTimeModel.valueBuffer(); ValueBuffer *feedbackBuffer = m_delayControls.m_feedbackModel.valueBuffer(); ValueBuffer *lfoTimeBuffer = m_delayControls.m_lfoTimeModel.valueBuffer(); ValueBuffer *lfoAmountBuffer = m_delayControls.m_lfoAmountModel.valueBuffer(); int lengthInc = lengthBuffer ? 1 : 0; int amplitudeInc = lfoAmountBuffer ? 1 : 0; int lfoTimeInc = lfoTimeBuffer ? 1 : 0; int feedbackInc = feedbackBuffer ? 1 : 0; float *lengthPtr = lengthBuffer ? &( lengthBuffer->values()[ 0 ] ) : &length; float *amplitudePtr = lfoAmountBuffer ? &( lfoAmountBuffer->values()[ 0 ] ) : &litude; float *lfoTimePtr = lfoTimeBuffer ? &( lfoTimeBuffer->values()[ 0 ] ) : &lfoTime; float *feedbackPtr = feedbackBuffer ? &( feedbackBuffer->values()[ 0 ] ) : &feedback; if( m_delayControls.m_outGainModel.isValueChanged() ) { m_outGain = dbfsToAmp( m_delayControls.m_outGainModel.value() ); } int sampleLength; for( fpp_t f = 0; f < frames; ++f ) { dryS[0] = buf[f][0]; dryS[1] = buf[f][1]; m_delay->setFeedback( *feedbackPtr ); m_lfo->setFrequency( *lfoTimePtr ); sampleLength = *lengthPtr * Engine::mixer()->processingSampleRate(); m_currentLength = sampleLength; m_delay->setLength( m_currentLength + ( *amplitudePtr * ( float )m_lfo->tick() ) ); m_delay->tick( buf[f] ); buf[f][0] *= m_outGain; buf[f][1] *= m_outGain; lPeak = buf[f][0] > lPeak ? 
buf[f][0] : lPeak; rPeak = buf[f][1] > rPeak ? buf[f][1] : rPeak; buf[f][0] = ( d * dryS[0] ) + ( w * buf[f][0] ); buf[f][1] = ( d * dryS[1] ) + ( w * buf[f][1] ); outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1]; lengthPtr += lengthInc; amplitudePtr += amplitudeInc; lfoTimePtr += lfoTimeInc; feedbackPtr += feedbackInc; } checkGate( outSum / frames ); m_delayControls.m_outPeakL = lPeak; m_delayControls.m_outPeakR = rPeak; return isRunning(); }
// Renders one period of the master output: schedules all FX channels on
// the mixer worker threads (respecting send dependencies), applies the
// master volume fader (sample-exact if automated), adds the result into
// _buf, then clears and resets every channel for the next period.
void FxMixer::masterMix( sampleFrame * _buf )
{
	const int fpp = Engine::mixer()->framesPerPeriod();
	// tryLock: if the send graph is being edited right now, skip this
	// period entirely rather than blocking the audio thread
	if( m_sendsMutex.tryLock() )
	{
		// add the channels that have no dependencies (no incoming senders, ie. no receives)
		// to the jobqueue. The channels that have receives get added when their senders get processed, which
		// is detected by dependency counting.
		// also instantly add all muted channels as they don't need to care about their senders, and can just increment the deps of
		// their recipients right away.
		MixerWorkerThread::resetJobQueue( MixerWorkerThread::JobQueue::Dynamic );
		for( FxChannel * ch : m_fxChannels )
		{
			// snapshot the mute state once per period
			ch->m_muted = ch->m_muteModel.value();
			if( ch->m_muted ) // instantly "process" muted channels
			{
				ch->processed();
				ch->done();
			}
			else if( ch->m_receives.size() == 0 )
			{
				ch->m_queued = true;
				MixerWorkerThread::addJob( ch );
			}
		}
		// channel 0 is the final sink; once it is Done the graph is fully
		// processed for this period
		while( m_fxChannels[0]->state() != ThreadableJob::Done )
		{
			MixerWorkerThread::startAndWaitForJobs();
		}
		m_sendsMutex.unlock();
	}

	// handle sample-exact data in master volume fader
	ValueBuffer * volBuf = m_fxChannels[0]->m_volumeModel.valueBuffer();
	if( volBuf )
	{
		for( int f = 0; f < fpp; f++ )
		{
			m_fxChannels[0]->m_buffer[f][0] *= volBuf->values()[f];
			m_fxChannels[0]->m_buffer[f][1] *= volBuf->values()[f];
		}
	}
	// when sample-exact volume was already baked in above, scale by 1.0
	const float v = volBuf ? 1.0f : m_fxChannels[0]->m_volumeModel.value();
	MixHelpers::addSanitizedMultiplied( _buf, m_fxChannels[0]->m_buffer, v, fpp );

	// clear all channel buffers and
	// reset channel process state
	for( int i = 0; i < numChannels(); ++i)
	{
		BufferManager::clear( m_fxChannels[i]->m_buffer, Engine::mixer()->framesPerPeriod() );
		m_fxChannels[i]->reset();
		m_fxChannels[i]->m_queued = false;
		// also reset hasInput
		m_fxChannels[i]->m_hasInput = false;
		m_fxChannels[i]->m_dependenciesMet = 0;
	}
}
// Two-stage amplifier: a volume/pan stage followed by independent
// left/right gain, each control optionally driven by sample-exact
// automation data. Processes the period buffer in place and mixes the
// result with the dry signal.
bool AmplifierEffect::processAudioBuffer( sampleFrame* buf, const fpp_t frames )
{
	if( !isEnabled() || !isRunning () )
	{
		return( false );
	}

	double outSum = 0.0;
	const float d = dryLevel();
	const float w = wetLevel();

	// automation buffers; NULL means "use the knob's static value"
	ValueBuffer * volBuf = m_ampControls.m_volumeModel.valueBuffer();
	ValueBuffer * panBuf = m_ampControls.m_panModel.valueBuffer();
	ValueBuffer * leftBuf = m_ampControls.m_leftModel.valueBuffer();
	ValueBuffer * rightBuf = m_ampControls.m_rightModel.valueBuffer();

	for( fpp_t f = 0; f < frames; ++f )
	{
		sample_t wet[2] = { buf[f][0], buf[f][1] };

		// first stage: volume knob (percent -> factor)
		const float vol = ( volBuf ? volBuf->values()[ f ] : m_ampControls.m_volumeModel.value() ) * 0.01f;
		wet[0] *= vol;
		wet[1] *= vol;

		// convert pan values to left/right values
		const float pan = panBuf ? panBuf->values()[ f ] : m_ampControls.m_panModel.value();
		const float left1 = pan <= 0 ? 1.0 : 1.0 - pan * 0.01f;
		const float right1 = pan >= 0 ? 1.0 : 1.0 + pan * 0.01f;

		// second stage amplification
		const float left2 = leftBuf ? leftBuf->values()[ f ] : m_ampControls.m_leftModel.value();
		const float right2 = rightBuf ? rightBuf->values()[ f ] : m_ampControls.m_rightModel.value();
		wet[0] *= left1 * left2 * 0.01;
		wet[1] *= right1 * right2 * 0.01;

		// fold the wet signal back in and accumulate output power for
		// the auto-sleep gate
		buf[f][0] = d * buf[f][0] + w * wet[0];
		buf[f][1] = d * buf[f][1] + w * wet[1];
		outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1];
	}

	checkGate( outSum / frames );

	return isRunning();
}
bool EqEffect::processAudioBuffer( sampleFrame *buf, const fpp_t frames ) { // setup sample exact controls float hpRes = m_eqControls.m_hpResModel.value(); float lowShelfRes = m_eqControls.m_lowShelfResModel.value(); float para1Bw = m_eqControls.m_para1BwModel.value(); float para2Bw = m_eqControls.m_para2BwModel.value(); float para3Bw = m_eqControls.m_para3BwModel.value(); float para4Bw = m_eqControls.m_para4BwModel.value(); float highShelfRes = m_eqControls.m_highShelfResModel.value(); float lpRes = m_eqControls.m_lpResModel.value(); float hpFreq = m_eqControls.m_hpFeqModel.value(); float lowShelfFreq = m_eqControls.m_lowShelfFreqModel.value(); float para1Freq = m_eqControls.m_para1FreqModel.value(); float para2Freq = m_eqControls.m_para2FreqModel.value(); float para3Freq = m_eqControls.m_para3FreqModel.value(); float para4Freq = m_eqControls.m_para4FreqModel.value(); float highShelfFreq = m_eqControls.m_highShelfFreqModel.value(); float lpFreq = m_eqControls.m_lpFreqModel.value(); ValueBuffer *hpResBuffer = m_eqControls.m_hpResModel.valueBuffer(); ValueBuffer *lowShelfResBuffer = m_eqControls.m_lowShelfResModel.valueBuffer(); ValueBuffer *para1BwBuffer = m_eqControls.m_para1BwModel.valueBuffer(); ValueBuffer *para2BwBuffer = m_eqControls.m_para2BwModel.valueBuffer(); ValueBuffer *para3BwBuffer = m_eqControls.m_para3BwModel.valueBuffer(); ValueBuffer *para4BwBuffer = m_eqControls.m_para4BwModel.valueBuffer(); ValueBuffer *highShelfResBuffer = m_eqControls.m_highShelfResModel.valueBuffer(); ValueBuffer *lpResBuffer = m_eqControls.m_lpResModel.valueBuffer(); ValueBuffer *hpFreqBuffer = m_eqControls.m_hpFeqModel.valueBuffer(); ValueBuffer *lowShelfFreqBuffer = m_eqControls.m_lowShelfFreqModel.valueBuffer(); ValueBuffer *para1FreqBuffer = m_eqControls.m_para1FreqModel.valueBuffer(); ValueBuffer *para2FreqBuffer = m_eqControls.m_para2FreqModel.valueBuffer(); ValueBuffer *para3FreqBuffer = m_eqControls.m_para3FreqModel.valueBuffer(); ValueBuffer *para4FreqBuffer = 
m_eqControls.m_para4FreqModel.valueBuffer(); ValueBuffer *highShelfFreqBuffer = m_eqControls.m_highShelfFreqModel.valueBuffer(); ValueBuffer *lpFreqBuffer = m_eqControls.m_lpFreqModel.valueBuffer(); int hpResInc = hpResBuffer ? 1 : 0; int lowShelfResInc = lowShelfResBuffer ? 1 : 0; int para1BwInc = para1BwBuffer ? 1 : 0; int para2BwInc = para2BwBuffer ? 1 : 0; int para3BwInc = para3BwBuffer ? 1 : 0; int para4BwInc = para4BwBuffer ? 1 : 0; int highShelfResInc = highShelfResBuffer ? 1 : 0; int lpResInc = lpResBuffer ? 1 : 0; int hpFreqInc = hpFreqBuffer ? 1 : 0; int lowShelfFreqInc = lowShelfFreqBuffer ? 1 : 0; int para1FreqInc = para1FreqBuffer ? 1 : 0; int para2FreqInc = para2FreqBuffer ? 1 : 0; int para3FreqInc = para3FreqBuffer ? 1 : 0; int para4FreqInc = para4FreqBuffer ? 1 : 0; int highShelfFreqInc = highShelfFreqBuffer ? 1 : 0; int lpFreqInc = lpFreqBuffer ? 1 : 0; float *hpResPtr = hpResBuffer ? &( hpResBuffer->values()[ 0 ] ) : &hpRes; float *lowShelfResPtr = lowShelfResBuffer ? &( lowShelfResBuffer->values()[ 0 ] ) : &lowShelfRes; float *para1BwPtr = para1BwBuffer ? &( para1BwBuffer->values()[ 0 ] ) : ¶1Bw; float *para2BwPtr = para2BwBuffer ? &( para2BwBuffer->values()[ 0 ] ) : ¶2Bw; float *para3BwPtr = para3BwBuffer ? &( para3BwBuffer->values()[ 0 ] ) : ¶3Bw; float *para4BwPtr = para4BwBuffer ? &( para4BwBuffer->values()[ 0 ] ) : ¶4Bw; float *highShelfResPtr = highShelfResBuffer ? &( highShelfResBuffer->values()[ 0 ] ) : &highShelfRes; float *lpResPtr = lpResBuffer ? &( lpResBuffer->values()[ 0 ] ) : &lpRes; float *hpFreqPtr = hpFreqBuffer ? &( hpFreqBuffer->values()[ 0 ] ) : &hpFreq; float *lowShelfFreqPtr = lowShelfFreqBuffer ? &( lowShelfFreqBuffer->values()[ 0 ] ) : &lowShelfFreq; float *para1FreqPtr = para1FreqBuffer ? &(para1FreqBuffer->values()[ 0 ] ) : ¶1Freq; float *para2FreqPtr = para2FreqBuffer ? &(para2FreqBuffer->values()[ 0 ] ) : ¶2Freq; float *para3FreqPtr = para3FreqBuffer ? 
&(para3FreqBuffer->values()[ 0 ] ) : ¶3Freq; float *para4FreqPtr = para4FreqBuffer ? &(para4FreqBuffer->values()[ 0 ] ) : ¶4Freq; float *hightShelfFreqPtr = highShelfFreqBuffer ? &(highShelfFreqBuffer->values()[ 0 ] ) : &highShelfFreq; float *lpFreqPtr = lpFreqBuffer ? &(lpFreqBuffer ->values()[ 0 ] ) : &lpFreq; bool hpActive = m_eqControls.m_hpActiveModel.value(); bool hp24Active = m_eqControls.m_hp24Model.value(); bool hp48Active = m_eqControls.m_hp48Model.value(); bool lowShelfActive = m_eqControls.m_lowShelfActiveModel.value(); bool para1Active = m_eqControls.m_para1ActiveModel.value(); bool para2Active = m_eqControls.m_para2ActiveModel.value(); bool para3Active = m_eqControls.m_para3ActiveModel.value(); bool para4Active = m_eqControls.m_para4ActiveModel.value(); bool highShelfActive = m_eqControls.m_highShelfActiveModel.value(); bool lpActive = m_eqControls.m_lpActiveModel.value(); bool lp24Active = m_eqControls.m_lp24Model.value(); bool lp48Active = m_eqControls.m_lp48Model.value(); float lowShelfGain = m_eqControls.m_lowShelfGainModel.value(); float para1Gain = m_eqControls.m_para1GainModel.value(); float para2Gain = m_eqControls.m_para2GainModel.value(); float para3Gain = m_eqControls.m_para3GainModel.value(); float para4Gain = m_eqControls.m_para4GainModel.value(); float highShelfGain = m_eqControls.m_highShelfGainModel.value(); if( !isEnabled() || !isRunning () ) { return( false ); } if( m_eqControls.m_outGainModel.isValueChanged() ) { m_outGain = dbfsToAmp(m_eqControls.m_outGainModel.value()); } if( m_eqControls.m_inGainModel.isValueChanged() ) { m_inGain = dbfsToAmp(m_eqControls.m_inGainModel.value()); } m_eqControls.m_inProgress = true; double outSum = 0.0; for( fpp_t f = 0; f < frames; ++f ) { outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1]; } const float outGain = m_outGain; const int sampleRate = Engine::mixer()->processingSampleRate(); sampleFrame m_inPeak = { 0, 0 }; if(m_eqControls.m_analyseInModel.value( true ) && outSum > 0 ) { 
m_eqControls.m_inFftBands.analyze( buf, frames ); } else { m_eqControls.m_inFftBands.clear(); } gain( buf, frames, m_inGain, &m_inPeak ); m_eqControls.m_inPeakL = m_eqControls.m_inPeakL < m_inPeak[0] ? m_inPeak[0] : m_eqControls.m_inPeakL; m_eqControls.m_inPeakR = m_eqControls.m_inPeakR < m_inPeak[1] ? m_inPeak[1] : m_eqControls.m_inPeakR; for( fpp_t f = 0; f < frames; f++) { if( hpActive ) { m_hp12.setParameters( sampleRate, *hpFreqPtr, *hpResPtr, 1 ); buf[f][0] = m_hp12.update( buf[f][0], 0 ); buf[f][1] = m_hp12.update( buf[f][1], 1 ); if( hp24Active || hp48Active ) { m_hp24.setParameters( sampleRate, *hpFreqPtr, *hpResPtr, 1 ); buf[f][0] = m_hp24.update( buf[f][0], 0 ); buf[f][1] = m_hp24.update( buf[f][1], 1 ); } if( hp48Active ) { m_hp480.setParameters( sampleRate, *hpFreqPtr, *hpResPtr, 1 ); buf[f][0] = m_hp480.update( buf[f][0], 0 ); buf[f][1] = m_hp480.update( buf[f][1], 1 ); m_hp481.setParameters( sampleRate, *hpFreqPtr, *hpResPtr, 1 ); buf[f][0] = m_hp481.update( buf[f][0], 0 ); buf[f][1] = m_hp481.update( buf[f][1], 1 ); } } if( lowShelfActive ) { m_lowShelf.setParameters( sampleRate, *lowShelfFreqPtr, *lowShelfResPtr, lowShelfGain ); buf[f][0] = m_lowShelf.update( buf[f][0], 0 ); buf[f][1] = m_lowShelf.update( buf[f][1], 1 ); } if( para1Active ) { m_para1.setParameters( sampleRate, *para1FreqPtr, *para1BwPtr, para1Gain ); buf[f][0] = m_para1.update( buf[f][0], 0 ); buf[f][1] = m_para1.update( buf[f][1], 1 ); } if( para2Active ) { m_para2.setParameters( sampleRate, *para2FreqPtr, *para2BwPtr, para2Gain ); buf[f][0] = m_para2.update( buf[f][0], 0 ); buf[f][1] = m_para2.update( buf[f][1], 1 ); } if( para3Active ) { m_para3.setParameters( sampleRate, *para3FreqPtr, *para3BwPtr, para3Gain ); buf[f][0] = m_para3.update( buf[f][0], 0 ); buf[f][1] = m_para3.update( buf[f][1], 1 ); } if( para4Active ) { m_para4.setParameters( sampleRate, *para4FreqPtr, *para4BwPtr, para4Gain ); buf[f][0] = m_para4.update( buf[f][0], 0 ); buf[f][1] = m_para4.update( buf[f][1], 1 
); } if( highShelfActive ) { m_highShelf.setParameters( sampleRate, *hightShelfFreqPtr, *highShelfResPtr, highShelfGain ); buf[f][0] = m_highShelf.update( buf[f][0], 0 ); buf[f][1] = m_highShelf.update( buf[f][1], 1 ); } if( lpActive ){ m_lp12.setParameters( sampleRate, *lpFreqPtr, *lpResPtr, 1 ); buf[f][0] = m_lp12.update( buf[f][0], 0 ); buf[f][1] = m_lp12.update( buf[f][1], 1 ); if( lp24Active || lp48Active ) { m_lp24.setParameters( sampleRate, *lpFreqPtr, *lpResPtr, 1 ); buf[f][0] = m_lp24.update( buf[f][0], 0 ); buf[f][1] = m_lp24.update( buf[f][1], 1 ); } if( lp48Active ) { m_lp480.setParameters( sampleRate, *lpFreqPtr, *lpResPtr, 1 ); buf[f][0] = m_lp480.update( buf[f][0], 0 ); buf[f][1] = m_lp480.update( buf[f][1], 1 ); m_lp481.setParameters( sampleRate, *lpFreqPtr, *lpResPtr, 1 ); buf[f][0] = m_lp481.update( buf[f][0], 0 ); buf[f][1] = m_lp481.update( buf[f][1], 1 ); } } //increment pointers if needed hpResPtr += hpResInc; lowShelfResPtr += lowShelfResInc; para1BwPtr += para1BwInc; para2BwPtr += para2BwInc; para3BwPtr += para3BwInc; para4BwPtr += para4BwInc; highShelfResPtr += highShelfResInc; lpResPtr += lpResInc; hpFreqPtr += hpFreqInc; lowShelfFreqPtr += lowShelfFreqInc; para1FreqPtr += para1FreqInc; para2FreqPtr += para2FreqInc; para3FreqPtr += para3FreqInc; para4FreqPtr += para4FreqInc; hightShelfFreqPtr += highShelfFreqInc; lpFreqPtr += lpFreqInc; } sampleFrame outPeak = { 0, 0 }; gain( buf, frames, outGain, &outPeak ); m_eqControls.m_outPeakL = m_eqControls.m_outPeakL < outPeak[0] ? outPeak[0] : m_eqControls.m_outPeakL; m_eqControls.m_outPeakR = m_eqControls.m_outPeakR < outPeak[1] ? outPeak[1] : m_eqControls.m_outPeakR; checkGate( outSum / frames ); if(m_eqControls.m_analyseOutModel.value( true ) && outSum > 0 ) { m_eqControls.m_outFftBands.analyze( buf, frames ); setBandPeaks( &m_eqControls.m_outFftBands , ( int )( sampleRate ) ); } else { m_eqControls.m_outFftBands.clear(); } m_eqControls.m_inProgress = false; return isRunning(); }
void InstrumentTrack::processAudioBuffer( sampleFrame* buf, const fpp_t frames, NotePlayHandle* n ) { // we must not play the sound if this InstrumentTrack is muted... if( isMuted() || ( n && n->isBbTrackMuted() ) || ! m_instrument ) { return; } // Test for silent input data if instrument provides a single stream only (i.e. driven by InstrumentPlayHandle) // We could do that in all other cases as well but the overhead for silence test is bigger than // what we potentially save. While playing a note, a NotePlayHandle-driven instrument will produce sound in // 99 of 100 cases so that test would be a waste of time. if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) && MixHelpers::isSilent( buf, frames ) ) { // at least pass one silent buffer to allow if( m_silentBuffersProcessed ) { // skip further processing return; } m_silentBuffersProcessed = true; } else { m_silentBuffersProcessed = false; } // if effects "went to sleep" because there was no input, wake them up // now m_audioPort.effects()->startRunning(); // get volume knob data static const float DefaultVolumeRatio = 1.0f / DefaultVolume; ValueBuffer * volBuf = m_volumeModel.valueBuffer(); float v_scale = volBuf ? 1.0f : getVolume() * DefaultVolumeRatio; // instruments using instrument-play-handles will call this method // without any knowledge about notes, so they pass NULL for n, which // is no problem for us since we just bypass the envelopes+LFOs if( m_instrument->flags().testFlag( Instrument::IsSingleStreamed ) == false && n != NULL ) { const f_cnt_t offset = n->noteOffset(); m_soundShaping.processAudioBuffer( buf + offset, frames - offset, n ); v_scale *= ( (float) n->getVolume() * DefaultVolumeRatio ); } m_audioPort.setNextFxChannel( m_effectChannelModel.value() ); // get panning knob data ValueBuffer * panBuf = m_panningModel.valueBuffer(); int panning = panBuf ? 
0 : m_panningModel.value(); if( n ) { panning += n->getPanning(); panning = tLimit<int>( panning, PanningLeft, PanningRight ); } // apply sample-exact volume/panning data if( volBuf ) { for( f_cnt_t f = 0; f < frames; ++f ) { float v = volBuf->values()[ f ] * 0.01f; buf[f][0] *= v; buf[f][1] *= v; } } if( panBuf ) { for( f_cnt_t f = 0; f < frames; ++f ) { float p = panBuf->values()[ f ] * 0.01f; buf[f][0] *= ( p <= 0 ? 1.0f : 1.0f - p ); buf[f][1] *= ( p >= 0 ? 1.0f : 1.0f + p ); } } engine::mixer()->bufferToPort( buf, frames, panningToVolumeVector( panning, v_scale ), &m_audioPort ); }
// Runs two independently configurable filters in parallel over one
// period, cross-fading their outputs with the mix knob and then mixing
// with the dry signal. All knobs support sample-exact automation.
bool DualFilterEffect::processAudioBuffer( sampleFrame* buf, const fpp_t frames )
{
	if( !isEnabled() || !isRunning () )
	{
		return( false );
	}

	double outSum = 0.0;
	const float d = dryLevel();
	const float w = wetLevel();

	// a changed filter type is applied here; the flag is (re)set so the
	// coefficient recalculation inside the loop is forced once
	if( m_dfControls.m_filter1Model.isValueChanged() || m_filter1changed )
	{
		m_filter1->setFilterType( m_dfControls.m_filter1Model.value() );
		m_filter1changed = true;
	}
	if( m_dfControls.m_filter2Model.isValueChanged() || m_filter2changed )
	{
		m_filter2->setFilterType( m_dfControls.m_filter2Model.value() );
		m_filter2changed = true;
	}

	// scalar fallbacks for controls without automation buffers
	float cut1 = m_dfControls.m_cut1Model.value();
	float res1 = m_dfControls.m_res1Model.value();
	float gain1 = m_dfControls.m_gain1Model.value();
	float cut2 = m_dfControls.m_cut2Model.value();
	float res2 = m_dfControls.m_res2Model.value();
	float gain2 = m_dfControls.m_gain2Model.value();
	float mix = m_dfControls.m_mixModel.value();

	ValueBuffer *cut1Buffer = m_dfControls.m_cut1Model.valueBuffer();
	ValueBuffer *res1Buffer = m_dfControls.m_res1Model.valueBuffer();
	ValueBuffer *gain1Buffer = m_dfControls.m_gain1Model.valueBuffer();
	ValueBuffer *cut2Buffer = m_dfControls.m_cut2Model.valueBuffer();
	ValueBuffer *res2Buffer = m_dfControls.m_res2Model.valueBuffer();
	ValueBuffer *gain2Buffer = m_dfControls.m_gain2Model.valueBuffer();
	ValueBuffer *mixBuffer = m_dfControls.m_mixModel.valueBuffer();

	// stride 1 walks a sample-exact buffer, stride 0 pins the scalar
	int cut1Inc = cut1Buffer ? 1 : 0;
	int res1Inc = res1Buffer ? 1 : 0;
	int gain1Inc = gain1Buffer ? 1 : 0;
	int cut2Inc = cut2Buffer ? 1 : 0;
	int res2Inc = res2Buffer ? 1 : 0;
	int gain2Inc = gain2Buffer ? 1 : 0;
	int mixInc = mixBuffer ? 1 : 0;

	float *cut1Ptr = cut1Buffer ? &( cut1Buffer->values()[ 0 ] ) : &cut1;
	float *res1Ptr = res1Buffer ? &( res1Buffer->values()[ 0 ] ) : &res1;
	float *gain1Ptr = gain1Buffer ? &( gain1Buffer->values()[ 0 ] ) : &gain1;
	float *cut2Ptr = cut2Buffer ? &( cut2Buffer->values()[ 0 ] ) : &cut2;
	float *res2Ptr = res2Buffer ? &( res2Buffer->values()[ 0 ] ) : &res2;
	float *gain2Ptr = gain2Buffer ? &( gain2Buffer->values()[ 0 ] ) : &gain2;
	float *mixPtr = mixBuffer ? &( mixBuffer->values()[ 0 ] ) : &mix;

	const bool enabled1 = m_dfControls.m_enabled1Model.value();
	const bool enabled2 = m_dfControls.m_enabled2Model.value();

	// buffer processing loop
	for( fpp_t f = 0; f < frames; ++f )
	{
		// get mix amounts for wet signals of both filters
		// mix knob in [-1,1] -> crossfade weights mix1 + mix2 == 1
		const float mix2 = ( ( *mixPtr + 1.0f ) * 0.5f );
		const float mix1 = 1.0f - mix2;
		// NOTE(review): these shadow the scalar fallbacks above; the
		// pointers carry the per-frame values, so the shadowing is benign
		const float gain1 = *gain1Ptr * 0.01f;
		const float gain2 = *gain2Ptr * 0.01f;
		sample_t s[2] = { 0.0f, 0.0f }; // mix
		sample_t s1[2] = { buf[f][0], buf[f][1] }; // filter 1
		sample_t s2[2] = { buf[f][0], buf[f][1] }; // filter 2

		// update filter 1
		if( enabled1 )
		{
			//update filter 1 params here
			// recalculate only when necessary: either cut/res is changed, or the changed-flag is set (filter type or samplerate changed)
			if( ( ( *cut1Ptr != m_currentCut1 || *res1Ptr != m_currentRes1 ) ) || m_filter1changed )
			{
				m_filter1->calcFilterCoeffs( *cut1Ptr, *res1Ptr );
				m_filter1changed = false;
				// cache the coefficients' inputs for the change test above
				m_currentCut1 = *cut1Ptr;
				m_currentRes1 = *res1Ptr;
			}

			s1[0] = m_filter1->update( s1[0], 0 );
			s1[1] = m_filter1->update( s1[1], 1 );

			// apply gain
			s1[0] *= gain1;
			s1[1] *= gain1;

			// apply mix
			s[0] += ( s1[0] * mix1 );
			s[1] += ( s1[1] * mix1 );
		}

		// update filter 2
		if( enabled2 )
		{
			//update filter 2 params here
			if( ( ( *cut2Ptr != m_currentCut2 || *res2Ptr != m_currentRes2 ) ) || m_filter2changed )
			{
				m_filter2->calcFilterCoeffs( *cut2Ptr, *res2Ptr );
				m_filter2changed = false;
				m_currentCut2 = *cut2Ptr;
				m_currentRes2 = *res2Ptr;
			}

			s2[0] = m_filter2->update( s2[0], 0 );
			s2[1] = m_filter2->update( s2[1], 1 );

			//apply gain
			s2[0] *= gain2;
			s2[1] *= gain2;

			// apply mix
			s[0] += ( s2[0] * mix2 );
			s[1] += ( s2[1] * mix2 );
		}

		// accumulate input power for the sleep gate before overwriting
		outSum += buf[f][0]*buf[f][0] + buf[f][1]*buf[f][1];

		// do another mix with dry signal
		buf[f][0] = d * buf[f][0] + w * s[0];
		buf[f][1] = d * buf[f][1] + w * s[1];

		//increment pointers
		cut1Ptr += cut1Inc;
		res1Ptr += res1Inc;
		gain1Ptr += gain1Inc;
		cut2Ptr += cut2Inc;
		res2Ptr += res2Inc;
		gain2Ptr += gain2Inc;
		mixPtr += mixInc;
	}

	checkGate( outSum / frames );

	return isRunning();
}
// Returns a buffer holding one value per frame of the current period
// when a sample-exact source is available (direct controller connection,
// a linked model's controller, or interpolation after a value change),
// otherwise NULL — callers then fall back to the static value().
// The result is cached per period via m_lastUpdatedPeriod/s_periodCounter.
ValueBuffer * AutomatableModel::valueBuffer()
{
	// if we've already calculated the valuebuffer this period, return the cached buffer
	if( m_lastUpdatedPeriod == s_periodCounter )
	{
		return m_hasSampleExactData ? &m_valueBuffer : NULL;
	}

	QMutexLocker m( &m_valueBufferMutex );
	// re-check under the lock: another thread may have filled the cache
	// while we waited for the mutex
	if( m_lastUpdatedPeriod == s_periodCounter )
	{
		return m_hasSampleExactData ? &m_valueBuffer : NULL;
	}

	float val = m_value; // make sure our m_value doesn't change midway

	ValueBuffer * vb;
	// source 1: our own controller connection with sample-exact output
	if( m_controllerConnection && m_controllerConnection->getController()->isSampleExact() )
	{
		vb = m_controllerConnection->valueBuffer();
		if( vb )
		{
			float * values = vb->values();
			float * nvalues = m_valueBuffer.values();
			// map controller output into this model's value range
			// (presumably normalized 0..1 — see the Linear mapping)
			switch( m_scaleType )
			{
			case Linear:
				for( int i = 0; i < m_valueBuffer.length(); i++ )
				{
					nvalues[i] = minValue<float>() + ( range() * values[i] );
				}
				break;
			case Logarithmic:
				for( int i = 0; i < m_valueBuffer.length(); i++ )
				{
					nvalues[i] = logToLinearScale( values[i] );
				}
				break;
			default:
				qFatal("AutomatableModel::valueBuffer() "
					"lacks implementation for a scale type");
				break;
			}
			m_lastUpdatedPeriod = s_periodCounter;
			m_hasSampleExactData = true;
			return &m_valueBuffer;
		}
	}

	// source 2: the first linked model, if it has a sample-exact controller
	AutomatableModel* lm = NULL;
	if( m_hasLinkedModels )
	{
		lm = m_linkedModels.first();
	}
	if( lm && lm->controllerConnection() &&
		lm->controllerConnection()->getController()->isSampleExact() )
	{
		vb = lm->valueBuffer();
		float * values = vb->values();
		float * nvalues = m_valueBuffer.values();
		for( int i = 0; i < vb->length(); i++ )
		{
			// clamp/fit the linked model's values into our own range
			nvalues[i] = fittedValue( values[i] );
		}
		m_lastUpdatedPeriod = s_periodCounter;
		m_hasSampleExactData = true;
		return &m_valueBuffer;
	}

	// source 3: the value changed since last period — interpolate from
	// the old to the new value across the period
	if( m_oldValue != val )
	{
		m_valueBuffer.interpolate( m_oldValue, val );
		m_oldValue = val;
		m_lastUpdatedPeriod = s_periodCounter;
		m_hasSampleExactData = true;
		return &m_valueBuffer;
	}

	// if we have no sample-exact source for a ValueBuffer, return NULL to signify that no data is available at the moment
	// in which case the recipient knows to use the static value() instead
	m_lastUpdatedPeriod = s_periodCounter;
	m_hasSampleExactData = false;
	return NULL;
}
// Runs the loaded LADSPA plugin instance(s) on one period of audio.
// When the plugin's maximum supported sample rate is below the engine
// rate the period is downsampled first and upsampled back afterwards.
// Returns false when the plugin is unusable, disabled or asleep.
bool LadspaEffect::processAudioBuffer( sampleFrame * _buf, const fpp_t _frames )
{
	// the mutex guards the plugin handles/ports against concurrent
	// reconfiguration for the whole call
	m_pluginMutex.lock();
	if( !isOkay() || dontRun() || !isRunning() || !isEnabled() )
	{
		m_pluginMutex.unlock();
		return( false );
	}

	int frames = _frames;
	sampleFrame * o_buf = NULL;
	// NOTE(review): variable-length array — not standard C++, relies on a
	// compiler extension (GCC/Clang); consider a fixed/heap buffer
	sampleFrame sBuf [_frames];

	if( m_maxSampleRate < Engine::mixer()->processingSampleRate() )
	{
		// process at the plugin's max rate: work on sBuf, remember the
		// original buffer in o_buf for the upsample-back step at the end
		o_buf = _buf;
		_buf = &sBuf[0];
		sampleDown( o_buf, _buf, m_maxSampleRate );
		frames = _frames * m_maxSampleRate / Engine::mixer()->processingSampleRate();
	}

	// Copy the LMMS audio buffer to the LADSPA input buffer and initialize
	// the control ports.
	ch_cnt_t channel = 0;
	for( ch_cnt_t proc = 0; proc < processorCount(); ++proc )
	{
		for( int port = 0; port < m_portCount; ++port )
		{
			port_desc_t * pp = m_ports.at( proc ).at( port );
			switch( pp->rate )
			{
				case CHANNEL_IN:
					// de-interleave one channel into the port buffer
					for( fpp_t frame = 0; frame < frames; ++frame )
					{
						pp->buffer[frame] = _buf[frame][channel];
					}
					++channel;
					break;
				case AUDIO_RATE_INPUT:
				{
					ValueBuffer * vb = pp->control->valueBuffer();
					if( vb )
					{
						// sample-exact automation maps directly onto the
						// audio-rate control port
						memcpy( pp->buffer, vb->values(), frames * sizeof(float) );
					}
					else
					{
						pp->value = static_cast<LADSPA_Data>(
							pp->control->value() / pp->scale );
						// This only supports control rate ports, so the audio rates are
						// treated as though they were control rate by setting the
						// port buffer to all the same value.
						for( fpp_t frame = 0; frame < frames; ++frame )
						{
							pp->buffer[frame] = pp->value;
						}
					}
					break;
				}
				case CONTROL_RATE_INPUT:
					if( pp->control == NULL )
					{
						break;
					}
					pp->value = static_cast<LADSPA_Data>(
						pp->control->value() / pp->scale );
					pp->buffer[0] = pp->value;
					break;
				case CHANNEL_OUT:
				case AUDIO_RATE_OUTPUT:
				case CONTROL_RATE_OUTPUT:
					break;
				default:
					break;
			}
		}
	}

	// Process the buffers.
	for( ch_cnt_t proc = 0; proc < processorCount(); ++proc )
	{
		(m_descriptor->run)( m_handles[proc], frames );
	}

	// Copy the LADSPA output buffers to the LMMS buffer.
	double out_sum = 0.0;
	channel = 0;
	const float d = dryLevel();
	const float w = wetLevel();
	for( ch_cnt_t proc = 0; proc < processorCount(); ++proc )
	{
		for( int port = 0; port < m_portCount; ++port )
		{
			port_desc_t * pp = m_ports.at( proc ).at( port );
			switch( pp->rate )
			{
				case CHANNEL_IN:
				case AUDIO_RATE_INPUT:
				case CONTROL_RATE_INPUT:
					break;
				case CHANNEL_OUT:
					// dry/wet mix straight into the working buffer, and
					// accumulate output power for the sleep gate
					for( fpp_t frame = 0; frame < frames; ++frame )
					{
						_buf[frame][channel] = d * _buf[frame][channel] + w * pp->buffer[frame];
						out_sum += _buf[frame][channel] * _buf[frame][channel];
					}
					++channel;
					break;
				case AUDIO_RATE_OUTPUT:
				case CONTROL_RATE_OUTPUT:
					break;
				default:
					break;
			}
		}
	}

	if( o_buf != NULL )
	{
		// upsample the processed data back into the caller's buffer
		sampleBack( _buf, o_buf, m_maxSampleRate );
	}

	checkGate( out_sum / frames );

	// capture the running state before releasing the lock
	bool is_running = isRunning();
	m_pluginMutex.unlock();
	return( is_running );
}