Example #1
/*
 * Butterworth Filter for HP or LP
 * out(n) = a1*in(n) + a2*in(n-1) + a3*in(n-2) - b1*out(n-1) - b2*out(n-2)
 * args:
 *   FILT - pointer to fx_filt_data_t
 *   Buff - sample buffer
 *   NumSamples - samples in buffer
 *   channels - number of audio channels
 */
static void Butt(fx_filt_data_t *FILT,
	sample_t *Buff,
	int NumSamples,
	int channels)
{
	int index = 0;

	for (index = 0; index < NumSamples; index = index + channels)
	{
		sample_t out = FILT->a1 * Buff[index] + FILT->a2 * FILT->buff_in1[0] +
			FILT->a3 * FILT->buff_in1[1] - FILT->b1 * FILT->buff_out1[0] -
			FILT->b2 * FILT->buff_out1[1];
		FILT->buff_in1[1] = FILT->buff_in1[0]; //in(n-2) = in(n-1)
		FILT->buff_in1[0] = Buff[index]; // in(n-1) = in
		FILT->buff_out1[1] = FILT->buff_out1[0]; //out(n-2) = out(n-1)
		FILT->buff_out1[0] = out; //out(n-1) = out

		Buff[index] = clip_float(out);
		/*process second channel*/
		if(channels > 1)
		{
			out = FILT->a1 * Buff[index+1] + FILT->a2 * FILT->buff_in2[0] +
				FILT->a3 * FILT->buff_in2[1] - FILT->b1 * FILT->buff_out2[0] -
				FILT->b2 * FILT->buff_out2[1];
			FILT->buff_in2[1] = FILT->buff_in2[0]; //in(n-2) = in(n-1)
			FILT->buff_in2[0] = Buff[index+1]; // in(n-1) = in
			FILT->buff_out2[1] = FILT->buff_out2[0]; //out(n-2) = out(n-1)
			FILT->buff_out2[0] = out; //out(n-1) = out

			Buff[index+1] = clip_float(out);
		}
	}
}
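All of the guvcview examples on this page finish by passing the computed sample through clip_float(), which is not shown in any snippet. A minimal sketch, assuming normalized float samples hard-clamped to [-1.0, 1.0] (the typedef and the clipping range are assumptions, not taken from the project):

/* Hypothetical sketch of the one-argument clip_float() used by the
 * guvcview examples: hard-clamp a normalized sample to [-1.0, 1.0].
 * The project's actual definition may differ. */
typedef float sample_t; /* assumption: samples are normalized floats */

static sample_t clip_float(sample_t in)
{
	if (in > 1.0f)
		return 1.0f;
	if (in < -1.0f)
		return -1.0f;
	return in;
}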
Example #2
/*
 * Echo effect
 * args:
 *   audio_ctx - audio context
 *   data - audio buffer to be processed
 *   delay_ms - echo delay in ms (e.g: 300)
 *   decay - feedback gain (<1)  (e.g: 0.5)
 *
 * asserts:
 *   audio_ctx is not null
 *
 * returns: none
 */
static void audio_fx_echo(audio_context_t *audio_ctx,
	sample_t *data,
	int delay_ms,
	float decay)
{
	/*assertions*/
	assert(audio_ctx != NULL);

	int samp = 0;

	if(aud_fx->ECHO == NULL)
	{
		aud_fx->ECHO = calloc(1, sizeof(fx_delay_data_t));
		if(aud_fx->ECHO == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (audio_fx_echo): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->ECHO->buff_size = (int) delay_ms * audio_ctx->samprate * 0.001;
		aud_fx->ECHO->delayBuff1 = calloc(aud_fx->ECHO->buff_size, sizeof(sample_t));
		if(aud_fx->ECHO->delayBuff1 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (audio_fx_echo): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->ECHO->delayBuff2 = NULL;
		if(audio_ctx->channels > 1)
		{
			aud_fx->ECHO->delayBuff2 = calloc(aud_fx->ECHO->buff_size, sizeof(sample_t));
			if(aud_fx->ECHO->delayBuff2 == NULL)
			{
				fprintf(stderr,"AUDIO: FATAL memory allocation failure (audio_fx_echo): %s\n", strerror(errno));
				exit(-1);
			}
		}
	}

	for(samp = 0; samp < audio_ctx->capture_buff_size; samp = samp + audio_ctx->channels)
	{
		sample_t out = (0.7 * data[samp]) +
			(0.3 * aud_fx->ECHO->delayBuff1[aud_fx->ECHO->delayIndex]);
		aud_fx->ECHO->delayBuff1[aud_fx->ECHO->delayIndex] = data[samp] +
			(aud_fx->ECHO->delayBuff1[aud_fx->ECHO->delayIndex] * decay);
		data[samp] = clip_float(out);
		/*if stereo, process second channel separately*/
		if (audio_ctx->channels > 1)
		{
			out = (0.7 * data[samp+1]) +
				(0.3 * aud_fx->ECHO->delayBuff2[aud_fx->ECHO->delayIndex]);
			aud_fx->ECHO->delayBuff2[aud_fx->ECHO->delayIndex] = data[samp+1] +
				(aud_fx->ECHO->delayBuff2[aud_fx->ECHO->delayIndex] * decay);
			data[samp+1] = clip_float(out);
		}

		if(++(aud_fx->ECHO->delayIndex) >= aud_fx->ECHO->buff_size) aud_fx->ECHO->delayIndex=0;
	}
}
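With delay_ms = 300 at a 48 kHz sample rate, the delay line allocated above holds 300 * 48000 * 0.001 = 14400 samples per channel. A usage sketch; the capture_buff field name is an assumption (only capture_buff_size appears in the snippet):

/* Hypothetical usage sketch: 300 ms echo with 0.5 feedback gain.
 * "capture_buff" is an assumed name for the interleaved capture buffer. */
audio_fx_echo(audio_ctx, audio_ctx->capture_buff, 300, 0.5f);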
Example #3
/*
 * WahWah effect
 *   IMPORTANT:
 *   depth and freqofs should be in the range 0 (min) to 1 (max);
 *   res should be greater than 0.
 * args:
 *   audio_ctx - audio context
 *   data - audio buffer to be processed
 *   freq - LFO frequency (1.5)
 *   startphase - LFO start phase in RADIANS - useful for stereo WahWah (0)
 *   depth - Wah depth (0.7)
 *   freqofs - Wah frequency offset (0.3)
 *   res - Resonance (2.5)
 *
 * asserts:
 *   audio_ctx is not null
 *
 * returns: none
 */
static void audio_fx_wahwah (audio_context_t *audio_ctx,
	sample_t *data,
	float freq,
	float startphase,
	float depth,
	float freqofs,
	float res)
{
	/*assertions*/
	assert(audio_ctx != NULL);

	float frequency, omega, sn, cs, alpha;

	if(aud_fx->wahData == NULL)
	{
		aud_fx->wahData = calloc(1, sizeof(fx_wah_data_t));
		if(aud_fx->wahData == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (audio_fx_wahwah): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->wahData->lfoskip = freq * 2 * M_PI / audio_ctx->samprate;
		aud_fx->wahData->phase = startphase;
		/*if right channel set: phase += (float)M_PI;*/
	}

	int samp = 0;
	for(samp = 0; samp < audio_ctx->capture_buff_size; samp++)
	{
		float in = data[samp];

		if ((aud_fx->wahData->skipcount++) % lfoskipsamples == 0)
		{
			frequency = (1 + cos(aud_fx->wahData->skipcount * aud_fx->wahData->lfoskip + aud_fx->wahData->phase)) * 0.5;
			frequency = frequency * depth * (1 - freqofs) + freqofs;
			frequency = exp((frequency - 1) * 6);
			omega = M_PI * frequency;
			sn = sin(omega);
			cs = cos(omega);
			alpha = sn / (2 * res);
			aud_fx->wahData->b0 = (1 - cs) * 0.5;
			aud_fx->wahData->b1 = 1 - cs;
			aud_fx->wahData->b2 = (1 - cs) * 0.5;
			aud_fx->wahData->a0 = 1 + alpha;
			aud_fx->wahData->a1 = -2 * cs;
			aud_fx->wahData->a2 = 1 - alpha;
		}
		float out = (aud_fx->wahData->b0 * in + aud_fx->wahData->b1 * aud_fx->wahData->xn1 +
			aud_fx->wahData->b2 * aud_fx->wahData->xn2 - aud_fx->wahData->a1 * aud_fx->wahData->yn1 -
			aud_fx->wahData->a2 * aud_fx->wahData->yn2) / aud_fx->wahData->a0;
		aud_fx->wahData->xn2 = aud_fx->wahData->xn1;
		aud_fx->wahData->xn1 = in;
		aud_fx->wahData->yn2 = aud_fx->wahData->yn1;
		aud_fx->wahData->yn1 = out;

		data[samp] = clip_float(out);
	}
}
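The coefficient update is gated by (skipcount % lfoskipsamples), but lfoskipsamples itself is defined outside this snippet: it is the number of samples processed between biquad coefficient recomputations. A sketch of the missing definition, with the exact value being an assumption:

/* Assumption: a small compile-time constant defined elsewhere in the
 * project; larger values recompute the wah coefficients less often. */
#define lfoskipsamples 30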
Example #4
void GenEVNODD(DspParamData& fblk)
{
    fblk._mods._evaluator = [=](FPARAM& cec) -> float
    {
        float kt = fblk._keyTrack*cec._keyOff;
        float vt = lerp(-fblk._velTrack,0.0f,cec._unitVel);
        float x = (cec._coarse)+cec._C1()+cec._C2()+kt+vt;
        //printf( "vt<%f> kt<%f> x<%f>\n", vt, kt, x );
        return clip_float(x,-10,10);
    };
}
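The orkid examples call a three-argument clip_float(x, min, max). A minimal sketch, assuming it is a plain range clamp (the project's own definition may differ):

// Hypothetical sketch of the three-argument clip_float() used by the
// orkid snippets: clamp x into [minval, maxval].
inline float clip_float(float x, float minval, float maxval)
{
    return (x < minval) ? minval : (x > maxval) ? maxval : x;
}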
Example #5
void TWOPARAM_SHAPER::compute(DspBuffer& dspbuf) //final
{
    float pad = _dbd._inputPad;
    int inumframes = dspbuf._numframes;
    float* ubuf = dspbuf.channel(0);
    float evn = _param[0].eval();
    float odd = _param[1].eval();

    //evn = -22;//(0.5f+sinf(ph1+pi)*0.5f)*-60.0f;
    //odd = (sinf(ph1)*30.f)-30.0f;
    ph1 += 0.0003f;

    _fval[0]=evn;
    _fval[1]=odd;
    //printf( "_dbd._inputPad<%f>\n", _dbd._inputPad );
    if(1) for( int i=0; i<inumframes; i++ )
    {
        float u = ubuf[i]*pad;
        float usq = u*u;
        float le = usq*decibel_to_linear_amp_ratio(evn);
        float lo = u*decibel_to_linear_amp_ratio(odd);


        //float e = (2.0f*powf(le,2.0f))-1.0f;
        //float e = ((2.0f*powf(le,2.0f))-1.0f)*1000.0f;//decibel_to_linear_amp_ratio(-evn);
        float index = clip_float(le*6,-12,12); //clip_float(powf(le,4),-12.0f,12.0f);
       // index *= index;
        float e = sinf(index*pi2); ///adj;
        

        index = clip_float(lo*6,-12.0f,12.0f);
        float o = sinf(index*pi2); ///adj;

        float r = (e+o)*0.5f;
        //r = wrap(r,-30);
        ubuf[i] = r;
    }

}
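The shaper treats evn and odd as gains in decibels and converts them with decibel_to_linear_amp_ratio(). A sketch of that conversion under the conventional 20*log10 amplitude definition (an assumption about the project's helper):

#include <cmath>

// Hypothetical sketch: amplitude in dB -> linear gain factor (0 dB -> 1.0),
// using the standard ratio = 10^(dB / 20).
inline float decibel_to_linear_amp_ratio(float db)
{
    return std::pow(10.0f, db / 20.0f);
}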
Example #6
void GenPOS(DspParamData& fblk)
{
    fblk._mods._evaluator = [=](FPARAM& cec) -> float
    {
        cec._kval = fblk._keyTrack*cec._keyOff;
        cec._vval = fblk._velTrack*cec._unitVel;
        cec._s1val = cec._C1();
        cec._s2val = cec._C2();
        float x = (cec._coarse)
                + cec._s1val
                + cec._s2val
                + cec._kval
                + cec._vval;
        return clip_float(x,-100,100);
    };
}
Example #7
File: layer.cpp Project: tweakoz/orkid
controller_t layer::getSRC2(const BlockModulationData& mods)
{
    auto src2 = this->getController(mods._src2);
    auto depthcon = this->getController(mods._src2DepthCtrl);
    float mindepth = mods._src2MinDepth;
    float maxdepth = mods._src2MaxDepth;
   
    auto it = [=]()->float
    {   
        float dc = clip_float(depthcon(),0,1);
        float depth = lerp(mindepth,maxdepth,dc);
        float out = src2()*depth;
        return out;
    };

    return it;
}
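Both getSRC2 and GenEVNODD lean on lerp(). A conventional definition consistent with how it is called here, lerp(a, b, t) returning a at t = 0 and b at t = 1 (the argument order is an assumption):

// Hypothetical sketch of lerp(): linear interpolation from a to b by t.
inline float lerp(float a, float b, float t)
{
    return a + (b - a) * t;
}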
Example #8
/* Non-linear amplifier with soft distortion curve.
 * args:
 *   input - sample input
 *
 * asserts:
 *   none
 *
 * returns: processed sample
 */
static sample_t CubicAmplifier( sample_t input )
{
	sample_t out;
	float temp;
	if( input < 0 ) /* negative half of the input */
	{
		temp = input + 1.0f;
		out = (temp * temp * temp) - 1.0f;
	}
	else
	{
		temp = input - 1.0f;
		out = (temp * temp * temp) + 1.0f;
	}
	return clip_float(out);
}
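Feeding a few values through the curve shows its shape: inputs -1, -0.5, 0, 0.5, 1 map to -1, -0.875, 0, 0.875, 1, a smooth odd-symmetric soft clip. A quick check sketch, assuming sample_t is a plain float and the definitions above are in scope:

#include <stdio.h>

/* Sanity-check sketch (not part of the original source): print the
 * soft-distortion curve at a few probe points. */
int main(void)
{
	const float probes[] = { -1.0f, -0.5f, 0.0f, 0.5f, 1.0f };
	int i;
	for (i = 0; i < 5; i++)
		printf("in=% .2f out=% .4f\n", probes[i], (float)CubicAmplifier(probes[i]));
	return 0;
}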
Example #9
File: layer.cpp Project: tweakoz/orkid
void layer::compute(outputBuffer& obuf)
{
    _HAF._items.clear();

    ///////////////////////

    if( nullptr == _layerData )
    {
        printf( "gotnull ld layer<%p>\n", this );
        return;
    }

    if( nullptr == _alg )
        return;

    //printf( "layer<%p> compute\n", this );
    int inumframes = obuf._numframes;
    float* outl = obuf._leftBuffer;
    float* outr = obuf._rightBuffer;

    float dt = float(inumframes)/_syn._sampleRate;

    //////////////////////////////////////
    // update controllers
    //////////////////////////////////////

    for( int i=0; i<kmaxctrlblocks; i++ )
    {
        if( _ctrlBlock[i] )
            _ctrlBlock[i]->compute(inumframes);    
    }

    ///////////////////////////////////
    // extract AMPENV
    ///////////////////////////////////
    // TODO - get rid of hard-coded location in CB0

    auto CB0 = this->_ctrlBlock[0];
    if( CB0 )
    {
        if( auto as_env = dynamic_cast<RateLevelEnvInst*>(CB0->_cinst[0] ) ) // ampenv ?
        {
            for( int i=0; i<inumframes; i++ )
            {
                float e0 = as_env->_USERAMPENV[i];
                _USERAMPENV[i] = e0;
            }
        }
    }

    ///////////////////////////////////////////////
    // HUD AFRAME
    ///////////////////////////////////////////////
    int envcount = 0;
    int asrcount = 0;
    int lfocount = 0;
    int funcount = 0;
    for( int icb=0; icb<kmaxctrlblocks; icb++ )
    {   auto cb = _ctrlBlock[icb];
        if( cb )
        {   for( int ic=0; ic<kmaxctrlperblock; ic++)
            {   auto cinst = cb->_cinst[ic];
                if( auto env = dynamic_cast<RateLevelEnvInst*>(cinst)  )
                {   envframe envf;
                    envf._index = envcount++;
                    envf._value = env->_curval;
                    envf._data = env->_data;
                    envf._curseg = env->_curseg;
                    if( env->_data && env->_data->_ampenv )
                        envf._curseg = _useNatEnv ? _HAF_nenvseg : env->_curseg;
                    _HAF._items.push_back(envf);
                }
                else if( auto asr = dynamic_cast<AsrInst*>(cinst)  )
                {   asrframe asrf;
                    asrf._index = asrcount++;
                    asrf._value = asr->_curval;
                    asrf._curseg = asr->_curseg;
                    asrf._data = asr->_data;
                    _HAF._items.push_back(asrf);
                    //printf( "add asr item\n");
                }
                else if( auto lfo = dynamic_cast<LfoInst*>(cinst)  )
                {   lfoframe lfof;
                    lfof._index = lfocount++;
                    lfof._value = lfo->_curval;
                    lfof._currate = lfo->_currate;
                    lfof._data = lfo->_data;
                    _HAF._items.push_back(lfof);
                }
                else if( auto fun = dynamic_cast<FunInst*>(cinst)  )
                {   funframe funfr; 
                    funfr._index = funcount++;
                    funfr._data = fun->_data;
                    funfr._value = fun->_curval;
                    _HAF._items.push_back(funfr);
                }
            }
        }
    }
    ///////////////////////////////////////////////

    auto PCHBLK = _layerData->_dspBlocks[0];
    if( PCHBLK )
    {
        const int kNOTEC4 = 60;
        const auto& PCH = PCHBLK->_paramd[0];
        const auto& KMP = _layerData->_kmpBlock;

        int timbreshift = KMP._timbreShift; // 0
        int kmtrans = KMP._transpose/*+timbreshift*/; // -20
        int kmkeytrack = KMP._keyTrack; // 100

        int kmpivot = (kNOTEC4+kmtrans); // 48-20 = 28
        int kmdeltakey = (_curnote+kmtrans-kmpivot); // 48-28 = 28
        int kmdeltacents = kmdeltakey*kmkeytrack; // 8*0 = 0
        int kmfinalcents = (kmpivot*100)+kmdeltacents; // 4800


        int pchtrans = PCH._coarse;//-timbreshift; // 8
        int pchkeytrack = PCH._keyTrack; // 0
        int pchpivot = (kNOTEC4+pchtrans); // 48-0 = 48
        int pchdeltakey = (_curnote+pchtrans-pchpivot); // 48-48=0 //possible minus kmorigin?
        int pchdeltacents = pchdeltakey*pchkeytrack;//0*0=0
        int pchfinalcents = (pchtrans*100)+pchdeltacents;//0*100+0=0

        int kmcents = kmfinalcents;//+region->_tuning;
        //printf( "_curCentsOSC<%d>\n", int(_curCentsOSC) );

        auto dsp0 = _alg->_block[0];

        if( dsp0 )
        {
            float centoff = dsp0->_param[0].eval();
            _curPitchOffsetInCents = int(centoff);//kmcents+pchfinalcents;
        }
        _curCentsOSC = kmcents+pchfinalcents + _curPitchOffsetInCents;

    }
    else
    {
        _curCentsOSC = _curnote*100;
    }

    _curCentsOSC = clip_float( _curCentsOSC, -0,12700 );

    //printf( "pchc1<%f> pchc2<%f> poic<%f> currat<%f>\n", _pchc1, _pchc2, _curPitchOffsetInCents, currat );
    ////////////////////////////////////////

    //printf( "doBlockStereo<%d>\n", int(doBlockStereo) );

    ////////////////////////////////////////

    if( true )
    {
        bool bypassDSP = _syn._bypassDSP;
        DspBlock* lastblock = _alg->lastBlock();
        bool doBlockStereo = bypassDSP 
                           ? false 
                           : lastblock ? (lastblock->numOutputs()==2) 
                                       : false;

        float synsr = _syn._sampleRate;

        outputBuffer laybuf;

        _layerObuf.resize(inumframes);
        float* lyroutl = _layerObuf._leftBuffer;
        float* lyroutr = _layerObuf._rightBuffer;

        ///////////////////////////////////
        // sample osc
        ///////////////////////////////////

        bool do_noise = _doNoise;
        bool do_sine = false;
        bool do_input = false;

        switch(_syn._genmode )
        {
            case 1: // force sine
                do_sine = true;
                do_noise = false;
                break;
            case 2: // force noise
                do_sine = false;
                do_noise = true;
                break;
            case 3: // input
                do_sine = false;
                do_noise = false;
                do_input = true;
                break;
        }

        if( do_noise )
        {
            for( int i=0; i<inumframes; i++ )
            {
                float o = ((rand()&0xffff)/32768.0f)-1.0f;
                lyroutl[i] = o;
                lyroutr[i] = 0.0f;
            }
        }
        else if( do_input )
        {
            auto ibuf = _syn._ibuf._leftBuffer;
            for( int i=0; i<inumframes; i++ )
            {
                float o = ibuf[i]*8.0f;
                lyroutl[i] = o;
                lyroutr[i] = 0.0f;
            }

        }
        else if( do_sine )
        {
            float F = midi_note_to_frequency(float(_curCentsOSC)*0.01);
            float phaseinc =  pi2*F/synsr;

            for( int i=0; i<inumframes; i++ )
            {
                float o = sinf(_sinrepPH)*0.5;
                _sinrepPH += phaseinc;
                lyroutl[i] = o;
                lyroutr[i] = 0.0f;
            }

        }
        else // clear
        for( int i=0; i<inumframes; i++ )
        {
            lyroutl[i] = 0.0f;
            lyroutr[i] = 0.0f;
        }

        ///////////////////////////////////
        // DSP F1-F3
        ///////////////////////////////////

        if( false==bypassDSP )
            _alg->compute(_layerObuf);

        ///////////////////////////////////
        // amp / out
        ///////////////////////////////////

        if( doBlockStereo )
        {
            for( int i=0; i<inumframes; i++ )
            {
                float tgain = _layerGain*_masterGain;
                outl[i] += lyroutl[i]*tgain;
                outr[i] += lyroutr[i]*tgain;
            }
        }
        else if( bypassDSP )
        {
            for( int i=0; i<inumframes; i++ )
            {
                float tgain = _layerGain*_masterGain;
                float inp = lyroutl[i]; 
                outl[i] += inp*tgain*0.5f;
                outr[i] += inp*tgain*0.5f;
            }
        }
        else
        {
            for( int i=0; i<inumframes; i++ )
            {
                float tgain = _layerGain*_masterGain;
                outl[i] += lyroutl[i]*tgain;
                outr[i] += lyroutl[i]*tgain;
            }            
        }

        /////////////////
        // oscope
        /////////////////

        if( this == _syn._hudLayer )
        {
            _HAF._oscopebuffer.resize(inumframes);

            if( doBlockStereo )
            {
                for( int i=0; i<inumframes; i++ )
                {
                    float l = _layerObuf._leftBuffer[i];
                    float r = _layerObuf._rightBuffer[i];
                    //tailb[i] = l;//doBlockStereo ? l+r : l;
                    _HAF._oscopebuffer[i]=(l+r)*0.5f;
                }

            }
            else
            {
                for( int i=0; i<inumframes; i++ )
                {
                    float l = _layerObuf._leftBuffer[i];
                    float r = _layerObuf._rightBuffer[i];
                    //tailb[i] = l;//doBlockStereo ? l+r : l;
                    _HAF._oscopebuffer[i]=l;
                }

            }
            _syn._hudbuf.push(_HAF);

        }
    }


    _layerTime += dt;

}
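In the force-sine branch the oscillator frequency comes from midi_note_to_frequency(_curCentsOSC * 0.01), i.e. the pitch in cents is first converted to a fractional MIDI-style note number. A sketch of that helper under the standard 12-TET, A4 (note 69) = 440 Hz convention, which is an assumption about the project's tuning reference:

#include <cmath>

// Hypothetical sketch: fractional MIDI note number -> frequency in Hz,
// assuming 12-tone equal temperament with A4 (note 69) = 440 Hz.
inline float midi_note_to_frequency(float note)
{
    return 440.0f * std::pow(2.0f, (note - 69.0f) / 12.0f);
}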
Example #10
/*
 * four parallel comb filters for reverb
 * args:
 *   audio_ctx - audio context
 *   data - audio buffer to be processed
 *   delay1_ms - delay for filter 1
 *   delay2_ms - delay for filter 2
 *   delay3_ms - delay for filter 3
 *   delay4_ms - delay for filter 4
 *   gain1 - feed gain for filter 1
 *   gain2 - feed gain for filter 2
 *   gain3 - feed gain for filter 3
 *   gain4 - feed gain for filter 4
 *   in_gain - input line gain
 *
 * asserts:
 *   none
 *
 * returns: none
 */
static void CombFilter4 (audio_context_t *audio_ctx,
	sample_t *data,
	int delay1_ms,
	int delay2_ms,
	int delay3_ms,
	int delay4_ms,
	float gain1,
	float gain2,
	float gain3,
	float gain4,
	float in_gain)
{
	int samp=0;
	/*buff_size in samples*/

	if (aud_fx->COMB4 == NULL)
	{
		aud_fx->COMB4 = calloc(1, sizeof(fx_comb4_data_t));
		if(aud_fx->COMB4 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
			exit(-1);
		}
		/*buff_size in samples*/
		aud_fx->COMB4->buff_size1 = (int) delay1_ms * (audio_ctx->samprate * 0.001);
		aud_fx->COMB4->buff_size2 = (int) delay2_ms * (audio_ctx->samprate * 0.001);
		aud_fx->COMB4->buff_size3 = (int) delay3_ms * (audio_ctx->samprate * 0.001);
		aud_fx->COMB4->buff_size4 = (int) delay4_ms * (audio_ctx->samprate * 0.001);

		aud_fx->COMB4->CombBuff10 = calloc(aud_fx->COMB4->buff_size1, sizeof(sample_t));
		if(aud_fx->COMB4->CombBuff10 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->COMB4->CombBuff20 = calloc(aud_fx->COMB4->buff_size2, sizeof(sample_t));
		if(aud_fx->COMB4->CombBuff20 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->COMB4->CombBuff30 = calloc(aud_fx->COMB4->buff_size3, sizeof(sample_t));
		if(aud_fx->COMB4->CombBuff30 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->COMB4->CombBuff40 = calloc(aud_fx->COMB4->buff_size4, sizeof(sample_t));
		if(aud_fx->COMB4->CombBuff40 == NULL)
		{
			fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
			exit(-1);
		}
		aud_fx->COMB4->CombBuff11 = NULL;
		aud_fx->COMB4->CombBuff21 = NULL;
		aud_fx->COMB4->CombBuff31 = NULL;
		aud_fx->COMB4->CombBuff41 = NULL;
		if(audio_ctx->channels > 1)
		{
			aud_fx->COMB4->CombBuff11 = calloc(aud_fx->COMB4->buff_size1, sizeof(sample_t));
			if(aud_fx->COMB4->CombBuff11 == NULL)
			{
				fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
				exit(-1);
			}
			aud_fx->COMB4->CombBuff21 = calloc(aud_fx->COMB4->buff_size2, sizeof(sample_t));
			if(aud_fx->COMB4->CombBuff21 == NULL)
			{
				fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
				exit(-1);
			}
			aud_fx->COMB4->CombBuff31 = calloc(aud_fx->COMB4->buff_size3, sizeof(sample_t));
			if(aud_fx->COMB4->CombBuff31 == NULL)
			{
				fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
				exit(-1);
			}
			aud_fx->COMB4->CombBuff41 = calloc(aud_fx->COMB4->buff_size4, sizeof(sample_t));
			if(aud_fx->COMB4->CombBuff41 == NULL)
			{
				fprintf(stderr,"AUDIO: FATAL memory allocation failure (CombFilter4): %s\n", strerror(errno));
				exit(-1);
			}
		}
	}

	for(samp = 0; samp < audio_ctx->capture_buff_size; samp = samp + audio_ctx->channels)
	{
		sample_t out1 = in_gain * data[samp] +
			gain1 * aud_fx->COMB4->CombBuff10[aud_fx->COMB4->CombIndex1];
		sample_t out2 = in_gain * data[samp] +
			gain2 * aud_fx->COMB4->CombBuff20[aud_fx->COMB4->CombIndex2];
		sample_t out3 = in_gain * data[samp] +
			gain3 * aud_fx->COMB4->CombBuff30[aud_fx->COMB4->CombIndex3];
		sample_t out4 = in_gain * data[samp] +
			gain4 * aud_fx->COMB4->CombBuff40[aud_fx->COMB4->CombIndex4];

		aud_fx->COMB4->CombBuff10[aud_fx->COMB4->CombIndex1] = data[samp] +
			gain1 * aud_fx->COMB4->CombBuff10[aud_fx->COMB4->CombIndex1];
		aud_fx->COMB4->CombBuff20[aud_fx->COMB4->CombIndex2] = data[samp] +
			gain2 * aud_fx->COMB4->CombBuff20[aud_fx->COMB4->CombIndex2];
		aud_fx->COMB4->CombBuff30[aud_fx->COMB4->CombIndex3] = data[samp] +
			gain3 * aud_fx->COMB4->CombBuff30[aud_fx->COMB4->CombIndex3];
		aud_fx->COMB4->CombBuff40[aud_fx->COMB4->CombIndex4] = data[samp] +
			gain4 * aud_fx->COMB4->CombBuff40[aud_fx->COMB4->CombIndex4];

		data[samp] = clip_float(out1 + out2 + out3 + out4);

		/*if stereo process second channel */
		if(audio_ctx->channels > 1)
		{
			out1 = in_gain * data[samp+1] +
				gain1 * aud_fx->COMB4->CombBuff11[aud_fx->COMB4->CombIndex1];
			out2 = in_gain * data[samp+1] +
				gain2 * aud_fx->COMB4->CombBuff21[aud_fx->COMB4->CombIndex2];
			out3 = in_gain * data[samp+1] +
				gain3 * aud_fx->COMB4->CombBuff31[aud_fx->COMB4->CombIndex3];
			out4 = in_gain * data[samp+1] +
				gain4 * aud_fx->COMB4->CombBuff41[aud_fx->COMB4->CombIndex4];

			aud_fx->COMB4->CombBuff11[aud_fx->COMB4->CombIndex1] = data[samp+1] +
				gain1 * aud_fx->COMB4->CombBuff11[aud_fx->COMB4->CombIndex1];
			aud_fx->COMB4->CombBuff21[aud_fx->COMB4->CombIndex2] = data[samp+1] +
				gain2 * aud_fx->COMB4->CombBuff21[aud_fx->COMB4->CombIndex2];
			aud_fx->COMB4->CombBuff31[aud_fx->COMB4->CombIndex3] = data[samp+1] +
				gain3 * aud_fx->COMB4->CombBuff31[aud_fx->COMB4->CombIndex3];
			aud_fx->COMB4->CombBuff41[aud_fx->COMB4->CombIndex4] = data[samp+1] +
				gain4 * aud_fx->COMB4->CombBuff41[aud_fx->COMB4->CombIndex4];

			data[samp+1] = clip_float(out1 + out2 + out3 + out4);
		}

		if(++(aud_fx->COMB4->CombIndex1) >= aud_fx->COMB4->buff_size1) aud_fx->COMB4->CombIndex1=0;
		if(++(aud_fx->COMB4->CombIndex2) >= aud_fx->COMB4->buff_size2) aud_fx->COMB4->CombIndex2=0;
		if(++(aud_fx->COMB4->CombIndex3) >= aud_fx->COMB4->buff_size3) aud_fx->COMB4->CombIndex3=0;
		if(++(aud_fx->COMB4->CombIndex4) >= aud_fx->COMB4->buff_size4) aud_fx->COMB4->CombIndex4=0;
	}
}
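Each comb delay line is sized as delay_ms * samprate / 1000 samples, so a 37 ms comb at 48 kHz holds 1776 samples. A usage sketch with staggered delays and decreasing feedback gains; the specific values are illustrative, not taken from the project:

/* Hypothetical usage sketch: four combs at staggered delays, mixed
 * with a 0.7 input line gain. */
CombFilter4(audio_ctx, data, 30, 37, 41, 44, 0.55f, 0.5f, 0.45f, 0.4f, 0.7f);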