Esempio n. 1
0
void
Spatializer_Module::process ( nframes_t nframes )
{
    /* Real-time process callback, run once per cycle for `nframes`
     * frames.  Reads the module's control ports, feeds the late and
     * early reverb aux sends (with smoothed gain), then applies
     * distance-based gain, lowpass filtering and propagation delay to
     * the direct signal before panning it onto the four main outputs. */

    /* fetch control port values for this cycle */
    float azimuth = control_input[0].control_value();
    float elevation = control_input[1].control_value();
    float radius = control_input[2].control_value();
    float highpass_freq = control_input[3].control_value();
    float width = control_input[4].control_value();
    float angle = control_input[5].control_value();
//        bool more_options = control_input[6].control_value();
    bool speed_of_sound = control_input[7].control_value() > 0.5f;
    /* DB_CO converts a dB control value into a linear gain coefficient */
    float late_gain = DB_CO( control_input[8].control_value() );
    float early_gain = DB_CO( control_input[9].control_value() );

    /* show the highpass frequency control only while the filter is
     * engaged (non-zero frequency) */
    control_input[3].hints.visible = highpass_freq != 0.0f;

    float delay_seconds = 0.0f;

    /* emulate propagation delay past the reference distance of 1 unit,
     * using ~340.29 m/s for the speed of sound */
    if ( speed_of_sound && radius > 1.0f )
        delay_seconds = ( radius - 1.0f ) / 340.29f;

    /* direct sound follows inverse square law */
    /* but it's just the inverse as far as SPL goes */

    /* clamp the radius to avoid a huge (or infinite) gain boost */
    if ( radius < 0.01f )
        radius = 0.01f;

    float gain = 1.0f / radius;

    /* float cutoff_frequency = gain * LOWPASS_FREQ; */

    /* stack-allocated scratch buffers (VLAs — a compiler extension in
     * C++), so no heap allocation occurs in the RT thread */
    sample_t gainbuf[nframes];
    sample_t delaybuf[nframes];

    bool use_gainbuf = false;
    /* apply() appears to return true when the target value is still
     * ramping and it filled the buffer with per-sample values —
     * verify against the smoothing class */
    bool use_delaybuf = delay_smoothing.apply( delaybuf, nframes, delay_seconds );

    for ( unsigned int i = 0; i < audio_input.size(); i++ )
    {
        sample_t *buf = (sample_t*) audio_input[i].buffer();

        /* frequency effects (in place on the input buffer) */
        _highpass[i]->run_highpass( buf, highpass_freq, nframes );

        /* send to late reverb: sum all inputs into aux output 0
         * (copy for the first channel, mix the rest in) */
        if ( i == 0 )
            buffer_copy( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );
        else
            buffer_mix( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), buf, nframes );

    }

    {
        use_gainbuf = late_gain_smoothing.apply( gainbuf, nframes, late_gain );

        /* gain effects: per-sample ramp while smoothing, constant otherwise */
        if ( unlikely( use_gainbuf ) )
            buffer_apply_gain_buffer( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), gainbuf, nframes );
        else
            buffer_apply_gain( (sample_t*)aux_audio_output[0].jack_port()->buffer(nframes), nframes, late_gain );
    }

    /* wrap azimuth - angle back into the [-180, 180] range */
    float early_angle = azimuth - angle;
    if ( early_angle > 180.0f )
        early_angle = -180 - ( early_angle - 180 );
    else  if ( early_angle < -180.0f )
        early_angle = 180 - ( early_angle + 180 );

    /* NOTE(review): early_angle is never used below — both early
     * panner calls are fed azimuth + angle instead.  Possibly
     * intentional, but worth confirming. */

    /* send to early reverb: pan onto aux outputs 1-4 */
    if ( audio_input.size() == 1 )
    {
        _early_panner->run_mono( (sample_t*)audio_input[0].buffer(),
                                 (sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
                                 (sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
                                 (sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
                                 (sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
                                 azimuth + angle,
                                 elevation,
                                 nframes );
    }
    else
    {
        _early_panner->run_stereo( (sample_t*)audio_input[0].buffer(),
                                   (sample_t*)audio_input[1].buffer(),
                                   (sample_t*)aux_audio_output[1].jack_port()->buffer(nframes),
                                   (sample_t*)aux_audio_output[2].jack_port()->buffer(nframes),
                                   (sample_t*)aux_audio_output[3].jack_port()->buffer(nframes),
                                   (sample_t*)aux_audio_output[4].jack_port()->buffer(nframes),
                                   azimuth + angle,
                                   elevation,
                                   width,
                                   nframes );
    }

    {
        use_gainbuf = early_gain_smoothing.apply( gainbuf, nframes, early_gain );

        /* apply the early reverb gain to aux outputs 1-4 */
        for ( int i = 1; i < 5; i++ )
        {
            /* gain effects */
            if ( unlikely( use_gainbuf ) )
                buffer_apply_gain_buffer( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), gainbuf, nframes );
            else
                buffer_apply_gain( (sample_t*)aux_audio_output[i].jack_port()->buffer(nframes), nframes, early_gain );
        }
    }

    /* the larger the source's angle (less the half-width), the lower
     * the lowpass cutoff — presumably modelling high-frequency loss
     * for off-axis sources; verify intent */
    float corrected_angle = fabs( angle ) - (fabs( width ) * 0.5f);

    if ( corrected_angle < 0.0f )
        corrected_angle = 0.0f;

    float cutoff_frequency = ( 1.0f / ( 1.0f + corrected_angle ) ) * 300000.0f;

    use_gainbuf = gain_smoothing.apply( gainbuf, nframes, gain );

    /* shape the direct signal in place: distance gain, then lowpass,
     * then propagation delay */
    for ( unsigned int i = 0; i < audio_input.size(); i++ )
    {
        /* gain effects */
        if ( unlikely( use_gainbuf ) )
            buffer_apply_gain_buffer( (sample_t*)audio_input[i].buffer(), gainbuf, nframes );
        else
            buffer_apply_gain( (sample_t*)audio_input[i].buffer(), nframes, gain );

        /* frequency effects */
        _lowpass[i]->run_lowpass( (sample_t*)audio_input[i].buffer(), cutoff_frequency, nframes );

        /* delay effects: per-sample delay buffer while the value is
         * still ramping, constant delay otherwise */
        if ( likely( speed_of_sound ) )
        {
            if ( unlikely( use_delaybuf ) )
                _delay[i]->run( (sample_t*)audio_input[i].buffer(), delaybuf, 0, nframes );
            else
                _delay[i]->run( (sample_t*)audio_input[i].buffer(), 0, delay_seconds, nframes );
        }
    }

    /* now do direct outputs: pan the processed direct signal onto the
     * four main outputs */
    if ( audio_input.size() == 1 )
    {
        _panner->run_mono( (sample_t*)audio_input[0].buffer(),
                           (sample_t*)audio_output[0].buffer(),
                           (sample_t*)audio_output[1].buffer(),
                           (sample_t*)audio_output[2].buffer(),
                           (sample_t*)audio_output[3].buffer(),
                           azimuth,
                           elevation,
                           nframes );
    }
    else
    {
        _panner->run_stereo( (sample_t*)audio_input[0].buffer(),
                             (sample_t*)audio_input[1].buffer(),
                             (sample_t*)audio_output[0].buffer(),
                             (sample_t*)audio_output[1].buffer(),
                             (sample_t*)audio_output[2].buffer(),
                             (sample_t*)audio_output[3].buffer(),
                             azimuth,
                             elevation,
                             width,
                             nframes );
    }
}
Esempio n. 2
0
/* FIXME: it is far more efficient to read all the channels from a
   multichannel source at once... But how should we handle the case of a
   mismatch between the number of channels in this region's source and
   the number of channels on the track/buffer this data is being read
   for? Would it not be better to simply buffer and deinterlace the
   frames in the Audio_File class instead, so that sequential requests
   for different channels at the same position avoid hitting the disk
   again? */
nframes_t
Audio_Region::read ( sample_t *buf, nframes_t pos, nframes_t nframes, int channel ) const
{
    /* Read up to `nframes` frames of channel `channel` of this region
     * into `buf`, where `pos` is the absolute timeline position of the
     * start of the buffer (assumed — verify against caller).  Handles
     * looped regions, loop-boundary declicking, gain scaling, and the
     * region's fade in/out.  Returns the number of frames actually
     * read, or 0 when the region does not intersect the request. */
    THREAD_ASSERT( Playback );

    /* snapshot the range so a concurrent edit can't shift it mid-read
     * (presumably — confirm the threading model) */
    const Range r = _range;

    /* do nothing if we aren't covered by this frame range */
    if ( pos > r.start + r.length || pos + nframes < r.start )
        return 0;

    /* calculate offsets into file and sample buffer */

    nframes_t sofs,                                              /* offset into source */
        ofs,                                                    /* offset into buffer */
        cnt;                                                    /* number of frames to read  */

    cnt = nframes;

    if ( pos < r.start )
    {
        /* region starts somewhere after the beginning of this buffer */
        sofs = 0;
        ofs = r.start - pos;
        cnt -= ofs;
    }
    else
    {
        /* region started before this buffer */
        ofs = 0;
        sofs = pos - r.start;
    }

    /* region begins at or past the end of this buffer: nothing to do
     * (also guards the unsigned underflow of `cnt -= ofs` above) */
    if ( ofs >= nframes )
        return 0;

//    const nframes_t start = ofs + r.start + sofs;
    /* absolute read position within the source (r.offset = where the
     * region's audio begins in the source) */
    const nframes_t start = r.offset + sofs;
    const nframes_t len = cnt;

    if ( len == 0 )
        return 0;

    /* now that we know how much and where to read, get on with it */

    //    printf( "reading region ofs = %lu, sofs = %lu, %lu-%lu\n", ofs, sofs, start, end  );

    /* FIXME: keep the declick defaults someplace else */
    Fade declick;

    declick.length = 256;
    declick.type   = Fade::Sigmoid;

    if ( _loop )
    {
        /* _loop is the loop length in frames; lofs is our offset
         * within the current loop iteration */
        nframes_t lofs = sofs % _loop;
        nframes_t lstart = r.offset + lofs;


        if ( lofs + len > _loop )
        {
            /* this buffer covers a loop boundary */

            /* read the first part */
            cnt = _clip->read( buf + ofs, channel, lstart, len - ( ( lofs + len ) - _loop ) );
            /* read the second part */
            cnt += _clip->read( buf + ofs + cnt, channel, lstart + cnt, len - cnt );

            /* TODO: declick/crossfade transition? */

            assert( cnt == len );
        }
        else
            cnt = _clip->read( buf + ofs, channel, lstart, len );

        /* this buffer is inside declicking proximity to the loop boundary */

        if ( lofs + cnt + declick.length > _loop /* buffer ends within declick length of the end of loop */
             &&
             sofs + declick.length < r.length /* not the last loop */
            )
        {
            /* */
            /* fixme: what if loop is shorter than declick? */
            const nframes_t declick_start = _loop - declick.length;

            /* when the buffer covers the beginning of the
             * declick, how many frames between the beginning of
             * the buffer and the beginning of the declick */
            const nframes_t declick_onset_offset = declick_start > lofs ? declick_start - lofs : 0;

            /* how far into the declick we are */
            const nframes_t declick_offset = lofs > declick_start ? lofs - declick_start : 0;

            /* this is the end side of the loop boundary */

            const nframes_t fl = cnt - declick_onset_offset;

            declick.apply( buf + ofs + declick_onset_offset,
                           Fade::Out,
                           declick_offset, fl );
        }

        if ( lofs < declick.length /* buffer begins within declick length of beginning of loop */
             &&
             sofs > _loop )               /* not the first loop */
        {

            const nframes_t declick_end = declick.length;

            const nframes_t click_len = lofs + cnt > declick_end ? declick_end - lofs : cnt;

            /* this is the beginning side of the next loop boundary */
            declick.apply( buf + ofs, Fade::In, lofs, click_len );
        }
    }
    else
        cnt = _clip->read( buf + ofs, channel, start, len );

    if ( ! cnt )
        return 0;

    /* apply gain */

    buffer_apply_gain( buf + ofs, cnt, _scale );

    /* perform declicking if necessary */


    {
        assert( cnt <= nframes );

        Fade fade;

        /* use whichever is longer: the region's own fade-in or the
         * minimum declick fade (Fade::operator< presumably compares
         * lengths — verify) */
        fade = declick < _fade_in ? _fade_in : declick;

        /* do fade in if necessary */
        if ( sofs < fade.length )
            fade.apply( buf + ofs, Fade::In, sofs, cnt );

        fade = declick < _fade_out ? _fade_out : declick;

        /* do fade out if necessary: the read window overlaps the last
         * fade.length frames of the region */
        if ( start + fade.length > r.offset + r.length )
            fade.apply( buf, Fade::Out, ( start + fade.length ) - ( r.offset + r.length ), cnt );
    }

    return cnt;
}