Code example #1
File: el_processor.cpp Project: Torc/torc
    // create an instance of the decoder
    //  blocksize is fixed over the lifetime of this object for performance reasons
    decoder_impl(unsigned blocksize=8192): N(blocksize), halfN(blocksize/2) {
#ifdef USE_FFTW3
        // create FFTW buffers
        lt = (float*)fftwf_malloc(sizeof(float)*N);
        rt = (float*)fftwf_malloc(sizeof(float)*N);
        dst = (float*)fftwf_malloc(sizeof(float)*N);
        dftL = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N);
        dftR = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N);
        src = (fftwf_complex*)fftwf_malloc(sizeof(fftwf_complex)*N);
        loadL = fftwf_plan_dft_r2c_1d(N, lt, dftL, FFTW_MEASURE);
        loadR = fftwf_plan_dft_r2c_1d(N, rt, dftR, FFTW_MEASURE);
        store = fftwf_plan_dft_c2r_1d(N, src, dst, FFTW_MEASURE);
#else
        // create lavc fft buffers
        lt = (float*)av_malloc(sizeof(FFTSample)*N);
        rt = (float*)av_malloc(sizeof(FFTSample)*N);
        dftL = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2);
        dftR = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2);
        src = (FFTComplexArray*)av_malloc(sizeof(FFTComplex)*N*2);
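        // nbits is hard-coded to 13 (2^13 == 8192), which matches the default blocksize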
        fftContextForward = av_fft_init(13, 0);
        fftContextReverse = av_fft_init(13, 1);
#endif
        // resize our own buffers
        frontR.resize(N);
        frontL.resize(N);
        avg.resize(N);
        surR.resize(N);
        surL.resize(N);
        trueavg.resize(N);
        xfs.resize(N);
        yfs.resize(N);
        inbuf[0].resize(N);
        inbuf[1].resize(N);
        for (unsigned c=0;c<6;c++) {
            outbuf[c].resize(N);
            filter[c].resize(N);
        }
        sample_rate(48000);
        // generate the window function (square root of Hann, because it is applied before and after the transform)
        wnd.resize(N);
        for (unsigned k=0;k<N;k++)
            wnd[k] = sqrt(0.5*(1-cos(2*PI*k/N))/N);
        current_buf = 0;
        memset(inbufs, 0, sizeof(inbufs));
        memset(outbufs, 0, sizeof(outbufs));
        // set the default coefficients
        surround_coefficients(0.8165,0.5774);
        phase_mode(0);
        separation(1,1);
        steering_mode(1);
    }
Code example #2
File: fft.c Project: 411697643/FFmpeg
static inline void fft_init(FFTContext **s, int nbits, int inverse)
{
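    /* with AVFFT use libavcodec's public av_fft_init(); otherwise initialize the
     * caller-provided context via the internal ff_fft_init() */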
#if AVFFT
    *s = av_fft_init(nbits, inverse);
#else
    ff_fft_init(*s, nbits, inverse);
#endif
}
Code example #3
File: Equalizer.cpp Project: JERUKA9/QMPlay2
void Equalizer::alloc( bool b )
{
	mutex.lock();
	if ( !b && ( fftIn || fftOut ) )
	{
		canFilter = false;
		FFT_NBITS = FFT_SIZE = FFT_SIZE_2 = 0;
		av_fft_end( fftIn );
		av_fft_end( fftOut );
		fftIn = NULL;
		fftOut = NULL;
		av_free( complex );
		complex = NULL;
		input.clear();
		last_samples.clear();
		wind_f.clear();
		f.clear();
	}
	else if ( b )
	{
		if ( !fftIn || !fftOut )
		{
			FFT_NBITS  = sets().getInt( "Equalizer/nbits" );
			FFT_SIZE   = 1 << FFT_NBITS;
			FFT_SIZE_2 = FFT_SIZE / 2;
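			// forward (fftIn) and inverse (fftOut) transforms of 2^FFT_NBITS points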
			fftIn  = av_fft_init( FFT_NBITS, false );
			fftOut = av_fft_init( FFT_NBITS, true );
			complex = ( FFTComplex * )av_malloc( FFT_SIZE * sizeof( FFTComplex ) );
			input.resize( chn );
			last_samples.resize( chn );
			wind_f.resize( FFT_SIZE );
			for ( int i = 0 ; i < FFT_SIZE ; ++i )
				wind_f[ i ] = 0.5f - 0.5f * cos( 2.0f * M_PI * i / ( FFT_SIZE - 1 ) );
		}
		interpolateFilterCurve();
		canFilter = true;
	}
	mutex.unlock();
}
Code example #4
File: fft.c Project: guyt101z/codec2-1
fft_cfg fft_new(int n, int inverse) {
#ifdef KISS_FFT
    kiss_fft_cfg cfg = kiss_fft_alloc (n, inverse, NULL, NULL);
    return cfg;
#elif defined(LIBAVCODEC_FFT)
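    /* libavcodec takes the transform size as its base-2 logarithm */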
    FFTContext *ctxt = av_fft_init(log2int(n), inverse);
    if (ctxt == NULL) return NULL;
    fft_cfg cfg = malloc(sizeof(*cfg));
    if (cfg == NULL) return NULL;
    cfg->context = ctxt;
    cfg->size = sizeof(COMP) * n;
    return cfg;
#else
#error FFT engine was not defined
#endif
}
Code example #5
File: af_afftfilt.c Project: bavison/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AFFTFiltContext *s = ctx->priv;
    char *saveptr = NULL;
    int ret = 0, ch, i;
    float overlap;
    char *args;
    const char *last_expr = "1";

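    /* create forward (s->fft) and inverse (s->ifft) FFT contexts of 2^fft_bits points */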
    s->fft  = av_fft_init(s->fft_bits, 0);
    s->ifft = av_fft_init(s->fft_bits, 1);
    if (!s->fft || !s->ifft)
        return AVERROR(ENOMEM);

    s->window_size = 1 << s->fft_bits;

    s->fft_data = av_calloc(inlink->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->window_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    s->real = av_calloc(inlink->channels, sizeof(*s->real));
    if (!s->real)
        return AVERROR(ENOMEM);

    s->imag = av_calloc(inlink->channels, sizeof(*s->imag));
    if (!s->imag)
        return AVERROR(ENOMEM);

    args = av_strdup(s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->real[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0)
            break;
        if (arg)
            last_expr = arg;
        s->nb_exprs++;
    }

    av_free(args);

    args = av_strdup(s->img_str ? s->img_str : s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->imag[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0)
            break;
        if (arg)
            last_expr = arg;
    }

    av_free(args);

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);

    s->window_func_lut = av_realloc_f(s->window_func_lut, s->window_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->window_size, s->win_func, &overlap);
    if (s->overlap == 1)
        s->overlap = overlap;

    for (s->win_scale = 0, i = 0; i < s->window_size; i++) {
        s->win_scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    s->hop_size = s->window_size * (1 - s->overlap);
    if (s->hop_size <= 0)
        return AVERROR(EINVAL);

    s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    return ret;
}
Code example #6
File: psy_snr.c Project: Senjuti/FFmpeg
static int run_psnr(FILE *f[2], int len, int shift, int skip_bytes)
{
    int i, j;
    uint64_t sse = 0;
    double sse_d = 0.0;
    uint8_t buf[2][SIZE];
    int64_t max    = (1LL << (8 * len)) - 1;
    int size0      = 0;
    int size1      = 0;
    uint64_t maxdist = 0;
    double maxdist_d = 0.0;
    int noseek;


    noseek = fseek(f[0], 0, SEEK_SET) ||
             fseek(f[1], 0, SEEK_SET);

    if (!noseek) {
        for (i = 0; i < 2; i++) {
            uint8_t *p = buf[i];
            if (fread(p, 1, 12, f[i]) != 12)
                return 1;
            if (!memcmp(p, "RIFF", 4) &&
                !memcmp(p + 8, "WAVE", 4)) {
                if (fread(p, 1, 8, f[i]) != 8)
                    return 1;
                while (memcmp(p, "data", 4)) {
                    int s = p[4] | p[5] << 8 | p[6] << 16 | p[7] << 24;
                    fseek(f[i], s, SEEK_CUR);
                    if (fread(p, 1, 8, f[i]) != 8)
                        return 1;
                }
            } else {
                fseek(f[i], -12, SEEK_CUR);
            }
        }

        fseek(f[shift < 0], abs(shift), SEEK_CUR);

        fseek(f[0], skip_bytes, SEEK_CUR);
        fseek(f[1], skip_bytes, SEEK_CUR);
    }

    fflush(stdout);
    for (;;) {
        int s0 = fread(buf[0], 1, SIZE, f[0]);
        int s1 = fread(buf[1], 1, SIZE, f[1]);
        int tempsize = FFMIN(s0,s1);
        DECLARE_ALIGNED(32, FFTComplex, fftcomplexa)[SIZE/len];
        DECLARE_ALIGNED(32, FFTComplex, fftcomplexb)[SIZE/len];

        for (j = 0; j < tempsize; j += len) {
            switch (len) {
            case 1:
            case 2: {
                int64_t a = buf[0][j];
                int64_t b = buf[1][j];
                int dist;
                if (len == 2) {
                    fftcomplexa[j/len].re = get_s16l(buf[0] + j);
                    fftcomplexb[j/len].re = get_s16l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                } else {
                    fftcomplexa[j/len].re = buf[0][j];
                    fftcomplexb[j/len].re = buf[1][j];
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                }
                dist = abs(fftcomplexa[j/len].re - fftcomplexb[j/len].re);
                if (dist > maxdist)
                    maxdist = dist;
                break;
            }
            case 4:
            case 8: {
                double dist, a, b;
                if (len == 8) {
                    fftcomplexa[j/len].re = (float) get_f64l(buf[0] + j);
                    fftcomplexb[j/len].re = (float) get_f64l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                } else {
                    fftcomplexa[j/len].re = (float) get_f32l(buf[0] + j);
                    fftcomplexb[j/len].re = (float) get_f32l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                }
                dist = fabs(fftcomplexa[j/len].re - fftcomplexb[j/len].re);
                if (dist > maxdist_d)
                    maxdist_d = dist;
                break;
            }
            }
        }

        for(;j<SIZE;j+=len){
            fftcomplexa[j/len].re = 0;
            fftcomplexb[j/len].re = 0;
            fftcomplexa[j/len].im = 0;
            fftcomplexb[j/len].im = 0;
        }

        size0 += s0;
        size1 += s1;
        if (s0 + s1 <= 0)
            break;

        FFTContext* fftcontexta = av_fft_init(floor(log2(SIZE/len)), 0);
        av_fft_permute(fftcontexta, fftcomplexa);
        av_fft_calc(fftcontexta, fftcomplexa);
        FFTContext* fftcontextb = av_fft_init(floor(log2(SIZE/len)), 0);
        av_fft_permute(fftcontextb, fftcomplexb);
        av_fft_calc(fftcontextb, fftcomplexb);

        float* maskingfunc = get_mask_array(SIZE/len);
        float* mask = get_mask(fftcomplexa, SIZE/len, maskingfunc);
        double psysse = get_psy_sse(fftcomplexa,fftcomplexb, mask, SIZE/len);
        free(maskingfunc);
        free(mask);
        // the contexts are re-created on every iteration, so release them here to avoid leaking
        av_fft_end(fftcontexta);
        av_fft_end(fftcontextb);
        sse += psysse;
        sse_d += psysse;
    }

    fflush(stdout);
    i = FFMIN(size0, size1) / len;
    if (!i)
        i = 1;

    switch (len) {
    case 1:
    case 2: {
        uint64_t psnr;
        uint64_t dev = int_sqrt(((sse / i) * F * F) + (((sse % i) * F * F) + i / 2) / i);
        if (sse)
            psnr = ((2 * log16(max << 16) + log16(i) - log16(sse)) *
                    284619LL * F + (1LL << 31)) / (1LL << 32);
        else
            psnr = 1000 * F - 1; // floating point free infinity :)

        printf("stddev:%5d.%02d PSYSNR:%3d.%02d MAXDIFF:%5"PRIu64" bytes:%9d/%9d\n",
               (int)(dev / F), (int)(dev % F),
               (int)(psnr / F), (int)(psnr % F),
               maxdist, size0, size1);
        return psnr;
        }
    case 4:
    case 8: {
        char psnr_str[64];
        double psnr = INT_MAX;
        double dev = sqrt(sse_d / i);
        uint64_t scale = (len == 4) ? (1ULL << 24) : (1ULL << 32);

        if (sse_d) {
            psnr = 2 * log(DBL_MAX) - log(i / sse_d);
            snprintf(psnr_str, sizeof(psnr_str), "%5.02f", psnr);
        } else
            snprintf(psnr_str, sizeof(psnr_str), "inf");

        maxdist = maxdist_d * scale;

        printf("stddev:%10.2f PSYSNR:%s MAXDIFF:%10"PRIu64" bytes:%9d/%9d\n",
               dev * scale, psnr_str, maxdist, size0, size1);
        return psnr;
    }
    }
    return -1;
}
Code example #7
/*
void fft_2k_test(  fftw_complex *out )
{
    memset(fftw_in, 0, sizeof(fftw_complex)*M2KS);
    int m = (M2KS/2)+32;//1704;
    fftw_in[m].re =  0.7;

    fftw_one( m_fftw_2k_plan, fftw_in, out );
    return;
}
*/
void init_dvb_t_fft( void )
{
    //
    // Plans
    //
#ifdef USE_AVFFT
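    // inverse transforms: nbits 11/12/13/14 correspond to 2K/4K/8K/16K points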
    m_avfft_2k_context  = av_fft_init (11, 1);
    m_avfft_4k_context  = av_fft_init (12, 1);
    m_avfft_8k_context  = av_fft_init (13, 1);
    m_avfft_16k_context = av_fft_init (14, 1);
    m_fft_in  = (fft_complex*)av_malloc(sizeof(fft_complex)*M16KS);
    m_fft_out = (fft_complex*)av_malloc(sizeof(fft_complex)*M16KS);
#else

    FILE *fp;
    if((fp=fopen(dvb_config_get_path("fftw_wisdom"),"r"))!=NULL)
    {
        fftw_import_wisdom_from_file(fp);
        fclose(fp);
        m_fftw_2k_plan  = fftw_create_plan(M2KS,  FFTW_BACKWARD, FFTW_USE_WISDOM);
        m_fftw_4k_plan  = fftw_create_plan(M4KS,  FFTW_BACKWARD, FFTW_USE_WISDOM);
        m_fftw_8k_plan  = fftw_create_plan(M8KS,  FFTW_BACKWARD, FFTW_USE_WISDOM);
        m_fftw_16k_plan = fftw_create_plan(M16KS, FFTW_BACKWARD, FFTW_USE_WISDOM);
    }
    else
    {
        if((fp=fopen(dvb_config_get_path("fftw_wisdom"),"w"))!=NULL)
        {
            m_fftw_2k_plan  = fftw_create_plan(M2KS,  FFTW_BACKWARD, FFTW_MEASURE | FFTW_USE_WISDOM);
            m_fftw_4k_plan  = fftw_create_plan(M4KS,  FFTW_BACKWARD, FFTW_MEASURE | FFTW_USE_WISDOM);
            m_fftw_8k_plan  = fftw_create_plan(M8KS,  FFTW_BACKWARD, FFTW_MEASURE | FFTW_USE_WISDOM);
            m_fftw_16k_plan = fftw_create_plan(M16KS, FFTW_BACKWARD, FFTW_MEASURE | FFTW_USE_WISDOM);
            fftw_export_wisdom_to_file(fp);
            fclose(fp);
        }
    }
    m_fft_in  = (fft_complex*)fftw_malloc(sizeof(fft_complex)*M16KS);
    m_fft_out = (fft_complex*)fftw_malloc(sizeof(fft_complex)*M16KS);
#endif
    if( m_format.tm == TM_2K)
    {
        m_N = M2KS;
        switch( m_format.chan )
        {
        case CH_8M:
        case CH_7M:
        case CH_6M:
            m_IR = 1;
            break;
        case CH_4M:
        case CH_3M:
        case CH_2M:
        case CH_1M:
            m_IR = 2;
            break;
        case CH_500K:
            m_IR = 4;
            break;
        }
    }
    if( m_format.tm == TM_8K)
    {
        m_N = M8KS;
        switch( m_format.chan )
        {
        case CH_8M:
        case CH_7M:
        case CH_6M:
            m_IR = 1;
            break;
        case CH_4M:
        case CH_3M:
        case CH_2M:
        case CH_1M:
            m_IR = 2;
            break;
        }
    }
    create_correction_table( m_N, m_IR );
}
Code example #8
File: vaf_spectrumsynth.c Project: 0day-ci/FFmpeg
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SpectrumSynthContext *s = ctx->priv;
    int width = ctx->inputs[0]->w;
    int height = ctx->inputs[0]->h;
    AVRational time_base  = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int i, ch, fft_bits;
    float factor, overlap;

    outlink->sample_rate = s->sample_rate;
    outlink->time_base = (AVRational){1, s->sample_rate};

    if (width  != ctx->inputs[1]->w ||
        height != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[1]->w, ctx->inputs[1]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[1]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[1]->time_base.num,
               ctx->inputs[1]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[1]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Magnitude and Phase framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[1]->frame_rate.num,
               ctx->inputs[1]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->size = s->orientation == VERTICAL ? height / s->channels : width / s->channels;
    s->xend = s->orientation == VERTICAL ? width : height;

    for (fft_bits = 1; 1 << fft_bits < 2 * s->size; fft_bits++);

    s->win_size = 1 << fft_bits;
    s->nb_freq = 1 << (fft_bits - 1);

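    /* inverse FFT of 2^fft_bits points, used to synthesize the time-domain signal */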
    s->fft = av_fft_init(fft_bits, 1);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(EINVAL);
    }
    s->fft_data = av_calloc(s->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    for (ch = 0; ch < s->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->win_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    s->buffer = ff_get_audio_buffer(outlink, s->win_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1)
        s->overlap = overlap;
    s->hop_size = (1 - s->overlap) * s->win_size;
    for (factor = 0, i = 0; i < s->win_size; i++) {
        factor += s->window_func_lut[i] * s->window_func_lut[i];
    }
    s->factor = (factor / s->win_size) / FFMAX(1 / (1 - s->overlap) - 1, 1);

    return 0;
}
Code example #9
File: avf_showfreqs.c Project: MAXsundai/FFmpeg
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowFreqsContext *s = ctx->priv;
    float overlap;
    int i;

    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;
    av_audio_fifo_free(s->fifo);
    av_fft_end(s->fft);
    s->fft = av_fft_init(s->fft_bits, 0);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(ENOMEM);
    }

    /* FFT buffers: x2 for each (display) channel buffer.
     * Note: we use free and malloc instead of a realloc-like function to
     * make sure the buffer is aligned in memory for the FFT functions. */
    for (i = 0; i < s->nb_channels; i++) {
        av_freep(&s->fft_data[i]);
        av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    s->nb_channels = inlink->channels;

    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    if (!s->avg_data)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->skip_samples = (1. - s->overlap) * s->win_size;
    if (s->skip_samples < 1) {
        av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
        return AVERROR(EINVAL);
    }

    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}