static void fft_4k( fft_complex *in, fft_complex *out )
{
    // Zero the unused parts of the array
    memset(&m_fft_in[M4KS/4],0,sizeof(fft_complex)*M4KS/2);
    // Copy the data into the correct bins
    //memcpy(&m_fft_in[M4KS*3/4], &in[0],      sizeof(fft_complex)*M2KS/2);
    //memcpy(&m_fft_in[0],        &in[M2KS/2], sizeof(fft_complex)*M2KS/2);

    int i,m;
    m = (M4KS/2)+(M4KS/4);
    for( i = 0; i < (M2KS); i++ )
    {
        m_fft_in[m].re = in[i].re*m_c[i];
        m_fft_in[m].im = in[i].im*m_c[i];
        m = (m+1)%(M2KS*2);
    }

#ifdef USE_AVFFT
    av_fft_permute( m_avfft_4k_context, m_fft_in );
    av_fft_calc(    m_avfft_4k_context, m_fft_in );
#else
    fftw_one( m_fftw_4k_plan, m_fft_in, out );
#endif
    return;
}
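For context, the av_fft_* calls used throughout these examples come from FFmpeg's libavcodec/avfft.h API (deprecated in newer releases in favour of libavutil/tx.h): a context is created once for a fixed power-of-two size, av_fft_permute() reorders the buffer in place, and av_fft_calc() then computes the unnormalized transform in place. A minimal sketch of that lifecycle, assuming only the public avfft API (function and variable names here are illustrative):

#include <libavcodec/avfft.h>

/* Sketch: in-place forward FFT of N = 1 << nbits complex samples.
 * The buffer should be suitably aligned (e.g. allocated with av_malloc()). */
static int forward_fft_sketch(FFTComplex *data, int nbits)
{
    FFTContext *ctx = av_fft_init(nbits, 0);  /* second arg: 0 = forward, 1 = inverse */
    if (!ctx)
        return -1;
    av_fft_permute(ctx, data);                /* reorder input into the layout av_fft_calc expects */
    av_fft_calc(ctx, data);                   /* unnormalized, in-place transform */
    av_fft_end(ctx);
    return 0;
}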
Example #2
File: fft.c Project: 411697643/FFmpeg
static inline void fft_calc(FFTContext *s, FFTComplex *z)
{
#if AVFFT
    av_fft_calc(s, z);
#else
    s->fft_calc(s, z);
#endif
}
Example #3
File: fft.c Project: guyt101z/codec2-1
void fft_do(const fft_cfg cfg, const COMP *in, COMP *out) {
#ifdef KISS_FFT
    kiss_fft(cfg, in, out);
#elif defined(LIBAVCODEC_FFT)
    memcpy(out, in, cfg->size);
    av_fft_permute(cfg->context, (FFTComplex *)out);
    av_fft_calc(cfg->context, (FFTComplex *)out);
#else
#error FFT engine was not defined
#endif
}
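The KISS_FFT branch above assumes the caller built a matching configuration beforehand. A rough sketch of the corresponding setup with stock kiss_fft (plain kiss_fft_cpx buffers rather than codec2's COMP wrapper, which is assumed here to be layout-compatible):

#include "kiss_fft.h"

/* Sketch: out-of-place forward FFT of nfft complex points with stock kiss_fft.
 * kiss_fft_cpx uses .r/.i float members in the default (non fixed-point) build. */
static int kiss_fft_sketch(const kiss_fft_cpx *in, kiss_fft_cpx *out, int nfft)
{
    kiss_fft_cfg cfg = kiss_fft_alloc(nfft, 0 /* forward */, NULL, NULL);
    if (!cfg)
        return -1;
    kiss_fft(cfg, in, out);  /* cfg is reusable for further blocks of the same size */
    kiss_fft_free(cfg);
    return 0;
}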
Example #4
    // filter the complex source signal and add it to target
    void apply_filter(cfloat *signal, float *flt, float *target) {
        // filter the signal
        unsigned f;
        for (f=0;f<=halfN;f++) {
            src[f][0] = signal[f].real() * flt[f];
            src[f][1] = signal[f].imag() * flt[f];
        }
#ifdef USE_FFTW3
        // transform into time domain
        fftwf_execute(store);

        float* pT1   = &target[current_buf*halfN];
        float* pWnd1 = &wnd[0];
        float* pDst1 = &dst[0];
        float* pT2   = &target[(current_buf^1)*halfN];
        float* pWnd2 = &wnd[halfN];
        float* pDst2 = &dst[halfN];
        // add the result to target, windowed
        for (unsigned int k=0;k<halfN;k++)
        {
            // 1st part is overlap add
            *pT1++ += *pWnd1++ * *pDst1++;
            // 2nd part is set directly, as it has no history yet
            *pT2++  = *pWnd2++ * *pDst2++;
        }
#else
        // enforce odd symmetry
        for (f=1;f<halfN;f++) {
            src[N-f][0] = src[f][0];
            src[N-f][1] = -src[f][1];   // complex conjugate
        }
        av_fft_permute(fftContextReverse, (FFTComplex*)&src[0]);
        av_fft_calc(fftContextReverse, (FFTComplex*)&src[0]);

        float* pT1   = &target[current_buf*halfN];
        float* pWnd1 = &wnd[0];
        float* pDst1 = &src[0][0];
        float* pT2   = &target[(current_buf^1)*halfN];
        float* pWnd2 = &wnd[halfN];
        float* pDst2 = &src[halfN][0];
        // add the result to target, windowed
        for (unsigned int k=0;k<halfN;k++)
        {
            // 1st part is overlap add
            *pT1++ += *pWnd1++ * *pDst1; pDst1 += 2;
            // 2nd part is set directly, as it has no history yet
            *pT2++  = *pWnd2++ * *pDst2; pDst2 += 2;
        }
#endif
    }
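The windowed loop at the end of apply_filter is a plain overlap-add step: the first half of the inverse transform is mixed into output samples that still carry the tail of the previous block, while the second half is stored as-is to become the next block's overlap. A stripped-down sketch of just that step (buffer names are illustrative, not taken from the class above):

/* Sketch: overlap-add one inverse-FFT block of length 2*half.
 * out_cur already holds the previous block's tail; out_next is overwritten. */
static void overlap_add_sketch(float *out_cur, float *out_next,
                               const float *block, const float *wnd, unsigned half)
{
    for (unsigned k = 0; k < half; k++) {
        out_cur[k]  += wnd[k]        * block[k];        /* overlap with previous block */
        out_next[k]  = wnd[half + k] * block[half + k]; /* no history yet: plain store */
    }
}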
static void fft_8k( fft_complex *in, fft_complex *out )
{
    // Copy the data into the correct bins
//    memcpy(&m_fft_in[M8KS/2], &in[0],      sizeof(fft_complex)*M8KS/2);
//    memcpy(&m_fft_in[0],      &in[M8KS/2], sizeof(fft_complex)*M8KS/2);

    int i,m;
    m = (M8KS/2);
    for( i = 0; i < (M8KS); i++ )
    {
        m_fft_in[m].re = in[i].re*m_c[i];
        m_fft_in[m].im = in[i].im*m_c[i];
        m = (m+1)%M8KS;
    }

#ifdef USE_AVFFT
    av_fft_permute( m_avfft_8k_context, m_fft_in );
    av_fft_calc(    m_avfft_8k_context, m_fft_in );
#else
    fftw_one( m_fftw_8k_plan, m_fft_in, out );
#endif
}
Example #6
static void synth_window(AVFilterContext *ctx, int x)
{
    SpectrumSynthContext *s = ctx->priv;
    const int h = s->size;
    int nb = s->win_size;
    int y, f, ch;

    for (ch = 0; ch < s->channels; ch++) {
        read_fft_data(ctx, x, h, ch);

        for (y = h; y <= s->nb_freq; y++) {
            s->fft_data[ch][y].re = 0;
            s->fft_data[ch][y].im = 0;
        }

        for (y = s->nb_freq + 1, f = s->nb_freq - 1; y < nb; y++, f--) {
            s->fft_data[ch][y].re =  s->fft_data[ch][f].re;
            s->fft_data[ch][y].im = -s->fft_data[ch][f].im;
        }

        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }
}
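The second loop in synth_window is what makes the later inverse FFT come out (almost) purely real: for a real time-domain signal, bin N-k must be the complex conjugate of bin k. The same pattern appears in Example #8 below. A small stand-alone sketch of that symmetry step, assuming an N-point FFTComplex spectrum whose bins 0..N/2 are already filled:

#include <libavcodec/avfft.h>

/* Sketch: enforce Hermitian (conjugate) symmetry so the inverse FFT of spec[] is real. */
static void mirror_spectrum_sketch(FFTComplex *spec, int N)
{
    for (int k = 1; k < N / 2; k++) {
        spec[N - k].re =  spec[k].re;
        spec[N - k].im = -spec[k].im;  /* complex conjugate */
    }
    spec[0].im     = 0;  /* DC bin of a real signal is real */
    spec[N / 2].im = 0;  /* so is the Nyquist bin */
}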
Example #7
    // CORE FUNCTION: decode a block of data
    void block_decode(float *input1[2], float *input2[2], float *output[6], float center_width, float dimension, float adaption_rate) {
        // 1. scale the input by the window function; this serves a dual purpose:
        // - first, it improves the FFT resolution because boundary discontinuities (and their spurious frequencies) are removed
        // - second it allows for smooth blending of varying filters between the blocks
        {
            float* pWnd = &wnd[0];
            float* pLt = &lt[0];
            float* pRt = &rt[0];
            float* pIn0 = input1[0];
            float* pIn1 = input1[1];
            for (unsigned k=0;k<halfN;k++) {
                *pLt++ = *pIn0++ * *pWnd;
                *pRt++ = *pIn1++ * *pWnd++;
            }
            pIn0 = input2[0];
            pIn1 = input2[1];
            for (unsigned k=0;k<halfN;k++) {
                *pLt++ = *pIn0++ * *pWnd;
                *pRt++ = *pIn1++ * *pWnd++;
            }
        }

#ifdef USE_FFTW3
        // ... and transform it into the frequency domain
        fftwf_execute(loadL);
        fftwf_execute(loadR);
#else
        ff_fft_permuteRC(fftContextForward, lt, (FFTComplex*)&dftL[0]);
        av_fft_calc(fftContextForward, (FFTComplex*)&dftL[0]);

        ff_fft_permuteRC(fftContextForward, rt, (FFTComplex*)&dftR[0]);
        av_fft_calc(fftContextForward, (FFTComplex*)&dftR[0]);
#endif

        // 2. compare amplitude and phase of each DFT bin and produce the X/Y coordinates in the sound field
        //    but don't process the DC or N/2 component
        for (unsigned f=0;f<halfN;f++) {           
            // get left/right amplitudes/phases
            float ampL = amplitude(dftL[f]), ampR = amplitude(dftR[f]);
            float phaseL = phase(dftL[f]), phaseR = phase(dftR[f]);
//          if (ampL+ampR < epsilon)
//              continue;       

            // calculate the amplitude/phase difference
            float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL));
            float phaseDiff = phaseL - phaseR;
            if (phaseDiff < -PI) phaseDiff += 2*PI;
            if (phaseDiff > PI) phaseDiff -= 2*PI;
            phaseDiff = abs(phaseDiff);

            if (linear_steering) {
                // --- this is the fancy new linear mode ---

                // get sound field x/y position
                yfs[f] = get_yfs(ampDiff,phaseDiff);
                xfs[f] = get_xfs(ampDiff,yfs[f]);

                // add dimension control
                yfs[f] = clamp(yfs[f] - dimension);

                // add crossfeed control
                xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2));

                // 3. generate frequency filters for each output channel
                float left = (1-xfs[f])/2, right = (1+xfs[f])/2;
                float front = (1+yfs[f])/2, back = (1-yfs[f])/2;
                float volume[5] = {
                    front * (left * center_width + max(0,-xfs[f]) * (1-center_width)),  // left
                    front * center_level*((1-abs(xfs[f])) * (1-center_width)),          // center
                    front * (right * center_width + max(0, xfs[f]) * (1-center_width)), // right
                    back * surround_level * left,                                       // left surround
                    back * surround_level * right                                       // right surround
                };

                // adapt the prior filter
                for (unsigned c=0;c<5;c++)
                    filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c];

            } else {
                // --- this is the old & simple steering mode ---

                // calculate the amplitude/phase difference
                float ampDiff = clamp((ampL+ampR < epsilon) ? 0 : (ampR-ampL) / (ampR+ampL));
                float phaseDiff = phaseL - phaseR;
                if (phaseDiff < -PI) phaseDiff += 2*PI;
                if (phaseDiff > PI) phaseDiff -= 2*PI;
                phaseDiff = abs(phaseDiff);

                // determine sound field x-position
                xfs[f] = ampDiff;

                // determine preliminary sound field y-position from phase difference
                yfs[f] = 1 - (phaseDiff/PI)*2;

                if (abs(xfs[f]) > surround_balance) {
                    // blend linearly between the surrounds and the fronts if the balance exceeds the surround encoding balance
                    // this is necessary because the sound field is trapezoidal and will be stretched behind the listener
                    float frontness = (abs(xfs[f]) - surround_balance)/(1-surround_balance);
                    yfs[f]  = (1-frontness) * yfs[f] + frontness * 1; 
                }

                // add dimension control
                yfs[f] = clamp(yfs[f] - dimension);

                // add crossfeed control
                xfs[f] = clamp(xfs[f] * (front_separation*(1+yfs[f])/2 + rear_separation*(1-yfs[f])/2));

                // 3. generate frequency filters for each output channel, according to the signal position
                // the sum of all channel volumes must be 1.0
                float left = (1-xfs[f])/2, right = (1+xfs[f])/2;
                float front = (1+yfs[f])/2, back = (1-yfs[f])/2;
                float volume[5] = {
                    front * (left * center_width + max(0,-xfs[f]) * (1-center_width)),      // left
                    front * center_level*((1-abs(xfs[f])) * (1-center_width)),              // center
                    front * (right * center_width + max(0, xfs[f]) * (1-center_width)),     // right
                    back * surround_level*max(0,min(1,((1-(xfs[f]/surround_balance))/2))),  // left surround
                    back * surround_level*max(0,min(1,((1+(xfs[f]/surround_balance))/2)))   // right surround
                };

                // adapt the prior filter
                for (unsigned c=0;c<5;c++)
                    filter[c][f] = (1-adaption_rate)*filter[c][f] + adaption_rate*volume[c];
            }

            // ... and build the signal which we want to position
            frontL[f] = polar(ampL+ampR,phaseL);
            frontR[f] = polar(ampL+ampR,phaseR);
            avg[f] = frontL[f] + frontR[f];
            surL[f] = polar(ampL+ampR,phaseL+phase_offsetL);
            surR[f] = polar(ampL+ampR,phaseR+phase_offsetR);
            trueavg[f] = cfloat(dftL[f][0] + dftR[f][0], dftL[f][1] + dftR[f][1]);
        }

        // 4. distribute the unfiltered reference signals over the channels
        apply_filter(&frontL[0],&filter[0][0],&output[0][0]);   // front left
        apply_filter(&avg[0], &filter[1][0],&output[1][0]);     // front center
        apply_filter(&frontR[0],&filter[2][0],&output[2][0]);   // front right
        apply_filter(&surL[0],&filter[3][0],&output[3][0]);     // surround left
        apply_filter(&surR[0],&filter[4][0],&output[4][0]);     // surround right
        apply_filter(&trueavg[0],&filter[5][0],&output[5][0]);  // lfe
    }
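The steering in block_decode is driven by two per-bin measurements: a normalized amplitude difference in [-1, 1] and the absolute phase difference wrapped to [0, PI]. A compact sketch of just that measurement, with hypothetical inputs standing in for the amplitude()/phase() results used above:

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Sketch: per-bin steering cues from left/right magnitude and phase.
 * amp_diff ends up in [-1, 1] (for non-negative magnitudes), phase_diff in [0, PI]. */
static void steering_cues_sketch(float ampL, float ampR, float phaseL, float phaseR,
                                 float epsilon, float *amp_diff, float *phase_diff)
{
    float d = phaseL - phaseR;

    *amp_diff = (ampL + ampR < epsilon) ? 0.0f : (ampR - ampL) / (ampR + ampL);

    if (d < -M_PI) d += 2.0f * (float)M_PI;  /* wrap into (-PI, PI] */
    if (d >  M_PI) d -= 2.0f * (float)M_PI;
    *phase_diff = fabsf(d);
}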
Example #8
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AFFTFiltContext *s = ctx->priv;
    const int window_size = s->window_size;
    const float f = 1. / s->win_scale;
    double values[VAR_VARS_NB];
    AVFrame *out, *in = NULL;
    int ch, n, ret = 0, i, j, k;
    int start = s->start, end = s->end;

    av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples);
    av_frame_free(&frame);

    while (av_audio_fifo_size(s->fifo) >= window_size) {
        if (!in) {
            in = ff_get_audio_buffer(outlink, window_size);
            if (!in)
                return AVERROR(ENOMEM);
        }

        ret = av_audio_fifo_peek(s->fifo, (void **)in->extended_data, window_size);
        if (ret < 0)
            break;

        for (ch = 0; ch < inlink->channels; ch++) {
            const float *src = (float *)in->extended_data[ch];
            FFTComplex *fft_data = s->fft_data[ch];

            for (n = 0; n < in->nb_samples; n++) {
                fft_data[n].re = src[n] * s->window_func_lut[n];
                fft_data[n].im = 0;
            }

            for (; n < window_size; n++) {
                fft_data[n].re = 0;
                fft_data[n].im = 0;
            }
        }

        values[VAR_PTS]         = s->pts;
        values[VAR_SAMPLE_RATE] = inlink->sample_rate;
        values[VAR_NBBINS]      = window_size / 2;
        values[VAR_CHANNELS]    = inlink->channels;

        for (ch = 0; ch < inlink->channels; ch++) {
            FFTComplex *fft_data = s->fft_data[ch];
            float *buf = (float *)s->buffer->extended_data[ch];
            int x;

            values[VAR_CHANNEL] = ch;

            av_fft_permute(s->fft, fft_data);
            av_fft_calc(s->fft, fft_data);

            for (n = 0; n < window_size / 2; n++) {
                float fr, fi;

                values[VAR_BIN] = n;

                fr = av_expr_eval(s->real[ch], values, s);
                fi = av_expr_eval(s->imag[ch], values, s);

                fft_data[n].re *= fr;
                fft_data[n].im *= fi;
            }

            for (n = window_size / 2 + 1, x = window_size / 2 - 1; n < window_size; n++, x--) {
                fft_data[n].re =  fft_data[x].re;
                fft_data[n].im = -fft_data[x].im;
            }

            av_fft_permute(s->ifft, fft_data);
            av_fft_calc(s->ifft, fft_data);

            start = s->start;
            end = s->end;
            k = end;
            for (i = 0, j = start; j < k && i < window_size; i++, j++) {
                buf[j] += s->fft_data[ch][i].re * f;
            }

            for (; i < window_size; i++, j++) {
                buf[j] = s->fft_data[ch][i].re * f;
            }

            start += s->hop_size;
            end = j;
        }

        s->start = start;
        s->end = end;

        if (start >= window_size) {
            float *dst, *buf;

            start -= window_size;
            end   -= window_size;

            s->start = start;
            s->end = end;

            out = ff_get_audio_buffer(outlink, window_size);
            if (!out) {
                ret = AVERROR(ENOMEM);
                break;
            }

            out->pts = s->pts;
            s->pts += window_size;

            for (ch = 0; ch < inlink->channels; ch++) {
                dst = (float *)out->extended_data[ch];
                buf = (float *)s->buffer->extended_data[ch];

                for (n = 0; n < window_size; n++) {
                    dst[n] = buf[n] * (1 - s->overlap);
                }
                memmove(buf, buf + window_size, window_size * 4);
            }

            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                break;
        }

        av_audio_fifo_drain(s->fifo, s->hop_size);
    }

    av_frame_free(&in);
    return ret;
}
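Around the FFT work, filter_frame follows a common STFT hopping pattern with libavutil's audio FIFO: write every incoming frame, and while at least one full analysis window is buffered, peek (rather than read) one window, process it, and drain only hop_size samples so consecutive windows overlap. A bare-bones sketch of that control flow; process_window() is a hypothetical callback standing in for the per-window work above:

#include <libavutil/audio_fifo.h>

/* Sketch: overlapping-window consumption from an AVAudioFifo.
 * Samples stay in the FIFO across iterations because we peek and then drain only hop_size. */
static int hop_loop_sketch(AVAudioFifo *fifo, void **window_bufs,
                           int window_size, int hop_size,
                           int (*process_window)(void **bufs, int nb_samples))
{
    int ret = 0;

    while (av_audio_fifo_size(fifo) >= window_size) {
        ret = av_audio_fifo_peek(fifo, window_bufs, window_size);
        if (ret < 0)
            break;
        ret = process_window(window_bufs, window_size);
        if (ret < 0)
            break;
        av_audio_fifo_drain(fifo, hop_size);
    }
    return ret;
}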
Example #9
static inline void fft_calc( FFTContext *fft_ctx, FFTComplex *cplx )
{
	av_fft_permute( fft_ctx, cplx );
	av_fft_calc( fft_ctx, cplx );
}
Example #10
File: psy_snr.c Project: Senjuti/FFmpeg
static int run_psnr(FILE *f[2], int len, int shift, int skip_bytes)
{
    int i, j;
    uint64_t sse = 0;
    double sse_d = 0.0;
    uint8_t buf[2][SIZE];
    int64_t max    = (1LL << (8 * len)) - 1;
    int size0      = 0;
    int size1      = 0;
    uint64_t maxdist = 0;
    double maxdist_d = 0.0;
    int noseek;


    noseek = fseek(f[0], 0, SEEK_SET) ||
             fseek(f[1], 0, SEEK_SET);

    if (!noseek) {
        for (i = 0; i < 2; i++) {
            uint8_t *p = buf[i];
            if (fread(p, 1, 12, f[i]) != 12)
                return 1;
            if (!memcmp(p, "RIFF", 4) &&
                !memcmp(p + 8, "WAVE", 4)) {
                if (fread(p, 1, 8, f[i]) != 8)
                    return 1;
                while (memcmp(p, "data", 4)) {
                    int s = p[4] | p[5] << 8 | p[6] << 16 | p[7] << 24;
                    fseek(f[i], s, SEEK_CUR);
                    if (fread(p, 1, 8, f[i]) != 8)
                        return 1;
                }
            } else {
                fseek(f[i], -12, SEEK_CUR);
            }
        }

        fseek(f[shift < 0], abs(shift), SEEK_CUR);

        fseek(f[0], skip_bytes, SEEK_CUR);
        fseek(f[1], skip_bytes, SEEK_CUR);
    }

    fflush(stdout);
    for (;;) {
        int s0 = fread(buf[0], 1, SIZE, f[0]);
        int s1 = fread(buf[1], 1, SIZE, f[1]);
        int tempsize = FFMIN(s0,s1);
        DECLARE_ALIGNED(32, FFTComplex, fftcomplexa)[SIZE/len];
        DECLARE_ALIGNED(32, FFTComplex, fftcomplexb)[SIZE/len];

        for (j = 0; j < tempsize; j += len) {
            switch (len) {
            case 1:
            case 2: {
                int64_t a = buf[0][j];
                int64_t b = buf[1][j];
                int dist;
                if (len == 2) {
                    fftcomplexa[j/len].re = get_s16l(buf[0] + j);
                    fftcomplexb[j/len].re = get_s16l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                } else {
                    fftcomplexa[j/len].re = buf[0][j];
                    fftcomplexb[j/len].re = buf[1][j];
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                }
                dist = abs(fftcomplexa[j/len].re-fftcomplexb[j/len].re);
                if (dist > maxdist)
                    maxdist = dist;
                break;
            }
            case 4:
            case 8: {
                double dist, a, b;
                if (len == 8) {
                    fftcomplexa[j/len].re = (float) get_f64l(buf[0] + j);
                    fftcomplexb[j/len].re = (float) get_f64l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                } else {
                    fftcomplexa[j/len].re = (float) get_f32l(buf[0] + j);
                    fftcomplexb[j/len].re = (float) get_f32l(buf[1] + j);
                    fftcomplexa[j/len].im = 0;
                    fftcomplexb[j/len].im = 0;
                }
                dist = fabs(fftcomplexa[j/len].re - fftcomplexb[j/len].re);
                if (dist > maxdist_d)
                    maxdist_d = dist;
                break;
            }
            }
        }

        for(;j<SIZE;j+=len){
            fftcomplexa[j/len].re = 0;
            fftcomplexb[j/len].re = 0;
            fftcomplexa[j/len].im = 0;
            fftcomplexb[j/len].im = 0;
        }

        size0 += s0;
        size1 += s1;
        if (s0 + s1 <= 0)
            break;

        /* per-block FFT contexts; freed right after use to avoid leaking them */
        FFTContext* fftcontexta = av_fft_init(floor(log2(SIZE/len)), 0);
        av_fft_permute(fftcontexta, fftcomplexa);
        av_fft_calc(fftcontexta, fftcomplexa);
        av_fft_end(fftcontexta);

        FFTContext* fftcontextb = av_fft_init(floor(log2(SIZE/len)), 0);
        av_fft_permute(fftcontextb, fftcomplexb);
        av_fft_calc(fftcontextb, fftcomplexb);
        av_fft_end(fftcontextb);

        float* maskingfunc = get_mask_array(SIZE/len);
        float* mask = get_mask(fftcomplexa, SIZE/len, maskingfunc);
        double psysse = get_psy_sse(fftcomplexa,fftcomplexb, mask, SIZE/len);
        free(maskingfunc);
        free(mask);
        sse+=psysse;
        sse_d+=psysse;
    }

    fflush(stdout);
    i = FFMIN(size0, size1) / len;
    if (!i)
        i = 1;

    switch (len) {
    case 1:
    case 2: {
        uint64_t psnr;
        uint64_t dev = int_sqrt(((sse / i) * F * F) + (((sse % i) * F * F) + i / 2) / i);
        if (sse)
            psnr = ((2 * log16(max << 16) + log16(i) - log16(sse)) *
                    284619LL * F + (1LL << 31)) / (1LL << 32);
        else
            psnr = 1000 * F - 1; // floating point free infinity :)

        printf("stddev:%5d.%02d PSYSNR:%3d.%02d MAXDIFF:%5"PRIu64" bytes:%9d/%9d\n",
               (int)(dev / F), (int)(dev % F),
               (int)(psnr / F), (int)(psnr % F),
               maxdist, size0, size1);
        return psnr;
        }
    case 4:
    case 8: {
        char psnr_str[64];
        double psnr = INT_MAX;
        double dev = sqrt(sse_d / i);
        uint64_t scale = (len == 4) ? (1ULL << 24) : (1ULL << 32);

        if (sse_d) {
            psnr = 2 * log(DBL_MAX) - log(i / sse_d);
            snprintf(psnr_str, sizeof(psnr_str), "%5.02f", psnr);
        } else
            snprintf(psnr_str, sizeof(psnr_str), "inf");

        maxdist = maxdist_d * scale;

        printf("stddev:%10.2f PSYSNR:%s MAXDIFF:%10"PRIu64" bytes:%9d/%9d\n",
               dev * scale, psnr_str, maxdist, size0, size1);
        return psnr;
    }
    }
    return -1;
}
Example #11
static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
    const int win_size = s->win_size;
    char *colors, *color, *saveptr = NULL;
    AVFrame *out;
    int ch, n;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    for (n = 0; n < outlink->h; n++)
        memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);

    /* fill FFT input with the number of samples available */
    for (ch = 0; ch < s->nb_channels; ch++) {
        const float *p = (float *)in->extended_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
        for (; n < win_size; n++) {
            s->fft_data[ch][n].re = 0;
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run FFT on each samples set */
    for (ch = 0; ch < s->nb_channels; ch++) {
        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }

#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))

    colors = av_strdup(s->colors);
    if (!colors) {
        av_frame_free(&out);
        return AVERROR(ENOMEM);
    }

    for (ch = 0; ch < s->nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_y = -1, f;
        double a;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
        plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);

        for (f = 1; f < s->nb_freq; f++) {
            a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);

            plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
        }
    }

    av_free(colors);
    out->pts = in->pts;
    return ff_filter_frame(outlink, out);
}
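Like Example #8, this filter multiplies each input sample by a precomputed window_func_lut before the FFT. That table is simply the window function evaluated once at init time; a minimal sketch with a Hann window (FFmpeg's filters actually build the table through an internal window-function helper that supports many window types):

#include <math.h>
#include <stdlib.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Sketch: build a Hann window lookup table of win_size samples.
 * The caller frees the returned buffer; NULL is returned on failure. */
static float *hann_lut_sketch(int win_size)
{
    if (win_size < 2)
        return NULL;
    float *lut = malloc(sizeof(*lut) * win_size);
    if (!lut)
        return NULL;
    for (int n = 0; n < win_size; n++)
        lut[n] = 0.5f * (1.0f - cosf(2.0f * (float)M_PI * n / (win_size - 1)));
    return lut;
}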