Example 1
0
// Push one demuxer packet through the spdif pseudo-muxer and hand the
// resulting IEC 61937 frame back as a decoded audio frame.
// Returns 0 on success (also for a NULL packet), -1 on failure.
static int decode_packet(struct dec_audio *da, struct demux_packet *mpkt,
                         struct mp_audio **out)
{
    struct spdifContext *ctx = da->priv;

    ctx->out_buffer_len = 0;

    if (!mpkt)
        return 0;

    double packet_pts = mpkt->pts;

    AVPacket avpkt;
    mp_set_av_packet(&avpkt, mpkt, NULL);
    mpkt->len = 0; // will be fully consumed
    avpkt.pts = avpkt.dts = 0;

    // Lazily set up the muxer on the first packet.
    if (!ctx->lavf_ctx && init_filter(da, &avpkt) < 0)
        return -1;

    int err = av_write_frame(ctx->lavf_ctx, &avpkt);
    avio_flush(ctx->lavf_ctx->pb);
    if (err < 0)
        return -1;

    // The muxer wrote the burst into ctx->out_buffer; copy it out.
    int nsamples = ctx->out_buffer_len / ctx->fmt.sstride;
    *out = mp_audio_pool_get(ctx->pool, &ctx->fmt, nsamples);
    if (!*out)
        return -1;

    memcpy((*out)->planes[0], ctx->out_buffer, ctx->out_buffer_len);
    (*out)->pts = packet_pts;

    return 0;
}
Example 2
0
// Pull one chunk of time-stretched audio out of librubberband.
// Feeds buffered input (p->pending) into the stretcher until it has output
// available, retrieves that output, and queues it as an output frame.
// Returns 0 on success, -1 on allocation failure.
static int filter_out(struct af_instance *af)
{
    struct priv *p = af->priv;

    // Feed input until rubberband can produce at least one output sample,
    // or until we run out of input (break conditions below).
    while (rubberband_available(p->rubber) <= 0) {
        const float *dummy[MP_NUM_CHANNELS] = {0};
        const float **in_data = dummy;
        size_t in_samples = 0;
        if (p->pending) {
            if (!p->pending->samples)
                break;

            // recover from previous EOF
            if (p->needs_reset) {
                rubberband_reset(p->rubber);
                p->rubber_delay = 0;
            }
            p->needs_reset = false;

            // Feed only as much as the stretcher asked for.
            size_t needs = rubberband_get_samples_required(p->rubber);
            in_data = (void *)&p->pending->planes;
            in_samples = MPMIN(p->pending->samples, needs);
        }

        if (p->needs_reset)
            break; // previous EOF
        p->needs_reset = !p->pending; // EOF

        // Final flag (needs_reset) tells rubberband to flush on EOF.
        rubberband_process(p->rubber, in_data, in_samples, p->needs_reset);
        p->rubber_delay += in_samples;

        if (!p->pending)
            break;
        mp_audio_skip_samples(p->pending, in_samples);
    }

    int out_samples = rubberband_available(p->rubber);
    if (out_samples > 0) {
        struct mp_audio *out =
            mp_audio_pool_get(af->out_pool, af->data, out_samples);
        if (!out)
            return -1;
        if (p->pending)
            mp_audio_copy_config(out, p->pending);

        float **out_data = (void *)&out->planes;
        out->samples = rubberband_retrieve(p->rubber, out_data, out->samples);
        // Output samples correspond to speed-scaled input samples, so scale
        // them back when updating the buffered-input estimate.
        p->rubber_delay -= out->samples * p->speed;

        af_add_output_frame(af, out);
    }

    // Report filter latency in seconds: samples buffered inside rubberband
    // plus pending input not yet consumed.
    int delay_samples = p->rubber_delay;
    if (p->pending)
        delay_samples += p->pending->samples;
    af->delay = delay_samples / (af->data->rate * p->speed);

    return 0;
}
Example 3
0
File: af_pan.c Project: ThreeGe/mpv
// Channel panning: each output channel is a weighted sum of all input
// channels, with weights taken from the per-instance level matrix.
// Consumes (frees) the input frame; queues one output frame.
// Returns 0 on success, -1 on allocation failure.
static int filter_frame(struct af_instance *af, struct mp_audio *c)
{
    if (!c)
        return 0;
    struct mp_audio *frame_out =
        mp_audio_pool_get(af->out_pool, &af->fmt_out, c->samples);
    if (!frame_out) {
        talloc_free(c);
        return -1;
    }
    mp_audio_copy_attributes(frame_out, c);

    af_pan_t *pan = af->priv;          // Setup for this instance
    int in_ch = c->nch;                // Number of input channels
    int out_ch = frame_out->nch;       // Number of output channels
    float *src = c->planes[0];         // Input audio data
    float *dst = frame_out->planes[0]; // Output audio data
    float *src_end = src + c->samples * in_ch;

    // Execute panning
    // FIXME: Too slow
    while (src < src_end) {
        for (int ch = 0; ch < out_ch; ch++) {
            float acc = 0.0;
            for (int k = 0; k < in_ch; k++)
                acc += src[k] * pan->level[ch][k];
            dst[ch] = acc;
        }
        dst += out_ch;
        src += in_ch;
    }

    talloc_free(c);
    af_add_output_frame(af, frame_out);
    return 0;
}
Example 4
0
static int filter(struct af_instance *af, struct mp_audio *data)
{
    if (!data)
        return 0;
    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, data->samples);
    if (!out) {
        talloc_free(data);
        return -1;
    }
    mp_audio_copy_attributes(out, data);

    size_t len = mp_audio_psize(data) / data->bps;
    if (data->bps == 4) {
        for (int s = 0; s < len; s++) {
            uint32_t val = *((uint32_t *)data->planes[0] + s);
            uint8_t *ptr = (uint8_t *)out->planes[0] + s * 3;
            ptr[0] = val >> SHIFT(0);
            ptr[1] = val >> SHIFT(1);
            ptr[2] = val >> SHIFT(2);
        }
    } else {
        for (int s = 0; s < len; s++) {
Example 5
0
// Read one packet from the demuxer asynchronously, push it through the
// spdif pseudo-muxer, and return the resulting IEC 61937 burst.
// Returns 0 on success, AD_WAIT if no packet is ready yet, AD_EOF at end
// of stream, AD_ERR on failure.
static int decode_packet(struct dec_audio *da, struct mp_audio **out)
{
    struct spdifContext *ctx = da->priv;
    AVFormatContext *lavf = ctx->lavf_ctx;

    ctx->out_buffer_len = 0;

    struct demux_packet *pkt;
    if (demux_read_packet_async(da->header, &pkt) == 0)
        return AD_WAIT;
    if (!pkt)
        return AD_EOF;

    AVPacket avpkt;
    mp_set_av_packet(&avpkt, pkt, NULL);
    avpkt.pts = avpkt.dts = 0;
    // Propagate the packet timestamp to the decoder state if valid.
    if (pkt->pts != MP_NOPTS_VALUE) {
        da->pts = pkt->pts;
        da->pts_offset = 0;
    }
    int werr = av_write_frame(lavf, &avpkt);
    talloc_free(pkt);
    avio_flush(lavf->pb);
    if (werr < 0)
        return AD_ERR;

    // The muxer wrote the burst into ctx->out_buffer; copy it out.
    int nsamples = ctx->out_buffer_len / ctx->fmt.sstride;
    *out = mp_audio_pool_get(da->pool, &ctx->fmt, nsamples);
    if (!*out)
        return AD_ERR;

    memcpy((*out)->planes[0], ctx->out_buffer, ctx->out_buffer_len);

    return 0;
}
Example 6
0
// Encode one frame's worth of buffered input with the libavcodec AC-3
// encoder and queue the result as one output frame, optionally prefixed
// with an IEC 61937 (S/PDIF burst) header.
// Returns 0 on success or when more input is needed, -1 on error.
static int filter_out(struct af_instance *af)
{
    af_ac3enc_t *s = af->priv;
    if (!fill_buffer(af))
        return 0; // need more input

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        MP_FATAL(af, "Could not allocate memory \n");
        return -1;
    }
    // Point the AVFrame at our buffered planar input without copying.
    frame->nb_samples = s->in_samples;
    frame->format = s->lavc_actx->sample_fmt;
    frame->channel_layout = s->lavc_actx->channel_layout;
    assert(s->input->num_planes <= AV_NUM_DATA_POINTERS);
    frame->extended_data = frame->data;
    for (int n = 0; n < s->input->num_planes; n++)
        frame->data[n] = s->input->planes[n];
    frame->linesize[0] = s->input->samples * s->input->sstride;

    int ok;
    int lavc_ret = avcodec_encode_audio2(s->lavc_actx, &s->pkt, frame, &ok);
    av_frame_free(&frame);
    s->input->samples = 0; // input buffer was fully consumed by the encoder
    if (lavc_ret < 0 || !ok) {
        MP_FATAL(af, "Encode failed.\n");
        return -1;
    }

    MP_DBG(af, "avcodec_encode_audio got %d, pending %d.\n",
            s->pkt.size, s->pending->samples);

    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, s->out_samples);
    if (!out)
        return -1;
    mp_audio_copy_attributes(out, s->pending);

    int frame_size = s->pkt.size;
    int header_len = 0;
    char hdr[8];

    if (s->cfg_add_iec61937_header && s->pkt.size > 5) {
        int bsmod = s->pkt.data[5] & 0x7;
        int len = frame_size;

        // IEC 61937 bursts are fixed-size; the payload is zero-padded below.
        frame_size = AC3_FRAME_SIZE * 2 * 2;
        header_len = 8;

        AV_WL16(hdr,     0xF872);   // iec 61937 syncword 1
        AV_WL16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2
        hdr[5] = bsmod;             // bsmod
        hdr[4] = 0x01;              // data-type ac3
        AV_WL16(hdr + 6, len << 3); // number of bits in payload
    }

    if (frame_size > out->samples * out->sstride)
        abort();

    char *buf = (char *)out->planes[0];
    memcpy(buf, hdr, header_len);
    memcpy(buf + header_len, s->pkt.data, s->pkt.size);
    memset(buf + header_len + s->pkt.size, 0,
            frame_size - (header_len + s->pkt.size));
    // Swap bytes of each 16-bit payload word (presumably for the S/PDIF
    // byte order expected downstream — confirm against swap_16's contract).
    swap_16((uint16_t *)(buf + header_len), s->pkt.size / 2);
    out->samples = frame_size / out->sstride;
    af_add_output_frame(af, out);
    update_delay(af);
    return 0;
}
Example 7
0
// Encode buffered input to AC-3 and queue one output frame, optionally
// prefixed with an IEC 61937 (S/PDIF burst) header. Supports both the new
// libavcodec send/receive API and the legacy avcodec_encode_audio2() path.
// Returns 0 on success or when more input must be buffered, -1 on error.
static int filter_out(struct af_instance *af)
{
    af_ac3enc_t *s = af->priv;

    if (!s->pending)
        return 0;

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        MP_FATAL(af, "Could not allocate memory \n");
        return -1;
    }
    int err = -1;

    AVPacket pkt = {0};
    av_init_packet(&pkt);

#if HAVE_AVCODEC_NEW_CODEC_API
    // Send input as long as it wants.
    while (1) {
        err = read_input_frame(af, frame);
        if (err < 0)
            goto done;
        if (err == 0)
            break;
        err = -1;
        int lavc_ret = avcodec_send_frame(s->lavc_actx, frame);
        // On EAGAIN, we're supposed to read remaining output.
        if (lavc_ret == AVERROR(EAGAIN))
            break;
        if (lavc_ret < 0) {
            MP_FATAL(af, "Encode failed.\n");
            goto done;
        }
        s->encoder_buffered += s->input->samples;
        s->input->samples = 0;
    }
    int lavc_ret = avcodec_receive_packet(s->lavc_actx, &pkt);
    if (lavc_ret == AVERROR(EAGAIN)) {
        // Need to buffer more input.
        err = 0;
        goto done;
    }
    if (lavc_ret < 0) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#else
    err = read_input_frame(af, frame);
    if (err < 0)
        goto done;
    if (err == 0)
        goto done;
    err = -1;
    int ok;
    int lavc_ret = avcodec_encode_audio2(s->lavc_actx, &pkt, frame, &ok);
    s->input->samples = 0;
    if (lavc_ret < 0 || !ok) {
        MP_FATAL(af, "Encode failed.\n");
        goto done;
    }
#endif

    MP_DBG(af, "avcodec_encode_audio got %d, pending %d.\n",
           pkt.size, s->pending->samples + s->input->samples);

    // One AC-3 frame's worth of samples left the encoder's queue.
    s->encoder_buffered -= AC3_FRAME_SIZE;

    struct mp_audio *out =
        mp_audio_pool_get(af->out_pool, af->data, s->out_samples);
    if (!out)
        goto done;
    mp_audio_copy_attributes(out, s->pending);

    int frame_size = pkt.size;
    int header_len = 0;
    char hdr[8];

    if (s->cfg_add_iec61937_header && pkt.size > 5) {
        int bsmod = pkt.data[5] & 0x7;
        int len = frame_size;

        // IEC 61937 bursts are fixed-size; the payload is zero-padded below.
        frame_size = AC3_FRAME_SIZE * 2 * 2;
        header_len = 8;

        AV_WL16(hdr,     0xF872);   // iec 61937 syncword 1
        AV_WL16(hdr + 2, 0x4E1F);   // iec 61937 syncword 2
        hdr[5] = bsmod;             // bsmod
        hdr[4] = 0x01;              // data-type ac3
        AV_WL16(hdr + 6, len << 3); // number of bits in payload
    }

    if (frame_size > out->samples * out->sstride)
        abort();

    char *buf = (char *)out->planes[0];
    memcpy(buf, hdr, header_len);
    memcpy(buf + header_len, pkt.data, pkt.size);
    memset(buf + header_len + pkt.size, 0,
           frame_size - (header_len + pkt.size));
    // Swap bytes of each 16-bit payload word (presumably for the S/PDIF
    // byte order expected downstream — confirm against swap_16's contract).
    swap_16((uint16_t *)(buf + header_len), pkt.size / 2);
    out->samples = frame_size / out->sstride;
    af_add_output_frame(af, out);

    err = 0;
done:
    // Single cleanup path: release packet/frame and refresh latency estimate.
    av_packet_unref(&pkt);
    av_frame_free(&frame);
    update_delay(af);
    return err;
}
Example 8
0
// Passive matrix-surround decoder: derives the rear channel(s) from the
// difference of an Lt/Rt stereo downmix, low-pass filtered at 7 kHz and
// delayed via a circular queue. Consumes (frees) the input frame and
// queues one output frame. Returns 0 on success, -1 on allocation failure.
static int filter_frame(struct af_instance *af, struct mp_audio *data)
{
  if (!data)
    return 0;
  struct mp_audio *outframe =
    mp_audio_pool_get(af->out_pool, &af->fmt_out, data->samples);
  if (!outframe) {
    talloc_free(data);
    return -1;
  }
  mp_audio_copy_attributes(outframe, data);

  af_surround_t* s   = (af_surround_t*)af->priv;
  const float*   m   = steering_matrix[0];
  float*         in  = data->planes[0];         // Input audio data
  float*         out = outframe->planes[0];     // Output audio data
  float*         end = in + data->samples * data->nch;
  int            i   = s->i;    // Filter queue index
  int            ri  = s->ri;   // Read index for delay queue
  int            wi  = s->wi;   // Write index for delay queue

  while(in < end){
    /* Dominance:
       abs(in[0])  abs(in[1]);
       abs(in[0]+in[1])  abs(in[0]-in[1]);
       10 * log( abs(in[0]) / (abs(in[1])|1) );
       10 * log( abs(in[0]+in[1]) / (abs(in[0]-in[1])|1) ); */

    /* About volume balancing...
       Surround encoding does the following:
           Lt=L+.707*C+.707*S, Rt=R+.707*C-.707*S
       So S should be extracted as:
           (Lt-Rt)
       But we are splitting the S to two output channels, so we
       must take 3dB off as we split it:
           Ls=Rs=.707*(Lt-Rt)
       Trouble is, Lt could be +1, Rt -1, so possibility that S will
       overflow. So to avoid that, we cut L/R by 3dB (*.707), and S by
       6dB (/2). This keeps the overall balance, but guarantees no
       overflow. */

    // Output front left and right
    out[0] = m[0]*in[0] + m[1]*in[1];
    out[1] = m[2]*in[0] + m[3]*in[1];

    // Low-pass output @ 7kHz
    FIR((&s->lq[i]), s->w, s->dl[wi]);

    // Delay output by d ms
    out[2] = s->dl[ri];

#ifdef SPLITREAR
    // Low-pass output @ 7kHz
    FIR((&s->rq[i]), s->w, s->dr[wi]);

    // Delay output by d ms
    out[3] = s->dr[ri];
#else
    // Mono rear: right surround is the phase-inverted left surround.
    out[3] = -out[2];
#endif

    // Update delay queues indexes
    UPDATEQI(ri);
    UPDATEQI(wi);

    // Calculate and save surround in circular queue
#ifdef SPLITREAR
    ADDQUE(i, s->rq, s->lq, m[6]*in[0]+m[7]*in[1], m[8]*in[0]+m[9]*in[1]);
#else
    ADDQUE(i, s->lq, m[4]*in[0]+m[5]*in[1]);
#endif

    // Next sample...
    in = &in[data->nch];
    out = &out[af->data->nch];
  }

  // Save indexes so the queues continue seamlessly on the next frame
  s->i  = i; s->ri = ri; s->wi = wi;

  talloc_free(data);
  af_add_output_frame(af, outframe);
  return 0;
}
Example 9
0
// Time-stretch audio by overlap-and-add of fixed-size windows (scaletempo).
// A NULL `data` flushes; at scale 1.0 the input is passed through untouched.
// Consumes (frees) the input frame. Returns 0 on success, -1 on allocation
// failure.
static int filter(struct af_instance *af, struct mp_audio *data)
{
    af_scaletempo_t *s = af->priv;

    // Fast path: no stretching requested, pass the frame through.
    if (s->scale == 1.0) {
        af->delay = 0;
        af_add_output_frame(af, data);
        return 0;
    }

    // Worst-case output size for this input (one extra stride for leftovers).
    int in_samples = data ? data->samples : 0;
    struct mp_audio *out = mp_audio_pool_get(af->out_pool, af->data,
        ((int)(in_samples / s->frames_stride_scaled) + 1) * s->frames_stride);
    if (!out) {
        talloc_free(data);
        return -1;
    }
    if (data)
        mp_audio_copy_attributes(out, data);

    int offset_in = fill_queue(af, data, 0);
    int8_t *pout = out->planes[0];
    // Emit one output stride per full analysis window in the queue.
    while (s->bytes_queued >= s->bytes_queue) {
        int ti;
        float tf;
        int bytes_off = 0;

        // output stride
        if (s->output_overlap) {
            // Search for the best-matching overlap position, then cross-fade.
            if (s->best_overlap_offset)
                bytes_off = s->best_overlap_offset(s);
            s->output_overlap(s, pout, bytes_off);
        }
        memcpy(pout + s->bytes_overlap,
               s->buf_queue + bytes_off + s->bytes_overlap,
               s->bytes_standing);
        pout += s->bytes_stride;

        // input stride
        memcpy(s->buf_overlap,
               s->buf_queue + bytes_off + s->bytes_stride,
               s->bytes_overlap);
        // Carry the fractional stride error over to the next iteration so
        // the average input stride matches frames_stride_scaled exactly.
        tf = s->frames_stride_scaled + s->frames_stride_error;
        ti = (int)tf;
        s->frames_stride_error = tf - ti;
        s->bytes_to_slide = ti * s->bytes_per_frame;

        offset_in += fill_queue(af, data, offset_in);
    }

    // This filter can have a negative delay when scale > 1:
    // output corresponding to some length of input can be decided and written
    // after receiving only a part of that input.
    af->delay = (s->bytes_queued - s->bytes_to_slide) / s->scale
                / out->sstride / out->rate;

    out->samples = (pout - (int8_t *)out->planes[0]) / out->sstride;
    talloc_free(data);
    if (out->samples) {
        af_add_output_frame(af, out);
    } else {
        talloc_free(out);
    }
    return 0;
}
Example 10
0
File: af_hrtf.c Project: c-14/mpv
/* Filter data through filter

Two "tricks" are used to compensate the "color" of the KEMAR data:

1. The KEMAR data is refiltered to ensure that the front L, R channels
on the same side of the ear are equalized (especially in the high
frequencies).

2. A bass compensation is introduced to ensure that 0-200 Hz are not
damped (without any real 3D acoustical image, however).
*/
// Consumes (frees) the input frame and queues one stereo output frame.
// Returns 0 on success, -1 on allocation failure.
static int filter(struct af_instance *af, struct mp_audio *data)
{
    af_hrtf_t *s = af->priv;

    if (!data)
        return 0;
    struct mp_audio *outframe =
        mp_audio_pool_get(af->out_pool, &af->fmt_out, data->samples);
    if (!outframe) {
        talloc_free(data);
        return -1;
    }
    mp_audio_copy_attributes(outframe, data);

    short *in = data->planes[0]; // Input audio data
    short *out = outframe->planes[0]; // Output audio data
    short *end = in + data->samples * data->nch; // Loop end
    float common, left, right, diff, left_b, right_b;
    const int dblen = s->dlbuflen, hlen = s->hrflen, blen = s->basslen;

    // One-time informational log about the selected decode mode.
    if(s->print_flag) {
        s->print_flag = 0;
        switch (s->decode_mode) {
        case HRTF_MIX_51:
          MP_INFO(af, "Using HRTF to mix %s discrete surround into "
                 "L, R channels\n", s->matrix_mode ? "5+1" : "5");
          break;
        case HRTF_MIX_STEREO:
          MP_INFO(af, "Using HRTF to mix stereo into "
                 "L, R channels\n");
          break;
        case HRTF_MIX_MATRIX2CH:
          MP_INFO(af, "Using active matrix to decode 2 channel "
                 "input, HRTF to mix %s matrix surround into "
                 "L, R channels\n", "3/2");
          break;
        default:
          MP_WARN(af, "bogus decode_mode: %d\n", s->decode_mode);
          break;
        }

       if(s->matrix_mode)
          MP_INFO(af, "Using active matrix to decode rear center "
                 "channel\n");
    }

    /* MPlayer's 5 channel layout (notation for the variable):
     *
     * 0: L (LF), 1: R (RF), 2: Ls (LR), 3: Rs (RR), 4: C (CF), matrix
     * encoded: Cs (CR)
     *
     * or: L = left, C = center, R = right, F = front, R = rear
     *
     * Filter notation:
     *
     *      CF
     * OF        AF
     *      Ear->
     * OR        AR
     *      CR
     *
     * or: C = center, A = same side, O = opposite, F = front, R = rear
     */

    while(in < end) {
        const int k = s->cyc_pos;

        update_ch(s, in, k);

        /* Simulate a 7.5 ms -20 dB echo of the center channel in the
           front channels (like reflection from a room wall) - a kind of
           psycho-acoustically "cheating" to focus the center front
           channel, which is normally hard to be perceived as front */
        s->lf[k] += CFECHOAMPL * s->cf[(k + CFECHODELAY) % s->dlbuflen];
        s->rf[k] += CFECHOAMPL * s->cf[(k + CFECHODELAY) % s->dlbuflen];

        switch (s->decode_mode) {
        case HRTF_MIX_51:
        case HRTF_MIX_MATRIX2CH:
           /* Mixer filter matrix */
           common = conv(dblen, hlen, s->cf, s->cf_ir, k + s->cf_o);
           if(s->matrix_mode) {
              /* In matrix decoding mode, the rear channel gain must be
                 renormalized, as there is an additional channel. */
              matrix_decode(in, k, 2, 3, 0, s->dlbuflen,
                            s->lr_fwr, s->rr_fwr,
                            s->lrprr_fwr, s->lrmrr_fwr,
                            &(s->adapt_lr_gain), &(s->adapt_rr_gain),
                            &(s->adapt_lrprr_gain), &(s->adapt_lrmrr_gain),
                            s->lr, s->rr, NULL, NULL, s->cr);
              common +=
                 conv(dblen, hlen, s->cr, s->cr_ir, k + s->cr_o) *
                 M1_76DB;
              left    =
                 ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
                   conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o) +
                   (conv(dblen, hlen, s->lr, s->ar_ir, k + s->ar_o) +
                    conv(dblen, hlen, s->rr, s->or_ir, k + s->or_o)) *
                   M1_76DB + common);
              right   =
                 ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
                   conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o) +
                   (conv(dblen, hlen, s->rr, s->ar_ir, k + s->ar_o) +
                    conv(dblen, hlen, s->lr, s->or_ir, k + s->or_o)) *
                   M1_76DB + common);
           } else {
              left    =
                 ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
                   conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o) +
                   conv(dblen, hlen, s->lr, s->ar_ir, k + s->ar_o) +
                   conv(dblen, hlen, s->rr, s->or_ir, k + s->or_o) +
                   common);
              right   =
                 ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
                   conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o) +
                   conv(dblen, hlen, s->rr, s->ar_ir, k + s->ar_o) +
                   conv(dblen, hlen, s->lr, s->or_ir, k + s->or_o) +
                   common);
           }
           break;
        case HRTF_MIX_STEREO:
           left    =
              ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
                conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o));
           right   =
              ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
                conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o));
           break;
        default:
            /* make gcc happy */
            left = 0.0;
            right = 0.0;
            break;
        }

        /* Bass compensation for the lower frequency cut of the HRTF.  A
           cross talk of the left and right channel is introduced to
           match the directional characteristics of higher frequencies.
           The bass will not have any real 3D perception, but that is
           OK (note at 180 Hz, the wavelength is about 2 m, and any
           spatial perception is impossible). */
        left_b  = conv(dblen, blen, s->ba_l, s->ba_ir, k);
        right_b = conv(dblen, blen, s->ba_r, s->ba_ir, k);
        left  += (1 - BASSCROSS) * left_b  + BASSCROSS * right_b;
        right += (1 - BASSCROSS) * right_b + BASSCROSS * left_b;
        /* Also mix the LFE channel (if available) */
        if(data->nch >= 6) {
            left  += in[5] * M3_01DB;
            right += in[5] * M3_01DB;
        }

        /* Amplitude renormalization. */
        left  *= AMPLNORM;
        right *= AMPLNORM;

        /* NOTE(review): this switch has no default, so with a bogus
           decode_mode out[0]/out[1] are left unwritten for this sample. */
        switch (s->decode_mode) {
        case HRTF_MIX_51:
        case HRTF_MIX_STEREO:
           /* "Cheating": linear stereo expansion to amplify the 3D
              perception.  Note: Too much will destroy the acoustic space
              and may even result in headaches. */
           diff = STEXPAND2 * (left - right);
           out[0] = av_clip_int16(left  + diff);
           out[1] = av_clip_int16(right - diff);
           break;
        case HRTF_MIX_MATRIX2CH:
           /* Don't attempt any stereo expansion with matrix encoded
              sources.  The L, R channels are already stereo expanded
              by the steering, any further stereo expansion will sound
              very unnatural. */
           out[0] = av_clip_int16(left);
           out[1] = av_clip_int16(right);
           break;
        }

        /* Next sample... */
        in = &in[data->nch];
        out = &out[af->data->nch];
        // Delay buffers are cyclic; cyc_pos counts down and wraps.
        (s->cyc_pos)--;
        if(s->cyc_pos < 0)
            s->cyc_pos += dblen;
    }

    talloc_free(data);
    af_add_output_frame(af, outframe);
    return 0;
}