Example #1
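This activate() callback comes from FFmpeg's alphamerge video filter (libavfilter/vf_alphamerge.c). It buffers one frame from each of its two inputs (the main video and the alpha stream), merges them with draw_frame() once both are available, and otherwise forwards status between links and requests a frame from whichever input is still missing.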
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AlphaMergeContext *s = ctx->priv;
    int ret;

    /* Propagate EOF/error status from the output back to both inputs. */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* Buffer one frame from the main input, if none is held yet. */
    if (!s->main_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->main_frame);
        if (ret < 0)
            return ret;
    }

    /* Likewise for the alpha input. */
    if (!s->alpha_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->alpha_frame);
        if (ret < 0)
            return ret;
    }

    /* Once both halves of a pair are available, merge and emit. */
    if (s->main_frame && s->alpha_frame) {
        draw_frame(ctx, s->main_frame, s->alpha_frame);
        ret = ff_filter_frame(outlink, s->main_frame);
        av_frame_free(&s->alpha_frame);
        s->main_frame = NULL;
        return ret;
    }

    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    /* If downstream wants a frame, request one from whichever input
     * has not reached EOF and is still missing its frame. */
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->main_frame) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->alpha_frame) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
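The same consume/forward/request pattern recurs in all three examples. As an orientation, here is a minimal single-input pass-through activate() distilled from them; it is a sketch using the same libavfilter helpers, not code taken from any real filter.

/* Minimal pass-through activate(): a sketch distilled from the examples
 * on this page; a real filter would transform the frame before sending it. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame = NULL;
    int ret;

    /* Propagate EOF/error status from the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Consume one frame, if available, and send it downstream unchanged. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (frame)
        return ff_filter_frame(outlink, frame);

    /* No frame: forward status downstream and frame requests upstream. */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}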
Example #2
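This activate() is from FFmpeg's cue filter (libavfilter/f_cue.c), which delays a stream until a given wallclock cue time. It is a small state machine driven by s->status: record the first timestamp (0), pass frames through during the preroll window (1), buffer frames up to the configured buffer duration (2), sleep until the cue time arrives (3), drain the buffered frames (4), then pass frames straight through (5).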
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    CueContext *s = ctx->priv;
    int64_t pts;
    AVFrame *frame = NULL;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Consume input only in the states that accept frames directly
     * (before the cue fires, or in steady-state pass-through). */
    if (s->status < 3 || s->status == 5) {
        int ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (frame)
            pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    }

    /* Status 0: remember the timestamp of the very first frame. */
    if (!s->status && frame) {
        s->first_pts = pts;
        s->status++;
    }
    /* Status 1: pass frames through until the preroll window ends. */
    if (s->status == 1 && frame) {
        if (pts - s->first_pts < s->preroll)
            return ff_filter_frame(outlink, frame);
        s->first_pts = pts;
        s->status++;
    }
    /* Status 2: queue frames until the buffer is full or the cue time
     * has already passed. */
    if (s->status == 2 && frame) {
        int ret = ff_framequeue_add(&s->queue, frame);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }
        frame = NULL;
        if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
            s->status++;
    }
    /* Status 3: sleep until the cue wallclock time arrives. */
    if (s->status == 3) {
        int64_t diff;
        while ((diff = (av_gettime() - s->cue)) < 0)
            av_usleep(av_clip(-diff / 2, 100, 1000000));
        s->status++;
    }
    /* Status 4: drain the queued frames, one per activation. */
    if (s->status == 4) {
        if (ff_framequeue_queued_frames(&s->queue))
            return ff_filter_frame(outlink, ff_framequeue_take(&s->queue));
        s->status++;
    }
    /* Status 5: steady-state pass-through. */
    if (s->status == 5 && frame)
        return ff_filter_frame(outlink, frame);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
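A typical invocation of the filter this callback implements might look like ffmpeg -i in.mp4 -filter:v cue=cue=1554200000000000 out.mp4, where (per the FFmpeg filter documentation) the cue option is a UNIX timestamp in microseconds, and preroll and buffer are durations controlling the pass-through and buffering phases seen above.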
Example #3
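This activate() is from FFmpeg's sidechaincompress audio filter (libavfilter/af_sidechaincompress.c). Each input (the main signal and the sidechain) is drained into its own AVAudioFifo; once both FIFOs hold samples, it reads the common minimum, runs the compressor with the sidechain controlling the gain, and emits one output frame.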
static int activate(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, nb_samples;
    double *dst;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain any frame available on the main input into its FIFO. */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                            in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    /* Likewise for the sidechain input. */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                            in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    /* Only as many samples as both FIFOs can supply get processed. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        out->pts = s->pts;
        s->pts += nb_samples;

        /* The sidechain signal (in[1]) drives the gain applied to the
         * main signal (in[0]). */
        compressor(s, (double *)in[0]->data[0], dst,
                   (double *)in[1]->data[0], nb_samples,
                   s->level_in, s->level_sc,
                   ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], ctx->outputs[0]);
    /* Request more input from whichever side has run dry. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!av_audio_fifo_size(s->fifo[0]))
            ff_inlink_request_frame(ctx->inputs[0]);
        if (!av_audio_fifo_size(s->fifo[1]))
            ff_inlink_request_frame(ctx->inputs[1]);
    }
    return 0;
}
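All three callbacks are installed the same way: the filter's AVFilter definition sets its .activate field instead of the older per-pad .filter_frame/.request_frame callbacks. Below is an illustrative sketch of that wiring; ExampleContext and the pad names are hypothetical, and the FILTER_INPUTS/FILTER_OUTPUTS helpers assume a recent libavfilter tree (older trees use NULL-terminated .inputs/.outputs arrays instead).

/* Hypothetical wiring of an activate-based filter; not from FFmpeg. */
typedef struct ExampleContext {
    int dummy; /* a real filter keeps its state here */
} ExampleContext;

static const AVFilterPad example_inputs[] = {
    { .name = "default", .type = AVMEDIA_TYPE_VIDEO },
};

static const AVFilterPad example_outputs[] = {
    { .name = "default", .type = AVMEDIA_TYPE_VIDEO },
};

const AVFilter ff_vf_example = {
    .name        = "example",
    .description = NULL_IF_CONFIG_SMALL("Illustrative activate-based filter."),
    .priv_size   = sizeof(ExampleContext),
    .activate    = activate,
    FILTER_INPUTS(example_inputs),
    FILTER_OUTPUTS(example_outputs),
};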