Example #1
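An internal libavfilter helper: forward_status_change() propagates a status change (typically EOF) on an input link by requesting frames from every still-open output until the input status is acknowledged, then re-schedules the filter.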
static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
{
    unsigned out = 0, progress = 0;
    int ret;

    av_assert0(!in->status_out);
    if (!filter->nb_outputs) {
        /* not necessary with the current API and sinks */
        return 0;
    }
    while (!in->status_out) {
        if (!filter->outputs[out]->status_in) {
            progress++;
            ret = ff_request_frame_to_filter(filter->outputs[out]);
            if (ret < 0)
                return ret;
        }
        if (++out == filter->nb_outputs) {
            if (!progress) {
                /* Every output already closed: input no longer interesting
                   (example: overlay in shortest mode, other input closed). */
                ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
                return 0;
            }
            progress = 0;
            out = 0;
        }
    }
    ff_filter_set_ready(filter, 200);
    return 0;
}
Example #2
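ff_filter_frame_to_filter() dequeues the next frame (or, for audio, the requested number of samples) from a link's FIFO and feeds it to the destination filter; on error it records the status on the link, otherwise it schedules the destination to run again in case more input is available.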
int ff_filter_frame_to_filter(AVFilterLink *link)
{
    AVFrame *frame;
    AVFilterContext *dst = link->dst;
    int ret;

    av_assert1(ff_framequeue_queued_frames(&link->fifo));
    if (link->min_samples) {
        int min = link->min_samples;
        if (link->status_in)
            min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
        ret = take_samples(link, min, link->max_samples, &frame);
        if (ret < 0)
            return ret;
    } else {
        frame = ff_framequeue_take(&link->fifo);
    }
    /* The filter will soon have received a new frame, which may allow it to
       produce one or more: unblock its outputs. */
    filter_unblock(dst);
    ret = ff_filter_frame_framed(link, frame);
    if (ret < 0 && ret != link->status_out) {
        ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
    } else {
        /* Run once again, to see if several frames were available, or if
           the input status has also changed, or any other reason. */
        ff_filter_set_ready(dst, 300);
    }
    return ret;
}
Example #3
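ff_avfilter_link_set_out_status() records a status (EOF or an error code) on the output side of a link, updates the link's current pts when one is supplied, unblocks the destination filter and re-schedules the source.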
void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
{
    av_assert0(!link->frame_wanted_out);
    av_assert0(!link->status_out);
    link->status_out = status;
    if (pts != AV_NOPTS_VALUE)
        ff_update_link_current_pts(link, pts);
    filter_unblock(link->dst);
    ff_filter_set_ready(link->src, 200);
}
Example #4
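An activate() callback, evidently from FFmpeg's fps filter: it forwards output status back to the input, buffers up to two input frames, produces output frames from that buffer, and forwards the stored status once the buffer is drained.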
static int activate(AVFilterContext *ctx)
{
    FPSContext   *s       = ctx->priv;
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    int ret;
    int again = 0;
    int64_t status_pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* No buffered status: normal operation */
    if (!s->status) {

        /* Read available input frames if we have room */
        while (s->frames_count < 2 && ff_inlink_check_available_frame(inlink)) {
            ret = read_frame(ctx, s, inlink, outlink);
            if (ret < 0)
                return ret;
        }

        /* We do not yet have enough frames to produce output */
        if (s->frames_count < 2) {
            /* Check if we've hit EOF (or otherwise that an error status is set) */
            ret = ff_inlink_acknowledge_status(inlink, &s->status, &status_pts);
            if (ret > 0)
                update_eof_pts(ctx, s, inlink, outlink, status_pts);

            if (!ret) {
                /* If someone wants us to output, we'd better ask for more input */
                FF_FILTER_FORWARD_WANTED(outlink, inlink);
                return 0;
            }
        }
    }

    /* Buffered frames are available, so generate an output frame */
    if (s->frames_count > 0) {
        ret = write_frame(ctx, s, outlink, &again);
        /* Couldn't generate a frame, so schedule us to perform another step */
        if (again)
            ff_filter_set_ready(ctx, 100);
        return ret;
    }

    /* No frames left, so forward the status */
    if (s->status && s->frames_count == 0) {
        ff_outlink_set_status(outlink, s->status, s->next_pts);
        return 0;
    }

    return FFERROR_NOT_READY;
}
Example #5
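The counterpart of Example #3 for the input side of a link: ff_avfilter_link_set_in_status() stores the status reported by the source, clears the wanted/blocked flags and schedules the destination filter.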
void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
{
    if (link->status_in == status)
        return;
    av_assert0(!link->status_in);
    link->status_in = status;
    link->status_in_pts = pts;
    link->frame_wanted_out = 0;
    link->frame_blocked_in = 0;
    filter_unblock(link->dst);
    ff_filter_set_ready(link->dst, 200);
}
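For orientation: ff_outlink_set_status(), used in the activate() examples, appears to be a thin convenience wrapper that applies this function to one of the filter's own output links. A minimal sketch of that relationship, assuming the wrapper lives in the internal filters.h header:

static inline void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
{
    /* Setting the "in" status of an output link tells the downstream
       filter (the link's destination) that no more frames will arrive. */
    ff_avfilter_link_set_in_status(link, status, pts);
}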
Example #6
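ff_filter_frame() is the function filters call to send a frame over a link. It performs consistency checks (format and dimensions for video; format, channel count, channel layout and sample rate for audio), queues the frame on the link's FIFO and marks the destination filter as ready.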
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int ret;
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    /* Consistency checks */
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        if (strcmp(link->dst->filter->name, "buffersink") &&
            strcmp(link->dst->filter->name, "format") &&
            strcmp(link->dst->filter->name, "idet") &&
            strcmp(link->dst->filter->name, "null") &&
            strcmp(link->dst->filter->name, "scale")) {
            av_assert1(frame->format == link->format);
            av_assert1(frame->width  == link->w);
            av_assert1(frame->height == link->h);
        }
    } else {
        if (frame->format != link->format) {
            av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
            goto error;
        }
        if (av_frame_get_channels(frame) != link->channels) {
            av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
            goto error;
        }
        if (frame->channel_layout != link->channel_layout) {
            av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
            goto error;
        }
        if (frame->sample_rate != link->sample_rate) {
            av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
            goto error;
        }
    }

    link->frame_blocked_in = link->frame_wanted_out = 0;
    link->frame_count_in++;
    filter_unblock(link->dst);
    ret = ff_framequeue_add(&link->fifo, frame);
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    ff_filter_set_ready(link->dst, 300);
    return 0;

error:
    av_frame_free(&frame);
    return AVERROR_PATCHWELCOME;
}
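Filters written against the classic push model receive frames through a filter_frame() callback and hand the result on with ff_filter_frame(). A minimal pass-through sketch; the callback name and the absence of processing are illustrative only:

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];

    /* A real filter would modify, replace or drop 'in' here. */
    return ff_filter_frame(outlink, in);
}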
Example #7
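ff_request_frame() is called on behalf of a destination filter to ask the link's source for a frame: if the source has already set a status and no frames are queued, the status is acknowledged on the output side; otherwise the link is flagged as wanting a frame and the source filter is scheduled.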
int ff_request_frame(AVFilterLink *link)
{
    FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);

    if (link->status_out)
        return link->status_out;
    if (link->status_in) {
        if (ff_framequeue_queued_frames(&link->fifo)) {
            av_assert1(!link->frame_wanted_out);
            av_assert1(link->dst->ready >= 300);
            return 0;
        } else {
            /* Acknowledge status change. Filters using ff_request_frame() will
               handle the change automatically. Filters can also check the
               status directly but none do yet. */
            ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
            return link->status_out;
        }
    }
    link->frame_wanted_out = 1;
    ff_filter_set_ready(link->src, 100);
    return 0;
}
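A typical caller is a filter's request_frame callback on one of its output links, which simply pulls more data from its own input. A minimal sketch under that assumption:

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    /* Ask our own input for more data so we can eventually produce output. */
    return ff_request_frame(ctx->inputs[0]);
}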
Example #8
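An activate() callback, apparently from FFmpeg's afir audio filter: it waits until the impulse-response input reaches EOF, converts the coefficients, consumes input samples in partition-sized chunks, optionally emits a frame on the second (video) output visualising the response, and finally forwards EOF or the frame-wanted state.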
static int activate(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, status, available, wanted;
    AVFrame *in = NULL;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    if (s->response)
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);
    if (!s->eof_coeffs) {
        AVFrame *ir = NULL;

        ret = check_ir(ctx->inputs[1], ir);
        if (ret < 0)
            return ret;

        if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF)
            s->eof_coeffs = 1;

        if (!s->eof_coeffs) {
            if (ff_outlink_frame_wanted(ctx->outputs[0]))
                ff_inlink_request_frame(ctx->inputs[1]);
            else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
                ff_inlink_request_frame(ctx->inputs[1]);
            return 0;
        }
    }

    if (!s->have_coeffs && s->eof_coeffs) {
        ret = convert_coeffs(ctx);
        if (ret < 0)
            return ret;
    }

    available = ff_inlink_queued_samples(ctx->inputs[0]);
    wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
    ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
    if (ret > 0)
        ret = fir_frame(s, in, outlink);

    if (ret < 0)
        return ret;

    if (s->response && s->have_coeffs) {
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);

        if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
            s->video->pts = new_pts;
            return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
        }
    }

    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            if (s->response)
                ff_outlink_set_status(ctx->outputs[1], status, pts);
            return 0;
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (s->response &&
        ff_outlink_frame_wanted(ctx->outputs[1]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
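Example #4 and Example #8 follow the same activate() pattern: forward status back, consume input, produce output, acknowledge EOF, forward the frame-wanted flag. A minimal pass-through sketch of that skeleton, assuming the usual helpers from libavfilter's internal filters.h (a real filter would do its work where the frame is forwarded):

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame = NULL;
    int64_t pts;
    int ret, status;

    /* 1. Propagate an EOF/error on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* 2. Consume one queued input frame, if any, and send it on. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return ff_filter_frame(outlink, frame);

    /* 3. No frame queued: if the input reported a status, forward it. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }

    /* 4. Otherwise, if the output wants a frame, request more input. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}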