Example #1
int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
{
    FFFrameSyncIn *in;
    int ret;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;
    in[0].time_base = ctx->inputs[0]->time_base;
    in[1].time_base = ctx->inputs[1]->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_NULL;
    in[1].after  = EXT_INFINITY;

    if (s->shortest)
        in[0].after = in[1].after = EXT_STOP;
    if (!s->repeatlast) {
        in[1].after = EXT_NULL;
        in[1].sync  = 0;
    }
    if (s->skip_initial_unpaired) {
        in[1].before = EXT_STOP;
    }

    return ff_framesync_configure(&s->fs);
}
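
Every excerpt in this collection registers an on_event callback (process_frame) that framesync invokes once it has assembled one synchronized frame per input, but none of the excerpts show that side of the API. Below is a minimal sketch for the two-input case; FooContext and the output handling are placeholders, while ff_framesync_get_frame, ff_filter_frame and the fs->parent / fs->opaque fields are the actual interfaces the examples rely on.

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;   /* the filter that owns this FFFrameSync */
    FooContext *s = fs->opaque;          /* hypothetical private context, as set via fs.opaque (unused in this sketch) */
    AVFrame *main_frame = NULL, *second_frame = NULL;
    AVFrame *out;
    int ret;

    /* With get=0 the returned frames stay owned by framesync and must not
       be freed here. */
    if ((ret = ff_framesync_get_frame(fs, 0, &main_frame,   0)) < 0 ||
        (ret = ff_framesync_get_frame(fs, 1, &second_frame, 0)) < 0)
        return ret;

    /* Placeholder output: a real filter would allocate a writable frame
       and combine both inputs into it. */
    out = av_frame_clone(main_frame);
    if (!out)
        return AVERROR(ENOMEM);

    return ff_filter_frame(ctx->outputs[0], out);
}
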
Example #2
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DisplaceContext *s = ctx->priv;
    AVFilterLink *srclink = ctx->inputs[0];
    AVFilterLink *xlink = ctx->inputs[1];
    AVFilterLink *ylink = ctx->inputs[2];
    FFFrameSyncIn *in;
    int ret;

    if (srclink->format != xlink->format ||
        srclink->format != ylink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srclink->w != xlink->w ||
        srclink->h != xlink->h ||
        srclink->w != ylink->w ||
        srclink->h != ylink->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (%dx%d) "
               "and/or third input link %s parameters (%dx%d)\n",
               ctx->input_pads[0].name, srclink->w, srclink->h,
               ctx->input_pads[1].name, xlink->w, xlink->h,
               ctx->input_pads[2].name, ylink->w, ylink->h);
        return AVERROR(EINVAL);
    }

    outlink->w = srclink->w;
    outlink->h = srclink->h;
    outlink->time_base = srclink->time_base;
    outlink->sample_aspect_ratio = srclink->sample_aspect_ratio;
    outlink->frame_rate = srclink->frame_rate;

    ret = ff_framesync_init(&s->fs, ctx, 3);
    if (ret < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srclink->time_base;
    in[1].time_base = xlink->time_base;
    in[2].time_base = ylink->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_STOP;
    in[1].sync   = 1;
    in[1].before = EXT_NULL;
    in[1].after  = EXT_INFINITY;
    in[2].sync   = 1;
    in[2].before = EXT_NULL;
    in[2].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}
Example #3
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    StackContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    if (s->is_vertical) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->w != width) {
                av_log(ctx, AV_LOG_ERROR, "Input %d width %d does not match input %d width %d.\n", i, ctx->inputs[i]->w, 0, width);
                return AVERROR(EINVAL);
            }
            height += ctx->inputs[i]->h;
        }
    } else {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->h != height) {
                av_log(ctx, AV_LOG_ERROR, "Input %d height %d does not match input %d height %d.\n", i, ctx->inputs[i]->h, 0, height);
                return AVERROR(EINVAL);
            }
            width += ctx->inputs[i]->w;
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}
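
For filters with a variable number of inputs, such as the stack example above, the callback usually just loops over them. The sketch below assumes the private context keeps one AVFrame pointer per input in s->frames (allocated at init, much as Example #4 does for its own context); ff_framesync_get_frame, ff_get_video_buffer, ff_filter_frame and the fs->pts / fs->time_base fields are the real API, and the copy step is left as a comment.

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    StackContext *s = fs->opaque;
    AVFrame **in = s->frames;            /* assumed: nb_inputs frame pointers */
    AVFrame *out;
    int i, ret;

    /* Collect the synchronized frame of every input; with get=0 they
       remain owned by framesync. */
    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(fs->pts, fs->time_base, outlink->time_base);

    /* ... copy each in[i] into its region of out ... */

    return ff_filter_frame(outlink, out);
}
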
Example #4
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    StreamSelectContext *s = ctx->priv;
    const int outlink_idx = FF_OUTLINK_IDX(outlink);
    const int inlink_idx  = s->map[outlink_idx];
    AVFilterLink *inlink = ctx->inputs[inlink_idx];
    FFFrameSyncIn *in;
    int i, ret;

    av_log(ctx, AV_LOG_VERBOSE, "config output link %d "
           "with settings from input link %d\n",
           outlink_idx, inlink_idx);

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        outlink->w = inlink->w;
        outlink->h = inlink->h;
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
        outlink->frame_rate = inlink->frame_rate;
        break;
    case AVMEDIA_TYPE_AUDIO:
        outlink->sample_rate    = inlink->sample_rate;
        outlink->channels       = inlink->channels;
        outlink->channel_layout = inlink->channel_layout;
        break;
    }

    outlink->time_base = inlink->time_base;
    outlink->format = inlink->format;

    if (s->fs.opaque == s)
        return 0;

    if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < ctx->nb_inputs; i++) {
        in[i].time_base = ctx->inputs[i]->time_base;
        in[i].sync      = 1;
        in[i].before    = EXT_STOP;
        in[i].after     = EXT_STOP;
    }

    s->frames = av_calloc(ctx->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    return ff_framesync_configure(&s->fs);
}
Example #5
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
            av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
            return AVERROR(EINVAL);
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}
Example #6
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HysteresisContext *s = ctx->priv;
    AVFilterLink *base = ctx->inputs[0];
    AVFilterLink *alt = ctx->inputs[1];
    FFFrameSyncIn *in;
    int ret;

    if (base->format != alt->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (base->w != alt->w || base->h != alt->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, base->w, base->h,
               ctx->input_pads[1].name,
               alt->w, alt->h);
        return AVERROR(EINVAL);
    }

    outlink->w = base->w;
    outlink->h = base->h;
    outlink->time_base = base->time_base;
    outlink->sample_aspect_ratio = base->sample_aspect_ratio;
    outlink->frame_rate = base->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = base->time_base;
    in[1].time_base = alt->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}
Example #7
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LIBVMAFContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    ret = ff_framesync_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
    outlink->frame_rate = mainlink->frame_rate;
    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    return 0;
}
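
Example #7 relies on ff_framesync_init_dualinput, a convenience wrapper for the common main/reference two-input case that fills in both FFFrameSyncIn entries from the filter's standard framesync options instead of doing it by hand as in the earlier examples. The matching on_event callback then typically fetches both frames through ff_framesync_dualinput_get. A minimal sketch follows; the callback name and the comparison step are illustrative, while the framesync calls and their ownership rules are real.

static int do_process(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main_frame = NULL, *ref_frame = NULL;
    int ret;

    /* main_frame is handed to the caller, which is expected to forward it
       downstream; ref_frame stays owned by framesync and may be NULL
       while the second input has no matching frame yet. */
    ret = ff_framesync_dualinput_get(fs, &main_frame, &ref_frame);
    if (ret < 0)
        return ret;
    if (!ref_frame)
        return ff_filter_frame(ctx->outputs[0], main_frame);

    /* ... compare or combine main_frame with ref_frame ... */

    return ff_filter_frame(ctx->outputs[0], main_frame);
}
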
Example #8
static int program_opencl_config_output(AVFilterLink *outlink)
{
    AVFilterContext    *avctx = outlink->src;
    ProgramOpenCLContext *ctx = avctx->priv;
    int err;

    err = ff_opencl_filter_config_output(outlink);
    if (err < 0)
        return err;

    if (ctx->nb_inputs > 0) {
        FFFrameSyncIn *in;
        int i;

        err = ff_framesync_init(&ctx->fs, avctx, ctx->nb_inputs);
        if (err < 0)
            return err;

        ctx->fs.opaque = ctx;
        ctx->fs.on_event = &program_opencl_filter;

        in = ctx->fs.in;
        for (i = 0; i < ctx->nb_inputs; i++) {
            const AVFilterLink *inlink = avctx->inputs[i];

            in[i].time_base = inlink->time_base;
            in[i].sync      = 1;
            in[i].before    = EXT_STOP;
            in[i].after     = EXT_INFINITY;
        }

        err = ff_framesync_configure(&ctx->fs);
        if (err < 0)
            return err;

    } else {
        outlink->time_base = av_inv_q(ctx->source_rate);
    }

    return 0;
}
Example #9
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MidEqualizerContext *s = ctx->priv;
    AVFilterLink *in0 = ctx->inputs[0];
    AVFilterLink *in1 = ctx->inputs[1];
    FFFrameSyncIn *in;
    int ret;

    if (in0->format != in1->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }

    outlink->w = in0->w;
    outlink->h = in0->h;
    outlink->time_base = in0->time_base;
    outlink->sample_aspect_ratio = in0->sample_aspect_ratio;
    outlink->frame_rate = in0->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = in0->time_base;
    in[1].time_base = in1->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}
Example #10
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MaskedMergeContext *s = ctx->priv;
    AVFilterLink *base = ctx->inputs[0];
    AVFilterLink *overlay = ctx->inputs[1];
    AVFilterLink *mask = ctx->inputs[2];
    FFFrameSyncIn *in;
    int ret;

    if (base->format != overlay->format ||
        base->format != mask->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (base->w                       != overlay->w ||
        base->h                       != overlay->h ||
        base->sample_aspect_ratio.num != overlay->sample_aspect_ratio.num ||
        base->sample_aspect_ratio.den != overlay->sample_aspect_ratio.den ||
        base->w                       != mask->w ||
        base->h                       != mask->h ||
        base->sample_aspect_ratio.num != mask->sample_aspect_ratio.num ||
        base->sample_aspect_ratio.den != mask->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d) "
               "and/or third input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[0].name, base->w, base->h,
               base->sample_aspect_ratio.num,
               base->sample_aspect_ratio.den,
               ctx->input_pads[1].name, overlay->w, overlay->h,
               overlay->sample_aspect_ratio.num,
               overlay->sample_aspect_ratio.den,
               ctx->input_pads[2].name, mask->w, mask->h,
               mask->sample_aspect_ratio.num,
               mask->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = base->w;
    outlink->h = base->h;
    outlink->time_base = base->time_base;
    outlink->sample_aspect_ratio = base->sample_aspect_ratio;
    outlink->frame_rate = base->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = base->time_base;
    in[1].time_base = overlay->time_base;
    in[2].time_base = mask->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    in[2].sync   = 1;
    in[2].before = EXT_STOP;
    in[2].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}
Example #11
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ThresholdContext *s = ctx->priv;
    AVFilterLink *base = ctx->inputs[0];
    AVFilterLink *threshold = ctx->inputs[1];
    AVFilterLink *min = ctx->inputs[2];
    AVFilterLink *max = ctx->inputs[3];
    FFFrameSyncIn *in;
    int ret;

    if (base->format != threshold->format ||
        base->format != min->format ||
        base->format != max->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (base->w                       != threshold->w ||
        base->h                       != threshold->h ||
        base->w                       != min->w ||
        base->h                       != min->h ||
        base->w                       != max->w ||
        base->h                       != max->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (%dx%d) "
               "and/or third input link %s parameters (%dx%d) "
               "and/or fourth input link %s parameters (%dx%d)\n",
               ctx->input_pads[0].name, base->w, base->h,
               ctx->input_pads[1].name, threshold->w, threshold->h,
               ctx->input_pads[2].name, min->w, min->h,
               ctx->input_pads[3].name, max->w, max->h);
        return AVERROR(EINVAL);
    }

    outlink->w = base->w;
    outlink->h = base->h;
    outlink->time_base = base->time_base;
    outlink->sample_aspect_ratio = base->sample_aspect_ratio;
    outlink->frame_rate = base->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 4)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = base->time_base;
    in[1].time_base = threshold->time_base;
    in[2].time_base = min->time_base;
    in[3].time_base = max->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_STOP;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_STOP;
    in[2].sync   = 1;
    in[2].before = EXT_STOP;
    in[2].after  = EXT_STOP;
    in[3].sync   = 1;
    in[3].before = EXT_STOP;
    in[3].after  = EXT_STOP;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    return ff_framesync_configure(&s->fs);
}
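
Common to all of these filters, though not shown in the excerpts, is the wiring that drives the FFFrameSync after config_output has set it up and that releases it on teardown. With the activate-based filter API this reduces to the boilerplate below; FooContext again stands in for the filter's private context, while ff_framesync_activate and ff_framesync_uninit are the real framesync entry points (Example #1 uses the older FFDualInputContext layer, which has its own wrappers for this).

static int activate(AVFilterContext *ctx)
{
    FooContext *s = ctx->priv;           /* hypothetical private context holding the FFFrameSync */
    /* Pulls frames from all inputs and invokes the registered on_event
       callback whenever a synchronized set is available. */
    return ff_framesync_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FooContext *s = ctx->priv;
    ff_framesync_uninit(&s->fs);         /* frees queued frames and internal state */
}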