Ejemplo n.º 1
0
/* Pull callback for the buffer source: copy the queued frame into a
 * fresh, preserved buffer and push it downstream as one full slice.
 * Returns 0 on success, -1 when no frame has been queued. */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    /* Fix: fail early. The original had this return commented out, so
     * execution continued and copied stale/uninitialized c->frame data
     * downstream when no frame was available. */
    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    /* This picture will be needed unmodified later for decoding the next
     * frame */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                       AV_PERM_REUSE2,
                                       link->w, link->h);

    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
                    picref->format, link->w, link->h);

    /* Carry the frame properties stored on the context. */
    picref->pts             = c->pts;
    picref->pixel_aspect    = c->pixel_aspect;
    picref->interlaced      = c->frame.interlaced_frame;
    picref->top_field_first = c->frame.top_field_first;
    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);

    c->has_frame = 0;

    return 0;
}
Ejemplo n.º 2
0
/* Transpose input callback: allocate the (rotated) output buffer, copy
 * the pts and swap the sample aspect ratio, then start the frame with a
 * full reference. Returns 0 or an AVERROR code. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    TransContext *trans   = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out_ref;

    /* Passthrough mode: forward the frame untouched. */
    if (trans->passthrough)
        return ff_null_start_frame(inlink, picref);

    outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                           outlink->w, outlink->h);
    if (!outlink->out_buf)
        return AVERROR(ENOMEM);

    outlink->out_buf->pts = picref->pts;

    /* Transposition swaps the aspect ratio's numerator and denominator;
     * an unknown aspect (num == 0) is copied unchanged. */
    if (picref->video->sample_aspect_ratio.num == 0) {
        outlink->out_buf->video->sample_aspect_ratio =
            picref->video->sample_aspect_ratio;
    } else {
        outlink->out_buf->video->sample_aspect_ratio.num =
            picref->video->sample_aspect_ratio.den;
        outlink->out_buf->video->sample_aspect_ratio.den =
            picref->video->sample_aspect_ratio.num;
    }

    out_ref = avfilter_ref_buffer(outlink->out_buf, ~0);
    if (!out_ref)
        return AVERROR(ENOMEM);
    return ff_start_frame(outlink, out_ref);
}
Ejemplo n.º 3
0
/* Main-input callback for the overlay filter: rescale the frame's pts
 * to the output time base and refresh the cached overlay picture if it
 * has expired, then forward the frame. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
    AVFilterContext *ctx = inlink->dst;
    OverlayContext *over = ctx->priv;

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* Convert from the main input's time base to the output's. */
    outpicref->pts = av_rescale_q(outpicref->pts, ctx->inputs[MAIN]->time_base,
                                  ctx->outputs[0]->time_base);

    /* Pull a newer overlay picture when the cached one is missing or
     * older than this frame; ff_request_frame() is expected to store the
     * new picture in over->overpicref as a side effect. If nothing
     * arrived, the old picture is restored.
     * NOTE(review): the return value of ff_request_frame() is ignored —
     * confirm EOF/errors on the overlay input are handled elsewhere. */
    if (!over->overpicref || over->overpicref->pts < outpicref->pts) {
        AVFilterBufferRef *old = over->overpicref;
        over->overpicref = NULL;
        ff_request_frame(ctx->inputs[OVERLAY]);
        if (over->overpicref) {
            if (old)
                avfilter_unref_buffer(old);
        } else
            over->overpicref = old;
    }

    return ff_start_frame(inlink->dst->outputs[0], outpicref);
}
Ejemplo n.º 4
0
/* pixdesctest input callback: allocate an output buffer, blank every
 * plane so uninitialized memory never leaks downstream, copy the input
 * palette for paletted formats, and start the output frame. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    PixdescTestContext *priv = inlink->dst->priv;
    AVFilterLink *outlink    = inlink->dst->outputs[0];
    AVFilterBufferRef *outpicref;
    int i;

    outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                                outlink->w, outlink->h);
    outpicref = outlink->out_buf;
    avfilter_copy_buffer_ref_props(outpicref, picref);

    /* Zero each allocated plane. Chroma planes (i == 1, 2) use the
     * subsampled height; a negative linesize (bottom-up layout) means
     * the lowest address is at the last row, so start there. */
    for (i = 0; i < 4; i++) {
        int h = outlink->h;
        h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;
        if (outpicref->data[i]) {
            uint8_t *data = outpicref->data[i] +
                (outpicref->linesize[i] > 0 ? 0 : outpicref->linesize[i] * (h-1));
            memset(data, 0, FFABS(outpicref->linesize[i]) * h);
        }
    }

    /* Copy the palette from the INPUT frame. Fix: the original copied
     * outpicref->data[1] onto itself — a no-op that dropped the palette. */
    if (priv->pix_desc->flags & PIX_FMT_PAL)
        memcpy(outpicref->data[1], picref->data[1], 256*4);

    avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
}
Ejemplo n.º 5
0
/* Pull callback for the buffer source: duplicate the queued frame into
 * a writable buffer and emit it as a single full-height slice. */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    /* A frame must have been pushed into the source beforehand. */
    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    /* Hand out a private, writable copy of the queued frame data. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_image_copy(picref->data, picref->linesize,
                  c->frame.data, c->frame.linesize,
                  picref->format, link->w, link->h);

    /* Carry over the frame properties stored on the context. */
    picref->pts                    = c->pts;
    picref->video->interlaced      = c->frame.interlaced_frame;
    picref->video->top_field_first = c->frame.top_field_first;

    /* Emit start/slice/end, then drop our own reference. */
    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);

    c->has_frame = 0;
    return 0;
}
Ejemplo n.º 6
0
/* select filter pull callback: flush one pending (already selected)
 * frame if available, otherwise keep requesting input frames until the
 * selection expression accepts one. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx  = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink  = outlink->src->inputs[0];

    select->select = 0;

    /* Drain one queued frame, if any were buffered earlier. */
    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;

        av_fifo_generic_read(select->pending_frames, &picref,
                             sizeof(picref), NULL);
        avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
        avfilter_draw_slice(outlink, 0, outlink->h, 1);
        avfilter_end_frame(outlink);
        avfilter_unref_buffer(picref);
        return 0;
    }

    /* Otherwise pull from the input until select->select is set by the
     * input-side callbacks, or the request fails (e.g. EOF). */
    while (!select->select) {
        int ret = avfilter_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    return 0;
}
/* Sink end_frame callback: stash a full reference to the just-finished
 * frame in the filter's priv pointer for the application to pick up.
 * NOTE(review): priv is overwritten unconditionally — presumably the
 * caller consumes and unrefs the previous frame between calls; verify,
 * otherwise each call leaks one buffer reference. */
static int
ffsink_end_frame(AVFilterLink *link)
{
    if (link->cur_buf)
        link->dst->priv = avfilter_ref_buffer(link->cur_buf, ~0);
    return 0;
}
Ejemplo n.º 8
0
/* alphamerge input callback: queue the finished frame on the matching
 * input queue, then emit as many output frames as the main and alpha
 * queues can currently pair up. */
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    /* Route the buffer to the queue of the input it arrived on
     * (inputs[1] carries the alpha stream). */
    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);
    ff_bufqueue_add(ctx, queue, inlink->cur_buf);
    inlink->cur_buf = NULL; /* ownership moved into the queue */

    /* Drain matched main/alpha pairs. */
    while (1) {
        AVFilterBufferRef *main_buf, *alpha_buf;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        /* main_buf becomes the output buffer; the end_frame below is
         * responsible for releasing it via out_buf. */
        ctx->outputs[0]->out_buf = main_buf;
        ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(main_buf, ~0));
        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ff_end_frame(ctx->outputs[0]);
        avfilter_unref_buffer(alpha_buf);
    }
    return 0;
}
Ejemplo n.º 9
0
/* aconvert entry point: optionally remix the channel layout into
 * mix_samplesref, optionally convert the sample format through
 * audioconvert_ctx, then forward the resulting buffer downstream. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
    AConvertContext *aconvert = inlink->dst->priv;
    AVFilterBufferRef *curbuf = insamplesref;
    AVFilterLink * const outlink = inlink->dst->outputs[0];
    int chan_mult;

    /* (Re)initialize the internal buffers if this is the first buffer
     * provided or a bigger one is needed. */
    if (!aconvert->max_nb_samples ||
        (curbuf->audio->nb_samples > aconvert->max_nb_samples))
        if (init_buffers(inlink, curbuf->audio->nb_samples) < 0) {
            av_log(inlink->dst, AV_LOG_ERROR, "Could not initialize buffers.\n");
            return;
        }

    /* Channel-layout mixing stage, if required. */
    if (aconvert->mix_samplesref) {
        memcpy(aconvert->in_mix,  curbuf->data, sizeof(aconvert->in_mix));
        memcpy(aconvert->out_mix, aconvert->mix_samplesref->data, sizeof(aconvert->out_mix));
        aconvert->convert_chlayout(aconvert->out_mix,
                                   aconvert->in_mix,
                                   curbuf->audio->nb_samples,
                                   aconvert);
        curbuf = aconvert->mix_samplesref;
    }

    /* Sample-format conversion stage, if required. */
    if (aconvert->audioconvert_ctx) {
        if (!aconvert->mix_samplesref) {
            if (aconvert->in_conv == aconvert->packed_data) {
                /* Packed input: build per-channel pointers with a fixed
                 * per-sample stride into the single data plane. */
                int i, packed_stride = av_get_bytes_per_sample(inlink->format);
                aconvert->packed_data[0] = curbuf->data[0];
                for (i = 1; i < aconvert->out_nb_channels; i++)
                    aconvert->packed_data[i] = aconvert->packed_data[i-1] + packed_stride;
            } else {
                aconvert->in_conv = curbuf->data;
            }
        }

        /* Packed-to-packed conversion treats the interleaved channels as
         * one stream, so the sample count is scaled by channel count. */
        chan_mult = inlink->planar == outlink->planar && inlink->planar == 0 ?
            aconvert->out_nb_channels : 1;

        av_audio_convert(aconvert->audioconvert_ctx,
                         (void * const *) aconvert->out_conv,
                         aconvert->out_strides,
                         (const void * const *) aconvert->in_conv,
                         aconvert->in_strides,
                         curbuf->audio->nb_samples * chan_mult);

        curbuf = aconvert->out_samplesref;
    }

    /* Propagate the input's properties, then overwrite the fields that
     * the conversion changed before pushing downstream. */
    avfilter_copy_buffer_ref_props(curbuf, insamplesref);
    curbuf->audio->channel_layout = outlink->channel_layout;
    curbuf->audio->planar         = outlink->planar;

    avfilter_filter_samples(inlink->dst->outputs[0],
                            avfilter_ref_buffer(curbuf, ~0));
    avfilter_unref_buffer(insamplesref);
}
Ejemplo n.º 10
0
/* setpts input callback: evaluate the user PTS expression for this
 * frame and forward a reference carrying the rewritten timestamp. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *setpts = inlink->dst->priv;
    double d;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    /* Fix: guard against a failed reference allocation — the code below
     * otherwise dereferences a NULL outpicref. */
    if (!outpicref)
        return;

    /* Latch the first frame's pts as STARTPTS. */
    if (isnan(setpts->var_values[VAR_STARTPTS]))
        setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);

    setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
    setpts->var_values[VAR_PTS       ] = TS2D(inpicref->pts);
    setpts->var_values[VAR_POS       ] = inpicref->pos == -1 ? NAN : inpicref->pos;

    /* Evaluate the expression and store the result as the output pts. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    outpicref->pts = D2TS(d);

#ifdef DEBUG
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%"PRId64" interlaced:%d pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
           (int64_t)setpts->var_values[VAR_N],
           (int)setpts->var_values[VAR_INTERLACED],
           inpicref ->pos,
           inpicref ->pts, inpicref ->pts * av_q2d(inlink->time_base),
           outpicref->pts, outpicref->pts * av_q2d(inlink->time_base));
#endif

    /* Bookkeeping for the next evaluation. */
    setpts->var_values[VAR_N] += 1.0;
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
    ff_start_frame(inlink->dst->outputs[0], outpicref);
}
Ejemplo n.º 11
0
/* Compute a scene-change score in [0,1] by comparing the current frame
 * with the previous one using 8x8 SAD blocks on the first plane; keeps
 * a reference to the current frame for the next call. */
static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *prev_picref = select->prev_picref;

    /* Frames are only comparable if geometry and stride match. */
    if (prev_picref &&
        picref->video->h    == prev_picref->video->h &&
        picref->video->w    == prev_picref->video->w &&
        picref->linesize[0] == prev_picref->linesize[0]) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 =      picref->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int linesize = picref->linesize[0];

        /* Sum of absolute differences in 8x8 steps across the stride. */
        for (sad = y = 0; y < picref->video->h; y += 8)
            for (x = 0; x < linesize; x += 8)
                sad += select->c.sad[1](select,
                                        p1 + y * linesize + x,
                                        p2 + y * linesize + x,
                                        linesize, 8);
        emms_c(); /* restore FPU state after MMX SAD kernels */
        /* Mean absolute frame difference; the score is the smaller of
         * the absolute level and its change, scaled and clipped. */
        mafd = sad / (picref->video->h * picref->video->w * 3);
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        avfilter_unref_buffer(prev_picref);
    }
    select->prev_picref = avfilter_ref_buffer(picref, ~0);
    return ret;
}
Ejemplo n.º 12
0
/* Pass-through input callback: forward a full reference of the input
 * picture downstream, recording it as the output link's buffer. */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterLink *out = link->dst->outputs[0];
    AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~0);

    out->out_buf = ref;
    ff_start_frame(out, ref);
}
Ejemplo n.º 13
0
Archivo: split.c Proyecto: vensi/libav
/* split filter: fan the audio buffer out to every output. The write
 * permission is stripped so no consumer can clobber data shared with
 * its siblings. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    for (out = 0; out < ctx->nb_outputs; out++)
        ff_filter_samples(inlink->dst->outputs[out],
                          avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
}
Ejemplo n.º 14
0
/* split filter: start the frame on every output. References are handed
 * out without write permission so the shared data stays read-only. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    for (out = 0; out < ctx->output_count; out++)
        avfilter_start_frame(ctx->outputs[out],
                             avfilter_ref_buffer(picref, ~AV_PERM_WRITE));
}
Ejemplo n.º 15
0
/* colormatrix input callback: keep a full reference on the context so
 * later slice callbacks can write into it, then start the frame. */
static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~0);

    color->outpicref = ref;

    return ff_start_frame(link->dst->outputs[0], ref);
}
Ejemplo n.º 16
0
/* Input callback: allocate a fresh writable output buffer, carry over
 * only the input's pts, and start the output frame. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;

    out = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                    outlink->w, outlink->h);
    out->pts = picref->pts;
    outlink->out_buf = out;

    avfilter_start_frame(outlink, avfilter_ref_buffer(out, ~0));
}
Ejemplo n.º 17
0
/* setpts entry point for both media types: evaluate the user PTS
 * expression for the current video frame or audio buffer and forward a
 * reference with the rewritten timestamp. Returns 0 or an AVERROR. */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *setpts = inlink->dst->priv;
    double d;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* Latch start pts/time from the very first frame. */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(inpicref->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(inpicref->pts);
    setpts->var_values[VAR_T         ] = TS2T(inpicref->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = inpicref->pos == -1 ? NAN : inpicref->pos;

    /* Media-type specific expression variables. */
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
        break;

    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = inpicref->audio->nb_samples;
        break;
    }

    /* Evaluate the expression; the result becomes the output pts. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    outpicref->pts = D2TS(d);

    /* Record previous in/out timestamps for the next evaluation. */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(inpicref ->pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
    setpts->var_values[VAR_PREV_OUTT]   = TS2T(outpicref->pts, inlink->time_base);

    av_dlog(inlink->dst,
            "n:%"PRId64" interlaced:%d nb_samples:%d nb_consumed_samples:%d "
            "pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
            (int64_t)setpts->var_values[VAR_N],
            (int)setpts->var_values[VAR_INTERLACED],
            (int)setpts->var_values[VAR_NB_SAMPLES],
            (int)setpts->var_values[VAR_NB_CONSUMED_SAMPLES],
            (int64_t)setpts->var_values[VAR_POS],
            (int64_t)setpts->var_values[VAR_PREV_INPTS],
            setpts->var_values[VAR_PREV_INT],
            (int64_t)setpts->var_values[VAR_PREV_OUTPTS],
            setpts->var_values[VAR_PREV_OUTT]);

    setpts->var_values[VAR_N] += 1.0;
    /* Audio buffers are forwarded through the samples path, video
     * frames through the frame path. */
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += inpicref->audio->nb_samples;
        return ff_filter_samples(inlink->dst->outputs[0], outpicref);
    } else
        return ff_start_frame   (inlink->dst->outputs[0], outpicref);
}
Ejemplo n.º 18
0
/* Pass-through: reference the input picture and hand it straight to
 * the single output link, recording it as the link's buffer. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterLink *outlink  = inlink->dst->outputs[0];
    AVFilterBufferRef *ref = avfilter_ref_buffer(inpicref, ~0);

    outlink->out_buf = ref;
    avfilter_start_frame(outlink, ref);
}
Ejemplo n.º 19
0
/* swapuv: swap the U and V plane pointers (and their strides) on a new
 * reference; the underlying pixel data is left untouched. */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *inpicref)
{
    AVFilterBufferRef *out = avfilter_ref_buffer(inpicref, ~0);

    out->data[1]     = inpicref->data[2];
    out->data[2]     = inpicref->data[1];
    out->linesize[1] = inpicref->linesize[2];
    out->linesize[2] = inpicref->linesize[1];

    avfilter_start_frame(link->dst->outputs[0], out);
}
Ejemplo n.º 20
0
/* Default start_frame: for filters with at least one output, allocate a
 * writable buffer, mirror the input frame's properties onto it, and
 * start the frame downstream with a fresh reference. */
void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterLink *outlink = NULL;

    if (inlink->dst->output_count)
        outlink = inlink->dst->outputs[0];

    /* Output-less filters simply consume the frame. */
    if (!outlink)
        return;

    outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                                 outlink->w, outlink->h);
    avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
    avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
}
Ejemplo n.º 21
0
/* setfield: override the interlacing flags according to the configured
 * mode. MODE_PROG forces progressive, MODE_AUTO leaves the frame as-is,
 * and any other mode marks the frame interlaced, using the mode value
 * itself as the top-field-first flag. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetFieldContext *setfield = inlink->dst->priv;
    AVFilterBufferRef *out = avfilter_ref_buffer(inpicref, ~0);

    if (setfield->mode == MODE_PROG)
        out->video->interlaced = 0;
    else if (setfield->mode != MODE_AUTO) {
        out->video->interlaced      = 1;
        out->video->top_field_first = setfield->mode;
    }

    avfilter_start_frame(inlink->dst->outputs[0], out);
}
Ejemplo n.º 22
0
/* MPlayer-wrapper source: push the frame stored in in_buf through the
 * link as a single full-height slice. Returns AVERROR(EINVAL) when no
 * input buffer has been provided. */
static int mpsrc_request_frame(AVFilterLink *link)
{
    struct mpsrc_priv *c = link->src->priv;
    struct vf_instance *vf = c->vf;

    if (!vf->priv->in_buf)
        return AVERROR(EINVAL);
    avfilter_start_frame(link, avfilter_ref_buffer(vf->priv->in_buf, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    /* NOTE(review): in_buf is cleared without avfilter_unref_buffer();
     * presumably ownership stays with the MPlayer wrapper — verify this
     * does not leak one reference per frame. */
    vf->priv->in_buf = NULL;
    return 0;
}
Ejemplo n.º 23
0
/* Crop start_frame: evaluate the user x/y expressions for this frame
 * and shift the plane pointers of a new buffer reference to the crop
 * origin — no pixel data is copied.
 * NOTE(review): this function appears truncated by extraction (the
 * final forwarding call and closing brace are missing). */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    CropContext *crop = ctx->priv;
    AVFilterBufferRef *ref2;
    int i;

    ref2 = avfilter_ref_buffer(picref, ~0);
    ref2->video->w = crop->w;
    ref2->video->h = crop->h;

    /* Expose the frame's time and byte position to the expressions. */
    crop->var_values[VAR_T] = picref->pts == AV_NOPTS_VALUE ?
                              NAN : picref->pts * av_q2d(link->time_base);
    crop->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos;
    /* x is evaluated twice so the x and y expressions may refer to each
     * other's result. */
    crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
    crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
    crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);

    normalize_double(&crop->x, crop->var_values[VAR_X]);
    normalize_double(&crop->y, crop->var_values[VAR_Y]);

    /* Clamp the crop window inside the input frame... */
    if (crop->x < 0) crop->x = 0;
    if (crop->y < 0) crop->y = 0;
    if ((unsigned)crop->x + (unsigned)crop->w > link->w) crop->x = link->w - crop->w;
    if ((unsigned)crop->y + (unsigned)crop->h > link->h) crop->y = link->h - crop->h;
    /* ...and align the origin to the chroma subsampling grid. */
    crop->x &= ~((1 << crop->hsub) - 1);
    crop->y &= ~((1 << crop->vsub) - 1);

#ifdef DEBUG
    av_log(ctx, AV_LOG_DEBUG,
           "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
           (int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x, crop->y, crop->x + crop->w, crop->y + crop->h);
#endif

    /* Advance the luma plane pointer to the crop origin. */
    ref2->data[0] += crop->y * ref2->linesize[0];
    ref2->data[0] += crop->x * crop->max_step[0];

    /* NOTE(review): `av_getav_pix_fmt_descriptors()` looks garbled —
     * upstream indexes the av_pix_fmt_descriptors[] table; confirm. */
    if (!((av_getav_pix_fmt_descriptors())[link->format].flags & PIX_FMT_PAL))
    {
        /* Shift the chroma planes, scaled by the subsampling factors. */
        for (i = 1; i < 3; i ++)
        {
            if (ref2->data[i])
            {
                ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i];
                ref2->data[i] += (crop->x * crop->max_step[i]) >> crop->hsub;
            }
        }
    }
Ejemplo n.º 24
0
/* split filter (error-checked variant): start the frame on every output
 * with a non-writable reference so the shared buffer cannot be
 * clobbered; stop at the first downstream error. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    int i, ret = 0;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~AV_PERM_WRITE);

        if (!ref)
            return AVERROR(ENOMEM);
        if ((ret = ff_start_frame(ctx->outputs[i], ref)) < 0)
            break;
    }
    return ret;
}
Ejemplo n.º 25
0
/* frei0r source pull callback: allocate a writable frame, let the
 * frei0r plugin render into it, and emit it as one full slice. */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref;

    /* Stamp the new frame with a 1:1 aspect and a monotonic pts. */
    picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
    /* The plugin writes directly into the frame data; its API takes the
     * timestamp rescaled to milliseconds. */
    frei0r->update(frei0r->instance,
                   av_rescale_q(picref->pts, frei0r->time_base,
                                (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(picref);

    return 0;
}
Ejemplo n.º 26
0
/* Transpose-style input callback: allocate the output buffer, copy the
 * pts, and swap the pixel aspect ratio's num/den. An unknown aspect
 * (num == 0) is copied unchanged. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;

    out = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                    outlink->w, outlink->h);
    outlink->out_buf = out;
    out->pts = picref->pts;

    if (picref->video->pixel_aspect.num == 0) {
        out->video->pixel_aspect = picref->video->pixel_aspect;
    } else {
        out->video->pixel_aspect.num = picref->video->pixel_aspect.den;
        out->video->pixel_aspect.den = picref->video->pixel_aspect.num;
    }

    avfilter_start_frame(outlink, avfilter_ref_buffer(out, ~0));
}
Ejemplo n.º 27
0
/* settb input callback: rescale the pts only when the input and output
 * time bases differ; otherwise forward the input reference untouched. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx   = inlink->dst;
    AVFilterLink *outlink  = ctx->outputs[0];
    AVFilterBufferRef *out = picref;

    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        out = avfilter_ref_buffer(picref, ~0);
        out->pts = av_rescale_q(picref->pts, inlink->time_base,
                                outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG, "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, picref->pts,
               outlink->time_base.num, outlink->time_base.den, out->pts);
        avfilter_unref_buffer(picref);
    }

    ff_start_frame(outlink, out);
}
Ejemplo n.º 28
0
/* Input callback: work in place when allowed, otherwise allocate a
 * private writable buffer that inherits the input's properties and the
 * output dimensions. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;

    if (!(inpicref->perms & AV_PERM_PRESERVE)) {
        /* The input may be modified in place. */
        out = inpicref;
    } else {
        /* The input must be preserved: use a separate output buffer. */
        out = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  outlink->w, outlink->h);
        avfilter_copy_buffer_ref_props(out, inpicref);
        out->video->w = outlink->w;
        out->video->h = outlink->h;
    }

    outlink->out_buf = out;
    return ff_start_frame(outlink, avfilter_ref_buffer(out, ~0));
}
Ejemplo n.º 29
0
/* asettb callback: rescale the pts only when the input and output time
 * bases differ; otherwise pass the input buffer through untouched. */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterContext *ctx   = inlink->dst;
    AVFilterLink *outlink  = ctx->outputs[0];
    AVFilterBufferRef *out = insamples;

    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        out = avfilter_ref_buffer(insamples, ~0);
        out->pts = av_rescale_q(insamples->pts, inlink->time_base,
                                outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG, "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, insamples->pts,
               outlink->time_base.num, outlink->time_base.den, out->pts);
        avfilter_unref_buffer(insamples);
    }

    return ff_filter_samples(outlink, out);
}
Ejemplo n.º 30
0
/* Audio buffer source pull callback: pop one queued buffer from the
 * fifo, push a reference downstream, then drop our own reference. */
static int request_frame(AVFilterLink *outlink)
{
    ABufferSourceContext *abuffer = outlink->src->priv;
    AVFilterBufferRef *samplesref;

    /* The fifo must hold at least one queued buffer. */
    if (!av_fifo_size(abuffer->fifo)) {
        av_log(outlink->src, AV_LOG_ERROR,
               "request_frame() called with no available frames!\n");
        return AVERROR(EINVAL);
    }

    av_fifo_generic_read(abuffer->fifo, &samplesref, sizeof(samplesref), NULL);
    avfilter_filter_samples(outlink, avfilter_ref_buffer(samplesref, ~0));
    avfilter_unref_buffer(samplesref);

    return 0;
}