Example #1
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    BufPic *tmp;
    int ret;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about unreferencing it ourselves. */
    ff_start_frame(outlink, fifo->root.next->picref);
    ff_draw_slice (outlink, 0, outlink->h, 1);
    ff_end_frame  (outlink);

    if (fifo->last == fifo->root.next)
        fifo->last = &fifo->root;
    tmp = fifo->root.next->next;
    av_free(fifo->root.next);
    fifo->root.next = tmp;

    return 0;
}
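The queue manipulated above is a hand-rolled singly linked list. As a point of reference, here is a minimal sketch of the layout it assumes; only root, last, next and picref are taken from the code above, the struct shapes themselves are an assumption:

/* Sketch only: the node/context layout implied by Example #1.
 * root is a dummy head node, so root.next is the oldest queued frame;
 * last points at the tail and equals &root when the queue is empty. */
typedef struct BufPic {
    AVFilterBufferRef *picref;   /* the queued frame reference */
    struct BufPic     *next;     /* next node, or NULL at the tail */
} BufPic;

typedef struct {
    BufPic  root;   /* dummy head of the queue */
    BufPic *last;   /* tail pointer; &root when empty */
} FifoContext;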
Example #2
static int end_frame(AVFilterLink *inlink)
{
    RemovelogoContext *removelogo = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int direct = inpicref == outpicref;

    blur_image(removelogo->mask,
               inpicref ->data[0], inpicref ->linesize[0],
               outpicref->data[0], outpicref->linesize[0],
               removelogo->full_mask_data, inlink->w,
               inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
    blur_image(removelogo->mask,
               inpicref ->data[1], inpicref ->linesize[1],
               outpicref->data[1], outpicref->linesize[1],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
    blur_image(removelogo->mask,
               inpicref ->data[2], inpicref ->linesize[2],
               outpicref->data[2], outpicref->linesize[2],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);

    ff_draw_slice(outlink, 0, inlink->h, 1);
    return ff_end_frame(outlink);
}
Example #3
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    select->select = 0;

    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;
        av_fifo_generic_read(select->pending_frames, &picref, sizeof(picref), NULL);
        ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
        ff_draw_slice(outlink, 0, outlink->h, 1);
        ff_end_frame(outlink);
        avfilter_unref_buffer(picref);
        return 0;
    }

    while (!select->select) {
        int ret = ff_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    return 0;
}
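Example #3 only drains pending_frames; the enqueue side is not shown. A hedged sketch of how a selected frame would enter the FIFO, assuming the matching av_fifo_generic_write() call that mirrors the read above (the helper name is hypothetical):

/* Hypothetical enqueue helper: stores the pointer value itself,
 * which is exactly what the av_fifo_generic_read() above pulls out. */
static void enqueue_picref(SelectContext *select, AVFilterBufferRef *picref)
{
    av_fifo_generic_write(select->pending_frames, &picref, sizeof(picref), NULL);
}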
Example #4
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BBoxContext *bbox = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    FFBoundingBox box;
    int has_bbox, w, h;

    has_bbox =
        ff_calculate_bounding_box(&box,
                                  picref->data[0], picref->linesize[0],
                                  inlink->w, inlink->h, 16);
    w = box.x2 - box.x1 + 1;
    h = box.y2 - box.y1 + 1;

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s", bbox->frame,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base));

    if (has_bbox) {
        av_log(ctx, AV_LOG_INFO,
               " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
               " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
               box.x1, box.x2, box.y1, box.y2, w, h,
               w, h, box.x1, box.y1,    /* crop params */
               box.x1, box.y1, w, h);   /* drawbox params */
    }
    av_log(ctx, AV_LOG_INFO, "\n");

    bbox->frame++;
    avfilter_unref_buffer(picref);
    ff_end_frame(inlink->dst->outputs[0]);
}
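For instance (hypothetical numbers), a detected box with x1=16, x2=655, y1=8, y2=359 yields w = 655-16+1 = 640 and h = 359-8+1 = 352, so the log line suggests crop=640:352:16:8 and drawbox=16:8:640:352, matching the parameter order in the format string above.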
Example #5
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);
    ff_bufqueue_add(ctx, queue, inlink->cur_buf);
    inlink->cur_buf = NULL;

    while (1) {
        AVFilterBufferRef *main_buf, *alpha_buf;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        ctx->outputs[0]->out_buf = main_buf;
        ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(main_buf, ~0));
        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ff_end_frame(ctx->outputs[0]);
        avfilter_unref_buffer(alpha_buf);
    }
    return 0;
}
Example #6
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    int i;

    for (i = 0; i < ctx->nb_outputs; i++)
        ff_end_frame(ctx->outputs[i]);

    avfilter_unref_buffer(inlink->cur_buf);
}
Example #7
static void end_last_frame(AVFilterContext *ctx)
{
    TileContext *tile    = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    while (tile->current < tile->w * tile->h)
        draw_blank_frame(ctx);
    ff_draw_slice(outlink, 0, outlink->out_buf->video->h, 1);
    ff_end_frame(outlink);
    tile->current = 0;
}
Example #8
static void end_frame(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;

    if (select->select) {
        if (select->cache_frames)
            return;
        ff_end_frame(inlink->dst->outputs[0]);
    }
    avfilter_unref_buffer(picref);
}
Example #9
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    int i, ret = 0;

    for (i = 0; i < ctx->nb_outputs; i++) {
        ret = ff_end_frame(ctx->outputs[i]);
        if (ret < 0)
            break;
    }
    return ret;
}
Example #10
static int end_frame(AVFilterLink *inlink)
{
    int i, j, best_frame_idx = 0;
    double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    ThumbContext *thumb   = inlink->dst->priv;
    AVFilterContext *ctx  = inlink->dst;
    AVFilterBufferRef *picref;

    // keep a reference of each frame
    thumb->frames[thumb->n].buf = inlink->cur_buf;
    inlink->cur_buf = NULL;

    // no selection until the buffer of N frames is filled up
    if (thumb->n < thumb->n_frames - 1) {
        thumb->n++;
        return 0;
    }

    // average histogram of the N frames
    for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
        for (i = 0; i < thumb->n_frames; i++)
            avg_hist[j] += (double)thumb->frames[i].histogram[j];
        avg_hist[j] /= thumb->n_frames;
    }

    // find the frame closer to the average using the sum of squared errors
    for (i = 0; i < thumb->n_frames; i++) {
        sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
        if (i == 0 || sq_err < min_sq_err)
            best_frame_idx = i, min_sq_err = sq_err;
    }

    // free and reset everything (except the best frame buffer)
    for (i = 0; i < thumb->n_frames; i++) {
        memset(thumb->frames[i].histogram, 0, sizeof(thumb->frames[i].histogram));
        if (i == best_frame_idx)
            continue;
        avfilter_unref_buffer(thumb->frames[i].buf);
        thumb->frames[i].buf = NULL;
    }
    thumb->n = 0;

    // raise the chosen one
    picref = thumb->frames[best_frame_idx].buf;
    av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected\n",
           best_frame_idx, picref->pts * av_q2d(inlink->time_base));
    ff_start_frame(outlink, picref);
    thumb->frames[best_frame_idx].buf = NULL;
    ff_draw_slice(outlink, 0, inlink->h, 1);
    return ff_end_frame(outlink);
}
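frame_sum_square_err() is not shown in Example #10; given how it is called, a plausible implementation (an assumption, not the original code) is:

/* Assumed helper: sum of squared differences between one frame's
 * histogram and the averaged histogram computed above. */
static double frame_sum_square_err(const int *hist, const double *median)
{
    int i;
    double err, sum_sq_err = 0;

    for (i = 0; i < HIST_SIZE; i++) {
        err = median[i] - (double)hist[i];
        sum_sq_err += err * err;
    }
    return sum_sq_err;
}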
Example #11
static void end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef  *inpicref =  inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;

    frei0r->update(frei0r->instance, inpicref->pts * av_q2d(inlink->time_base) * 1000,
                   (const uint32_t *)inpicref->data[0],
                   (uint32_t *)outpicref->data[0]);
    avfilter_unref_buffer(inpicref);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(outpicref);
}
Example #12
static int end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef  *inpicref =  inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int ret;

    frei0r->update(frei0r->instance, inpicref->pts * av_q2d(inlink->time_base) * 1000,
                   (const uint32_t *)inpicref->data[0],
                   (uint32_t *)outpicref->data[0]);
    if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}
Example #13
static int end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterBufferRef *out = color->outpicref;

    if (link->cur_buf->format == AV_PIX_FMT_YUV422P)
        process_frame_yuv422p(color, out, link->cur_buf);
    else if (link->cur_buf->format == AV_PIX_FMT_YUV420P)
        process_frame_yuv420p(color, out, link->cur_buf);
    else
        process_frame_uyvy422(color, out, link->cur_buf);

    ff_draw_slice(ctx->outputs[0], 0, ctx->outputs[0]->h, 1);
    return ff_end_frame(ctx->outputs[0]);
}
Example #14
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *blackframe = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int pblack = 0;

    pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
    if (pblack >= blackframe->bamount)
        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f\n",
               blackframe->frame, pblack, picref->pos, picref->pts,
               picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base));

    blackframe->frame++;
    blackframe->nblack = 0;
    return ff_end_frame(inlink->dst->outputs[0]);
}
Example #15
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        uint8_t *data = picref->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += picref->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           showinfo->frame,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), picref->pos,
           desc->name,
           picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && picref->data[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    showinfo->frame++;
    return ff_end_frame(inlink->dst->outputs[0]);
}
Example #16
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    picref->video->sample_aspect_ratio = (AVRational) {1, 1};
    picref->pts = color->pts++;
    picref->pos = -1;

    ff_start_frame(link, avfilter_ref_buffer(picref, ~0));
    ff_fill_rectangle(&color->draw, &color->color, picref->data, picref->linesize,
                      0, 0, color->w, color->h);
    ff_draw_slice(link, 0, color->h, 1);
    ff_end_frame(link);
    avfilter_unref_buffer(picref);

    return 0;
}
Example #17
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
    frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(picref);

    return 0;
}
Example #18
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
{
    int ret;
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        if ((ret = ff_start_frame(link, frame)) < 0)
            return ret;
        if ((ret = ff_draw_slice(link, 0, frame->video->h, 1)) < 0)
            return ret;
        return ff_end_frame(link);
    case AVMEDIA_TYPE_AUDIO:
        return ff_filter_samples(link, frame);
    default: return AVERROR(EINVAL);
    }
}
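Worth noting: ff_filter_frame() bundles the ff_start_frame()/ff_draw_slice()/ff_end_frame() triple that the per-filter examples above spell out by hand; for video it always forwards the whole frame as a single slice.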
Example #19
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *ocv = ctx->priv;
    AVFilterLink *outlink= inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    IplImage inimg, outimg;
    int ret;

    fill_iplimage_from_picref(&inimg , inpicref , inlink->format);
    fill_iplimage_from_picref(&outimg, outpicref, inlink->format);
    ocv->end_frame_filter(ctx, &inimg, &outimg);
    fill_picref_from_iplimage(outpicref, &outimg, inlink->format);

    if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}
Example #20
static int end_frame(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpic  = inlink ->cur_buf;
    AVFilterBufferRef *outpic = outlink->out_buf;
    int ret, c;

    for (c = 0; c < 3; c++) {
        denoise(inpic->data[c], outpic->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                inpic->video->w >> (!!c * hqdn3d->hsub),
                inpic->video->h >> (!!c * hqdn3d->vsub),
                inpic->linesize[c], outpic->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if ((ret = ff_draw_slice(outlink, 0, inpic->video->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}
Example #21
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    AVFilterBufferRef *buf_out;
    int ret;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts                 = color->pts++;
    picref->pos                 = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(link, buf_out);
    if (ret < 0)
        goto fail;

    ff_draw_rectangle(picref->data, picref->linesize,
                      color->line, color->line_step, color->hsub, color->vsub,
                      0, 0, color->w, color->h);
    ret = ff_draw_slice(link, 0, color->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(link);

fail:
    avfilter_unref_buffer(picref);

    return ret;
}
Example #22
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;

    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        int linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        uint8_t *data = picref->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            continue;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += picref->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%u plane_checksum:[%u %u %u %u]\n",
           showinfo->frame,
           picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
           av_pix_fmt_descriptors[picref->format].name,
           picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);

    showinfo->frame++;
    ff_end_frame(inlink->dst->outputs[0]);
}
Example #23
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *blackframe = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int pblack = 0;

    if (picref->video->key_frame)
        blackframe->last_keyframe = blackframe->frame;

    pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
    if (pblack >= blackframe->bamount)
        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f "
               "type:%c last_keyframe:%d\n",
               blackframe->frame, pblack, picref->pos, picref->pts,
               picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base),
               av_get_picture_type_char(picref->video->pict_type), blackframe->last_keyframe);

    blackframe->frame++;
    blackframe->nblack = 0;
    avfilter_unref_buffer(picref);
    ff_end_frame(inlink->dst->outputs[0]);
}
Example #24
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    AVFilterBufferRef *buf_out;
    int ret;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(outlink, buf_out);
    if (ret < 0)
        goto fail;

    frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ret = ff_draw_slice(outlink, 0, outlink->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(outlink);

fail:
    avfilter_unref_buffer(picref);

    return ret;
}
Example #25
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
        av_assert0(fifo->root.next);
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about unreferencing it ourselves. */
    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        if ((ret = ff_start_frame(outlink, fifo->root.next->buf)) < 0 ||
            (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
            (ret = ff_end_frame(outlink)) < 0)
            return ret;

        queue_pop(fifo);
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (outlink->request_samples) {
            return return_audio_frame(outlink->src);
        } else {
            ret = ff_filter_samples(outlink, fifo->root.next->buf);
            queue_pop(fifo);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return ret;
}
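queue_pop() is not shown here, but Example #1 performs the same unlink inline; a sketch consistent with both (assuming a node type analogous to Example #1's BufPic, here holding a buf field per the usage above):

/* Sketch of queue_pop, mirroring Example #1's inline pop:
 * fix up the tail pointer, then unlink and free the head node. */
static void queue_pop(FifoContext *s)
{
    Buf *tmp = s->root.next->next;
    if (s->last == s->root.next)
        s->last = &s->root;
    av_free(s->root.next);
    s->root.next = tmp;
}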
Example #26
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *cd = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int bpp = cd->max_pixsteps[0];
    int w, h, x, y, shrink_by;

    // ignore the first 2 frames - they may be empty
    // (this assumes frame_nb is initialized below zero, e.g. to -2, in init)
    if (++cd->frame_nb > 0) {
        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
            cd->x1 = picref->video->w-1;
            cd->y1 = picref->video->h-1;
            cd->x2 = 0;
            cd->y2 = 0;
            cd->frame_nb = 1;
        }

        for (y = 0; y < cd->y1; y++) {
            if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) {
                cd->y1 = y;
                break;
            }
        }

        for (y = picref->video->h-1; y > cd->y2; y--) {
            if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) {
                cd->y2 = y;
                break;
            }
        }

        for (x = 0; x < cd->x1; x++) {
            if (checkline(ctx, picref->data[0] + bpp*x, picref->linesize[0], picref->video->h, bpp) > cd->limit) {
                cd->x1 = x;
                break;
            }
        }

        for (x = picref->video->w-1; x > cd->x2; x--) {
            if (checkline(ctx, picref->data[0] + bpp*x, picref->linesize[0], picref->video->h, bpp) > cd->limit) {
                cd->x2 = x;
                break;
            }
        }

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (cd->x1+1) & ~1;
        y = (cd->y1+1) & ~1;

        w = cd->x2 - x + 1;
        h = cd->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (cd->round <= 1)
            cd->round = 16;
        if (cd->round % 2)
            cd->round *= 2;

        shrink_by = w % cd->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % cd->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, picref->pos, picref->pts,
               picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    ff_end_frame(inlink->dst->outputs[0]);
}
Example #27
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext   *ctx        = inlink->dst;
    FieldOrderContext *fieldorder = ctx->priv;
    AVFilterLink      *outlink    = ctx->outputs[0];

    AVFilterBufferRef *inpicref   = inlink->cur_buf;
    AVFilterBufferRef *outpicref  = outlink->out_buf;

    int               h, plane, line_step, line_size, line;
    uint8_t           *cpy_src, *cpy_dst;

    if (    inpicref->video->interlaced
         && inpicref->video->top_field_first != fieldorder->dst_tff) {
        av_dlog(ctx,
                "picture will move %s one line\n",
                fieldorder->dst_tff ? "up" : "down");
        h = inpicref->video->h;
        for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
            line_step = inpicref->linesize[plane];
            line_size = fieldorder->line_size[plane];
            cpy_src = inpicref->data[plane];
            cpy_dst = outpicref->data[plane];
            if (fieldorder->dst_tff) {
                /** Move every line up one line, working from
                 *  the top to the bottom of the frame.
                 *  The original top line is lost.
                 *  The new last line is created as a copy of the
                 *  penultimate line from that field. */
                for (line = 0; line < h; line++) {
                    if (1 + line < outpicref->video->h) {
                        memcpy(cpy_dst, cpy_src + line_step, line_size);
                    } else {
                        memcpy(cpy_dst, cpy_src - line_step - line_step, line_size);
                    }
                    cpy_src += line_step;
                    cpy_dst += line_step;
                }
            } else {
                /** Move every line down one line, working from
                 *  the bottom to the top of the frame.
                 *  The original bottom line is lost.
                 *  The new first line is created as a copy of the
                 *  second line from that field. */
                cpy_src += (h - 1) * line_step;
                cpy_dst += (h - 1) * line_step;
                for (line = h - 1; line >= 0 ; line--) {
                    if (line > 0) {
                        memcpy(cpy_dst, cpy_src - line_step, line_size);
                    } else {
                        memcpy(cpy_dst, cpy_src + line_step + line_step, line_size);
                    }
                    cpy_src -= line_step;
                    cpy_dst -= line_step;
                }
            }
        }
        outpicref->video->top_field_first = fieldorder->dst_tff;
        ff_draw_slice(outlink, 0, h, 1);
    } else {
        av_dlog(ctx,
                "not interlaced or field order already correct\n");
    }

    return ff_end_frame(outlink);
}
Example #28
static int end_frame(AVFilterLink *link)
{
    DeshakeContext *deshake = link->dst->priv;
    AVFilterBufferRef *in  = link->cur_buf;
    AVFilterBufferRef *out = link->dst->outputs[0]->out_buf;
    Transform t = {{0},0}, orig = {{0},0};
    float matrix[9];
    float alpha = 2.0 / deshake->refcount;
    char tmp[256];

    link->cur_buf = NULL; /* it is in 'in' now */
    if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
        // Find the most likely global motion for the current frame
        find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
    } else {
        uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
        uint8_t *src2 = in->data[0];

        deshake->cx = FFMIN(deshake->cx, link->w);
        deshake->cy = FFMIN(deshake->cy, link->h);

        if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
        if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;

        // Quadword align right margin
        deshake->cw &= ~15;

        src1 += deshake->cy * in->linesize[0] + deshake->cx;
        src2 += deshake->cy * in->linesize[0] + deshake->cx;

        find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
    }


    // Copy transform so we can output it later to compare to the smoothed value
    orig.vector.x = t.vector.x;
    orig.vector.y = t.vector.y;
    orig.angle = t.angle;
    orig.zoom = t.zoom;

    // Generate a one-sided moving exponential average
    deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x;
    deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y;
    deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
    deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;

    // Remove the average from the current motion to detect the motion that
    // is not on purpose, just as jitter from bumping the camera
    t.vector.x -= deshake->avg.vector.x;
    t.vector.y -= deshake->avg.vector.y;
    t.angle -= deshake->avg.angle;
    t.zoom -= deshake->avg.zoom;

    // Invert the motion to undo it
    t.vector.x *= -1;
    t.vector.y *= -1;
    t.angle *= -1;

    // Write statistics to file
    if (deshake->fp) {
        snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vector.x, deshake->avg.vector.x, t.vector.x, orig.vector.y, deshake->avg.vector.y, t.vector.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
        fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
    }

    // Turn relative current frame motion into absolute by adding it to the
    // last absolute motion
    t.vector.x += deshake->last.vector.x;
    t.vector.y += deshake->last.vector.y;
    t.angle += deshake->last.angle;
    t.zoom += deshake->last.zoom;

    // Shrink motion by 10% to keep things centered in the camera frame
    t.vector.x *= 0.9;
    t.vector.y *= 0.9;
    t.angle *= 0.9;

    // Store the last absolute motion information
    deshake->last.vector.x = t.vector.x;
    deshake->last.vector.y = t.vector.y;
    deshake->last.angle = t.angle;
    deshake->last.zoom = t.zoom;

    // Generate a luma transformation matrix
    avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix);

    // Transform the luma plane
    avfilter_transform(in->data[0], out->data[0], in->linesize[0], out->linesize[0], link->w, link->h, matrix, INTERPOLATE_BILINEAR, deshake->edge);

    // Generate a chroma transformation matrix
    avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix);

    // Transform the chroma planes
    avfilter_transform(in->data[1], out->data[1], in->linesize[1], out->linesize[1], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);
    avfilter_transform(in->data[2], out->data[2], in->linesize[2], out->linesize[2], CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix, INTERPOLATE_BILINEAR, deshake->edge);

    // Store the current frame as the reference frame for calculating the
    // motion of the next frame
    if (deshake->ref != NULL)
        avfilter_unref_buffer(deshake->ref);

    // Cleanup the old reference frame
    deshake->ref = in;

    // Draw the transformed frame information
    ff_draw_slice(link->dst->outputs[0], 0, link->h, 1);
    return ff_end_frame(link->dst->outputs[0]);
}
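For a sense of scale (hypothetical value): with refcount = 21, alpha = 2/21 ≈ 0.095, so each frame's measured motion contributes roughly 10% to the one-sided exponential average while about 90% carries over from previous frames.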
Example #29
static int end_frame(AVFilterLink *inlink)
{
    TransContext *trans = inlink->dst->priv;
    AVFilterBufferRef *inpic  = inlink->cur_buf;
    AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int plane, ret;

    if (trans->passthrough)
        return ff_null_end_frame(inlink);

    for (plane = 0; outpic->data[plane]; plane++) {
        int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
        int pixstep = trans->pixsteps[plane];
        int inh  = inpic->video->h>>vsub;
        int outw = outpic->video->w>>hsub;
        int outh = outpic->video->h>>vsub;
        uint8_t *out, *in;
        int outlinesize, inlinesize;
        int x, y;

        out = outpic->data[plane]; outlinesize = outpic->linesize[plane];
        in  = inpic ->data[plane]; inlinesize  = inpic ->linesize[plane];

        if (trans->dir&1) {
            in +=  inpic->linesize[plane] * (inh-1);
            inlinesize *= -1;
        }

        if (trans->dir&2) {
            out += outpic->linesize[plane] * (outh-1);
            outlinesize *= -1;
        }

        for (y = 0; y < outh; y++) {
            switch (pixstep) {
            case 1:
                for (x = 0; x < outw; x++)
                    out[x] = in[x*inlinesize + y];
                break;
            case 2:
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(out + 2*x)) = *((uint16_t *)(in + x*inlinesize + y*2));
                break;
            case 3:
                for (x = 0; x < outw; x++) {
                    int32_t v = AV_RB24(in + x*inlinesize + y*3);
                    AV_WB24(out + 3*x, v);
                }
                break;
            case 4:
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(out + 4*x)) = *((uint32_t *)(in + x*inlinesize + y*4));
                break;
            }
            out += outlinesize;
        }
    }

    if ((ret = ff_draw_slice(outlink, 0, outpic->video->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}
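As a usage note on Example #29: the inner loops always read in[x*inlinesize + y], i.e. a plain transpose; dir&1 mirrors the source vertically before the transpose and dir&2 mirrors the destination after it, so the two bits together select among the four transpose variants (the clockwise and counterclockwise rotations and their flipped forms).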