Example 1
static AVFrame *get_video_buffer(AVFilterLink *link, int w, int h)
{
    FlipContext *flip = link->dst->priv;
    AVFrame *frame;
    int i;

    frame = ff_get_video_buffer(link->dst->outputs[0], w, h);
    if (!frame)
        return NULL;

    for (i = 0; i < 4; i++) {
        int vsub = i == 1 || i == 2 ? flip->vsub : 0;   /* chroma planes are vertically subsampled */
        int height = FF_CEIL_RSHIFT(h, vsub);

        if (frame->data[i]) {
            /* point at the last row, then walk upward via a negative stride */
            frame->data[i] += (height - 1) * frame->linesize[i];
            frame->linesize[i] = -frame->linesize[i];
        }
    }

    return frame;
}
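
Note: the flip works because FFmpeg addresses plane rows as data + y * linesize with a signed linesize, so pointing data at the last row and negating the stride walks the image bottom-up. A minimal sketch, assuming <stdint.h>/<stddef.h> and a hypothetical sum_plane helper (not FFmpeg API), showing that the same row loop handles either sign:

static unsigned sum_plane(const uint8_t *data, int linesize, int w, int h)
{
    unsigned sum = 0;
    for (int y = 0; y < h; y++) {
        /* valid for positive (top-down) and negative (flipped) strides */
        const uint8_t *row = data + (ptrdiff_t)y * linesize;
        for (int x = 0; x < w; x++)
            sum += row[x];
    }
    return sum;
}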
Example 2
static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
                                        int w, int h)
{
    FlipContext *flip = link->dst->priv;
    AVFilterBufferRef *picref;
    int i;

    if (!(perms & AV_PERM_NEG_LINESIZES))
        return ff_default_get_video_buffer(link, perms, w, h);

    picref = ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
    if (!picref)
        return NULL;

    for (i = 0; i < 4; i++) {
        int vsub = i == 1 || i == 2 ? flip->vsub : 0;

        if (picref->data[i]) {
            picref->data[i] += ((h >> vsub)-1) * picref->linesize[i];
            picref->linesize[i] = -picref->linesize[i];
        }
    }

    return picref;
}
Example 3
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->interlaced_frame ?
              yadif->cur->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    yadif->filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;
    return ret;
}
Example 4
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ThresholdContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *threshold, *min, *max;
    ThreadData td;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &min,       0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 3, &max,       0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        td.out = out;
        td.in = in;
        td.threshold = threshold;
        td.min = min;
        td.max = max;
        ctx->internal->execute(ctx, filter_slice, &td, NULL,
                               FFMIN(s->height[2], ff_filter_get_nb_threads(ctx)));
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Example 5
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    AVFilterBufferRef *buf_out;
    int ret;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts                 = color->pts++;
    picref->pos                 = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(link, buf_out);
    if (ret < 0)
        goto fail;

    ff_draw_rectangle(picref->data, picref->linesize,
                      color->line, color->line_step, color->hsub, color->vsub,
                      0, 0, color->w, color->h);
    ret = ff_draw_slice(link, 0, color->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(link);

fail:
    avfilter_unref_buffer(picref);

    return ret;
}
Example 6
static AVFrame *hwmap_get_buffer(AVFilterLink *inlink, int w, int h)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink  *outlink = avctx->outputs[0];
    HWMapContext      *ctx = avctx->priv;

    if (ctx->reverse && !inlink->hw_frames_ctx) {
        AVFrame *src, *dst;
        int err;

        src = ff_get_video_buffer(outlink, w, h);
        if (!src) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate source "
                   "frame for software mapping.\n");
            return NULL;
        }

        dst = av_frame_alloc();
        if (!dst) {
            av_frame_free(&src);
            return NULL;
        }

        err = av_hwframe_map(dst, src, ctx->mode);
        if (err) {
            av_log(avctx, AV_LOG_ERROR, "Failed to map frame to "
                   "software: %d.\n", err);
            av_frame_free(&src);
            av_frame_free(&dst);
            return NULL;
        }

        av_frame_free(&src);
        return dst;
    } else {
        return ff_default_get_video_buffer(inlink, w, h);
    }
}
Example 7
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DebandContext *s = ctx->priv;
    AVFrame *out;
    ThreadData td;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    td.in = in; td.out = out;
    ctx->internal->execute(ctx, s->deband, &td, NULL, FFMIN3(s->planeheight[1],
                                                             s->planeheight[2],
                                                             ctx->graph->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
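
Examples 7, 12, 14 and 27 all repeat the same ownership contract around ff_get_video_buffer(). A distilled sketch of that pattern (function name hypothetical):

static int filter_frame_pattern(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);

    if (!out) {
        av_frame_free(&in);               /* the callback owns `in`: never leak it */
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);         /* carry pts, metadata, interlacing flags */

    /* ... transform pixels from `in` into `out` ... */

    av_frame_free(&in);
    return ff_filter_frame(outlink, out); /* hands ownership of `out` downstream */
}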
Example 8
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int plane;
    int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub), ch = AV_CEIL_RSHIFT(in->height, s->vsub);
    int w[4] = { inlink->w, cw, cw, inlink->w };
    int h[4] = { in->height, ch, ch, in->height };
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    const int pixsize = (depth+7)/8;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
        hblur(out->data[plane], out->linesize[plane],
              in ->data[plane], in ->linesize[plane],
              w[plane], h[plane], s->radius[plane], s->power[plane],
              s->temp, pixsize);

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++)
        vblur(out->data[plane], out->linesize[plane],
              out->data[plane], out->linesize[plane],
              w[plane], h[plane], s->radius[plane], s->power[plane],
              s->temp, pixsize);

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
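
AV_CEIL_RSHIFT above is a rounding-up right shift: with 4:2:0 subsampling, an odd luma dimension still needs the extra chroma sample. A sketch of the equivalent arithmetic (macro name hypothetical; like FFmpeg's own macro it relies on arithmetic right shift of negative values):

#define CEIL_RSHIFT(v, s) (-((-(v)) >> (s)))
/* CEIL_RSHIFT(7, 1) == 4, whereas a plain (7 >> 1) == 3 */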
Example 9
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    AVFilterBufferRef *buf_out;
    int ret;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(outlink, buf_out);
    if (ret < 0)
        goto fail;

    frei0r->update(frei0r->instance, av_rescale_q(picref->pts, frei0r->time_base, (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ret = ff_draw_slice(outlink, 0, outlink->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(outlink);

fail:
    avfilter_unref_buffer(picref);

    return ret;
}
Example 10
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int direct, c;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
        out->width  = outlink->w;
        out->height = outlink->h;
    }

    for (c = 0; c < 3; c++) {
        denoise(hqdn3d, in->data[c], out->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                in->width  >> (!!c * hqdn3d->hsub),
                in->height >> (!!c * hqdn3d->vsub),
                in->linesize[c], out->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example 11
File: buffer.c Project: 1c0n/xbmc
AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
                                      AVFilterBufferRef *ref)
{
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {

    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if (!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                        ref->audio->nb_samples);
        if (!buf)
            return NULL;
        channels = ref->audio->channels;
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
Example 12
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *ocv = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    IplImage inimg, outimg;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    fill_iplimage_from_frame(&inimg , in , inlink->format);
    fill_iplimage_from_frame(&outimg, out, inlink->format);
    ocv->end_frame_filter(ctx, &inimg, &outimg);
    fill_frame_from_iplimage(out, &outimg, inlink->format);

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
Example 13
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;
    int direct, c;

    if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
        if (!out) {
            avfilter_unref_bufferp(&in);
            return AVERROR(ENOMEM);
        }

        avfilter_copy_buffer_ref_props(out, in);
        out->video->w = outlink->w;
        out->video->h = outlink->h;
    }

    for (c = 0; c < 3; c++) {
        denoise(hqdn3d, in->data[c], out->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                in->video->w >> (!!c * hqdn3d->hsub),
                in->video->h >> (!!c * hqdn3d->vsub),
                in->linesize[c], out->linesize[c],
                hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
    }

    if (!direct)
        avfilter_unref_bufferp(&in);

    return ff_filter_frame(outlink, out);
}
Example 14
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    /* copy palette if required */
    if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
        memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

    td.in = in, td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example 15
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowVolumeContext *s = ctx->priv;
    int c, i, j, k;
    double values[VAR_VARS_NB];

    if (!s->out || s->out->width  != outlink->w ||
                   s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->out) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->out->data[0] + i * s->out->linesize[0], 0, outlink->w * 4);
    }
    s->out->pts = insamples->pts;

    for (j = 0; j < outlink->h; j++) {
        uint8_t *dst = s->out->data[0] + j * s->out->linesize[0];
        for (k = 0; k < s->w; k++) {
            dst[k * 4 + 0] = FFMAX(dst[k * 4 + 0] - s->f, 0);
            dst[k * 4 + 1] = FFMAX(dst[k * 4 + 1] - s->f, 0);
            dst[k * 4 + 2] = FFMAX(dst[k * 4 + 2] - s->f, 0);
            dst[k * 4 + 3] = FFMAX(dst[k * 4 + 3] - s->f, 0);
        }
    }

    for (c = 0; c < inlink->channels; c++) {
        float *src = (float *)insamples->extended_data[c];
        float max = 0;
        uint32_t color;

        for (i = 0; i < insamples->nb_samples; i++)
            max = FFMAX(max, src[i]);

        max = av_clipf(max, 0, 1);
        values[VAR_VOLUME] = 20.0 * log10(max);
        values[VAR_CHANNEL] = c;
        color = av_expr_eval(s->c_expr, values, NULL);

        for (j = 0; j < s->h; j++) {
            uint8_t *dst = s->out->data[0] + (c * s->h + c * s->b + j) * s->out->linesize[0];

            for (k = 0; k < s->w * max; k++)
                AV_WN32A(dst + k * 4, color);
        }

        if (s->h >= 8 && s->draw_text)
            drawtext(s->out, 2, c * (s->h + s->b) + (s->h - 8) / 2,
                     av_get_channel_name(av_channel_layout_extract_channel(insamples->channel_layout, c)));
    }

    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->out));
}
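
The VAR_VOLUME value above is the per-channel peak converted to dBFS via 20*log10(peak). A worked sketch (helper name hypothetical):

#include <math.h>

/* peak 1.0 -> 0 dBFS, 0.5 -> about -6.02 dBFS, 0.1 -> -20 dBFS */
static double peak_to_dbfs(double peak)
{
    return 20.0 * log10(peak);
}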
Example 16
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DCTdnoizContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int direct, plane;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        direct = 0;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    s->color_decorrelation(s->cbuf[0], s->p_linesize,
                           in->data[0], in->linesize[0],
                           s->pr_width, s->pr_height);
    for (plane = 0; plane < 3; plane++) {
#ifdef IDE_COMPILE
        ThreadData td = {
            s->cbuf[0][plane],
            s->cbuf[1][plane],
        };
#else
        ThreadData td = {
            .src = s->cbuf[0][plane],
            .dst = s->cbuf[1][plane],
        };
#endif
        ctx->internal->execute(ctx, filter_slice, &td, NULL, s->nb_threads);
    }
    s->color_correlation(out->data[0], out->linesize[0],
                         s->cbuf[1], s->p_linesize,
                         s->pr_width, s->pr_height);

    if (!direct) {
        int y;
        uint8_t *dst = out->data[0];
        const uint8_t *src = in->data[0];
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in->linesize[0];
        const int hpad = (inlink->w - s->pr_width) * 3;
        const int vpad = (inlink->h - s->pr_height);

        if (hpad) {
            uint8_t       *dstp = dst + s->pr_width * 3;
            const uint8_t *srcp = src + s->pr_width * 3;

            for (y = 0; y < s->pr_height; y++) {
                memcpy(dstp, srcp, hpad);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }
        if (vpad) {
            uint8_t       *dstp = dst + s->pr_height * dst_linesize;
            const uint8_t *srcp = src + s->pr_height * src_linesize;

            for (y = 0; y < vpad; y++) {
                memcpy(dstp, srcp, inlink->w * 3);
                dstp += dst_linesize;
                srcp += src_linesize;
            }
        }

        av_frame_free(&in);
    }

    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i;
    DCTdnoizContext *s = ctx->priv;

    av_free(s->weights);
    for (i = 0; i < 2; i++) {
        av_free(s->cbuf[i][0]);
        av_free(s->cbuf[i][1]);
        av_free(s->cbuf[i][2]);
    }
    for (i = 0; i < s->nb_threads; i++) {
        av_free(s->slices[i]);
        av_expr_free(s->expr[i]);
    }
}
Example 17
static int request_frame(AVFilterLink *outlink)
{
    MPTestContext *test = outlink->src->priv;
    AVFrame *picref;
    int w = WIDTH, h = HEIGHT,
        cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
    unsigned int frame = outlink->frame_count;
    enum test_type tt = test->test;
    int i;

    if (test->max_pts >= 0 && test->pts > test->max_pts)
        return AVERROR_EOF;
    picref = ff_get_video_buffer(outlink, w, h);
    if (!picref)
        return AVERROR(ENOMEM);
    picref->pts = test->pts++;

    // clean image
    for (i = 0; i < h; i++)
        memset(picref->data[0] + i*picref->linesize[0], 0, w);
    for (i = 0; i < ch; i++) {
        memset(picref->data[1] + i*picref->linesize[1], 128, cw);
        memset(picref->data[2] + i*picref->linesize[2], 128, cw);
    }

    if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
        tt = (frame/30)%(TEST_NB-1);

    switch (tt) {
    case TEST_DC_LUMA:
        dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30);
        break;
    case TEST_DC_CHROMA:
        dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30);
        break;
    case TEST_FREQ_LUMA:
        freq_test(picref->data[0], picref->linesize[0], frame%30);
        break;
    case TEST_FREQ_CHROMA:
        freq_test(picref->data[1], picref->linesize[1], frame%30);
        break;
    case TEST_AMP_LUMA:
        amp_test(picref->data[0], picref->linesize[0], frame%30);
        break;
    case TEST_AMP_CHROMA:
        amp_test(picref->data[1], picref->linesize[1], frame%30);
        break;
    case TEST_CBP:
        cbp_test(picref->data   , picref->linesize   , frame%30);
        break;
    case TEST_MV:
        mv_test(picref->data[0], picref->linesize[0], frame%30);
        break;
    case TEST_RING1:
        ring1_test(picref->data[0], picref->linesize[0], frame%30);
        break;
    case TEST_RING2:
        ring2_test(picref->data[0], picref->linesize[0], frame%30);
        break;
    }

    return ff_filter_frame(outlink, picref);
}
Example 18
/* get the input surface */
static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
{
    QSVFrame        *qsv_frame;
    AVFilterContext *ctx = inlink->dst;

    clear_unused_frames(s->in_frame_list);

    qsv_frame = get_free_frame(&s->in_frame_list);
    if (!qsv_frame)
        return NULL;

    /* Turn AVFrame into mfxFrameSurface1.
     * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
     * mfxFrameSurface1 is stored in AVFrame->data[3];
     * for system memory mode, raw video data is stored in
     * AVFrame, we should map it into mfxFrameSurface1.
     */
    if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
        if (picref->format != AV_PIX_FMT_QSV) {
            av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
            return NULL;
        }
        qsv_frame->frame   = av_frame_clone(picref);
        qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
    } else {
        /* make a copy if the input is not padded as libmfx requires */
        if (picref->height & 31 || picref->linesize[0] & 31) {
            qsv_frame->frame = ff_get_video_buffer(inlink,
                                                   FFALIGN(inlink->w, 32),
                                                   FFALIGN(inlink->h, 32));
            if (!qsv_frame->frame)
                return NULL;

            qsv_frame->frame->width   = picref->width;
            qsv_frame->frame->height  = picref->height;

            if (av_frame_copy(qsv_frame->frame, picref) < 0) {
                av_frame_free(&qsv_frame->frame);
                return NULL;
            }

            av_frame_copy_props(qsv_frame->frame, picref);
            av_frame_free(&picref);
        } else
            qsv_frame->frame = av_frame_clone(picref);

        if (map_frame_to_surface(qsv_frame->frame,
                                &qsv_frame->surface_internal) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
            return NULL;
        }
        qsv_frame->surface = &qsv_frame->surface_internal;
    }

    qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
    qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
                                                      inlink->time_base, default_tb);

    qsv_frame->surface->Info.PicStruct =
            !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
            (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
                                                 MFX_PICSTRUCT_FIELD_BFF);
    if (qsv_frame->frame->repeat_pict == 1)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (qsv_frame->frame->repeat_pict == 2)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (qsv_frame->frame->repeat_pict == 4)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    return qsv_frame;
}
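
The padding test above (height & 31 || linesize[0] & 31) pairs with FFALIGN, which rounds a value up to a power-of-two multiple by adding alignment-1 and masking off the low bits. A sketch of the arithmetic (macro name hypothetical):

#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1))
/* ALIGN_POW2(1080, 32) == 1088; ALIGN_POW2(1088, 32) == 1088 */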
Example 19
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y, direct = 0;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t       *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t       *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];

                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst += dst_linesize;
            src += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t       *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = AV_CEIL_RSHIFT(inlink->w, hsub);
            const int h = AV_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else        *dstp++ = av_clip_uint8(fmap[x        ] *  *srcp++              + dv);
                }
                dst += dst_linesize;
                src += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }

    if (!direct)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example 20
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX]    = s->x;
    var_values[VAR_PY]    = s->y;
    var_values[VAR_PZOOM] = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME] = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME] = i;
    var_values[VAR_ON] = outlink->frame_count + 1;
    if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;

    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        av_frame_free(&out);   /* don't leak the output frame on error */
        ret = AVERROR(ENOMEM);
        return ret;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0) {
        av_frame_free(&out);
        return ret;
    }

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;
    return ret;
}
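
The x &= ~((1 << log2_chroma_w) - 1) masking above snaps the pan offset down to the chroma grid, so the subsampled plane offsets px[1]/px[2] address whole chroma samples. A worked sketch with assumed values:

int log2_chroma_w = 1;            /* 4:2:0: one chroma column per two luma columns */
int x = 101;                      /* desired luma offset                           */
x &= ~((1 << log2_chroma_w) - 1); /* x == 100: rounded down to the chroma grid     */
/* chroma column AV_CEIL_RSHIFT(100, 1) == 50 is now exact */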
Example 21
int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    av_assert0(frame);

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        av_frame_free(&yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = frame;

    if (!yadif->cur &&
        !(yadif->cur = av_frame_clone(yadif->next)))
        return AVERROR(ENOMEM);

    if (checkstride(yadif, yadif->next, yadif->cur)) {
        av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
        fixstride(link, yadif->next);
    }
    if (checkstride(yadif, yadif->next, yadif->cur))
        fixstride(link, yadif->cur);
    if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
        fixstride(link, yadif->prev);
    if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
        return -1;
    }

    if (!yadif->prev)
        return 0;

    if ((yadif->deint && !yadif->cur->interlaced_frame) ||
        ctx->is_disabled ||
        (yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) ||
        (yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict)
    ) {
        yadif->out  = av_frame_clone(yadif->cur);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_free(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(yadif->out, yadif->cur);
    yadif->out->interlaced_frame = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}
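
Note how this ties into Example 3: the pts *= 2 above rescales every timestamp into a doubled timebase, so when return_frame() emits the second field it can use cur_pts + next_pts, which in that timebase is exactly the midpoint of the two source timestamps with no fractional loss. A worked sketch with assumed values:

int64_t cur = 100, next = 120;     /* input pts, purely illustrative      */
int64_t first_field  = cur * 2;    /* 200, as doubled above               */
int64_t second_field = cur + next; /* 220: halfway between 200 and 240    */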
Example 22
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    HistogramContext *h   = inlink->dst->priv;
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int i, j, k, l, m;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    out->pts = in->pts;

    for (k = 0; k < 4 && out->data[k]; k++) {
        const int is_chroma = (k == 1 || k == 2);
        const int dst_h = AV_CEIL_RSHIFT(outlink->h, (is_chroma ? h->odesc->log2_chroma_h : 0));
        const int dst_w = AV_CEIL_RSHIFT(outlink->w, (is_chroma ? h->odesc->log2_chroma_w : 0));

        if (h->histogram_size <= 256) {
            for (i = 0; i < dst_h ; i++)
                memset(out->data[h->odesc->comp[k].plane] +
                       i * out->linesize[h->odesc->comp[k].plane],
                       h->bg_color[k], dst_w);
        } else {
            const int mult = h->mult;

            for (i = 0; i < dst_h ; i++)
                for (j = 0; j < dst_w; j++)
                    AV_WN16(out->data[h->odesc->comp[k].plane] +
                        i * out->linesize[h->odesc->comp[k].plane] + j * 2,
                        h->bg_color[k] * mult);
        }
    }

    for (m = 0, k = 0; k < h->ncomp; k++) {
        const int p = h->desc->comp[k].plane;
        const int height = h->planeheight[p];
        const int width = h->planewidth[p];
        double max_hval_log;
        unsigned max_hval = 0;
        int start;

        if (!((1 << k) & h->components))
            continue;
        start = m++ * (h->level_height + h->scale_height) * h->display_mode;

        if (h->histogram_size <= 256) {
            for (i = 0; i < height; i++) {
                const uint8_t *src = in->data[p] + i * in->linesize[p];
                for (j = 0; j < width; j++)
                    h->histogram[src[j]]++;
            }
        } else {
            for (i = 0; i < height; i++) {
                const uint16_t *src = (const uint16_t *)(in->data[p] + i * in->linesize[p]);
                for (j = 0; j < width; j++)
                    h->histogram[src[j]]++;
            }
        }

        for (i = 0; i < h->histogram_size; i++)
            max_hval = FFMAX(max_hval, h->histogram[i]);
        max_hval_log = log2(max_hval + 1);

        for (i = 0; i < outlink->w; i++) {
            int col_height;

            if (h->levels_mode)
                col_height = lrint(h->level_height * (1. - (log2(h->histogram[i] + 1) / max_hval_log)));
            else
                col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval;

            if (h->histogram_size <= 256) {
                for (j = h->level_height - 1; j >= col_height; j--) {
                    if (h->display_mode) {
                        for (l = 0; l < h->dncomp; l++)
                            out->data[l][(j + start) * out->linesize[l] + i] = h->fg_color[l];
                    } else {
                        out->data[p][(j + start) * out->linesize[p] + i] = 255;
                    }
                }
                for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
                    out->data[p][(j + start) * out->linesize[p] + i] = i;
            } else {
                const int mult = h->mult;

                for (j = h->level_height - 1; j >= col_height; j--) {
                    if (h->display_mode) {
                        for (l = 0; l < h->dncomp; l++)
                            AV_WN16(out->data[l] + (j + start) * out->linesize[l] + i * 2, h->fg_color[l] * mult);
                    } else {
                        AV_WN16(out->data[p] + (j + start) * out->linesize[p] + i * 2, 255 * mult);
                    }
                }
                for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--)
                    AV_WN16(out->data[p] + (j + start) * out->linesize[p] + i * 2, i);
            }
        }

        memset(h->histogram, 0, h->histogram_size * sizeof(unsigned));
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example 23
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext   *ctx     = inlink->dst;
    FieldOrderContext *s       = ctx->priv;
    AVFilterLink      *outlink = ctx->outputs[0];
    int h, plane, src_line_step, dst_line_step, line_size, line;
    uint8_t *dst, *src;
    AVFrame *out;

    if (!frame->interlaced_frame ||
        frame->top_field_first == s->dst_tff) {
        av_log(ctx, AV_LOG_VERBOSE,
               "Skipping %s.\n",
               frame->interlaced_frame ?
               "frame with same field order" : "progressive frame");
        return ff_filter_frame(outlink, frame);
    }

    if (av_frame_is_writable(frame)) {
        out = frame;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);
    }

    av_log(ctx, AV_LOG_TRACE,
            "picture will move %s one line\n",
            s->dst_tff ? "up" : "down");
    h = frame->height;
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        dst_line_step = out->linesize[plane];
        src_line_step = frame->linesize[plane];
        line_size = s->line_size[plane];
        dst = out->data[plane];
        src = frame->data[plane];
        if (s->dst_tff) {
            /** Move every line up one line, working from
             *  the top to the bottom of the frame.
             *  The original top line is lost.
             *  The new last line is created as a copy of the
             *  penultimate line from that field. */
            for (line = 0; line < h; line++) {
                if (1 + line < frame->height) {
                    memcpy(dst, src + src_line_step, line_size);
                } else {
                    memcpy(dst, src - 2 * src_line_step, line_size);
                }
                dst += dst_line_step;
                src += src_line_step;
            }
        } else {
            /** Move every line down one line, working from
             *  the bottom to the top of the frame.
             *  The original bottom line is lost.
             *  The new first line is created as a copy of the
             *  second line from that field. */
            dst += (h - 1) * dst_line_step;
            src += (h - 1) * src_line_step;
            for (line = h - 1; line >= 0 ; line--) {
                if (line > 0) {
                    memcpy(dst, src - src_line_step, line_size);
                } else {
                    memcpy(dst, src + 2 * src_line_step, line_size);
                }
                dst -= dst_line_step;
                src -= src_line_step;
            }
        }
    }
    out->top_field_first = s->dst_tff;

    if (frame != out)
        av_frame_free(&frame);
    return ff_filter_frame(outlink, out);
}
Example 24
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PullupContext *s = ctx->priv;
    PullupBuffer *b;
    PullupFrame *f;
    AVFrame *out;
    int p, ret = 0;

    b = pullup_get_buffer(s, 2);
    if (!b) {
        av_log(ctx, AV_LOG_WARNING, "Could not get buffer!\n");
        f = pullup_get_frame(s);
        pullup_release_frame(f);
        goto end;
    }

    av_image_copy(b->planes, s->planewidth,
                  (const uint8_t**)in->data, in->linesize,
                  inlink->format, inlink->w, inlink->h);

    p = in->interlaced_frame ? !in->top_field_first : 0;
    pullup_submit_field(s, b, p  );
    pullup_submit_field(s, b, p^1);

    if (in->repeat_pict)
        pullup_submit_field(s, b, p);

    pullup_release_buffer(b, 2);

    f = pullup_get_frame(s);
    if (!f)
        goto end;

    if (f->length < 2) {
        pullup_release_frame(f);
        f = pullup_get_frame(s);
        if (!f)
            goto end;
        if (f->length < 2) {
            pullup_release_frame(f);
            if (!in->repeat_pict)
                goto end;
            f = pullup_get_frame(s);
            if (!f)
                goto end;
            if (f->length < 2) {
                pullup_release_frame(f);
                goto end;
            }
        }
    }

    /* If the frame isn't already exportable... */
    if (!f->buffer)
        pullup_pack_frame(s, f);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    av_frame_copy_props(out, in);

    av_image_copy(out->data, out->linesize,
                  (const uint8_t**)f->buffer->planes, s->planewidth,
                  inlink->format, inlink->w, inlink->h);

    ret = ff_filter_frame(outlink, out);
    pullup_release_frame(f);
end:
    av_frame_free(&in);
    return ret;
}
Example 25
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TInterlaceContext *tinterlace = ctx->priv;
    AVFrame *cur, *next, *out;
    int field, tff, ret;

    av_frame_free(&tinterlace->cur);
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur = tinterlace->cur;
    next = tinterlace->next;
    /* we need at least two frames */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
             * the lower field, generating a double-height video at half framerate */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        /* write odd frame lines into the upper field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags);
        /* write even frame lines into the lower field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_DROP_ODD:  /* only output even frames, odd  frames are dropped; height unchanged, half framerate */
    case MODE_DROP_EVEN: /* only output odd  frames, even frames are dropped; height unchanged, half framerate */
        out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_PAD: /* expand each frame to double height, but pad alternate
                    * lines with black; framerate unchanged */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
        /* copy upper and lower fields */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
        /* pad with black the other field */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
        break;

        /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
         * halving the frame rate and preserving image height */
    case MODE_INTERLEAVE_TOP:    /* top    field first */
    case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
        tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->interlaced_frame = 1;
        out->top_field_first = tff;

        /* copy upper/lower field from cur */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        /* copy lower/upper field from next */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;
    case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
        /* output current frame first */
        out = av_frame_clone(cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->interlaced_frame = 1;
        if (cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts*2;

        out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
        if ((ret = ff_filter_frame(outlink, out)) < 0)
            return ret;

        /* output mix of current and next frame */
        tff = next->top_field_first;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, next);
        out->interlaced_frame = 1;
        out->top_field_first = !tff;

        if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts + next->pts;
        else
            out->pts = AV_NOPTS_VALUE;
        /* write current frame second field lines into the second field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        /* write next frame first field lines into the first field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        break;
    default:
        av_assert0(0);
    }

    out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
    ret = ff_filter_frame(outlink, out);
    tinterlace->frame++;

    return ret;
}
Example 26
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out = NULL;
    int ret;
    AVFilterCommand *cmd = link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy_props(out, frame);
        if (ret < 0)
            goto fail;

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_frame_free(&frame);
    } else
        out = frame;

    while (cmd && cmd->time <= out->pts * av_q2d(link->time_base)) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd = link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_W] = link->w;
        dstctx->var_values[VAR_H] = link->h;
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
        if (dstctx->is_disabled &&
            (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;

fail:
    av_frame_free(&out);
    av_frame_free(&frame);
    return ret;
}
Example 27
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    FlipContext *s     = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    uint8_t *inrow, *outrow;
    int i, j, plane, step;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    /* copy palette if required */
    if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
        memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        const int width  = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->w, s->hsub) : inlink->w;
        const int height = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, s->vsub) : inlink->h;
        step = s->max_step[plane];

        outrow = out->data[plane];
        inrow  = in ->data[plane] + (width - 1) * step;
        for (i = 0; i < height; i++) {
            switch (step) {
            case 1:
                for (j = 0; j < width; j++)
                    outrow[j] = inrow[-j];
            break;

            case 2:
            {
                uint16_t *outrow16 = (uint16_t *)outrow;
                uint16_t * inrow16 = (uint16_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow16[j] = inrow16[-j];
            }
            break;

            case 3:
            {
                uint8_t *in  =  inrow;
                uint8_t *out = outrow;
                for (j = 0; j < width; j++, out += 3, in -= 3) {
                    int32_t v = AV_RB24(in);
                    AV_WB24(out, v);
                }
            }
            break;

            case 4:
            {
                uint32_t *outrow32 = (uint32_t *)outrow;
                uint32_t * inrow32 = (uint32_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow32[j] = inrow32[-j];
            }
            break;

            default:
                for (j = 0; j < width; j++)
                    memcpy(outrow + j*step, inrow - j*step, step);
            }

            inrow  += in ->linesize[plane];
            outrow += out->linesize[plane];
        }
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Example 28
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioVectorScopeContext *s = ctx->priv;
    const int hw = s->hw;
    const int hh = s->hh;
    unsigned x, y;
    const double zoom = s->zoom;
    int i;

    if (!s->outpicref || s->outpicref->width  != outlink->w ||
                         s->outpicref->height != outlink->h) {
        av_frame_free(&s->outpicref);
        s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->outpicref) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < outlink->h; i++)
            memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w * 4);
    }
    s->outpicref->pts = insamples->pts;

    fade(s);

    switch (insamples->format) {
    case AV_SAMPLE_FMT_S16:
        for (i = 0; i < insamples->nb_samples; i++) {
            int16_t *src = (int16_t *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / (float)(UINT16_MAX) + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / (float)UINT16_MAX) * hh;
            } else {
                x = (src[1] * zoom / (float)INT16_MAX + 1) * hw;
                y = (src[0] * zoom / (float)INT16_MAX + 1) * hh;
            }

            draw_dot(s, x, y);
        }
        break;
    case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < insamples->nb_samples; i++) {
            float *src = (float *)insamples->data[0] + i * 2;

            if (s->mode == LISSAJOUS) {
                x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
                y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
            } else {
                x = (src[1] * zoom + 1) * hw;
                y = (src[0] * zoom + 1) * hh;
            }

            draw_dot(s, x, y);
        }
        break;
    }

    av_frame_free(&insamples);

    return ff_filter_frame(outlink, av_frame_clone(s->outpicref));
}
Example 29
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    DeshakeContext *deshake = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    Transform t = {{0},0}, orig = {{0},0};
    float matrix_y[9], matrix_uv[9];
    float alpha = 2.0 / deshake->refcount;
    char tmp[256];
    int ret = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (CONFIG_OPENCL && deshake->opencl) {
        ret = ff_opencl_deshake_process_inout_buf(link->dst, in, out);
        if (ret < 0) {
            av_frame_free(&in);
            av_frame_free(&out);
            return ret;
        }
    }

    if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
        // Find the most likely global motion for the current frame
        find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0], in->data[0], link->w, link->h, in->linesize[0], &t);
    } else {
        uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
        uint8_t *src2 = in->data[0];

        deshake->cx = FFMIN(deshake->cx, link->w);
        deshake->cy = FFMIN(deshake->cy, link->h);

        if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w) deshake->cw = link->w - deshake->cx;
        if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h) deshake->ch = link->h - deshake->cy;

        // Quadword align right margin
        deshake->cw &= ~15;

        src1 += deshake->cy * in->linesize[0] + deshake->cx;
        src2 += deshake->cy * in->linesize[0] + deshake->cx;

        find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
    }


    // Copy transform so we can output it later to compare to the smoothed value
    orig.vector.x = t.vector.x;
    orig.vector.y = t.vector.y;
    orig.angle = t.angle;
    orig.zoom = t.zoom;

    // Generate a one-sided moving exponential average
    deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x;
    deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y;
    deshake->avg.angle = alpha * t.angle + (1.0 - alpha) * deshake->avg.angle;
    deshake->avg.zoom = alpha * t.zoom + (1.0 - alpha) * deshake->avg.zoom;

    // Remove the average from the current motion to detect the motion that
    // is not on purpose, just as jitter from bumping the camera
    t.vector.x -= deshake->avg.vector.x;
    t.vector.y -= deshake->avg.vector.y;
    t.angle -= deshake->avg.angle;
    t.zoom -= deshake->avg.zoom;

    // Invert the motion to undo it
    t.vector.x *= -1;
    t.vector.y *= -1;
    t.angle *= -1;

    // Write statistics to file
    if (deshake->fp) {
        snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n", orig.vector.x, deshake->avg.vector.x, t.vector.x, orig.vector.y, deshake->avg.vector.y, t.vector.y, orig.angle, deshake->avg.angle, t.angle, orig.zoom, deshake->avg.zoom, t.zoom);
        fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
    }

    // Turn relative current frame motion into absolute by adding it to the
    // last absolute motion
    t.vector.x += deshake->last.vector.x;
    t.vector.y += deshake->last.vector.y;
    t.angle += deshake->last.angle;
    t.zoom += deshake->last.zoom;

    // Shrink motion by 10% to keep things centered in the camera frame
    t.vector.x *= 0.9;
    t.vector.y *= 0.9;
    t.angle *= 0.9;

    // Store the last absolute motion information
    deshake->last.vector.x = t.vector.x;
    deshake->last.vector.y = t.vector.y;
    deshake->last.angle = t.angle;
    deshake->last.zoom = t.zoom;

    // Generate a luma transformation matrix
    avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix_y);
    // Generate a chroma transformation matrix
    avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)), t.vector.y / (link->h / CHROMA_HEIGHT(link)), t.angle, 1.0 + t.zoom / 100.0, matrix_uv);
    // Transform the luma and chroma planes
    ret = deshake->transform(link->dst, link->w, link->h, CHROMA_WIDTH(link), CHROMA_HEIGHT(link),
                             matrix_y, matrix_uv, INTERPOLATE_BILINEAR, deshake->edge, in, out);

    // Cleanup the old reference frame
    av_frame_free(&deshake->ref);

    if (ret < 0)
        return ret;

    // Store the current frame as the reference frame for calculating the
    // motion of the next frame
    deshake->ref = in;

    return ff_filter_frame(outlink, out);
}
Example 30
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GBlurContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    set_params(s->sigma,  s->steps, &s->postscale,  &s->boundaryscale,  &s->nu);
    set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        float *bptr = s->buffer;
        const uint8_t *src = in->data[plane];
        const uint16_t *src16 = (const uint16_t *)in->data[plane];
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t *)out->data[plane];
        int y, x;

        if (!s->sigma || !(s->planes & (1 << plane))) {
            if (out != in)
                av_image_copy_plane(out->data[plane], out->linesize[plane],
                                    in->data[plane], in->linesize[plane],
                                    width * ((s->depth + 7) / 8), height);
            continue;
        }

        if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src[x];
                }
                bptr += width;
                src += in->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src16[x];
                }
                bptr += width;
                src16 += in->linesize[plane] / 2;
            }
        }

        gaussianiir2d(ctx, plane);

        bptr = s->buffer;
        if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst[x] = bptr[x];
                }
                bptr += width;
                dst += out->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst16[x] = bptr[x];
                }
                bptr += width;
                dst16 += out->linesize[plane] / 2;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}