Code example #1
File: vf_transpose.c Project: jsebechlebsky/FFmpeg
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    TransContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (s->passthrough)
        return ff_filter_frame(outlink, in);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (in->sample_aspect_ratio.num == 0) {
        out->sample_aspect_ratio = in->sample_aspect_ratio;
    } else {
        out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
        out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
    }

    td.in = in; td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
Code example #2
File: vf_amplify.c Project: Kagami/ffmpeg-hevc-accel
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AmplifyContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    if (s->nb_frames < s->nb_inputs) {
        s->frames[s->nb_frames] = in;
        s->nb_frames++;
        return 0;
    } else {
        av_frame_free(&s->frames[0]);
        memmove(&s->frames[0], &s->frames[1], sizeof(*s->frames) * (s->nb_inputs - 1));
        s->frames[s->nb_inputs - 1] = in;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = s->frames[0]->pts;

    td.out = out;
    td.in = s->frames;
    ctx->internal->execute(ctx, amplify_frame, &td, NULL, FFMIN(s->height[1], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}
Code example #3
File: af_afir.c Project: DeHackEd/FFmpeg
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFrame *out = NULL;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;
    s->in[0] = in;
    ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels,
                                                               ff_filter_get_nb_threads(ctx)));

    out->pts = s->pts;
    if (s->pts != AV_NOPTS_VALUE)
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    av_frame_free(&in);
    s->in[0] = NULL;

    return ff_filter_frame(outlink, out);
}
Code example #4
File: vf_convolution.c Project: LongChair/FFmpeg
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
    if (!s->bptrs)
        return AVERROR(ENOMEM);

    s->bstride = s->planewidth[0] + 64;
    s->bpc = (s->depth + 7) / 8;
    s->buffer = av_malloc_array(7 * s->bstride * s->nb_threads, s->bpc);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    for (p = 0; p < s->nb_threads; p++) {
        s->bptrs[p] = s->buffer + 7 * s->bstride * s->bpc * p;
    }

    if (!strcmp(ctx->filter->name, "convolution")) {
        if (s->depth > 8) {
            for (p = 0; p < s->nb_planes; p++) {
                if (s->size[p] == 3)
                    s->filter[p] = filter16_3x3;
                else if (s->size[p] == 5)
                    s->filter[p] = filter16_5x5;
                else if (s->size[p] == 7)
                    s->filter[p] = filter16_7x7;
            }
        }
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_prewitt;
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_roberts;
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_sobel;
    }

    return 0;
}
Code example #5
File: vf_vibrance.c Project: FFmpeg/FFmpeg
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *avctx = link->dst;
    VibranceContext *s = avctx->priv;
    int res;

    if ((res = avctx->internal->execute(avctx, s->do_slice, frame, NULL,
                                        FFMIN(frame->height, ff_filter_get_nb_threads(avctx)))))
        return res;

    return ff_filter_frame(avctx->outputs[0], frame);
}
Code example #6
File: vf_gblur.c Project: DeHackEd/FFmpeg
static void gaussianiir2d(AVFilterContext *ctx, int plane)
{
    GBlurContext *s = ctx->priv;
    const int width = s->planewidth[plane];
    const int height = s->planeheight[plane];
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    ThreadData td;

    if (s->sigma <= 0 || s->steps < 0)
        return;

    td.width = width;
    td.height = height;
    ctx->internal->execute(ctx, filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
    ctx->internal->execute(ctx, filter_vertically, &td, NULL, FFMIN(width, nb_threads));
    ctx->internal->execute(ctx, filter_postscale, &td, NULL, FFMIN(width * height, nb_threads));
}
Code example #7
static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    int plane;
    AVFilterContext *ctx = inlink->dst;
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    GEQContext *geq = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    geq->values[VAR_N] = inlink->frame_count_out;
    geq->values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base);

    geq->picref = in;
    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (plane = 0; plane < geq->planes && out->data[plane]; plane++) {
        const int width = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->w, geq->hsub) : inlink->w;
        const int height = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(inlink->h, geq->vsub) : inlink->h;
        const int linesize = out->linesize[plane];
        ThreadData td;

        geq->dst = out->data[plane];
        geq->dst16 = (uint16_t*)out->data[plane];

        geq->values[VAR_W]  = width;
        geq->values[VAR_H]  = height;
        geq->values[VAR_SW] = width / (double)inlink->w;
        geq->values[VAR_SH] = height / (double)inlink->h;

        td.width = width;
        td.height = height;
        td.plane = plane;
        td.linesize = linesize;

        ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads));
    }

    av_frame_free(&geq->picref);
    return ff_filter_frame(outlink, out);
}
Code example #8
File: vf_threshold.c Project: DeHackEd/FFmpeg
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    ThresholdContext *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *in, *threshold, *min, *max;
    ThreadData td;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &in,        0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &threshold, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 2, &min,       0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 3, &max,       0)) < 0)
        return ret;

    if (ctx->is_disabled) {
        out = av_frame_clone(in);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, in);

        td.out = out;
        td.in = in;
        td.threshold = threshold;
        td.min = min;
        td.max = max;
        ctx->internal->execute(ctx, filter_slice, &td, NULL,
                               FFMIN(s->height[2], ff_filter_get_nb_threads(ctx)));
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}
Code example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FadeContext *s       = ctx->priv;
    double frame_timestamp = frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base);

    // Calculate Fade assuming this is a Fade In
    if (s->fade_state == VF_FADE_WAITING) {
        s->factor = 0;
        if (frame_timestamp >= s->start_time/(double)AV_TIME_BASE
            && inlink->frame_count_out >= s->start_frame) {
            // Time to start fading
            s->fade_state = VF_FADE_FADING;

            // Save start time in case we are starting based on frames and fading based on time
            if (s->start_time == 0 && s->start_frame != 0) {
                s->start_time = frame_timestamp*(double)AV_TIME_BASE;
            }

            // Save start frame in case we are starting based on time and fading based on frames
            if (s->start_time != 0 && s->start_frame == 0) {
                s->start_frame = inlink->frame_count_out;
            }
        }
    }
    if (s->fade_state == VF_FADE_FADING) {
        if (s->duration == 0) {
            // Fading based on frame count
            s->factor = (inlink->frame_count_out - s->start_frame) * s->fade_per_frame;
            if (inlink->frame_count_out > s->start_frame + s->nb_frames) {
                s->fade_state = VF_FADE_DONE;
            }

        } else {
            // Fading based on duration
            s->factor = (frame_timestamp - s->start_time/(double)AV_TIME_BASE)
                            * (float) UINT16_MAX / (s->duration/(double)AV_TIME_BASE);
            if (frame_timestamp > s->start_time/(double)AV_TIME_BASE
                                  + s->duration/(double)AV_TIME_BASE) {
                s->fade_state = VF_FADE_DONE;
            }
        }
    }
    if (s->fade_state == VF_FADE_DONE) {
        s->factor = UINT16_MAX;
    }

    s->factor = av_clip_uint16(s->factor);

    // Invert fade_factor if Fading Out
    if (s->type == FADE_OUT) {
        s->factor = UINT16_MAX - s->factor;
    }

    if (s->factor < UINT16_MAX) {
        if (s->alpha) {
            ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
        } else if (s->is_packed_rgb && !s->black_fade) {
            ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
        } else {
            /* luma, or rgb plane in case of black */
            ctx->internal->execute(ctx, filter_slice_luma, frame, NULL,
                                   FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));

            if (frame->data[1] && frame->data[2]) {
                /* chroma planes */
                ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL,
                                       FFMIN(frame->height, ff_filter_get_nb_threads(ctx)));
            }
        }
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Code example #10
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ATADenoiseContext *s = ctx->priv;
    AVFrame *out, *in;
    int i;

    if (s->q.available != s->size) {
        if (s->q.available < s->mid) {
            for (i = 0; i < s->mid; i++) {
                out = av_frame_clone(buf);
                if (!out) {
                    av_frame_free(&buf);
                    return AVERROR(ENOMEM);
                }
                ff_bufqueue_add(ctx, &s->q, out);
            }
        }
        if (s->q.available < s->size) {
            ff_bufqueue_add(ctx, &s->q, buf);
            s->available++;
        }
        return 0;
    }

    in = ff_bufqueue_peek(&s->q, s->mid);

    if (!ctx->is_disabled) {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < s->size; i++) {
            AVFrame *frame = ff_bufqueue_peek(&s->q, i);

            s->data[0][i] = frame->data[0];
            s->data[1][i] = frame->data[1];
            s->data[2][i] = frame->data[2];
            s->linesize[0][i] = frame->linesize[0];
            s->linesize[1][i] = frame->linesize[1];
            s->linesize[2][i] = frame->linesize[2];
        }

        td.in = in; td.out = out;
        ctx->internal->execute(ctx, s->filter_slice, &td, NULL,
                               FFMIN3(s->planeheight[1],
                                      s->planeheight[2],
                                      ff_filter_get_nb_threads(ctx)));
        av_frame_copy_props(out, in);
    } else {
        out = av_frame_clone(in);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
    }

    in = ff_bufqueue_get(&s->q);
    av_frame_free(&in);
    ff_bufqueue_add(ctx, &s->q, buf);

    return ff_filter_frame(outlink, out);
}