Example #1
File: vf_tinterlace.c  Project: 9aa5/FFmpeg
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[outlink->format];
    TInterlaceContext *tinterlace = ctx->priv;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? outlink->h >> desc->log2_chroma_h : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }

    return 0;
}
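Note the chroma height used in the memset loop above: "outlink->h >> desc->log2_chroma_h" rounds down, so an odd-height frame loses its last chroma row. The later revision in Example #2 switches to FF_CEIL_RSHIFT (a ceiling right shift, later also exposed as AV_CEIL_RSHIFT), which avoids undersizing the fill for odd heights. A minimal standalone sketch of the difference, assuming a 4:2:0 format (log2_chroma_h == 1) and an odd height:

#include <stdio.h>
#include <libavutil/common.h>    /* AV_CEIL_RSHIFT */

int main(void)
{
    int h = 405;                 /* odd luma height */
    int log2_chroma_h = 1;       /* e.g. yuv420p */

    /* plain shift truncates and drops the last chroma row */
    printf("truncating shift: %d\n", h >> log2_chroma_h);               /* 202 */
    /* ceiling shift keeps it */
    printf("ceiling shift:    %d\n", AV_CEIL_RSHIFT(h, log2_chroma_h)); /* 203 */
    return 0;
}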
Example #2
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
    }
    if (tinterlace->mode == MODE_INTERLACEX2) {
        outlink->time_base.num = inlink->time_base.num;
        outlink->time_base.den = inlink->time_base.den * 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
    }

    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
           inlink->h, outlink->h);

    return 0;
}
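The time-base and frame-rate changes near the end of config_out_props() are plain rational arithmetic: interlacex2 doubles the output frame rate (and halves the time base), while the field-combining modes halve the rate. A standalone sketch of the same arithmetic with libavutil's AVRational helpers (the 25 fps input is just an assumed example):

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational in_rate = { 25, 1 };   /* assumed 25 fps input */
    AVRational in_tb   = { 1, 25 };

    AVRational x2_rate   = av_mul_q(in_rate, (AVRational){2, 1}); /* 50/1 */
    AVRational x2_tb     = av_mul_q(in_tb,   (AVRational){1, 2}); /* 1/50 */
    AVRational half_rate = av_mul_q(in_rate, (AVRational){1, 2}); /* 25/2 */

    printf("interlacex2: %d/%d fps, time base %d/%d\n",
           x2_rate.num, x2_rate.den, x2_tb.num, x2_tb.den);
    printf("merge-style: %d/%d fps\n", half_rate.num, half_rate.den);
    return 0;
}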
Example #3
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackDetectContext *blackdetect = ctx->priv;

    blackdetect->black_min_duration =
        blackdetect->black_min_duration_time / av_q2d(inlink->time_base);

    blackdetect->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
        // luminance_minimum_value + pixel_black_th * luminance_range_size
             blackdetect->pixel_black_th *  255 :
        16 + blackdetect->pixel_black_th * (235 - 16);

    av_log(blackdetect, AV_LOG_VERBOSE,
           "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
           av_ts2timestr(blackdetect->black_min_duration, &inlink->time_base),
           blackdetect->pixel_black_th, blackdetect->pixel_black_th_i,
           blackdetect->picture_black_ratio_th);
    return 0;
}
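The duration handling above converts the user option, given in seconds, into stream time-base units by dividing by av_q2d(time_base). A small sketch of that conversion, assuming a 1/1000 (millisecond) time base:

#include <stdio.h>
#include <inttypes.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational tb = { 1, 1000 };        /* assumed: 1 tick = 1 ms */
    double min_duration_sec = 2.0;      /* option value in seconds */

    /* same conversion as config_input(): seconds -> time-base ticks */
    int64_t min_duration = min_duration_sec / av_q2d(tb);

    printf("%.1f s = %" PRId64 " ticks at time base %d/%d\n",
           min_duration_sec, min_duration, tb.num, tb.den);
    return 0;
}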
Example #4
static int config_props(AVFilterLink *inlink)
{
    FadeContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);

    s->hsub = pixdesc->log2_chroma_w;
    s->vsub = pixdesc->log2_chroma_h;

    s->bpp = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR ?
             1 :
             av_get_bits_per_pixel(pixdesc) >> 3;
    s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA);
    s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;

    /* use CCIR601/709 black level for studio-level pixel non-alpha components */
    s->black_level =
            ff_fmt_is_in(inlink->format, studio_level_pix_fmts) && !s->alpha ? 16 : 0;
    /* 32768 = 1 << 15, it is an integer representation
     * of 0.5 and is for rounding. */
    s->black_level_scaled = (s->black_level << 16) + 32768;
    return 0;
}
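black_level_scaled packs the black level into 16.16 fixed point and adds 32768 (0.5 in that representation) so that a later ">> 16" rounds to nearest instead of truncating. A minimal sketch of the per-pixel arithmetic this enables, roughly the form a fade pass takes (the concrete pixel and factor values are assumptions):

#include <stdio.h>

int main(void)
{
    const int black_level        = 16;                       /* studio-range minimum */
    const int black_level_scaled = (black_level << 16) + 32768;

    int factor = 1 << 15;   /* fade coefficient in 16.16 fixed point: 0.5 */
    int src    = 200;       /* sample luma value */

    /* scale the pixel toward black_level; the +32768 folded into
     * black_level_scaled makes the final >>16 round to nearest */
    int dst = ((src - black_level) * factor + black_level_scaled) >> 16;

    printf("src=%d, factor=0.5 -> dst=%d\n", src, dst);       /* dst == 108 */
    return 0;
}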
Example #5
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LutContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint8_t rgba_map[4]; /* component index -> RGBA color index map */
    int min[4], max[4];
    int val, color, ret;

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->is_16bit = desc->comp[0].depth_minus1 > 7;

    switch (inlink->format) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV440P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV440P12LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV420P14LE:
    case AV_PIX_FMT_YUV422P14LE:
    case AV_PIX_FMT_YUV444P14LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        min[Y] = 16 * (1 << (desc->comp[0].depth_minus1 - 7));
        min[U] = 16 * (1 << (desc->comp[1].depth_minus1 - 7));
        min[V] = 16 * (1 << (desc->comp[2].depth_minus1 - 7));
        min[A] = 0;
        max[Y] = 235 * (1 << (desc->comp[0].depth_minus1 - 7));
        max[U] = 240 * (1 << (desc->comp[1].depth_minus1 - 7));
        max[V] = 240 * (1 << (desc->comp[2].depth_minus1 - 7));
        max[A] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
        break;
    default:
        min[0] = min[1] = min[2] = min[3] = 0;
        max[0] = max[1] = max[2] = max[3] = 255;
    }

    s->is_yuv = s->is_rgb = 0;
    if      (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) s->is_yuv = 1;
    else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) s->is_rgb = 1;

    if (s->is_rgb) {
        ff_fill_rgba_map(rgba_map, inlink->format);
        s->step = av_get_bits_per_pixel(desc) >> 3;
    }

    /* (the per-component LUT fill that follows in vf_lut.c is not shown in this snippet) */
    return 0;
}
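The studio-range limits in the switch above are the 8-bit values 16/235 (luma) and 16/240 (chroma) scaled up by the extra bit depth: 16 * (1 << (depth_minus1 - 7)) is 16 at 8 bits, 64 at 10 bits, 256 at 12 bits, and so on. A standalone sketch of that scaling (no libavutil dependency assumed):

#include <stdio.h>

int main(void)
{
    /* same scaling as the switch in config_props(): the 8-bit studio
     * limits shifted up by (depth - 8) extra bits */
    for (int depth = 8; depth <= 16; depth += 2) {
        int shift = depth - 8;
        printf("depth %2d: Y %5d..%5d  chroma %5d..%5d\n",
               depth, 16 << shift, 235 << shift, 16 << shift, 240 << shift);
    }
    return 0;
}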
Example #6
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2 ?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 0, 0, 0, 16 };
        int ret;
        ff_draw_init(&tinterlace->draw, outlink->format, 0);
        ff_draw_color(&tinterlace->draw, &tinterlace->color, black);
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            tinterlace->color.comp[0].u8[0] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 16);
        if (ret < 0)
            return ret;

        ff_fill_rectangle(&tinterlace->draw, &tinterlace->color, tinterlace->black_data,
                          tinterlace->black_linesize, 0, 0, outlink->w, outlink->h);
    }
    if (tinterlace->flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flags ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~(TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF);
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode == MODE_MERGEX2) {
        outlink->frame_rate = inlink->frame_rate;
        outlink->time_base  = inlink->time_base;
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i < FF_ARRAY_ELEMS(standard_tbs); i++) {
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    tinterlace->csp = av_pix_fmt_desc_get(outlink->format);
    if (tinterlace->flags & TINTERLACE_FLAG_CVLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_complex_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_complex_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    } else if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", tinterlace->mode,
           (tinterlace->flags & TINTERLACE_FLAG_CVLPF) ? "complex" :
           (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "linear" : "off",
           inlink->h, outlink->h);

    return 0;
}
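For context, each of these config_* functions is a link-configuration callback: it is attached to a filter's input or output pad through the AVFilterPad.config_props field, and libavfilter invokes it while configuring the graph (avfilter_graph_config()), after the link's pixel format has been negotiated. A sketch of that wiring for the function above; the pad table itself is illustrative, not copied from the tinterlace source:

#include "avfilter.h"

/* Hypothetical output pad table: libavfilter calls config_out_props()
 * on the output link once the input side is configured. */
static const AVFilterPad example_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_out_props,
    },
    { NULL }
};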