Example #1
static int filter_frame(AVFilterLink *link, AVFrame *picref)
{
    AVFilterContext *ctx = link->dst;
    IDETContext *idet = ctx->priv;
    AVDictionary **metadata = avpriv_frame_get_metadatap(picref);

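    /* Rotate the three-frame window: free the oldest frame, shift cur/next
     * down, and keep the incoming frame as the new "next". */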
    if (idet->prev)
        av_frame_free(&idet->prev);
    idet->prev = idet->cur;
    idet->cur  = idet->next;
    idet->next = picref;

    if (!idet->cur)
        return 0;

    if (!idet->prev)
        idet->prev = av_frame_clone(idet->cur);

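    /* Look up the pixel format descriptor on first use; inputs deeper than
     * 8 bits per component use the 16-bit line filter (with an x86 SIMD
     * version when available). */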
    if (!idet->csp)
        idet->csp = av_pix_fmt_desc_get(link->format);
    if (idet->csp->comp[0].depth_minus1 / 8 == 1){
        idet->filter_line = (ff_idet_filter_func)ff_idet_filter_line_c_16bit;
        if (ARCH_X86)
            ff_idet_init_x86(idet, 1);
    }

    filter(ctx);

    av_dict_set_int(metadata, "lavfi.idet.tff", idet->prestat[TFF], 0);
    av_dict_set_int(metadata, "lavfi.idet.bff", idet->prestat[BFF], 0);
    av_dict_set_int(metadata, "lavfi.idet.progressive", idet->prestat[PROGRESSIVE], 0);
    av_dict_set_int(metadata, "lavfi.idet.undetermined", idet->prestat[UNDETERMINED], 0);

    return ff_filter_frame(ctx->outputs[0], av_frame_clone(idet->cur));
}
Example #2
static int add_metadata(int count, int type,
                        const char *name, const char *sep, TiffContext *s, AVFrame *frame)
{
    switch(type) {
    case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, avpriv_frame_get_metadatap(frame));
    case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    default         : return AVERROR_INVALIDDATA;
    };
}
Example #3
static AVFrame *do_psnr(AVFilterContext *ctx, AVFrame *main,
                        const AVFrame *ref)
{
    PSNRContext *s = ctx->priv;
    double comp_mse[4], mse = 0;
    int j, c;
    AVDictionary **metadata = avpriv_frame_get_metadatap(main);

    compute_images_mse(s, (const uint8_t **)main->data, main->linesize,
                       (const uint8_t **)ref->data, ref->linesize,
                       main->width, main->height, comp_mse);

    for (j = 0; j < s->nb_components; j++)
        mse += comp_mse[j] * s->planeweight[j];

    s->min_mse = FFMIN(s->min_mse, mse);
    s->max_mse = FFMAX(s->max_mse, mse);

    s->mse += mse;
    for (j = 0; j < s->nb_components; j++)
        s->mse_comp[j] += comp_mse[j];
    s->nb_frames++;

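    /* Export per-component and average MSE/PSNR values as frame metadata. */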
    for (j = 0; j < s->nb_components; j++) {
        c = s->is_rgb ? s->rgba_map[j] : j;
        set_meta(metadata, "lavfi.psnr.mse.", s->comps[j], comp_mse[c]);
        set_meta(metadata, "lavfi.psnr.psnr.", s->comps[j], get_psnr(comp_mse[c], 1, s->max[c]));
    }
    set_meta(metadata, "lavfi.psnr.mse_avg", 0, mse);
    set_meta(metadata, "lavfi.psnr.psnr_avg", 0, get_psnr(mse, 1, s->average_max));

    if (s->stats_file) {
        fprintf(s->stats_file, "n:%"PRId64" mse_avg:%0.2f ", s->nb_frames, mse);
        for (j = 0; j < s->nb_components; j++) {
            c = s->is_rgb ? s->rgba_map[j] : j;
            fprintf(s->stats_file, "mse_%c:%0.2f ", s->comps[j], comp_mse[c]);
        }
        for (j = 0; j < s->nb_components; j++) {
            c = s->is_rgb ? s->rgba_map[j] : j;
            fprintf(s->stats_file, "psnr_%c:%0.2f ", s->comps[j],
                    get_psnr(comp_mse[c], 1, s->max[c]));
        }
        fprintf(s->stats_file, "\n");
    }

    return main;
}
Example #4
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *s = ctx->priv;
    int x, i;
    int pblack = 0;
    uint8_t *p = frame->data[0];
    AVDictionary **metadata;
    char buf[32];

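    /* Count the pixels of the first plane that fall below the black threshold. */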
    for (i = 0; i < frame->height; i++) {
        for (x = 0; x < inlink->w; x++)
            s->nblack += p[x] < s->bthresh;
        p += frame->linesize[0];
    }

    if (frame->key_frame)
        s->last_keyframe = s->frame;

    pblack = s->nblack * 100 / (inlink->w * inlink->h);
    if (pblack >= s->bamount) {
        metadata = avpriv_frame_get_metadatap(frame);

        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
               "type:%c last_keyframe:%d\n",
               s->frame, pblack, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               av_get_picture_type_char(frame->pict_type), s->last_keyframe);

        SET_META("lavfi.blackframe.pblack", "%u", pblack);
    }

    s->frame++;
    s->nblack = 0;
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #5
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame, AVPacket *avpkt)
{
    TiffContext *const s = avctx->priv_data;
    AVFrame *const p = data;
    ThreadFrame frame = { .f = data };
    unsigned off;
    int le, ret, plane, planes;
    int i, j, entries, stride;
    unsigned soff, ssize;
    uint8_t *dst;
    GetByteContext stripsizes;
    GetByteContext stripdata;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    // parse image header
    if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
        av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
        return ret;
    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
        av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->le          = le;
    // TIFF_BPP is not a required tag and defaults to 1
    s->bppcount    = s->bpp = 1;
    s->photometric = TIFF_PHOTOMETRIC_NONE;
    s->compr       = TIFF_RAW;
    s->fill_order  = 0;
    free_geotags(s);

    // Reset these offsets so we can tell if they were set this frame
    s->stripsizesoff = s->strippos = 0;
    /* parse image file directory */
    bytestream2_seek(&s->gb, off, SEEK_SET);
    entries = ff_tget_short(&s->gb, le);
    if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < entries; i++) {
        if ((ret = tiff_decode_tag(s, p)) < 0)
            return ret;
    }

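    /* Export the GeoTIFF keys collected from the IFD as frame metadata,
     * skipping unknown or mistyped keys. */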
    for (i = 0; i<s->geotag_count; i++) {
        const char *keyname = get_geokey_name(s->geotags[i].key);
        if (!keyname) {
            av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
            continue;
        }
        if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
            av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
            continue;
        }
        ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0);
        if (ret<0) {
            av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
            return ret;
        }
    }

    if (!s->strippos && !s->stripoff) {
        av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
        return AVERROR_INVALIDDATA;
    }
    /* now we have the data and may start decoding */
    if ((ret = init_image(s, &frame)) < 0)
        return ret;

    if (s->strips == 1 && !s->stripsize) {
        av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
        s->stripsize = avpkt->size - s->stripoff;
    }

    if (s->stripsizesoff) {
        if (s->stripsizesoff >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
                         avpkt->size - s->stripsizesoff);
    }
    if (s->strippos) {
        if (s->strippos >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripdata, avpkt->data + s->strippos,
                         avpkt->size - s->strippos);
    }

    if (s->rps <= 0) {
        av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
        return AVERROR_INVALIDDATA;
    }

    planes = s->planar ? s->bppcount : 1;
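    /* Decode each plane strip by strip; per-strip offsets and sizes come from
     * the tables referenced by the IFD or from the single-strip defaults. */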
    for (plane = 0; plane < planes; plane++) {
        stride = p->linesize[plane];
        dst    = p->data[plane];
        for (i = 0; i < s->height; i += s->rps) {
            if (s->stripsizesoff)
                ssize = ff_tget(&stripsizes, s->sstype, le);
            else
                ssize = s->stripsize;

            if (s->strippos)
                soff = ff_tget(&stripdata, s->sot, le);
            else
                soff = s->stripoff;

            if (soff > avpkt->size || ssize > avpkt->size - soff) {
                av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                return AVERROR_INVALIDDATA;
            }
            if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
                                         FFMIN(s->rps, s->height - i))) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
                break;
            }
            dst += s->rps * stride;
        }
        if (s->predictor == 2) {
            if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
                av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
                return AVERROR_PATCHWELCOME;
            }
            dst   = p->data[plane];
            soff  = s->bpp >> 3;
            if (s->planar)
                soff  = FFMAX(soff / s->bppcount, 1);
            ssize = s->width * soff;
            if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                    dst += stride;
                }
            } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                    dst += stride;
                }
            } else {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j++)
                        dst[j] += dst[j - soff];
                    dst += stride;
                }
            }
        }

        if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < p->linesize[plane]; j++)
                    dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
                dst += stride;
            }
        }
    }

    if (s->planar && s->bppcount > 2) {
        FFSWAP(uint8_t*, p->data[0],     p->data[2]);
        FFSWAP(int,      p->linesize[0], p->linesize[2]);
        FFSWAP(uint8_t*, p->data[0],     p->data[1]);
        FFSWAP(int,      p->linesize[0], p->linesize[1]);
    }

    *got_frame = 1;

    return avpkt->size;
}
Example #6
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    int bpp = s->max_pixsteps[0];
    int w, h, x, y, shrink_by;
    AVDictionary **metadata;
    int outliers, last_y;
    int limit = round(s->limit);

    // ignore first 2 frames - they may be empty
    if (++s->frame_nb > 0) {
        metadata = avpriv_frame_get_metadatap(frame);

        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
            s->x1 = frame->width  - 1;
            s->y1 = frame->height - 1;
            s->x2 = 0;
            s->y2 = 0;
            s->frame_nb = 1;
        }

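/* Scan from one edge towards the current bound: lines whose checkline() score
 * exceeds the limit count as outliers, and once more than max_outliers are
 * seen, DST is set to the position following the last line still considered
 * black. */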
#define FIND(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
        outliers = 0;\
        for (last_y = y = FROM; NOEND; y = y INC) {\
            if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit) {\
                if (++outliers > s->max_outliers) { \
                    DST = last_y;\
                    break;\
                }\
            } else\
                last_y = y INC;\
        }

        FIND(s->y1,                 0,               y < s->y1, +1, frame->linesize[0], bpp, frame->width);
        FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
        FIND(s->x1,                 0,               y < s->x1, +1, bpp, frame->linesize[0], frame->height);
        FIND(s->x2,  frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);


        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (s->x1+1) & ~1;
        y = (s->y1+1) & ~1;

        w = s->x2 - x + 1;
        h = s->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (s->round <= 1)
            s->round = 16;
        if (s->round % 2)
            s->round *= 2;

        shrink_by = w % s->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % s->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;

        SET_META("lavfi.cropdetect.x1", s->x1);
        SET_META("lavfi.cropdetect.x2", s->x2);
        SET_META("lavfi.cropdetect.y1", s->y1);
        SET_META("lavfi.cropdetect.y2", s->y2);
        SET_META("lavfi.cropdetect.w",  w);
        SET_META("lavfi.cropdetect.h",  h);
        SET_META("lavfi.cropdetect.x",  x);
        SET_META("lavfi.cropdetect.y",  y);

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #7
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *s = ctx->priv;
    int bpp = s->max_pixsteps[0];
    int w, h, x, y, shrink_by;
    AVDictionary **metadata;

    // ignore first 2 frames - they may be empty
    if (++s->frame_nb > 0) {
        metadata = avpriv_frame_get_metadatap(frame);

        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (s->reset_count > 0 && s->frame_nb > s->reset_count) {
            s->x1 = frame->width  - 1;
            s->y1 = frame->height - 1;
            s->x2 = 0;
            s->y2 = 0;
            s->frame_nb = 1;
        }

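        /* Scan inward from each edge until a line or column whose checkline()
         * score exceeds the limit is found; that position becomes the new
         * crop border. */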
        for (y = 0; y < s->y1; y++) {
            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
                s->y1 = y;
                break;
            }
        }

        for (y = frame->height - 1; y > FFMAX(s->y2, s->y1); y--) {
            if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > s->limit) {
                s->y2 = y;
                break;
            }
        }

        for (y = 0; y < s->x1; y++) {
            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
                s->x1 = y;
                break;
            }
        }

        for (y = frame->width - 1; y > FFMAX(s->x2, s->x1); y--) {
            if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > s->limit) {
                s->x2 = y;
                break;
            }
        }

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (s->x1+1) & ~1;
        y = (s->y1+1) & ~1;

        w = s->x2 - x + 1;
        h = s->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (s->round <= 1)
            s->round = 16;
        if (s->round % 2)
            s->round *= 2;

        shrink_by = w % s->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % s->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;

        SET_META("lavfi.cropdetect.x1", s->x1);
        SET_META("lavfi.cropdetect.x2", s->x2);
        SET_META("lavfi.cropdetect.y1", s->y1);
        SET_META("lavfi.cropdetect.y2", s->y2);
        SET_META("lavfi.cropdetect.w",  w);
        SET_META("lavfi.cropdetect.h",  h);
        SET_META("lavfi.cropdetect.x",  x);
        SET_META("lavfi.cropdetect.y",  y);

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               s->x1, s->x2, s->y1, s->y2, w, h, x, y, frame->pts,
               frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #8
static void filter(AVFilterContext *ctx)
{
    IDETContext *idet = ctx->priv;
    int y, i;
    int64_t alpha[2]={0};
    int64_t delta=0;
    int64_t gamma[2]={0};
    Type type, best_type;
    RepeatedField repeat;
    int match = 0;
    AVDictionary **metadata = avpriv_frame_get_metadatap(idet->cur);

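    /* Accumulate combing scores per field parity: alpha[] against the previous
     * and next frames, delta within the current frame, and gamma[] the direct
     * difference to the previous frame (repeated-field detection). */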
    for (i = 0; i < idet->csp->nb_components; i++) {
        int w = idet->cur->width;
        int h = idet->cur->height;
        int refs = idet->cur->linesize[i];

        if (i && i<3) {
            w = FF_CEIL_RSHIFT(w, idet->csp->log2_chroma_w);
            h = FF_CEIL_RSHIFT(h, idet->csp->log2_chroma_h);
        }

        for (y = 2; y < h - 2; y++) {
            uint8_t *prev = &idet->prev->data[i][y*refs];
            uint8_t *cur  = &idet->cur ->data[i][y*refs];
            uint8_t *next = &idet->next->data[i][y*refs];
            alpha[ y   &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
            alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
            delta          += idet->filter_line(cur-refs,  cur, cur+refs, w);
            gamma[(y^1)&1] += idet->filter_line(cur     , prev, cur     , w);
        }
    }

    if      (alpha[0] > idet->interlace_threshold * alpha[1]){
        type = TFF;
    }else if(alpha[1] > idet->interlace_threshold * alpha[0]){
        type = BFF;
    }else if(alpha[1] > idet->progressive_threshold * delta){
        type = PROGRESSIVE;
    }else{
        type = UNDETERMINED;
    }

    if ( gamma[0] > idet->repeat_threshold * gamma[1] ){
        repeat = REPEAT_TOP;
    } else if ( gamma[1] > idet->repeat_threshold * gamma[0] ){
        repeat = REPEAT_BOTTOM;
    } else {
        repeat = REPEAT_NONE;
    }

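    /* Keep a history of recent single-frame decisions; the multi-frame
     * decision (last_type) is only updated when the history agrees. */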
    memmove(idet->history+1, idet->history, HIST_SIZE-1);
    idet->history[0] = type;
    best_type = UNDETERMINED;
    for(i=0; i<HIST_SIZE; i++){
        if(idet->history[i] != UNDETERMINED){
            if(best_type == UNDETERMINED)
                best_type = idet->history[i];

            if(idet->history[i] == best_type) {
                match++;
            }else{
                match=0;
                break;
            }
        }
    }
    if(idet->last_type == UNDETERMINED){
        if(match  ) idet->last_type = best_type;
    }else{
        if(match>2) idet->last_type = best_type;
    }

    if      (idet->last_type == TFF){
        idet->cur->top_field_first = 1;
        idet->cur->interlaced_frame = 1;
    }else if(idet->last_type == BFF){
        idet->cur->top_field_first = 0;
        idet->cur->interlaced_frame = 1;
    }else if(idet->last_type == PROGRESSIVE){
        idet->cur->interlaced_frame = 0;
    }

    for(i=0; i<3; i++)
        idet->repeats[i]  = av_rescale(idet->repeats [i], idet->decay_coefficient, PRECISION);

    for(i=0; i<4; i++){
        idet->prestat [i] = av_rescale(idet->prestat [i], idet->decay_coefficient, PRECISION);
        idet->poststat[i] = av_rescale(idet->poststat[i], idet->decay_coefficient, PRECISION);
    }

    idet->total_repeats [         repeat] ++;
    idet->repeats       [         repeat] += PRECISION;

    idet->total_prestat [           type] ++;
    idet->prestat       [           type] += PRECISION;

    idet->total_poststat[idet->last_type] ++;
    idet->poststat      [idet->last_type] += PRECISION;

    av_log(ctx, AV_LOG_DEBUG, "Repeated Field:%12s, Single frame:%12s, Multi frame:%12s\n",
           rep2str(repeat), type2str(type), type2str(idet->last_type));

    av_dict_set    (metadata, "lavfi.idet.repeated.current_frame", rep2str(repeat), 0);
    av_dict_set_fxp(metadata, "lavfi.idet.repeated.neither",       idet->repeats[REPEAT_NONE], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.repeated.top",           idet->repeats[REPEAT_TOP], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.repeated.bottom",        idet->repeats[REPEAT_BOTTOM], 2, 0);

    av_dict_set    (metadata, "lavfi.idet.single.current_frame",   type2str(type), 0);
    av_dict_set_fxp(metadata, "lavfi.idet.single.tff",             idet->prestat[TFF], 2 , 0);
    av_dict_set_fxp(metadata, "lavfi.idet.single.bff",             idet->prestat[BFF], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.single.progressive",     idet->prestat[PROGRESSIVE], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.single.undetermined",    idet->prestat[UNDETERMINED], 2, 0);

    av_dict_set    (metadata, "lavfi.idet.multiple.current_frame", type2str(idet->last_type), 0);
    av_dict_set_fxp(metadata, "lavfi.idet.multiple.tff",           idet->poststat[TFF], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.multiple.bff",           idet->poststat[BFF], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.multiple.progressive",   idet->poststat[PROGRESSIVE], 2, 0);
    av_dict_set_fxp(metadata, "lavfi.idet.multiple.undetermined",  idet->poststat[UNDETERMINED], 2, 0);
}
Example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MetadataContext *s = ctx->priv;
    AVDictionary **metadata = avpriv_frame_get_metadatap(frame);
    AVDictionaryEntry *e;

    if (!*metadata)
        return ff_filter_frame(outlink, frame);

    e = av_dict_get(*metadata, !s->key ? "" : s->key, NULL,
                    !s->key ? AV_DICT_IGNORE_SUFFIX: 0);

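    /* Dispatch on the configured mode: select passes matching frames through,
     * add/modify/delete edit the dictionary, print logs the entries. */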
    switch (s->mode) {
    case METADATA_SELECT:
        if (!s->value && e && e->value) {
            return ff_filter_frame(outlink, frame);
        } else if (s->value && e && e->value &&
                   s->compare(s, e->value, s->value)) {
            return ff_filter_frame(outlink, frame);
        }
        break;
    case METADATA_ADD:
        if (e && e->value) {
            ;
        } else {
            av_dict_set(metadata, s->key, s->value, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_MODIFY:
        if (e && e->value) {
            av_dict_set(metadata, s->key, s->value, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_PRINT:
        if (!s->key && e) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", e->key, e->value);
            while ((e = av_dict_get(*metadata, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL) {
                s->print(ctx, "%s=%s\n", e->key, e->value);
            }
        } else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", s->key, e->value);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_DELETE:
        if (!s->key) {
            av_dict_free(metadata);
        } else if (e && e->value && (!s->value || s->compare(s, e->value, s->value))) {
            av_dict_set(metadata, s->key, NULL, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    default:
        av_assert0(0);
    };

    av_frame_free(&frame);

    return 0;
}
Example #10
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    select->var_values[VAR_KEY] = frame->key_frame;

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first     ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

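    /* Map the expression result to an output pad: 0 drops the frame, NaN or a
     * negative value selects the first output, and a positive value selects
     * output ceil(res)-1, clamped to the last output. */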
    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}