Example #1
static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *picref)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (CONFIG_AVCODEC && select->do_scene_detect) {
        char buf[32];
        select->var_values[VAR_SCENE] = get_scene_score(ctx, picref);
        // TODO: document metadata
        snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
        av_dict_set(&picref->metadata, "lavfi.scene_score", buf, 0);
    }
    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(picref->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(picref->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_PTS] = TS2D(picref->pts);
    select->var_values[VAR_T  ] = TS2D(picref->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = picref->pos == -1 ? NAN : picref->pos;

    select->var_values[VAR_INTERLACE_TYPE] =
        !picref->video->interlaced     ? INTERLACE_TYPE_P :
        picref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
    select->var_values[VAR_PICT_TYPE] = picref->video->pict_type;

    res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%d pts:%d t:%f pos:%d interlace_type:%c key:%d pict_type:%c "
           "-> select:%f\n",
           (int)select->var_values[VAR_N],
           (int)select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           (int)select->var_values[VAR_POS],
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_P ? 'P' :
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_T ? 'T' :
           select->var_values[VAR_INTERLACE_TYPE] == INTERLACE_TYPE_B ? 'B' : '?',
           (int)select->var_values[VAR_KEY],
           av_get_picture_type_char(select->var_values[VAR_PICT_TYPE]),
           res);

    select->var_values[VAR_N] += 1.0;

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
    }
    // record this frame's pts only after evaluation, so that prev_pts
    // refers to the previous frame the next time the expression runs
    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    return res;
}
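A minimal, self-contained sketch of the av_expr_parse()/av_expr_eval() pattern that select_frame() is built around: parse the expression once, then fill the variable array and evaluate it per frame. The expression string, variable set, and time-base value here are illustrative, not taken from the filter.

#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    /* same split as the select filter: parse once, evaluate per frame */
    static const char * const var_names[] = { "n", "pts", NULL };
    double var_values[2] = { 0, 0 };
    AVExpr *expr = NULL;

    if (av_expr_parse(&expr, "not(mod(n,10))", var_names,
                      NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 1;

    for (int n = 0; n < 25; n++) {
        var_values[0] = n;        /* VAR_N */
        var_values[1] = n * 3600; /* VAR_PTS, hypothetical time base */
        if (av_expr_eval(expr, var_values, NULL)) /* non-zero -> keep frame */
            printf("select frame %d\n", n);
    }
    av_expr_free(expr);
    return 0;
}
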
Example #2
static int process_work_frame(AVFilterContext *ctx, int stop)
{
    FrameRateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t work_next_pts;
    AVFrame *copy_src1, *copy_src2, *work;
    int interpolate;

    ff_dlog(ctx, "process_work_frame()\n");

    ff_dlog(ctx, "process_work_frame() pending_input_frames %d\n", s->pending_srce_frames);

    if (s->srce[s->prev]) ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
    if (s->srce[s->crnt]) ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
    if (s->srce[s->next]) ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);

    if (!s->srce[s->crnt]) {
        // the filter cannot do anything
        ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
        next_source(ctx);
        return 0;
    }

    work_next_pts = s->pts + s->average_dest_pts_delta;

    ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
    ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
    if (s->srce[s->prev])
        ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->prev], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->crnt])
        ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->crnt], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->next])
        ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
            s->srce_pts_dest[s->next], s->dest_time_base.num, s->dest_time_base.den);

    av_assert0(s->srce[s->next]);

    // should the filter skip this input frame? (the output frame rate is lower than the input frame rate)
    if (!s->flush && s->pts >= s->srce_pts_dest[s->next]) {
        ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
        next_source(ctx);
        s->pending_srce_frames--;
        return 0;
    }

    // calculate interpolation
    interpolate = (int) ((s->pts - s->srce_pts_dest[s->crnt]) * 256.0 / s->average_srce_pts_dest_delta);
    ff_dlog(ctx, "process_work_frame() interpolate:%d/256\n", interpolate);
    copy_src1 = s->srce[s->crnt];
    if (interpolate > s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
        copy_src1 = s->srce[s->next];
    }
    if (s->srce[s->prev] && interpolate < -s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:PREV\n");
        copy_src1 = s->srce[s->prev];
    }

    // decide whether to blend two frames
    if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
        double interpolate_scene_score = 0;

        if (interpolate > 0) {
            ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
            copy_src2 = s->srce[s->next];
        } else {
            ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
            copy_src2 = s->srce[s->prev];
        }
        if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
            interpolate_scene_score = get_scene_score(ctx, copy_src1, copy_src2);
            ff_dlog(ctx, "process_work_frame() interpolate scene score:%f\n", interpolate_scene_score);
        }
        // decide if the shot-change detection allows us to blend two frames
        if (interpolate_scene_score < s->scene_score && copy_src2) {
            uint16_t src2_factor = abs(interpolate);
            uint16_t src1_factor = 256 - src2_factor;
            int plane, line, pixel;

            // get work-space for output frame
            work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!work)
                return AVERROR(ENOMEM);

            av_frame_copy_props(work, s->srce[s->crnt]);

            ff_dlog(ctx, "process_work_frame() INTERPOLATE to create work frame\n");
            for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
                int cpy_line_width = s->line_size[plane];
                uint8_t *cpy_src1_data = copy_src1->data[plane];
                int cpy_src1_line_size = copy_src1->linesize[plane];
                uint8_t *cpy_src2_data = copy_src2->data[plane];
                int cpy_src2_line_size = copy_src2->linesize[plane];
                int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
                uint8_t *cpy_dst_data = work->data[plane];
                int cpy_dst_line_size = work->linesize[plane];
                if (plane < 1 || plane > 2) {
                    // luma or alpha
                    for (line = 0; line < cpy_src_h; line++) {
                        for (pixel = 0; pixel < cpy_line_width; pixel++) {
                            // integer version of (src1 * src1_factor) + (src2 * src2_factor) + 0.5
                            // 0.5 is for rounding
                            // 128 is the integer representation of 0.5 << 8
                            cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + 128) >> 8;
                        }
                        cpy_src1_data += cpy_src1_line_size;
                        cpy_src2_data += cpy_src2_line_size;
                        cpy_dst_data += cpy_dst_line_size;
                    }
                } else {
                    // chroma
                    for (line = 0; line < cpy_src_h; line++) {
                        for (pixel = 0; pixel < cpy_line_width; pixel++) {
                            // as above
                            // because U and V are based around 128 we have to subtract 128 from the components.
                            // 32896 is the integer representation of 128.5 << 8
                            cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - 128) * src1_factor) + ((cpy_src2_data[pixel] - 128) * src2_factor) + 32896) >> 8;
                        }
                        cpy_src1_data += cpy_src1_line_size;
                        cpy_src2_data += cpy_src2_line_size;
                        cpy_dst_data += cpy_dst_line_size;
                    }
                }
            }
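Example #2 breaks off mid-function in the listing, but its core is the 256-step fixed-point blend in the inner loops above. A standalone sketch of that arithmetic, with the rounding constants from the source (blend_luma()/blend_chroma() are illustrative names, not part of vf_framerate.c):

#include <stdint.h>
#include <stdio.h>

static uint8_t blend_luma(uint8_t src1, uint8_t src2, uint16_t src2_factor)
{
    uint16_t src1_factor = 256 - src2_factor;
    /* (src1*f1 + src2*f2 + 128) >> 8: weighted average with rounding */
    return (src1 * src1_factor + src2 * src2_factor + 128) >> 8;
}

static uint8_t blend_chroma(uint8_t src1, uint8_t src2, uint16_t src2_factor)
{
    uint16_t src1_factor = 256 - src2_factor;
    /* chroma is centred on 128; 32896 == (128 << 8) + 128 restores the
     * bias and rounds in a single step */
    return ((src1 - 128) * src1_factor + (src2 - 128) * src2_factor + 32896) >> 8;
}

int main(void)
{
    /* halfway blend (factor 128/256) of two samples: both give 120 */
    printf("%d\n", blend_luma(40, 200, 128));
    printf("%d\n", blend_chroma(40, 200, 128));
    return 0;
}
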
Example #3
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    select->var_values[VAR_KEY] = frame->key_frame;

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first     ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}
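The select_out mapping at the end of this version is easy to sanity-check in isolation: 0 drops the frame, NaN or negative values go to the first output, and positive values are rounded up and clamped to the last output pad. A hypothetical standalone helper reproducing it (map_select_out() is not part of the filter):

#include <math.h>
#include <stdio.h>

static int map_select_out(double res, int nb_outputs)
{
    if (res == 0)
        return -1;                /* drop the frame */
    if (isnan(res) || res < 0)
        return 0;                 /* route to the first output */
    int out = (int)ceil(res) - 1; /* expression value k maps to output k-1 */
    return out < nb_outputs - 1 ? out : nb_outputs - 1;
}

int main(void)
{
    const double tests[] = { 0.0, -1.0, 0.5, 1.0, 2.3, 99.0 };
    for (int i = 0; i < 6; i++)
        printf("res=%g -> select_out=%d\n", tests[i], map_select_out(tests[i], 3));
    return 0;
}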