/**
 * setpts/asetpts per-frame callback: evaluate the user-supplied PTS
 * expression for one input frame, rewrite frame->pts with the result,
 * and forward the frame to the first output link.
 *
 * @param inlink input link the frame arrived on (video or audio)
 * @param frame  frame whose pts is rewritten in place
 * @return the return value of ff_filter_frame() on the output link
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;   /* saved: frame->pts is overwritten below, but PREV_INPTS needs the input value */
    double d;

    /* Latch the stream start timestamp on the first frame seen.
     * NOTE(review): relies on VAR_STARTPTS being initialized to NAN
     * elsewhere (presumably in the filter's init) — confirm. */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }

    /* Per-frame expression variables; POS is NAN when no packet position is known (-1). */
    setpts->var_values[VAR_PTS    ] = TS2D(frame->pts);
    setpts->var_values[VAR_T      ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_POS    ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    setpts->var_values[VAR_RTCTIME] = av_gettime();   /* wallclock, microseconds */

    /* Media-type specific variables. */
    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
    } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_S]          = frame->nb_samples;
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
    }

    /* Evaluate the user expression and install the result as the new PTS. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    frame->pts = D2TS(d);

    /* Debug trace: emitted in pieces, terminated by the final "\n" below. */
    av_log(inlink->dst, AV_LOG_DEBUG, "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n",
           d2istr(d), TS2T(d, inlink->time_base));

    /* N counts frames for video, samples for audio (must run AFTER the log above). */
    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    /* Remember this frame's input/output timestamps for the next evaluation. */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT  ] = TS2T(frame->pts, inlink->time_base);

    /* NOTE(review): this uses setpts->type while the branches above use
     * inlink->type — presumably they always agree; confirm. */
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
/**
 * Legacy (AVFilterBufferRef-based) setpts callback: evaluate the PTS
 * expression for one buffer ref and forward a new ref with the rewritten
 * pts to the first output, via ff_filter_samples() for audio or
 * ff_start_frame() for video.
 *
 * @param inlink   input link the buffer arrived on
 * @param inpicref input buffer reference (its pts is left untouched)
 * @return 0 on success or a negative AVERROR code (AVERROR(ENOMEM) if
 *         the output reference cannot be created)
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *setpts = inlink->dst->priv;
    double d;
    /* New reference sharing all planes/perms (~0 mask); the output pts is
     * set on this ref so the input ref keeps the original timestamp. */
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* Latch the stream start timestamp on the first buffer seen.
     * NOTE(review): assumes VAR_STARTPTS starts out as NAN — confirm in init. */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(inpicref->pts, inlink->time_base);
    }

    /* Per-buffer expression variables; POS is NAN when the byte position is unknown (-1). */
    setpts->var_values[VAR_PTS] = TS2D(inpicref->pts);
    setpts->var_values[VAR_T  ] = TS2T(inpicref->pts, inlink->time_base);
    setpts->var_values[VAR_POS] = inpicref->pos == -1 ? NAN : inpicref->pos;

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
        break;
    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = inpicref->audio->nb_samples;
        break;
    }

    /* Evaluate the user expression and install the result on the output ref. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    outpicref->pts = D2TS(d);

    /* Record this buffer's input/output timestamps for the next evaluation. */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(inpicref ->pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
    setpts->var_values[VAR_PREV_OUTT  ] = TS2T(outpicref->pts, inlink->time_base);

    /* Debug trace; pts/t pairs are read back from the PREV_* slots just set. */
    av_dlog(inlink->dst,
            "n:%"PRId64" interlaced:%d nb_samples:%d nb_consumed_samples:%d "
            "pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
            (int64_t)setpts->var_values[VAR_N],
            (int)setpts->var_values[VAR_INTERLACED],
            (int)setpts->var_values[VAR_NB_SAMPLES],
            (int)setpts->var_values[VAR_NB_CONSUMED_SAMPLES],
            (int64_t)setpts->var_values[VAR_POS],
            (int64_t)setpts->var_values[VAR_PREV_INPTS],
            setpts->var_values[VAR_PREV_INT],
            (int64_t)setpts->var_values[VAR_PREV_OUTPTS],
            setpts->var_values[VAR_PREV_OUTT]);

    /* Unlike the AVFrame variant, N advances by 1 for audio too. */
    setpts->var_values[VAR_N] += 1.0;
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += inpicref->audio->nb_samples;
        return ff_filter_samples(inlink->dst->outputs[0], outpicref);
    } else
        return ff_start_frame (inlink->dst->outputs[0], outpicref);
}
/**
 * Refresh the vignette state for the current frame: re-evaluate the
 * angle/x0/y0 expressions against the frame variables and rebuild the
 * floating-point gain map (s->fmap) covering the whole input link area.
 *
 * @param s      filter private context (expressions, gain map, mode flags)
 * @param inlink input link providing dimensions, time base and frame count
 * @param frame  current frame, or NULL when no frame context is available
 *               (the frame variables are then set to NAN)
 */
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
    float *row = s->fmap;
    const int row_stride = s->fmap_linesize;
    const int inverted   = s->backward;
    int col, line;

    /* Frame-dependent expression variables; NAN when evaluated without a frame. */
    if (frame) {
        s->var_values[VAR_N]   = inlink->frame_count_out;
        s->var_values[VAR_T]   = TS2T(frame->pts, inlink->time_base);
        s->var_values[VAR_PTS] = TS2D(frame->pts);
    } else {
        s->var_values[VAR_N]   = NAN;
        s->var_values[VAR_T]   = NAN;
        s->var_values[VAR_PTS] = NAN;
    }

    s->angle = av_expr_eval(s->angle_pexpr, s->var_values, NULL);
    s->x0    = av_expr_eval(s->x0_pexpr,    s->var_values, NULL);
    s->y0    = av_expr_eval(s->y0_pexpr,    s->var_values, NULL);

    /* Any NAN result forces per-frame re-evaluation from here on. */
    if (isnan(s->x0) || isnan(s->y0) || isnan(s->angle))
        s->eval_mode = EVAL_MODE_FRAME;

    s->angle = av_clipf(s->angle, 0, M_PI_2);

    /* Rebuild the gain map; "backward" mode stores the reciprocal factor. */
    for (line = 0; line < inlink->h; line++) {
        for (col = 0; col < inlink->w; col++) {
            const double factor = get_natural_factor(s, col, line);
            row[col] = inverted ? 1. / factor : factor;
        }
        row += row_stride;
    }
}