/* Pull one frame from the input link and stash it in the internal buffer,
 * rewriting its pts into the output timebase. Returns 1 on success or a
 * negative AVERROR code. Only call when buffer space is known to exist. */
static int read_frame(AVFilterContext *ctx, FPSContext *s, AVFilterLink *inlink, AVFilterLink *outlink)
{
    AVFrame *f;
    int64_t orig_pts;
    int err;

    /* There is room for at most two buffered frames. */
    av_assert1(s->frames_count < 2);

    err = ff_inlink_consume_frame(inlink, &f);
    /* Caller is expected to have verified a frame is available first. */
    av_assert1(err);
    if (err < 0)
        return err;

    /* Rescale the timestamp into the output timebase; the in/out offsets
     * reproduce the rounding of the legacy fps filter's start_time handling. */
    orig_pts = f->pts;
    f->pts   = s->out_pts_off + av_rescale_q_rnd(orig_pts - s->in_pts_off,
                                                 inlink->time_base,
                                                 outlink->time_base,
                                                 s->rounding | AV_ROUND_PASS_MINMAX);

    av_log(ctx, AV_LOG_DEBUG, "Read frame with in pts %"PRId64", out pts %"PRId64"\n",
           orig_pts, f->pts);

    s->frames[s->frames_count++] = f;
    s->frames_in++;
    return 1;
}
/* activate(): driver for a single-input, single-output filter.
 * Forwards one frame per call via filter_frame(); on input EOF, flushes
 * any pending output and propagates the status downstream. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int64_t pts;
    int ret, status;

    /* Propagate a status (e.g. EOF) already set on the output back upstream. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            int64_t out_pts = pts;

            /* Emit any buffered/trailing data before signalling EOF;
             * flush_frame may advance out_pts past the input's last pts. */
            ret = flush_frame(outlink, pts, &out_pts);
            ff_outlink_set_status(outlink, status, out_pts);
            return ret;
        }
        /* NOTE(review): a non-EOF status is acknowledged but not forwarded
         * here; presumably the framework only delivers AVERROR_EOF — confirm. */
    }

    /* Relay downstream demand to the input; the macro returns if it acts. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* activate(): merge one frame from the main input with one from the alpha
 * input. Holds at most one pending frame per input in the context; once
 * both are present, draws the alpha into the main frame and sends it on. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    AlphaMergeContext *s = ctx->priv;
    int ret;

    /* Push any status already set on the output back to both inputs. */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* Fetch the next main frame if we are not already holding one. */
    if (!s->main_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->main_frame);
        if (ret < 0)
            return ret;
    }

    /* Likewise for the alpha-plane frame. */
    if (!s->alpha_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->alpha_frame);
        if (ret < 0)
            return ret;
    }

    /* With a pair in hand, composite and emit the (modified) main frame. */
    if (s->main_frame && s->alpha_frame) {
        draw_frame(ctx, s->main_frame, s->alpha_frame);
        ret = ff_filter_frame(outlink, s->main_frame);
        av_frame_free(&s->alpha_frame);
        s->main_frame = NULL; /* ownership was passed to ff_filter_frame */
        return ret;
    }

    /* Forward EOF/errors from either input; the macros return if they do. */
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    /* Downstream wants data: request a frame from whichever input is
     * missing its half of the pair and has not reached EOF.
     * NOTE(review): ff_outlink_get_status() is applied to input links here
     * — presumably it reads the link's input status; confirm intent. */
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->main_frame) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->alpha_frame) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
/* activate(): delay the stream until a wall-clock cue time.
 * s->status is a small state machine:
 *   0 - waiting for the first frame
 *   1 - passing frames through during the preroll window
 *   2 - buffering frames in the queue until the cue (or buffer duration)
 *   3 - busy-waiting for the cue instant
 *   4 - draining the buffered queue
 *   5 - passing frames straight through */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    CueContext *s = ctx->priv;
    int64_t pts;
    AVFrame *frame = NULL;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Only consume input in states that accept frames (0..2 and 5). */
    if (s->status < 3 || s->status == 5) {
        int ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (frame)
            pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    }

    /* State 0: record the stream's first timestamp. */
    if (!s->status && frame) {
        s->first_pts = pts;
        s->status++;
    }
    /* State 1: pass frames through until the preroll window has elapsed. */
    if (s->status == 1 && frame) {
        if (pts - s->first_pts < s->preroll)
            return ff_filter_frame(outlink, frame);
        s->first_pts = pts;
        s->status++;
    }
    /* State 2: queue frames until the buffer duration fills or the cue
     * wall-clock time has already passed. */
    if (s->status == 2 && frame) {
        int ret = ff_framequeue_add(&s->queue, frame);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }
        frame = NULL;
        if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
            s->status++;
    }
    /* State 3: sleep in short slices until the cue instant.
     * NOTE(review): this blocks the filter thread until the cue time. */
    if (s->status == 3) {
        int64_t diff;
        while ((diff = (av_gettime() - s->cue)) < 0)
            av_usleep(av_clip(-diff / 2, 100, 1000000));
        s->status++;
    }
    /* State 4: drain the queued frames, one per activation. */
    if (s->status == 4) {
        if (ff_framequeue_queued_frames(&s->queue))
            return ff_filter_frame(outlink, ff_framequeue_take(&s->queue));
        s->status++;
    }
    /* State 5: normal passthrough. */
    if (s->status == 5 && frame)
        return ff_filter_frame(outlink, frame);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* activate(): loop filter driver. Consumes input frames until the loop
 * buffer holds s->size frames (or unconditionally when looping is off),
 * then replays the buffered segment via push_frame(). */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Keep reading while the loop buffer is not full (or no looping). */
    if (!s->eof && (s->nb_frames < s->size || !s->loop)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    /* Latch input EOF into the context. */
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    /* Input ended with nothing (more) to loop: signal EOF downstream. */
    if (s->eof && (s->loop == 0 || s->nb_frames < s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
        return 0;
    }

    /* Still filling the buffer (or passthrough mode): relay demand to the
     * input; otherwise the buffer is full and looping: emit a looped frame. */
    if (!s->eof && (!s->size ||
                    (s->nb_frames < s->size) ||
                    (s->nb_frames >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_frames == s->size) {
        return push_frame(ctx);
    }

    return FFERROR_NOT_READY;
}
/* activate(): sidechain compressor driver. Buffers the main input
 * (inputs[0]) and the sidechain input (inputs[1]) in per-input audio
 * FIFOs, then processes and emits as many samples as are available on
 * BOTH inputs. Returns 0 or a negative AVERROR code. */
static int activate(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, nb_samples;
    double *dst;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain whatever is pending on each input into its FIFO.
     * Fix: the return value of av_audio_fifo_write() was previously
     * ignored; it can fail (e.g. AVERROR(ENOMEM) on reallocation). */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        ret = av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                                  in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        ret = av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                                  in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    /* Process only as many samples as both FIFOs can supply. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]),
                       av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            /* nb_samples <= fifo size, so this read cannot come up short. */
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        out->pts = s->pts;
        s->pts += nb_samples;

        compressor(s, (double *)in[0]->data[0], dst,
                   (double *)in[1]->data[0], nb_samples,
                   s->level_in, s->level_sc,
                   ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    /* Forward EOF from either input; the macros return when they act. */
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], ctx->outputs[0]);

    /* Downstream wants output: request more data on whichever side ran dry. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!av_audio_fifo_size(s->fifo[0]))
            ff_inlink_request_frame(ctx->inputs[0]);
        if (!av_audio_fifo_size(s->fifo[1]))
            ff_inlink_request_frame(ctx->inputs[1]);
    }

    return 0;
}
/* activate(): audio mixer driver. Consumes one frame per input, tracks
 * frame boundaries from the first input in frame_list, writes samples to
 * per-input FIFOs, mixes via output_frame(), and manages per-input EOF. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    AVFrame *buf = NULL;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        if ((ret = ff_inlink_consume_frame(ctx->inputs[i], &buf)) > 0) {
            /* The first input drives output frame sizing and timestamps. */
            if (i == 0) {
                int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);

                ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
                if (ret < 0) {
                    av_frame_free(&buf);
                    return ret;
                }
            }

            ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                                      buf->nb_samples);
            if (ret < 0) {
                av_frame_free(&buf);
                return ret;
            }

            av_frame_free(&buf);

            ret = output_frame(outlink);
            if (ret < 0)
                return ret;
        }
    }

    /* Handle EOF per input: the first input's EOF ends mixing (immediately
     * if it is the only input); other inputs stay active until their FIFO
     * has been drained. */
    for (i = 0; i < s->nb_inputs; i++) {
        int64_t pts;
        int status;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            if (status == AVERROR_EOF) {
                if (i == 0) {
                    s->input_state[i] = 0;
                    if (s->nb_inputs == 1) {
                        ff_outlink_set_status(outlink, status, pts);
                        return 0;
                    }
                } else {
                    s->input_state[i] |= INPUT_EOF;
                    if (av_audio_fifo_size(s->fifos[i]) == 0) {
                        s->input_state[i] = 0;
                    }
                }
            }
        }
    }

    /* All inputs finished: propagate EOF with the final timestamp. */
    if (calc_active_inputs(s)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (ff_outlink_frame_wanted(outlink)) {
        int wanted_samples;

        /* First input already off: request a nominal single sample from
         * the remaining inputs to keep data flowing. */
        if (!(s->input_state[0] & INPUT_ON))
            return request_samples(ctx, 1);

        if (s->frame_list->nb_frames == 0) {
            ff_inlink_request_frame(ctx->inputs[0]);
            return 0;
        }
        av_assert0(s->frame_list->nb_frames > 0);
        /* Ask the other inputs for enough samples to complete the next
         * output frame as sized by the first input. */
        wanted_samples = frame_list_next_frame_size(s->frame_list);

        return request_samples(ctx, wanted_samples);
    }

    return 0;
}
/* activate(): zoompan driver. Each input frame is held in s->in and used
 * to synthesize s->nb_frames output frames (per the duration expression),
 * with the zoom/x/y expressions re-evaluated for every generated frame. */
static int activate(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int status, ret = 0;
    int64_t pts;

    /* A frame is in progress: emit the next synthesized output frame. */
    if (s->in && ff_outlink_frame_wanted(outlink)) {
        double zoom = -1, dx = -1, dy = -1;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }

    /* No frame held: take the next input frame and initialize the
     * expression variables for it. */
    if (!s->in && (ret = ff_inlink_consume_frame(inlink, &s->in)) > 0) {
        double zoom = -1, dx = -1, dy = -1, nb_frames;

        s->finished = 0;
        s->var_values[VAR_IN_W]  = s->var_values[VAR_IW] = s->in->width;
        s->var_values[VAR_IN_H]  = s->var_values[VAR_IH] = s->in->height;
        s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
        s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
        s->var_values[VAR_IN]    = inlink->frame_count_out + 1;
        s->var_values[VAR_ON]    = outlink->frame_count_in + 1;
        s->var_values[VAR_PX]    = s->x;
        s->var_values[VAR_PY]    = s->y;
        s->var_values[VAR_X]     = 0;
        s->var_values[VAR_Y]     = 0;
        s->var_values[VAR_PZOOM] = s->prev_zoom;
        s->var_values[VAR_ZOOM]  = 1;
        s->var_values[VAR_PDURATION] = s->prev_nb_frames;
        s->var_values[VAR_A]     = (double) s->in->width / s->in->height;
        s->var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
            (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
        s->var_values[VAR_DAR]   = s->var_values[VAR_A] * s->var_values[VAR_SAR];
        s->var_values[VAR_HSUB]  = 1 << s->desc->log2_chroma_w;
        s->var_values[VAR_VSUB]  = 1 << s->desc->log2_chroma_h;

        /* Evaluate how many output frames this input frame should produce. */
        if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
                                          var_names, s->var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
            av_frame_free(&s->in);
            return ret;
        }

        s->var_values[VAR_DURATION] = s->nb_frames = nb_frames;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }
    if (ret < 0) {
        return ret;
    } else if (s->finished && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* Input ended and the current frame is fully emitted: forward EOF. */
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    } else {
        /* Only ask for more input once the current frame is exhausted. */
        if (ff_outlink_frame_wanted(outlink) && s->finished)
            ff_inlink_request_frame(inlink);
        return 0;
    }
}