static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int64_t pts;
    int ret, status;

    /* Propagate a status (e.g. EOF) set on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Process one queued input frame, if available. */
    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    /* On EOF, flush any delayed output before forwarding the status. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            int64_t out_pts = pts;

            ret = flush_frame(outlink, pts, &out_pts);
            ff_outlink_set_status(outlink, status, out_pts);
            return ret;
        }
    }

    /* If the output wants a frame, request one from the input. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
static int activate(AVFilterContext *ctx)
{
    FPSContext   *s       = ctx->priv;
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    int ret;
    int again = 0;
    int64_t status_pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* No buffered status: normal operation */
    if (!s->status) {

        /* Read available input frames if we have room */
        while (s->frames_count < 2 && ff_inlink_check_available_frame(inlink)) {
            ret = read_frame(ctx, s, inlink, outlink);
            if (ret < 0)
                return ret;
        }

        /* We do not yet have enough frames to produce output */
        if (s->frames_count < 2) {
            /* Check if we've hit EOF (or otherwise that an error status is set) */
            ret = ff_inlink_acknowledge_status(inlink, &s->status, &status_pts);
            if (ret > 0)
                update_eof_pts(ctx, s, inlink, outlink, status_pts);

            if (!ret) {
                /* If someone wants us to output, we'd better ask for more input */
                FF_FILTER_FORWARD_WANTED(outlink, inlink);
                return 0;
            }
        }
    }

    /* Buffered frames are available, so generate an output frame */
    if (s->frames_count > 0) {
        ret = write_frame(ctx, s, outlink, &again);
        /* Couldn't generate a frame, so schedule us to perform another step */
        if (again)
            ff_filter_set_ready(ctx, 100);
        return ret;
    }

    /* No frames left, so forward the status */
    if (s->status && s->frames_count == 0) {
        ff_outlink_set_status(outlink, s->status, s->next_pts);
        return 0;
    }

    return FFERROR_NOT_READY;
}
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Keep consuming input while the loop buffer still needs frames
     * (or no looping was requested at all). */
    if (!s->eof && (s->nb_frames < s->size || !s->loop)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    /* Latch EOF reported by the input. */
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    /* Nothing left to loop: forward EOF downstream. */
    if (s->eof && (s->loop == 0 || s->nb_frames < s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
        return 0;
    }

    if (!s->eof && (!s->size ||
                    (s->nb_frames < s->size) ||
                    (s->nb_frames >= s->size && s->loop == 0))) {
        /* Still passing frames through: ask for more input if wanted. */
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_frames == s->size) {
        /* Loop buffer is full: emit the next buffered frame. */
        return push_frame(ctx);
    }

    return FFERROR_NOT_READY;
}
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    AVFrame *buf = NULL;
    int i, ret;

    /* Move any newly arrived frames into the per-input sample FIFOs
     * and try to produce output. */
    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        if ((ret = ff_inlink_consume_frame(ctx->inputs[i], &buf)) > 0) {
            if (i == 0) {
                int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
                ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
                if (ret < 0) {
                    av_frame_free(&buf);
                    return ret;
                }
            }

            ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                                      buf->nb_samples);
            if (ret < 0) {
                av_frame_free(&buf);
                return ret;
            }
            av_frame_free(&buf);

            ret = output_frame(outlink);
            if (ret < 0)
                return ret;
        }
    }

    /* Acknowledge EOF on each input and update its state. */
    for (i = 0; i < s->nb_inputs; i++) {
        int64_t pts;
        int status;

        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            if (status == AVERROR_EOF) {
                if (i == 0) {
                    s->input_state[i] = 0;
                    if (s->nb_inputs == 1) {
                        ff_outlink_set_status(outlink, status, pts);
                        return 0;
                    }
                } else {
                    s->input_state[i] |= INPUT_EOF;
                    if (av_audio_fifo_size(s->fifos[i]) == 0)
                        s->input_state[i] = 0;
                }
            }
        }
    }

    /* Mixing is finished according to the active inputs / duration mode:
     * signal EOF downstream. */
    if (calc_active_inputs(s)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    /* The output wants a frame: request the needed amount of samples. */
    if (ff_outlink_frame_wanted(outlink)) {
        int wanted_samples;

        if (!(s->input_state[0] & INPUT_ON))
            return request_samples(ctx, 1);

        if (s->frame_list->nb_frames == 0) {
            ff_inlink_request_frame(ctx->inputs[0]);
            return 0;
        }
        av_assert0(s->frame_list->nb_frames > 0);

        wanted_samples = frame_list_next_frame_size(s->frame_list);

        return request_samples(ctx, wanted_samples);
    }

    return 0;
}
/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext      *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int nb_samples, ns, i;

    if (s->input_state[0] & INPUT_ON) {
        /* first input live: use the corresponding frame size */
        nb_samples = frame_list_next_frame_size(s->frame_list);
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                if (ns < nb_samples) {
                    if (!(s->input_state[i] & INPUT_EOF))
                        /* unclosed input with not enough samples */
                        return 0;
                    /* closed input to drain */
                    nb_samples = ns;
                }
            }
        }
    } else {
        /* first input closed: use the available samples */
        nb_samples = INT_MAX;
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                nb_samples = FFMIN(nb_samples, ns);
            }
        }
        if (nb_samples == INT_MAX) {
            ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
            return 0;
        }
    }

    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, nb_samples);

    calculate_scales(s, nb_samples);

    if (nb_samples == 0)
        return 0;

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);

            if (out_buf->format == AV_SAMPLE_FMT_FLT ||
                out_buf->format == AV_SAMPLE_FMT_FLTP) {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                                (float *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            } else {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_dmac_scalar((double *)out_buf->extended_data[p],
                                                (double *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
static int activate(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, status, available, wanted;
    AVFrame *in = NULL;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    if (s->response)
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);

    /* The impulse response is not fully received yet: keep requesting
     * frames from the second (IR) input. */
    if (!s->eof_coeffs) {
        AVFrame *ir = NULL;

        ret = check_ir(ctx->inputs[1], ir);
        if (ret < 0)
            return ret;

        if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF)
            s->eof_coeffs = 1;

        if (!s->eof_coeffs) {
            if (ff_outlink_frame_wanted(ctx->outputs[0]))
                ff_inlink_request_frame(ctx->inputs[1]);
            else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
                ff_inlink_request_frame(ctx->inputs[1]);
            return 0;
        }
    }

    /* All IR data received: convert it into the FIR coefficients. */
    if (!s->have_coeffs && s->eof_coeffs) {
        ret = convert_coeffs(ctx);
        if (ret < 0)
            return ret;
    }

    /* Consume input in multiples of the minimum partition size. */
    available = ff_inlink_queued_samples(ctx->inputs[0]);
    wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
    ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
    if (ret > 0)
        ret = fir_frame(s, in, outlink);
    if (ret < 0)
        return ret;

    /* Optional second output: a video frame visualizing the response. */
    if (s->response && s->have_coeffs) {
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base,
                                       ctx->outputs[1]->time_base);

        if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
            s->video->pts = new_pts;
            return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
        }
    }

    /* Enough input is still queued: schedule another activation. */
    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            if (s->response)
                ff_outlink_set_status(ctx->outputs[1], status, pts);
            return 0;
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (s->response &&
        ff_outlink_frame_wanted(ctx->outputs[1]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
static int activate(AVFilterContext *ctx)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int status, ret = 0;
    int64_t pts;

    /* A frame is already buffered: keep emitting zoomed/panned output from it. */
    if (s->in && ff_outlink_frame_wanted(outlink)) {
        double zoom = -1, dx = -1, dy = -1;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }

    /* No buffered frame: consume a new one and set up the expression variables. */
    if (!s->in && (ret = ff_inlink_consume_frame(inlink, &s->in)) > 0) {
        double zoom = -1, dx = -1, dy = -1, nb_frames;

        s->finished = 0;
        s->var_values[VAR_IN_W]  = s->var_values[VAR_IW] = s->in->width;
        s->var_values[VAR_IN_H]  = s->var_values[VAR_IH] = s->in->height;
        s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w;
        s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h;
        s->var_values[VAR_IN]    = inlink->frame_count_out + 1;
        s->var_values[VAR_ON]    = outlink->frame_count_in + 1;
        s->var_values[VAR_PX]    = s->x;
        s->var_values[VAR_PY]    = s->y;
        s->var_values[VAR_X]     = 0;
        s->var_values[VAR_Y]     = 0;
        s->var_values[VAR_PZOOM] = s->prev_zoom;
        s->var_values[VAR_ZOOM]  = 1;
        s->var_values[VAR_PDURATION] = s->prev_nb_frames;
        s->var_values[VAR_A]     = (double) s->in->width / s->in->height;
        s->var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
            (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
        s->var_values[VAR_DAR]   = s->var_values[VAR_A] * s->var_values[VAR_SAR];
        s->var_values[VAR_HSUB]  = 1 << s->desc->log2_chroma_w;
        s->var_values[VAR_VSUB]  = 1 << s->desc->log2_chroma_h;

        if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
                                          var_names, s->var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
            av_frame_free(&s->in);
            return ret;
        }

        s->var_values[VAR_DURATION] = s->nb_frames = nb_frames;

        ret = output_single_frame(ctx, s->in, s->var_values, s->current_frame,
                                  &zoom, &dx, &dy);
        if (ret < 0)
            return ret;
    }

    if (ret < 0) {
        return ret;
    } else if (s->finished && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        /* Current frame fully emitted and input finished: forward the status. */
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    } else {
        /* Done with the current frame: request more input if the output wants it. */
        if (ff_outlink_frame_wanted(outlink) && s->finished)
            ff_inlink_request_frame(inlink);
        return 0;
    }
}