/*
 * activate() callback for the alphamerge filter.
 *
 * Pairs one frame from the main video input (inputs[0]) with one frame from
 * the alpha input (inputs[1]); draw_frame() merges the alpha data into the
 * main frame, which is then sent downstream.
 *
 * Returns the ff_filter_frame() result on progress, 0 when a frame request
 * has been forwarded upstream, FFERROR_NOT_READY when no progress is
 * possible, or a negative AVERROR on failure.
 */
static int activate(AVFilterContext *ctx) {
    AVFilterLink *outlink = ctx->outputs[0];
    AlphaMergeContext *s = ctx->priv;
    int ret;

    /* Propagate a status (e.g. EOF) set on the output back to both inputs. */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* Pull at most one frame from each input; each is cached in the filter
     * context so a frame from one input can wait for its partner. */
    if (!s->main_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->main_frame);
        if (ret < 0)
            return ret;
    }
    if (!s->alpha_frame) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->alpha_frame);
        if (ret < 0)
            return ret;
    }

    /* A matched pair is available: merge and emit.  ff_filter_frame() takes
     * ownership of main_frame, so the pointer is only cleared here, while the
     * alpha frame is freed explicitly. */
    if (s->main_frame && s->alpha_frame) {
        draw_frame(ctx, s->main_frame, s->alpha_frame);
        ret = ff_filter_frame(outlink, s->main_frame);
        av_frame_free(&s->alpha_frame);
        s->main_frame = NULL;
        return ret;
    }

    /* Forward any EOF/error status from either input to the output. */
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    /* The output wants a frame: request one from whichever input is still
     * live and has no cached frame yet (main input first). */
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->main_frame) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->alpha_frame) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }
    return FFERROR_NOT_READY;
}
/*
 * activate() callback for the sidechain compressor.
 *
 * Buffers audio from the main input (inputs[0]) and the sidechain input
 * (inputs[1]) into two FIFOs, then, whenever both FIFOs hold samples,
 * compresses the main signal driven by the sidechain level via compressor().
 *
 * Returns 0 on success or a negative AVERROR on failure.
 */
static int activate(AVFilterContext *ctx) {
    SidechainCompressContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, nb_samples;
    double *dst;

    /* Propagate a status set on the output back to both inputs. */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain any pending frame from each input into its FIFO.
     * ret > 0: a frame was consumed; ret < 0: error; 0: nothing queued. */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                            in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                            in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    /* Process only as many samples as BOTH FIFOs can supply in lock-step. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]),
                       av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                /* av_frame_free(NULL-holder) is a no-op, so freeing both
                 * slots unconditionally is safe. */
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        /* Output pts counts total samples emitted so far. */
        out->pts = s->pts;
        s->pts += nb_samples;

        compressor(s, (double *)in[0]->data[0], dst,
                   (double *)in[1]->data[0], nb_samples,
                   s->level_in, s->level_sc,
                   ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    /* Forward any EOF/error status from either input to the output. */
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], ctx->outputs[0]);

    /* If the output wants data, request more from whichever input's FIFO has
     * run dry. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!av_audio_fifo_size(s->fifo[0]))
            ff_inlink_request_frame(ctx->inputs[0]);
        if (!av_audio_fifo_size(s->fifo[1]))
            ff_inlink_request_frame(ctx->inputs[1]);
    }
    return 0;
}
/*
 * activate() callback for the FIR (finite impulse response) audio filter.
 *
 * Runs in two phases: first it waits for the impulse-response input
 * (inputs[1]) to reach EOF so the full set of coefficients is available,
 * then it converts them once (convert_coeffs) and filters the main audio
 * stream (inputs[0]) in multiples of the minimum partition size.  When the
 * "response" option is set, outputs[1] additionally carries a video
 * visualization of the impulse response.
 *
 * Returns a filter-frame result on progress, 0 when a request/status has
 * been forwarded, FFERROR_NOT_READY when no progress is possible, or a
 * negative AVERROR on failure.
 */
static int activate(AVFilterContext *ctx) {
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, status, available, wanted;
    AVFrame *in = NULL;
    int64_t pts;

    /* Propagate output status back to the inputs (both outputs if the
     * response video output exists). */
    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    if (s->response)
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);

    /* Phase 1: collect impulse-response frames until inputs[1] hits EOF —
     * only then are the coefficients complete. */
    if (!s->eof_coeffs) {
        AVFrame *ir = NULL;

        /* NOTE(review): ir stays NULL and is passed by value, so check_ir()
         * presumably consumes frames from the link itself and ignores this
         * argument — confirm against check_ir()'s definition. */
        ret = check_ir(ctx->inputs[1], ir);
        if (ret < 0)
            return ret;

        if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF)
            s->eof_coeffs = 1;

        /* Still collecting: if either output wants data, keep requesting IR
         * frames and come back later. */
        if (!s->eof_coeffs) {
            if (ff_outlink_frame_wanted(ctx->outputs[0]))
                ff_inlink_request_frame(ctx->inputs[1]);
            else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
                ff_inlink_request_frame(ctx->inputs[1]);
            return 0;
        }
    }

    /* Phase 2 (one-time): convert the gathered IR into filter coefficients. */
    if (!s->have_coeffs && s->eof_coeffs) {
        ret = convert_coeffs(ctx);
        if (ret < 0)
            return ret;
    }

    /* Consume audio in the largest available multiple of min_part_size
     * (at least one partition's worth). */
    available = ff_inlink_queued_samples(ctx->inputs[0]);
    wanted = FFMAX(s->min_part_size,
                   (available / s->min_part_size) * s->min_part_size);
    ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
    if (ret > 0)
        ret = fir_frame(s, in, outlink);
    if (ret < 0)
        return ret;

    /* Emit at most one response-visualization frame per new timestamp. */
    if (s->response && s->have_coeffs) {
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base,
                                       ctx->outputs[1]->time_base);

        if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
            s->video->pts = new_pts;
            return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
        }
    }

    /* More full partitions are already queued: ask to be scheduled again. */
    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    /* Propagate EOF from the audio input to the output(s). */
    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            if (s->response)
                ff_outlink_set_status(ctx->outputs[1], status, pts);
            return 0;
        }
    }

    /* Either output wants data and the audio input is still live: request
     * more input. */
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (s->response &&
        ff_outlink_frame_wanted(ctx->outputs[1]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    return FFERROR_NOT_READY;
}