/**
 * Deliver one frame from the link's FIFO to the destination filter.
 *
 * When the link requests a minimum number of samples, a frame holding
 * between min_samples and max_samples is assembled from the FIFO via
 * take_samples(); otherwise the next queued frame is taken as-is.
 * On filtering failure the error is recorded as the link's output status
 * (unless that status is already set to the same value); on success the
 * destination filter is re-scheduled in case more input is available.
 *
 * @param link  input link whose FIFO must contain at least one frame
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_filter_frame_to_filter(AVFilterLink *link)
{
    AVFilterContext *dst = link->dst;
    AVFrame *frame;
    int ret;

    av_assert1(ff_framequeue_queued_frames(&link->fifo));
    if (!link->min_samples) {
        frame = ff_framequeue_take(&link->fifo);
    } else {
        int want = link->min_samples;
        /* Once input is at EOF, do not insist on min_samples: accept
           whatever is left in the FIFO. */
        if (link->status_in)
            want = FFMIN(want, ff_framequeue_queued_samples(&link->fifo));
        ret = take_samples(link, want, link->max_samples, &frame);
        if (ret < 0)
            return ret;
    }
    /* The filter will soon have received a new frame, that may allow it to
       produce one or more: unblock its outputs. */
    filter_unblock(dst);
    ret = ff_filter_frame_framed(link, frame);
    if (ret >= 0 || ret == link->status_out) {
        /* Run once again, to see if several frames were available, or if
           the input status has also changed, or any other reason. */
        ff_filter_set_ready(dst, 300);
    } else {
        ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
    }
    return ret;
}
/**
 * Activate callback for the cue filter: hold back the stream until a given
 * wallclock instant (s->cue, compared against av_gettime()), after an
 * optional pass-through preroll of s->preroll and while buffering up to
 * s->buffer worth of frames.
 *
 * s->status is a state machine advanced *in order within a single call*
 * (each block may increment status and deliberately fall into the next):
 *   0: waiting for the first frame (records s->first_pts)
 *   1: preroll — frames within s->preroll of first_pts are forwarded
 *   2: buffering — frames are queued until s->buffer is covered or the
 *      cue instant has already passed
 *   3: sleep in short slices until the cue instant is reached
 *   4: drain the internal queue, one frame per activation
 *   5: steady state — forward input frames directly
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    CueContext *s = ctx->priv;
    int64_t pts;
    AVFrame *frame = NULL;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Only consume input in states that can use a new frame right away;
       states 3 and 4 work off the internal queue instead. */
    if (s->status < 3 || s->status == 5) {
        int ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (frame)
            /* pts in microseconds; only read below under "frame" guards,
               so it is never used uninitialized */
            pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    }

    if (!s->status && frame) {
        /* First frame seen: remember where the stream starts. */
        s->first_pts = pts;
        s->status++;
    }
    if (s->status == 1 && frame) {
        /* Preroll: pass frames straight through until s->preroll elapsed. */
        if (pts - s->first_pts < s->preroll)
            return ff_filter_frame(outlink, frame);
        /* Preroll done; restart the pts reference for the buffering phase. */
        s->first_pts = pts;
        s->status++;
    }
    if (s->status == 2 && frame) {
        /* Buffering: the queue takes ownership of the frame. */
        int ret = ff_framequeue_add(&s->queue, frame);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }
        frame = NULL;
        /* Stop buffering once s->buffer is covered or the cue time passed. */
        if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
            s->status++;
    }
    if (s->status == 3) {
        /* Sleep until the cue instant, in bounded slices (100 us .. 1 s)
           of half the remaining time, to avoid oversleeping. */
        int64_t diff;
        while ((diff = (av_gettime() - s->cue)) < 0)
            av_usleep(av_clip(-diff / 2, 100, 1000000));
        s->status++;
    }
    if (s->status == 4) {
        /* Drain: emit one buffered frame per activation until empty. */
        if (ff_framequeue_queued_frames(&s->queue))
            return ff_filter_frame(outlink, ff_framequeue_take(&s->queue));
        s->status++;
    }
    if (s->status == 5 && frame)
        /* Steady state: plain pass-through. */
        return ff_filter_frame(outlink, frame);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/**
 * Extract an audio frame holding between min and max samples from the
 * link's FIFO, merging queued frames (and splitting the last one) as
 * needed.  Ownership of *rframe passes to the caller.
 *
 * Note: this function relies on no format changes and must only be
 * called with enough samples queued (samples_ready()).
 *
 * @param link   audio link whose FIFO is consumed
 * @param min    minimum number of samples to return
 * @param max    maximum number of samples to return
 * @param rframe on success, set to the extracted frame
 * @return 0 on success, a negative AVERROR code on failure
 */
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
                        AVFrame **rframe)
{
    AVFrame *frame0, *frame, *buf;
    unsigned nb_samples, nb_frames, i, p;
    int ret;

    av_assert1(samples_ready(link));
    frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
    /* Fast path: the first queued frame alone fits the request. */
    if (frame->nb_samples >= min && frame->nb_samples < max) {
        *rframe = ff_framequeue_take(&link->fifo);
        return 0;
    }
    /* Count how many whole queued frames fit into max samples. */
    nb_frames = 0;
    nb_samples = 0;
    while (1) {
        if (nb_samples + frame->nb_samples > max) {
            /* Next frame would overshoot; if the whole frames alone do not
               reach min, fill up to max by splitting that frame below. */
            if (nb_samples < min)
                nb_samples = max;
            break;
        }
        nb_samples += frame->nb_samples;
        nb_frames++;
        if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
            break;
        frame = ff_framequeue_peek(&link->fifo, nb_frames);
    }

    buf = ff_get_audio_buffer(link, nb_samples);
    if (!buf)
        return AVERROR(ENOMEM);
    /* Metadata and timestamp come from the first source frame. */
    ret = av_frame_copy_props(buf, frame0);
    if (ret < 0) {
        av_frame_free(&buf);
        return ret;
    }
    buf->pts = frame0->pts;

    /* Copy and release the whole frames. */
    p = 0;
    for (i = 0; i < nb_frames; i++) {
        frame = ff_framequeue_take(&link->fifo);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
                        frame->nb_samples, link->channels, link->format);
        p += frame->nb_samples;
        av_frame_free(&frame);
    }
    if (p < nb_samples) {
        /* Split the next queued frame: take its first n samples, then shift
           its remainder to the front and fix up pts and FIFO accounting. */
        unsigned n = nb_samples - p;
        frame = ff_framequeue_peek(&link->fifo, 0);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
                        link->channels, link->format);
        frame->nb_samples -= n;
        /* NOTE(review): in-place copy with dst offset 0 and src offset n —
           the ranges overlap when n < nb_samples remaining; this relies on
           av_samples_copy copying forward (memcpy-like). TODO confirm. */
        av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
                        frame->nb_samples, link->channels, link->format);
        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate),
                                       link->time_base);
        ff_framequeue_update_peeked(&link->fifo, 0);
        ff_framequeue_skip_samples(&link->fifo, n);
    }

    *rframe = buf;
    return 0;
}