// Drain all pending output from the filter graph, then choose which source
// should be fed next: the one whose buffersrc accumulated the most failed
// frame requests (i.e. the most starved input).
// On success, *result is the chosen source, or NULL when every source has
// run out of frames. Returns VOD_OK, or an error status on failure.
static vod_status_t
audio_filter_choose_source(audio_filter_state_t* state, audio_filter_source_t** result)
{
	audio_filter_source_t* sources_cur;
	audio_filter_source_t* best_source;
	int failed_requests_max;
	int failed_requests;
	int ret;
	vod_status_t rc;

	// Pump the graph: while it can produce output, read frames from the
	// sink so its internal queues are flushed before we pick a source.
	for (;;)
	{
		ret = avfilter_graph_request_oldest(state->filter_graph);
		if (ret < 0)
		{
			break;
		}

		rc = audio_filter_read_filter_sink(state);
		if (rc != VOD_OK)
		{
			return rc;
		}
	}

	// EAGAIN means the graph needs more input; anything else is a real error.
	if (ret != AVERROR(EAGAIN))
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_choose_source: avfilter_graph_request_oldest failed %d", ret);
		return VOD_UNEXPECTED;
	}

	// Start at -1 so a source with zero failed requests can still be chosen.
	failed_requests_max = -1;
	best_source = NULL;
	for (sources_cur = state->sources; sources_cur < state->sources_end; sources_cur++)
	{
		// Current frame part exhausted: advance to the next part, or skip
		// this source altogether when no parts remain.
		if (sources_cur->cur_frame >= sources_cur->cur_frame_part.last_frame)
		{
			if (sources_cur->cur_frame_part.next == NULL)
			{
				continue;
			}

			sources_cur->cur_frame_part = *sources_cur->cur_frame_part.next;
			sources_cur->cur_frame = sources_cur->cur_frame_part.first_frame;
		}

		// Prefer the source the graph asked for most often without success.
		failed_requests = av_buffersrc_get_nb_failed_requests(sources_cur->buffer_src);
		if (failed_requests > failed_requests_max)
		{
			failed_requests_max = failed_requests;
			best_source = sources_cur;
		}
	}

	*result = best_source;
	return VOD_OK;
}
struct lsInput* get_best_input(struct liveStream *ctx) { struct lsInput* input = NULL; struct lsInput* best_input = NULL; int nb_requests = 0; int nb_requests_max = 0; int ret = 0; if ( ctx->have_filter ) { take_filter_lock(&ctx->filter_lock); ret = avfilter_graph_request_oldest(ctx->filter_graph); give_filter_lock(&ctx->filter_lock); if(ret >= 0) { reap_filter(ctx); } for(input = ctx->inputs;input;input = input->next) { if(input->eof_reached) continue; take_filter_lock(&ctx->filter_lock); nb_requests = av_buffersrc_get_nb_failed_requests(input->in_filter); give_filter_lock(&ctx->filter_lock); if (nb_requests > nb_requests_max) { nb_requests_max = nb_requests; best_input = input; } } } else { for(input = ctx->inputs;input;input = input->next) { if(input->eof_reached) continue; /* XXX select from PTS or DTS */ best_input = ctx->inputs; } } return best_input; }
static bool feed_input_pads(struct lavfi *c) { bool progress = false; bool was_draining = c->draining_recover; assert(c->initialized); for (int n = 0; n < c->num_in_pads; n++) { struct lavfi_pad *pad = c->in_pads[n]; bool requested = true; #if LIBAVFILTER_VERSION_MICRO >= 100 requested = av_buffersrc_get_nb_failed_requests(pad->buffer) > 0; #endif // Always request a frame after EOF so that we can know if the EOF state // changes (e.g. for sparse streams with midstream EOF). requested |= pad->buffer_is_eof; if (requested) read_pad_input(c, pad); if (!pad->pending.type || c->draining_recover) continue; if (pad->buffer_is_eof) { MP_WARN(c, "eof state changed on %s\n", pad->name); c->draining_recover = true; send_global_eof(c); continue; } if (pad->pending.type == MP_FRAME_AUDIO && !c->warned_nospeed) { struct mp_aframe *aframe = pad->pending.data; if (mp_aframe_get_speed(aframe) != 1.0) { MP_ERR(c, "speed changing filters before libavfilter are not " "supported and can cause desyncs\n"); c->warned_nospeed = true; } } AVFrame *frame = mp_frame_to_av(pad->pending, &pad->timebase); bool eof = pad->pending.type == MP_FRAME_EOF; if (c->emulate_audio_pts && pad->pending.type == MP_FRAME_AUDIO) { struct mp_aframe *aframe = pad->pending.data; c->in_pts = mp_aframe_end_pts(aframe); frame->pts = c->in_samples; // timebase is 1/sample_rate c->in_samples += frame->nb_samples; } mp_frame_unref(&pad->pending); if (!frame && !eof) { MP_FATAL(c, "out of memory or unsupported format\n"); continue; } pad->buffer_is_eof = !frame; if (av_buffersrc_add_frame(pad->buffer, frame) < 0) MP_FATAL(c, "could not pass frame to filter\n"); av_frame_free(&frame); progress = true; } if (!was_draining && c->draining_recover) progress = true; return progress; }
// Returns how many times the filter graph requested a frame from this
// buffer source while no frame was available. Requires a valid context.
unsigned BufferSrcFilterContext::getFailedRequestsCount()
{
    assert(isValid());
    auto *const srcCtx = getAVFilterContext();
    return av_buffersrc_get_nb_failed_requests(srcCtx);
}