/* Copy per-frame metadata tags from an AVFrame into this filter
 * instance's tag container, allocating the container on first use.
 * Compiles to a no-op when libavutil lacks AVFrame metadata support. */
static void get_metadata_from_av_frame(struct af_instance *af, AVFrame *frame)
{
#if HAVE_AVFRAME_METADATA
    struct priv *priv = af->priv;
    if (priv->metadata == NULL)
        priv->metadata = talloc_zero(priv, struct mp_tags);
    mp_tags_copy_from_av_dictionary(priv->metadata, av_frame_get_metadata(frame));
#endif
}
/* Drain filtered frames from libavfilter's output buffersinks into the
 * corresponding downstream mp_pins.
 *
 * For each output pad whose pin currently wants data, pull one frame from
 * the pad's buffersink, convert it to an mp_frame, apply any pts/fps
 * fixups, and write it to the pin. EOF is tracked per pad via
 * pad->buffer_is_eof so the buffersink is not queried again after EOF.
 *
 * Returns true if any pad made progress (a frame was written, or a pad
 * transitioned to EOF), so the caller knows another iteration may help.
 */
static bool read_output_pads(struct lavfi *c)
{
    bool progress = false;
    assert(c->initialized);
    for (int n = 0; n < c->num_out_pads; n++) {
        struct lavfi_pad *pad = c->out_pads[n];
        // Skip pads whose downstream pin cannot accept a frame right now.
        if (!mp_pin_in_needs_data(pad->pin))
            continue;
        assert(pad->buffer);
        int r = AVERROR_EOF;
        // Once a pad hit EOF, don't query the buffersink again; the
        // preset AVERROR_EOF routes us to the EOF branch below.
        if (!pad->buffer_is_eof)
            r = av_buffersink_get_frame_flags(pad->buffer, c->tmp_frame, 0);
        if (r >= 0) {
#if LIBAVUTIL_VERSION_MICRO >= 100
            // AVFrame.metadata is only available on full libavutil
            // (FFmpeg, micro >= 100), not on forks without it.
            mp_tags_copy_from_av_dictionary(pad->metadata, c->tmp_frame->metadata);
#endif
            struct mp_frame frame =
                mp_frame_from_av(pad->type, c->tmp_frame, &pad->timebase);
            if (c->emulate_audio_pts && frame.type == MP_FRAME_AUDIO) {
                // Rebase the output pts onto the input pts: take the
                // graph-introduced offset (output time minus consumed
                // input time, both in seconds) and add it to c->in_pts.
                // If in_pts is unknown, the offset is dropped.
                AVFrame *avframe = c->tmp_frame;
                struct mp_aframe *aframe = frame.data;
                double in_time = c->in_samples * av_q2d(c->in_pads[0]->timebase);
                double out_time = avframe->pts * av_q2d(pad->timebase);
                mp_aframe_set_pts(aframe, c->in_pts +
                    (c->in_pts != MP_NOPTS_VALUE ? (out_time - in_time) : 0));
            }
            if (frame.type == MP_FRAME_VIDEO) {
                struct mp_image *vframe = frame.data;
                vframe->nominal_fps =
                    av_q2d(av_buffersink_get_frame_rate(pad->buffer));
            }
            // Release our reference to the temp AVFrame; presumably
            // mp_frame_from_av refs/copies what it needs — TODO confirm.
            av_frame_unref(c->tmp_frame);
            if (frame.type) {
                mp_pin_in_write(pad->pin, frame);
            } else {
                // frame.type == 0: conversion to mp_frame failed.
                MP_ERR(c, "could not use filter output\n");
                mp_frame_unref(&frame);
            }
            progress = true;
        } else if (r == AVERROR(EAGAIN)) {
            // We expect that libavfilter will request input on one of the
            // input pads (via av_buffersrc_get_nb_failed_requests()).
        } else if (r == AVERROR_EOF) {
            // Propagate EOF downstream exactly once per pad, unless we are
            // recovering from a drain (draining_recover suppresses it).
            if (!c->draining_recover && !pad->buffer_is_eof)
                mp_pin_in_write(pad->pin, MP_EOF_FRAME);
            // The first transition to EOF counts as progress.
            if (!pad->buffer_is_eof)
                progress = true;
            pad->buffer_is_eof = true;
        } else {
            // Real error - ignore it.
            MP_ERR(c, "error on filtering (%d)\n", r);
        }
    }
    return progress;
}