// Fill mpctx->next_frames[] with a newly filtered or decoded image. // returns VD_* code static int video_output_image(struct MPContext *mpctx, double endpts) { bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING; if (mpctx->d_video->header->attached_picture) { if (vo_has_frame(mpctx->video_out)) return VD_EOF; if (mpctx->num_next_frames >= 1) return VD_NEW_FRAME; int r = video_decode_and_filter(mpctx); video_filter(mpctx, true); // force EOF filtering (avoid decoding more) mpctx->next_frames[0] = vf_read_output_frame(mpctx->d_video->vfilter); if (mpctx->next_frames[0]) { mpctx->next_frames[0]->pts = MP_NOPTS_VALUE; mpctx->num_next_frames = 1; } return r <= 0 ? VD_EOF : VD_PROGRESS; } if (have_new_frame(mpctx, false)) return VD_NEW_FRAME; // Get a new frame if we need one. int r = VD_PROGRESS; if (needs_new_frame(mpctx)) { // Filter a new frame. r = video_decode_and_filter(mpctx); if (r < 0) return r; // error struct mp_image *img = vf_read_output_frame(mpctx->d_video->vfilter); if (img) { // Always add these; they make backstepping after seeking faster. add_frame_pts(mpctx, img->pts); if (endpts != MP_NOPTS_VALUE && img->pts >= endpts) { r = VD_EOF; } else if (mpctx->max_frames == 0) { r = VD_EOF; } else if (hrseek && mpctx->hrseek_lastframe) { mp_image_setrefp(&mpctx->saved_frame, img); } else if (hrseek && img->pts < mpctx->hrseek_pts - .005) { /* just skip */ } else { add_new_frame(mpctx, img); img = NULL; } talloc_free(img); } } // Last-frame seek if (r <= 0 && hrseek && mpctx->hrseek_lastframe && mpctx->saved_frame) { add_new_frame(mpctx, mpctx->saved_frame); mpctx->saved_frame = NULL; r = VD_PROGRESS; } return have_new_frame(mpctx, r <= 0) ? VD_NEW_FRAME : r; }
// Fill mpctx->next_frame[] with a newly filtered or decoded image.
// returns VD_* code
//
// This variant keeps a two-slot queue: next_frame[1] is the most recently
// produced frame, and is shifted into next_frame[0] (the frame about to be
// displayed) while frame-time/sync accounting is updated inline.
// NOTE(review): VD_* constants, have_new_frame(), adjust_sync() etc. are
// defined elsewhere in this file; comments describe only visible behavior.
static int video_output_image(struct MPContext *mpctx, double endpts)
{
    // hr-seek frame dropping applies only while syncing to the seek target.
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;

    if (mpctx->d_video->header->attached_picture) {
        // Cover art: produce the single image once, then report EOF.
        if (vo_has_frame(mpctx->video_out))
            return VD_EOF;
        if (mpctx->next_frame[0])
            return VD_NEW_FRAME;
        int r = video_decode_and_filter(mpctx);
        video_filter(mpctx, true); // force EOF filtering (avoid decoding more)
        mpctx->next_frame[0] = vf_read_output_frame(mpctx->d_video->vfilter);
        if (mpctx->next_frame[0])
            mpctx->next_frame[0]->pts = MP_NOPTS_VALUE; // no meaningful pts
        return r <= 0 ? VD_EOF : VD_PROGRESS;
    }

    if (have_new_frame(mpctx))
        return VD_NEW_FRAME;

    // Shift the queue: promote the pending frame to "current" and do the
    // frame-time / A-V sync bookkeeping for it.
    if (!mpctx->next_frame[0] && mpctx->next_frame[1]) {
        mpctx->next_frame[0] = mpctx->next_frame[1];
        mpctx->next_frame[1] = NULL;

        double pts = mpctx->next_frame[0]->pts;
        double last_pts = mpctx->video_pts;
        if (last_pts == MP_NOPTS_VALUE)
            last_pts = pts; // first frame: duration 0
        double frame_time = pts - last_pts;
        if (frame_time < 0 || frame_time >= 60) {
            // Assume a PTS difference >= 60 seconds is a discontinuity.
            MP_WARN(mpctx, "Jump in video pts: %f -> %f\n", last_pts, pts);
            frame_time = 0;
        }
        mpctx->video_next_pts = pts;
        if (mpctx->d_audio)
            mpctx->delay -= frame_time; // account against A-V delay
        if (mpctx->video_status >= STATUS_READY) {
            // Scale wall-clock wait by playback speed and resync to audio.
            mpctx->time_frame += frame_time / mpctx->opts->playback_speed;
            adjust_sync(mpctx, pts, frame_time);
        }
        mpctx->dropped_frames = 0;
        MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);
    }

    if (have_new_frame(mpctx))
        return VD_NEW_FRAME;

    // Get a new frame if we need one.
    int r = VD_PROGRESS;
    if (!mpctx->next_frame[1]) {
        // Filter a new frame.
        r = video_decode_and_filter(mpctx);
        if (r < 0)
            return r; // error
        struct mp_image *img = vf_read_output_frame(mpctx->d_video->vfilter);
        if (img) {
            // Always add these; they make backstepping after seeking faster.
            add_frame_pts(mpctx, img->pts);
            bool drop = false;
            // Past the end position, or --frames=0: stop outputting.
            if ((endpts != MP_NOPTS_VALUE && img->pts >= endpts) ||
                mpctx->max_frames == 0)
            {
                drop = true;
                r = VD_EOF;
            }
            if (!drop && hrseek && mpctx->hrseek_lastframe) {
                // Last-frame seek: keep a ref to the newest frame instead
                // of queueing it; emitted at EOF below.
                mp_image_setrefp(&mpctx->saved_frame, img);
                drop = true;
            }
            // Still before the hr-seek target: discard.
            if (hrseek && img->pts < mpctx->hrseek_pts - .005)
                drop = true;
            if (drop) {
                talloc_free(img);
            } else {
                mpctx->next_frame[1] = img; // queue takes ownership
            }
        }
    }

    // On EOF, always allow the playloop to use the remaining frame.
    if (have_new_frame(mpctx) || (r <= 0 && mpctx->next_frame[0]))
        return VD_NEW_FRAME;

    // Last-frame seek
    if (r <= 0 && hrseek && mpctx->hrseek_lastframe && mpctx->saved_frame) {
        mpctx->next_frame[1] = mpctx->saved_frame;
        mpctx->saved_frame = NULL; // ownership moved into the queue
        return VD_PROGRESS;
    }

    return r;
}
// Advance video by (at most) one frame: reuse a queued VO frame, drain on
// reconfig, or demux+decode+filter a new one; then apply hr-seek skipping
// and compute the display duration of the upcoming frame.
//
// Returns: frame time in seconds (>= 0) for the next frame, 0 when no frame
// is ready yet (or it was skipped), -1 on EOF/failure to drain.
// NOTE(review): semantics of load_next_vo_frame()'s bool argument are not
// visible here — it appears to mean "force/drain"; confirm at its definition.
double update_video(struct MPContext *mpctx, double endpts)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vo *video_out = mpctx->video_out;

    if (d_video->header->attached_picture)
        return update_video_attached_pic(mpctx); // cover-art special case

    if (load_next_vo_frame(mpctx, false)) {
        // Use currently queued VO frame
    } else if (d_video->waiting_decoded_mpi) {
        // Draining on reconfig
        if (!load_next_vo_frame(mpctx, true))
            return -1;
    } else {
        // Decode a new frame
        struct demux_packet *pkt = demux_read_packet(d_video->header);
        if (pkt && pkt->pts != MP_NOPTS_VALUE)
            pkt->pts += mpctx->video_offset;
        // Once a packet reaches the hr-seek target (or pts are unreliable),
        // stop framedropping so the target frame is actually produced.
        if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
            d_video->has_broken_packet_pts)
        {
            mpctx->hrseek_framedrop = false;
        }
        // During hr-seek with framedrop enabled, force hard drop (1);
        // otherwise use the normal framedrop heuristic.
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            video_decode(d_video, pkt, framedrop_type);
        talloc_free(pkt);
        if (decoded_frame) {
            filter_video(mpctx, decoded_frame, false);
        } else if (!pkt) {
            // No packet and no frame: demuxer EOF — drain decoder/filters.
            // NOTE(review): this reads pkt's (freed-to) pointer value only,
            // never dereferences it; upstream pattern, left as-is.
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
    }

    // Whether the VO has an image queued.
    // If it does, it will be used to time and display the next frame.
    if (!video_out->frame_loaded)
        return 0;

    double pts = video_out->next_pts;
    // Record seen timestamps (used for backstepping); skip those past endpts.
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        // Still before the precise-seek target: discard this frame.
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false; // target reached
    double last_pts = mpctx->video_next_pts;
    if (last_pts == MP_NOPTS_VALUE)
        last_pts = pts; // first frame after reset: duration 0
    double frame_time = pts - last_pts;
    if (frame_time < 0 || frame_time >= 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n", last_pts, pts);
        frame_time = 0;
    }
    mpctx->video_next_pts = pts;
    if (mpctx->d_audio)
        mpctx->delay -= frame_time; // account against A-V delay
    return frame_time;
}
// Advance video by (at most) one frame (sh_video-era variant): reuse a
// queued VO frame or demux+decode+filter a new one, then apply hr-seek
// skipping, repair out-of-order/jumping timestamps, and return the display
// duration of the upcoming frame.
//
// Returns: frame time in seconds (>= 0), 0 when no frame is ready or it was
// skipped, -1 on EOF/failure to drain.
double update_video(struct MPContext *mpctx, double endpts)
{
    struct sh_video *sh_video = mpctx->sh_video;
    struct vo *video_out = mpctx->video_out;
    sh_video->vfilter->control(sh_video->vfilter, VFCTRL_SET_OSD_OBJ,
                               mpctx->osd); // for vf_sub
    if (!mpctx->opts->correct_pts)
        return update_video_nocorrect_pts(mpctx);

    if (sh_video->gsh->attached_picture)
        return update_video_attached_pic(mpctx); // cover-art special case

    double pts;

    // Single-pass loop (every path ends in break/return); used so the
    // queued-frame case can skip the decode step with a plain break.
    while (1) {
        if (load_next_vo_frame(mpctx, false))
            break; // a VO frame is already queued
        pts = MP_NOPTS_VALUE;
        struct demux_packet *pkt = NULL;
        while (1) {
            pkt = demux_read_packet(mpctx->sh_video->gsh);
            if (!pkt || pkt->len)
                break;
            /* Packets with size 0 are assumed to not correspond to frames,
             * but to indicate the absence of a frame in formats like AVI
             * that must have packets at fixed timecode intervals. */
            talloc_free(pkt);
        }
        if (pkt)
            pts = pkt->pts;
        if (pts != MP_NOPTS_VALUE)
            pts += mpctx->video_offset;
        // Reached the hr-seek target: stop framedropping so the target
        // frame is actually decoded and shown.
        if (pts >= mpctx->hrseek_pts - .005)
            mpctx->hrseek_framedrop = false;
        // Hard drop (1) while hr-seeking with framedrop; otherwise the
        // normal framedrop heuristic.
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            decode_video(sh_video, pkt, framedrop_type, pts);
        talloc_free(pkt);
        if (decoded_frame) {
            determine_frame_pts(mpctx);
            filter_video(mpctx, decoded_frame);
        } else if (!pkt) {
            // Demuxer EOF and decoder produced nothing: drain filters/VO.
            // NOTE(review): reads pkt's pointer value after talloc_free —
            // never dereferenced; upstream pattern, left as-is.
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
        break;
    }

    if (!video_out->frame_loaded)
        return 0; // nothing queued for display yet

    pts = video_out->next_pts;
    if (pts == MP_NOPTS_VALUE) {
        MP_ERR(mpctx, "Video pts after filters MISSING\n");
        // Try to use decoder pts from before filters
        pts = sh_video->pts;
        if (pts == MP_NOPTS_VALUE)
            pts = sh_video->last_pts;
    }
    // Record seen timestamps (used for backstepping); skip those past endpts.
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        // Still before the precise-seek target: discard this frame.
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false; // target reached
    sh_video->pts = pts;
    if (sh_video->last_pts == MP_NOPTS_VALUE)
        sh_video->last_pts = sh_video->pts; // first frame: duration 0
    else if (sh_video->last_pts > sh_video->pts) {
        MP_WARN(mpctx, "Decreasing video pts: %f < %f\n",
                sh_video->pts, sh_video->last_pts);
        /* If the difference in pts is small treat it as jitter around the
         * right value (possibly caused by incorrect timestamp ordering) and
         * just show this frame immediately after the last one.
         * Treat bigger differences as timestamp resets and start counting
         * timing of later frames from the position of this one.
         */
        if (sh_video->last_pts - sh_video->pts > 0.5)
            sh_video->last_pts = sh_video->pts;
        else
            sh_video->pts = sh_video->last_pts;
    } else if (sh_video->pts >= sh_video->last_pts + 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n",
                sh_video->last_pts, sh_video->pts);
        sh_video->last_pts = sh_video->pts;
    }
    double frame_time = sh_video->pts - sh_video->last_pts;
    sh_video->last_pts = sh_video->pts;
    if (mpctx->sh_audio)
        mpctx->delay -= frame_time; // account against A-V delay
    return frame_time;
}
// Fill the VO buffer with a newly filtered or decoded image. // returns VD_* code static int video_output_image(struct MPContext *mpctx, double endpts, bool reconfig_ok) { struct vf_chain *vf = mpctx->d_video->vfilter; struct vo *vo = mpctx->video_out; // Already enough video buffered in VO? // (This implies vo_has_next_frame(vo, false/true) returns true.) if (!vo_needs_new_image(vo) && vo->params) return 1; // Filter a new frame. int r = video_decode_and_filter(mpctx); if (r < 0) return r; // error vf_output_frame(vf, false); if (vf->output) { double pts = vf->output->pts; // Always add these; they make backstepping after seeking faster. add_frame_pts(mpctx, pts); bool drop = false; bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING && !mpctx->d_video->header->attached_picture; if (hrseek && pts < mpctx->hrseek_pts - .005) drop = true; if (endpts != MP_NOPTS_VALUE && pts >= endpts) { drop = true; r = VD_EOF; } if (drop) { talloc_free(vf->output); vf->output = NULL; return r; } } // Filter output is different from VO input? bool need_vo_reconfig = !vo->params || !mp_image_params_equal(&vf->output_params, vo->params); if (need_vo_reconfig) { // Draining VO buffers. if (vo_has_next_frame(vo, true)) return 0; // EOF so that caller displays remaining VO frames // There was no decoded image yet - must not signal fake EOF. // Likewise, if there's no filtered frame yet, don't reconfig yet. if (!vf->output_params.imgfmt || !vf->output) return r; // Force draining. if (!reconfig_ok) return 0; struct mp_image_params p = vf->output_params; const struct vo_driver *info = mpctx->video_out->driver; MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n", info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt)); MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description); int vo_r = vo_reconfig(vo, &p, 0); if (vo_r < 0) { vf->initialized = -1; return VD_ERROR; } init_vo(mpctx); // Display the frame queued after this immediately. 
// (Neutralizes frame time calculation in update_video.) mpctx->video_next_pts = MP_NOPTS_VALUE; } // Queue new frame, if there's one. struct mp_image *img = vf_read_output_frame(vf); if (img) { vo_queue_image(vo, img); return VD_PROGRESS; } return r; // includes the true EOF case }