void video_reset(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    d_video->num_buffered_pts = 0;
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->start_pts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
    d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
    d_video->dropped_frames = 0;
    d_video->current_state = DATA_AGAIN;
    mp_image_unrefp(&d_video->current_mpi);
}

int video_vd_control(struct dec_video *d_video, int cmd, void *arg)
{
    const struct vd_functions *vd = d_video->vd_driver;
    if (vd)
        return vd->control(d_video, cmd, arg);
    return CONTROL_UNKNOWN;
}

void video_uninit(struct dec_video *d_video)
{
    mp_image_unrefp(&d_video->current_mpi);
    mp_image_unrefp(&d_video->cover_art_mpi);
    if (d_video->vd_driver) {
        MP_VERBOSE(d_video, "Uninit video.\n");
        d_video->vd_driver->uninit(d_video);
    }
    talloc_free(d_video);
}
static void reconfig_video(struct MPContext *mpctx,
                           const struct mp_image_params *params,
                           bool probe_only)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    d_video->decoder_output = *params;

    set_allowed_vo_formats(d_video->vfilter, mpctx->video_out);

    // The event should happen _after_ filter and VO reconfig. Since we don't
    // have any fine grained locking, this is just as good.
    mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);

    if (video_reconfig_filters(d_video, params) < 0) {
        // Most video filters don't work with hardware decoding, so this
        // might be the reason filter reconfig failed.
        if (!probe_only &&
            video_vd_control(d_video, VDCTRL_FORCE_HWDEC_FALLBACK, NULL) == CONTROL_OK)
        {
            // Fallback active; decoder will return software format next
            // time. Don't abort video decoding.
            d_video->vfilter->initialized = 0;
        }
        return;
    }

    if (d_video->vfilter->initialized < 1)
        return;

    struct mp_image_params p = d_video->vfilter->output_params;
    const struct vo_driver *info = mpctx->video_out->driver;
    MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
            info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
    MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

    int r = vo_reconfig(mpctx->video_out, &p, 0);
    if (r < 0)
        d_video->vfilter->initialized = -1;

    if (r >= 0) {
        if (opts->gamma_gamma != 1000)
            video_set_colors(d_video, "gamma", opts->gamma_gamma);
        if (opts->gamma_brightness != 1000)
            video_set_colors(d_video, "brightness", opts->gamma_brightness);
        if (opts->gamma_contrast != 1000)
            video_set_colors(d_video, "contrast", opts->gamma_contrast);
        if (opts->gamma_saturation != 1000)
            video_set_colors(d_video, "saturation", opts->gamma_saturation);
        if (opts->gamma_hue != 1000)
            video_set_colors(d_video, "hue", opts->gamma_hue);
    }
}
// Reconfigure the filter chain according to decoder output.
// probe_only: don't force fallback to software when doing hw decoding, and
//             the filter chain couldn't be configured
static void filter_reconfig(struct MPContext *mpctx,
                            bool probe_only)
{
    struct dec_video *d_video = mpctx->d_video;

    struct mp_image_params params = d_video->decoder_output;

    mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);

    set_allowed_vo_formats(d_video->vfilter, mpctx->video_out);

    if (video_reconfig_filters(d_video, &params) < 0) {
        // Most video filters don't work with hardware decoding, so this
        // might be the reason why filter reconfig failed.
        if (!probe_only &&
            video_vd_control(d_video, VDCTRL_FORCE_HWDEC_FALLBACK, NULL) == CONTROL_OK)
        {
            // Fallback active; decoder will return software format next
            // time. Don't abort video decoding.
            d_video->vfilter->initialized = 0;
            mp_image_unrefp(&d_video->waiting_decoded_mpi);
            d_video->decoder_output = (struct mp_image_params){0};
            MP_VERBOSE(mpctx, "hwdec fallback due to filters.\n");
        }
        return;
    }

    if (d_video->vfilter->initialized < 1)
        return;

    if (params.rotate && (params.rotate % 90 == 0)) {
        if (!(mpctx->video_out->driver->caps & VO_CAP_ROTATE90)) {
            // Try to insert a rotation filter.
            char *args[] = {"angle", "auto", NULL};
            if (try_filter(mpctx, params, "rotate", "autorotate", args) >= 0) {
                params.rotate = 0;
            } else {
                MP_ERR(mpctx, "Can't insert rotation filter.\n");
            }
        }
    }

    if (params.stereo_in != params.stereo_out &&
        params.stereo_in > 0 && params.stereo_out >= 0)
    {
        char *to = (char *)MP_STEREO3D_NAME(params.stereo_out);
        if (to) {
            char *args[] = {"in", "auto", "out", to, NULL, NULL};
            if (try_filter(mpctx, params, "stereo3d", "stereo3d", args) < 0)
                MP_ERR(mpctx, "Can't insert 3D conversion filter.\n");
        }
    }
}
void video_reset_decoding(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    if (d_video->vfilter && d_video->vfilter->initialized == 1)
        vf_seek_reset(d_video->vfilter);
    mp_image_unrefp(&d_video->waiting_decoded_mpi);
    d_video->num_buffered_pts = 0;
    d_video->last_pts = MP_NOPTS_VALUE;
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
}
static void add_avi_pts(struct dec_video *d_video, double pts)
{
    if (pts != MP_NOPTS_VALUE) {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_QUERY_UNSEEN_FRAMES, &delay);
        if (delay >= 0 && delay < d_video->num_buffered_pts)
            d_video->num_buffered_pts = delay;
        if (d_video->num_buffered_pts == MP_ARRAY_SIZE(d_video->buffered_pts)) {
            MP_ERR(d_video, "Too many buffered pts\n");
        } else {
            for (int i = d_video->num_buffered_pts; i > 0; i--)
                d_video->buffered_pts[i] = d_video->buffered_pts[i - 1];
            d_video->buffered_pts[0] = pts;
            d_video->num_buffered_pts++;
        }
    }
}
static void add_pts_to_sort(struct dec_video *d_video, double pts)
{
    if (pts != MP_NOPTS_VALUE) {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_QUERY_UNSEEN_FRAMES, &delay);
        if (delay >= 0 && delay < d_video->num_buffered_pts)
            d_video->num_buffered_pts = delay;
        if (d_video->num_buffered_pts ==
            sizeof(d_video->buffered_pts) / sizeof(double))
        {
            MP_ERR(d_video, "Too many buffered pts\n");
        } else {
            int i, j;
            for (i = 0; i < d_video->num_buffered_pts; i++)
                if (d_video->buffered_pts[i] < pts)
                    break;
            for (j = d_video->num_buffered_pts; j > i; j--)
                d_video->buffered_pts[j] = d_video->buffered_pts[j - 1];
            d_video->buffered_pts[i] = pts;
            d_video->num_buffered_pts++;
        }
    }
}
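// A minimal standalone sketch (not part of the original file) of the
// insertion scheme add_pts_to_sort() uses: keep a small fixed array of
// timestamps in descending order and place each new pts at its sorted
// position. The name demo_insert_sorted_pts is made up for illustration only.
#if 0
static int demo_insert_sorted_pts(double *buf, int count, int capacity, double pts)
{
    if (count == capacity)
        return count;                   // buffer full; caller reports the error
    int i = 0;
    while (i < count && buf[i] >= pts)  // find the first entry smaller than pts
        i++;
    for (int j = count; j > i; j--)     // shift the tail to make room
        buf[j] = buf[j - 1];
    buf[i] = pts;                       // insert; order stays descending
    return count + 1;
}
#endif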
void video_reset(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->start_pts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
    d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
    d_video->dropped_frames = 0;
    d_video->current_state = DATA_AGAIN;
    mp_image_unrefp(&d_video->current_mpi);
    talloc_free(d_video->packet);
    d_video->packet = NULL;
    talloc_free(d_video->new_segment);
    d_video->new_segment = NULL;
    d_video->start = d_video->end = MP_NOPTS_VALUE;
}

int video_vd_control(struct dec_video *d_video, int cmd, void *arg)
{
    const struct vd_functions *vd = d_video->vd_driver;
    if (vd)
        return vd->control(d_video, cmd, arg);
    return CONTROL_UNKNOWN;
}

void video_uninit(struct dec_video *d_video)
{
    if (!d_video)
        return;
    mp_image_unrefp(&d_video->current_mpi);
    mp_image_unrefp(&d_video->cover_art_mpi);
    if (d_video->vd_driver) {
        MP_VERBOSE(d_video, "Uninit video.\n");
        d_video->vd_driver->uninit(d_video);
    }
    talloc_free(d_video->packet);
    talloc_free(d_video->new_segment);
    talloc_free(d_video);
}

static int init_video_codec(struct dec_video *d_video, const char *decoder)
{
    if (!d_video->vd_driver->init(d_video, decoder)) {
        MP_VERBOSE(d_video, "Video decoder init failed.\n");
        return 0;
    }
    return 1;
}

struct mp_decoder_list *video_decoder_list(void)
{
    struct mp_decoder_list *list = talloc_zero(NULL, struct mp_decoder_list);
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++)
        mpcodecs_vd_drivers[i]->add_decoders(list);
    return list;
}

static struct mp_decoder_list *mp_select_video_decoders(const char *codec,
                                                        char *selection)
{
    struct mp_decoder_list *list = video_decoder_list();
    struct mp_decoder_list *new = mp_select_decoders(list, codec, selection);
    talloc_free(list);
    return new;
}

static const struct vd_functions *find_driver(const char *name)
{
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++) {
        if (strcmp(mpcodecs_vd_drivers[i]->name, name) == 0)
            return mpcodecs_vd_drivers[i];
    }
    return NULL;
}

bool video_init_best_codec(struct dec_video *d_video)
{
    struct MPOpts *opts = d_video->opts;

    assert(!d_video->vd_driver);
    video_reset(d_video);
    d_video->has_broken_packet_pts = -10; // needs 10 packets to reach decision

    struct mp_decoder_entry *decoder = NULL;
    struct mp_decoder_list *list =
        mp_select_video_decoders(d_video->codec->codec, opts->video_decoders);

    mp_print_decoders(d_video->log, MSGL_V, "Codec list:", list);

    for (int n = 0; n < list->num_entries; n++) {
        struct mp_decoder_entry *sel = &list->entries[n];
        const struct vd_functions *driver = find_driver(sel->family);
        if (!driver)
            continue;
        MP_VERBOSE(d_video, "Opening video decoder %s:%s\n",
                   sel->family, sel->decoder);
        d_video->vd_driver = driver;
        if (init_video_codec(d_video, sel->decoder)) {
            decoder = sel;
            break;
        }
        d_video->vd_driver = NULL;
        MP_WARN(d_video, "Video decoder init failed for "
                "%s:%s\n", sel->family, sel->decoder);
    }

    if (d_video->vd_driver) {
        d_video->decoder_desc =
            talloc_asprintf(d_video, "%s [%s:%s]", decoder->desc,
                            decoder->family, decoder->decoder);
        MP_VERBOSE(d_video, "Selected video codec: %s\n", d_video->decoder_desc);
    } else {
        MP_ERR(d_video, "Failed to initialize a video decoder for codec '%s'.\n",
               d_video->codec->codec);
    }

    if (d_video->header->missing_timestamps) {
        MP_WARN(d_video, "This stream has no timestamps!\n");
        MP_WARN(d_video, "Making up playback time using %f FPS.\n", d_video->fps);
        MP_WARN(d_video, "Seeking will probably fail badly.\n");
    }

    talloc_free(list);

    return !!d_video->vd_driver;
}

static void fix_image_params(struct dec_video *d_video,
                             struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct mp_codec_params *c = d_video->codec;

    MP_VERBOSE(d_video, "Decoder format: %s\n", mp_image_params_to_str(params));

    // While mp_image_params normally always have to have d_w/d_h set, the
    // decoder signals unknown bitstream aspect ratio with both set to 0.
    float dec_aspect = p.p_w > 0 && p.p_h > 0 ? p.p_w / (float)p.p_h : 0;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = dec_aspect;

    bool use_container = true;
    switch (opts->aspect_method) {
    case 0:
        // We normally prefer the container aspect, unless the decoder aspect
        // changes at least once.
        if (dec_aspect > 0 && d_video->initial_decoder_aspect != dec_aspect) {
            MP_VERBOSE(d_video, "Using bitstream aspect ratio.\n");
            // Even if the aspect switches back, don't use container aspect again.
            d_video->initial_decoder_aspect = -1;
            use_container = false;
        }
        break;
    case 1:
        use_container = false;
        break;
    }

    if (use_container && c->par_w > 0 && c->par_h) {
        MP_VERBOSE(d_video, "Using container aspect ratio.\n");
        p.p_w = c->par_w;
        p.p_h = c->par_h;
    }

    if (opts->movie_aspect >= 0) {
        MP_VERBOSE(d_video, "Forcing user-set aspect ratio.\n");
        if (opts->movie_aspect == 0) {
            p.p_w = p.p_h = 1;
        } else {
            AVRational a = av_d2q(opts->movie_aspect, INT_MAX);
            mp_image_params_set_dsize(&p, a.num, a.den);
        }
    }

    // Assume square pixels if no aspect ratio is set at all.
    if (p.p_w <= 0 || p.p_h <= 0)
        p.p_w = p.p_h = 1;

    // Detect colorspace from resolution.
    mp_image_params_guess_csp(&p);

    d_video->last_format = *params;
    d_video->fixed_format = p;
}

static struct mp_image *decode_packet(struct dec_video *d_video,
                                      struct demux_packet *packet,
                                      int drop_frame)
{
    struct MPOpts *opts = d_video->opts;

    if (!d_video->vd_driver)
        return NULL;

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    if (pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE && d_video->first_packet_pdts == MP_NOPTS_VALUE)
        d_video->first_packet_pdts = pkt_pdts;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    // Error, discarded frame, dropped frame, or initial codec delay.
    if (!mpi || drop_frame) {
        talloc_free(mpi);
        return NULL;
    }

    if (opts->field_dominance == 0) {
        mpi->fields |= MP_IMGFIELD_TOP_FIRST | MP_IMGFIELD_INTERLACED;
    } else if (opts->field_dominance == 1) {
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    }

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = mpi->pts;
    double dts = mpi->dts;

    if (pts != MP_NOPTS_VALUE) {
        if (pts < d_video->codec_pts)
            d_video->num_codec_pts_problems++;
        d_video->codec_pts = mpi->pts;
    }

    if (dts != MP_NOPTS_VALUE) {
        if (dts <= d_video->codec_dts)
            d_video->num_codec_dts_problems++;
        d_video->codec_dts = mpi->dts;
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems)
        d_video->has_broken_packet_pts = 1;

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts && !d_video->header->missing_timestamps)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->first_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE) {
            pts = base == MP_NOPTS_VALUE ? 0 : base;
        } else {
            pts += frame_time;
        }
    }

    if (!mp_image_params_equal(&d_video->last_format, &mpi->params))
        fix_image_params(d_video, &mpi->params);

    mpi->params = d_video->fixed_format;

    mpi->pts = pts;
    d_video->decoded_pts = pts;

    // Compensate for incorrectly using mpeg-style DTS for avi timestamps.
    if (d_video->codec->avi_dts && opts->correct_pts &&
        mpi->pts != MP_NOPTS_VALUE && d_video->fps > 0)
    {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_GET_BFRAMES, &delay);
        mpi->pts -= MPMAX(delay, 0) / d_video->fps;
    }

    return mpi;
}

void video_reset_aspect(struct dec_video *d_video)
{
    d_video->last_format = (struct mp_image_params){0};
}

void video_set_framedrop(struct dec_video *d_video, bool enabled)
{
    d_video->framedrop_enabled = enabled;
}

// Frames before the start timestamp can be dropped. (Used for hr-seek.)
void video_set_start(struct dec_video *d_video, double start_pts)
{
    d_video->start_pts = start_pts;
}

void video_work(struct dec_video *d_video)
{
    if (d_video->current_mpi)
        return;

    if (d_video->header->attached_picture) {
        if (d_video->current_state == DATA_AGAIN && !d_video->cover_art_mpi) {
            d_video->cover_art_mpi =
                decode_packet(d_video, d_video->header->attached_picture, 0);
            // Might need flush.
            if (!d_video->cover_art_mpi)
                d_video->cover_art_mpi = decode_packet(d_video, NULL, 0);
            d_video->current_state = DATA_OK;
        }
        if (d_video->current_state == DATA_OK)
            d_video->current_mpi = mp_image_new_ref(d_video->cover_art_mpi);
        // (DATA_OK is returned the first time, when current_mpi is still set)
        d_video->current_state = DATA_EOF;
        return;
    }

    if (!d_video->packet && !d_video->new_segment &&
        demux_read_packet_async(d_video->header, &d_video->packet) == 0)
    {
        d_video->current_state = DATA_WAIT;
        return;
    }

    if (d_video->packet) {
        if (d_video->packet->dts == MP_NOPTS_VALUE && !d_video->codec->avi_dts)
            d_video->packet->dts = d_video->packet->pts;
    }

    if (d_video->packet && d_video->packet->new_segment) {
        assert(!d_video->new_segment);
        d_video->new_segment = d_video->packet;
        d_video->packet = NULL;
    }

    bool had_input_packet = !!d_video->packet;
    bool had_packet = had_input_packet || d_video->new_segment;

    double start_pts = d_video->start_pts;
    if (d_video->start != MP_NOPTS_VALUE && (start_pts == MP_NOPTS_VALUE ||
                                             d_video->start > start_pts))
        start_pts = d_video->start;

    int framedrop_type = d_video->framedrop_enabled ? 1 : 0;
    if (start_pts != MP_NOPTS_VALUE && d_video->packet &&
        d_video->packet->pts < start_pts - .005 &&
        !d_video->has_broken_packet_pts)
    {
        framedrop_type = 2;
    }

    d_video->current_mpi = decode_packet(d_video, d_video->packet, framedrop_type);
    if (d_video->packet && d_video->packet->len == 0) {
        talloc_free(d_video->packet);
        d_video->packet = NULL;
    }

    d_video->current_state = DATA_OK;
    if (!d_video->current_mpi) {
        d_video->current_state = DATA_EOF;
        if (had_packet) {
            if (framedrop_type == 1)
                d_video->dropped_frames += 1;
            d_video->current_state = DATA_AGAIN;
        }
    }

    bool segment_ended = !d_video->current_mpi && !had_input_packet;
    if (d_video->current_mpi && d_video->current_mpi->pts != MP_NOPTS_VALUE) {
        double vpts = d_video->current_mpi->pts;
        segment_ended = d_video->end != MP_NOPTS_VALUE && vpts >= d_video->end;
        if ((d_video->start != MP_NOPTS_VALUE && vpts < d_video->start) ||
            segment_ended)
        {
            talloc_free(d_video->current_mpi);
            d_video->current_mpi = NULL;
        }
    }

    // If there's a new segment, start it as soon as we're drained/finished.
    if (segment_ended && d_video->new_segment) {
        struct demux_packet *new_segment = d_video->new_segment;
        d_video->new_segment = NULL;

        // Could avoid decoder reinit; would still need flush.
        d_video->codec = new_segment->codec;
        if (d_video->vd_driver)
            d_video->vd_driver->uninit(d_video);
        d_video->vd_driver = NULL;
        video_init_best_codec(d_video);

        d_video->start = new_segment->start;
        d_video->end = new_segment->end;

        new_segment->new_segment = false;

        d_video->packet = new_segment;
        d_video->current_state = DATA_AGAIN;
    }
}

// Fetch an image decoded with video_work(). Returns one of:
//  DATA_OK:    *out_mpi is set to a new image
//  DATA_WAIT:  waiting for demuxer; will receive a wakeup signal
//  DATA_EOF:   end of file, no more frames to be expected
//  DATA_AGAIN: dropped frame or something similar
int video_get_frame(struct dec_video *d_video, struct mp_image **out_mpi)
{
    *out_mpi = NULL;
    if (d_video->current_mpi) {
        *out_mpi = d_video->current_mpi;
        d_video->current_mpi = NULL;
        return DATA_OK;
    }
    if (d_video->current_state == DATA_OK)
        return DATA_AGAIN;
    return d_video->current_state;
}
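// A hypothetical usage sketch (not actual mpv code): how a caller might drive
// the video_work()/video_get_frame() pair described above. process_frame()
// and wait_for_demuxer_wakeup() are invented placeholders for whatever the
// player loop does with a decoded frame and with demuxer wakeups.
#if 0
static void example_video_loop(struct dec_video *d_video)
{
    for (;;) {
        video_work(d_video);                    // advance decoding by one step
        struct mp_image *mpi = NULL;
        int r = video_get_frame(d_video, &mpi);
        if (r == DATA_OK) {
            process_frame(mpi);                 // hypothetical consumer
            talloc_free(mpi);                   // mp_image refs are talloc'd
        } else if (r == DATA_WAIT) {
            wait_for_demuxer_wakeup();          // demuxer will signal new data
        } else if (r == DATA_EOF) {
            break;                              // no more frames expected
        }
        // DATA_AGAIN: frame was dropped or a segment switch happened; loop.
    }
}
#endif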