Example #1
// Read a packet, store decoded image into d_video->waiting_decoded_mpi
// returns VD_* code
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi =
                    video_decode(d_video, d_video->header->attached_picture, 0);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         1 : check_framedrop(mpctx, -1);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    return had_packet ? VD_PROGRESS : VD_EOF;
}
Example #2
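// Read one demuxer packet, feed it to mpg123, and try to decode one frame into
// da->decoded. Returns 0 on progress (possibly without output yet) or one of
// the AD_* codes (AD_WAIT, AD_EOF, AD_ERR).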
static int decode_packet(struct dec_audio *da)
{
    struct ad_mpg123_context *con = da->priv;
    int ret;

    mp_audio_set_null_data(&da->decoded);

    struct demux_packet *pkt;
    if (demux_read_packet_async(da->header, &pkt) == 0)
        return AD_WAIT;
    if (!pkt)
        return AD_EOF;

    /* Next bytes from that presentation time. */
    if (pkt->pts != MP_NOPTS_VALUE) {
        da->pts        = pkt->pts;
        da->pts_offset = 0;
    }

    /* Have to use mpg123_feed() to avoid decoding here. */
    ret = mpg123_feed(con->handle, pkt->buffer, pkt->len);
    talloc_free(pkt);

    if (ret != MPG123_OK)
        goto mpg123_fail;

    unsigned char *audio = NULL;
    size_t bytes = 0;
    ret = mpg123_decode_frame(con->handle, NULL, &audio, &bytes);

    if (ret == MPG123_NEED_MORE)
        return 0;

    if (ret != MPG123_OK && ret != MPG123_DONE && ret != MPG123_NEW_FORMAT)
        goto mpg123_fail;

    ret = set_format(da);
    if (ret != MPG123_OK)
        goto mpg123_fail;

    if (con->sample_size < 1) {
        MP_ERR(da, "no sample size\n");
        return AD_ERR;
    }

    int got_samples = bytes / con->sample_size;
    da->decoded.planes[0] = audio;
    da->decoded.samples = got_samples;

    update_info(da);
    return 0;

mpg123_fail:
    MP_ERR(da, "mpg123 decoding error: %s\n", mpg123_strerror(con->handle));
    return AD_ERR;
}
Example #3
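// Read one demuxer packet and push it through the libavformat SPDIF muxer;
// the muxed IEC 61937 bitstream ends up in spdif_ctx->out_buffer (presumably
// filled via the muxer's I/O context) and is copied into a new frame returned
// through *out. Returns 0 on success or an AD_* code.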
static int decode_packet(struct dec_audio *da, struct mp_audio **out)
{
    struct spdifContext *spdif_ctx = da->priv;
    AVFormatContext     *lavf_ctx  = spdif_ctx->lavf_ctx;

    spdif_ctx->out_buffer_len  = 0;

    struct demux_packet *mpkt;
    if (demux_read_packet_async(da->header, &mpkt) == 0)
        return AD_WAIT;

    if (!mpkt)
        return AD_EOF;

    AVPacket pkt;
    mp_set_av_packet(&pkt, mpkt, NULL);
    pkt.pts = pkt.dts = 0;
    if (mpkt->pts != MP_NOPTS_VALUE) {
        da->pts        = mpkt->pts;
        da->pts_offset = 0;
    }
    int ret = av_write_frame(lavf_ctx, &pkt);
    talloc_free(mpkt);
    avio_flush(lavf_ctx->pb);
    if (ret < 0)
        return AD_ERR;

    int samples = spdif_ctx->out_buffer_len / spdif_ctx->fmt.sstride;
    *out = mp_audio_pool_get(da->pool, &spdif_ctx->fmt, samples);
    if (!*out)
        return AD_ERR;

    memcpy((*out)->planes[0], spdif_ctx->out_buffer, spdif_ctx->out_buffer_len);

    return 0;
}
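Both audio decode_packet() variants above follow the same convention: a return of 0 means the call made progress (possibly without producing output yet), while AD_WAIT, AD_EOF, and AD_ERR report the demuxer or stream state. A minimal driver loop for the mpg123 variant from Example #2 could look like the sketch below; audio_fill_decoded() is hypothetical and not part of the original code.

// Hypothetical sketch: pull one chunk of decoded audio out of the mpg123
// decoder from Example #2. Assumes da->decoded is cleared at the start of
// decode_packet() and holds the new samples once a frame was produced.
static int audio_fill_decoded(struct dec_audio *da)
{
    for (;;) {
        int r = decode_packet(da);
        if (r != 0)                     // AD_WAIT, AD_EOF or AD_ERR
            return r;
        if (da->decoded.samples > 0)    // got audio; caller consumes da->decoded
            return 0;
        // r == 0 with no samples yet: the decoder needs more input, retry.
    }
}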
Example #4
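// Set up the video chain for the currently selected video track: open the VO
// if necessary, create the decoder, rebuild the filter chain, and reset the
// related playback state. Returns 1 on success, 0 on failure (the partially
// initialized chain is torn down again).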
int reinit_video_chain(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    assert(!mpctx->d_video);
    struct track *track = mpctx->current_track[0][STREAM_VIDEO];
    struct sh_stream *sh = track ? track->stream : NULL;
    if (!sh)
        goto no_video;

    MP_VERBOSE(mpctx, "[V] fourcc:0x%X  size:%dx%d  fps:%5.3f\n",
               sh->format,
               sh->video->disp_w, sh->video->disp_h,
               sh->video->fps);

    //================== Init VIDEO (codec & libvo) ==========================
    if (!opts->fixed_vo || !mpctx->video_out) {
        struct vo_extra ex = {
            .input_ctx = mpctx->input,
            .osd = mpctx->osd,
            .encode_lavc_ctx = mpctx->encode_lavc_ctx,
            .opengl_cb_context = mpctx->gl_cb_ctx,
        };
        mpctx->video_out = init_best_video_out(mpctx->global, &ex);
        if (!mpctx->video_out) {
            MP_FATAL(mpctx, "Error opening/initializing "
                    "the selected video_out (-vo) device.\n");
            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
            goto err_out;
        }
        mpctx->mouse_cursor_visible = true;
    }

    update_window_title(mpctx, true);

    struct dec_video *d_video = talloc_zero(NULL, struct dec_video);
    mpctx->d_video = d_video;
    d_video->global = mpctx->global;
    d_video->log = mp_log_new(d_video, mpctx->log, "!vd");
    d_video->opts = mpctx->opts;
    d_video->header = sh;
    d_video->fps = sh->video->fps;
    d_video->vo = mpctx->video_out;

    if (opts->force_fps) {
        d_video->fps = opts->force_fps;
        MP_INFO(mpctx, "FPS forced to %5.3f.\n", d_video->fps);
        MP_INFO(mpctx, "Use --no-correct-pts to force FPS based timing.\n");
    }
    update_fps(mpctx);

    vo_control(mpctx->video_out, VOCTRL_GET_HWDEC_INFO, &d_video->hwdec_info);

    recreate_video_filters(mpctx);

    if (!video_init_best_codec(d_video, opts->video_decoders))
        goto err_out;

    bool saver_state = opts->pause || !opts->stop_screensaver;
    vo_control(mpctx->video_out, saver_state ? VOCTRL_RESTORE_SCREENSAVER
                                             : VOCTRL_KILL_SCREENSAVER, NULL);

    vo_set_paused(mpctx->video_out, mpctx->paused);

    mpctx->sync_audio_to_video = !sh->attached_picture;
    mpctx->vo_pts_history_seek_ts++;

    // If we switch on video again, ensure audio position matches up.
    if (mpctx->d_audio)
        mpctx->audio_status = STATUS_SYNCING;

    reset_video_state(mpctx);
    reset_subtitle_state(mpctx);

    return 1;

err_out:
no_video:
    uninit_video_chain(mpctx);
    if (track)
        error_on_track(mpctx, track);
    handle_force_window(mpctx, true);
    return 0;
}

// Try to refresh the video by doing a precise seek to the currently displayed
// frame. This can go wrong in all sorts of ways, so use sparingly.
void mp_force_video_refresh(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    if (!d_video || !d_video->decoder_output.imgfmt)
        return;

    // If not paused, the next frame should come soon enough.
    if (opts->pause && mpctx->last_vo_pts != MP_NOPTS_VALUE)
        queue_seek(mpctx, MPSEEK_ABSOLUTE, mpctx->last_vo_pts, 2, true);
}

static int check_framedrop(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    // check for frame-drop:
    if (mpctx->video_status == STATUS_PLAYING && !mpctx->paused &&
        mpctx->audio_status == STATUS_PLAYING && !ao_untimed(mpctx->ao))
    {
        float fps = mpctx->d_video->fps;
        double frame_time = fps > 0 ? 1.0 / fps : 0;
        // We should avoid dropping too many frames in sequence unless we
        // are too late. We allow 100 ms of A-V delay here:
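        // Example: at 25 fps (frame_time = 0.04 s), after 3 consecutive drops
        // another frame is dropped only once the A-V difference exceeds
        // 0.100 + 3 * 0.04 = 0.22 s.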
        if (mpctx->last_av_difference - 0.100 > mpctx->dropped_frames * frame_time)
            return !!(opts->frame_dropping & 2);
    }
    return 0;
}

// Read a packet, store decoded image into d_video->waiting_decoded_mpi
// returns VD_* code
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi =
                    video_decode(d_video, d_video->header->attached_picture, 0);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         2 : check_framedrop(mpctx);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    if (had_packet && !d_video->waiting_decoded_mpi &&
        mpctx->video_status == STATUS_PLAYING)
    {
        mpctx->dropped_frames_total++;
        mpctx->dropped_frames++;
    }

    return had_packet ? VD_PROGRESS : VD_EOF;
}


// Called after video reinit. This can be generally used to try to insert more
// filters using the filter chain edit functionality in command.c.
static void init_filter_params(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    // Note that the filter chain is already initialized. This code might
    // recreate the chain a second time, which is not very elegant, but allows
    // us to test whether enabling deinterlacing works with the current video
    // format and other filters.
    if (opts->deinterlace >= 0)
        mp_property_do("deinterlace", M_PROPERTY_SET, &opts->deinterlace, mpctx);
}

// Feed newly decoded frames to the filter, take care of format changes.
// If eof=true, drain the filter chain, and return VD_EOF if empty.
static int video_filter(struct MPContext *mpctx, bool eof)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    // There is already a filtered frame available.
    // If vf_needs_input() returns > 0, the filter wants input anyway.
    if (vf_output_frame(vf, eof) > 0 && vf_needs_input(vf) < 1)
        return VD_PROGRESS;

    // Decoder output is different from filter input?
    bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
        !mp_image_params_equal(&d_video->decoder_output, &vf->input_params);

    // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
    if (need_vf_reconfig && d_video->decoder_output.imgfmt) {
        // Drain the filter chain.
        if (vf_output_frame(vf, true) > 0)
            return VD_PROGRESS;

        // The filter chain is drained; execute the filter format change.
        filter_reconfig(mpctx, false);
        if (vf->initialized == 0)
            return VD_PROGRESS; // hw decoding fallback; try again
        if (vf->initialized < 1)
            return VD_ERROR;
        init_filter_params(mpctx);
        return VD_RECONFIG;
    }

    // If something was decoded, and the filter chain is ready, filter it.
    if (!need_vf_reconfig && d_video->waiting_decoded_mpi) {
        vf_filter_frame(vf, d_video->waiting_decoded_mpi);
        d_video->waiting_decoded_mpi = NULL;
        return VD_PROGRESS;
    }

    return eof ? VD_EOF : VD_PROGRESS;
}

// Make sure at least 1 filtered image is available, decode new video if needed.
// returns VD_* code
// A return value of VD_PROGRESS doesn't necessarily mean a frame was output,
// but it promises that calling this function again will eventually make progress.
static int video_decode_and_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    int r = video_filter(mpctx, false);
    if (r < 0)
        return r;

    if (!d_video->waiting_decoded_mpi) {
        // Decode a new image, or at least feed the decoder a packet.
        r = decode_image(mpctx);
        if (r == VD_WAIT)
            return r;
        if (d_video->waiting_decoded_mpi)
            d_video->decoder_output = d_video->waiting_decoded_mpi->params;
    }

    bool eof = !d_video->waiting_decoded_mpi && (r == VD_EOF || r < 0);
    r = video_filter(mpctx, eof);
    if (r == VD_RECONFIG) // retry feeding decoded image
        r = video_filter(mpctx, eof);
    return r;
}

static int video_feed_async_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    if (vf_needs_input(vf) < 1)
        return 0;
    mpctx->sleeptime = 0; // retry until done
    return video_decode_and_filter(mpctx);
}

/* Modify video timing to match the audio timeline. There are two main
 * reasons this is needed. First, video and audio can start from different
 * positions at beginning of file or after a seek (MPlayer starts both
 * immediately even if they have different pts). Second, the file can have
 * audio timestamps that are inconsistent with the duration of the audio
 * packets; for example, two consecutive timestamps may differ by one second
 * while the packet between them contains only half a second of samples.
 */
static void adjust_sync(struct MPContext *mpctx, double v_pts, double frame_time)
{
    struct MPOpts *opts = mpctx->opts;

    if (mpctx->audio_status != STATUS_PLAYING)
        return;

    double a_pts = written_audio_pts(mpctx) + mpctx->audio_delay - mpctx->delay;
    double av_delay = a_pts - v_pts;

    double change = av_delay * 0.1;
    double max_change = opts->default_max_pts_correction >= 0 ?
                        opts->default_max_pts_correction : frame_time * 0.1;
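    // Example: with av_delay = 0.2 s the raw correction is 0.02 s; at 25 fps
    // (frame_time = 0.04 s) and no user-set maximum, max_change = 0.004 s, so
    // the applied change is clamped to 0.004 s per call.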
    if (change < -max_change)
        change = -max_change;
    else if (change > max_change)
        change = max_change;
    mpctx->delay += change;
    mpctx->total_avsync_change += change;
}

// Enough video filtered already to push one frame to the VO?
static bool have_new_frame(struct MPContext *mpctx)
{
    bool need_2nd = !!(mpctx->opts->frame_dropping & 1) // we need the duration
        && mpctx->video_pts != MP_NOPTS_VALUE; // ...except for the 1st frame

    return mpctx->next_frame[0] && (!need_2nd || mpctx->next_frame[1]);
}
Example #5
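// Reset the decoder state: flush the codec (VDCTRL_RESET), clear timestamp
// tracking, and discard any queued packet, pending segment, and current image.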
void video_reset(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->start_pts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
    d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
    d_video->dropped_frames = 0;
    d_video->current_state = DATA_AGAIN;
    mp_image_unrefp(&d_video->current_mpi);
    talloc_free(d_video->packet);
    d_video->packet = NULL;
    talloc_free(d_video->new_segment);
    d_video->new_segment = NULL;
    d_video->start = d_video->end = MP_NOPTS_VALUE;
}

int video_vd_control(struct dec_video *d_video, int cmd, void *arg)
{
    const struct vd_functions *vd = d_video->vd_driver;
    if (vd)
        return vd->control(d_video, cmd, arg);
    return CONTROL_UNKNOWN;
}

void video_uninit(struct dec_video *d_video)
{
    if (!d_video)
        return;
    mp_image_unrefp(&d_video->current_mpi);
    mp_image_unrefp(&d_video->cover_art_mpi);
    if (d_video->vd_driver) {
        MP_VERBOSE(d_video, "Uninit video.\n");
        d_video->vd_driver->uninit(d_video);
    }
    talloc_free(d_video->packet);
    talloc_free(d_video->new_segment);
    talloc_free(d_video);
}

static int init_video_codec(struct dec_video *d_video, const char *decoder)
{
    if (!d_video->vd_driver->init(d_video, decoder)) {
        MP_VERBOSE(d_video, "Video decoder init failed.\n");
        return 0;
    }
    return 1;
}

struct mp_decoder_list *video_decoder_list(void)
{
    struct mp_decoder_list *list = talloc_zero(NULL, struct mp_decoder_list);
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++)
        mpcodecs_vd_drivers[i]->add_decoders(list);
    return list;
}

static struct mp_decoder_list *mp_select_video_decoders(const char *codec,
                                                        char *selection)
{
    struct mp_decoder_list *list = video_decoder_list();
    struct mp_decoder_list *new = mp_select_decoders(list, codec, selection);
    talloc_free(list);
    return new;
}

static const struct vd_functions *find_driver(const char *name)
{
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++) {
        if (strcmp(mpcodecs_vd_drivers[i]->name, name) == 0)
            return mpcodecs_vd_drivers[i];
    }
    return NULL;
}

bool video_init_best_codec(struct dec_video *d_video)
{
    struct MPOpts *opts = d_video->opts;

    assert(!d_video->vd_driver);
    video_reset(d_video);
    d_video->has_broken_packet_pts = -10; // needs 10 packets to reach decision

    struct mp_decoder_entry *decoder = NULL;
    struct mp_decoder_list *list =
        mp_select_video_decoders(d_video->codec->codec, opts->video_decoders);

    mp_print_decoders(d_video->log, MSGL_V, "Codec list:", list);

    for (int n = 0; n < list->num_entries; n++) {
        struct mp_decoder_entry *sel = &list->entries[n];
        const struct vd_functions *driver = find_driver(sel->family);
        if (!driver)
            continue;
        MP_VERBOSE(d_video, "Opening video decoder %s:%s\n",
                   sel->family, sel->decoder);
        d_video->vd_driver = driver;
        if (init_video_codec(d_video, sel->decoder)) {
            decoder = sel;
            break;
        }
        d_video->vd_driver = NULL;
        MP_WARN(d_video, "Video decoder init failed for "
                "%s:%s\n", sel->family, sel->decoder);
    }

    if (d_video->vd_driver) {
        d_video->decoder_desc =
            talloc_asprintf(d_video, "%s [%s:%s]", decoder->desc, decoder->family,
                            decoder->decoder);
        MP_VERBOSE(d_video, "Selected video codec: %s\n", d_video->decoder_desc);
    } else {
        MP_ERR(d_video, "Failed to initialize a video decoder for codec '%s'.\n",
               d_video->codec->codec);
    }

    if (d_video->header->missing_timestamps) {
        MP_WARN(d_video, "This stream has no timestamps!\n");
        MP_WARN(d_video, "Making up playback time using %f FPS.\n", d_video->fps);
        MP_WARN(d_video, "Seeking will probably fail badly.\n");
    }

    talloc_free(list);
    return !!d_video->vd_driver;
}

static void fix_image_params(struct dec_video *d_video,
                             struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct mp_codec_params *c = d_video->codec;

    MP_VERBOSE(d_video, "Decoder format: %s\n", mp_image_params_to_str(params));

    // While mp_image_params normally always has d_w/d_h set, the decoder
    // signals an unknown bitstream aspect ratio with both set to 0.
    float dec_aspect = p.p_w > 0 && p.p_h > 0 ? p.p_w / (float)p.p_h : 0;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = dec_aspect;

    bool use_container = true;
    switch (opts->aspect_method) {
    case 0:
        // We normally prefer the container aspect, unless the decoder aspect
        // changes at least once.
        if (dec_aspect > 0 && d_video->initial_decoder_aspect != dec_aspect) {
            MP_VERBOSE(d_video, "Using bitstream aspect ratio.\n");
            // Even if the aspect switches back, don't use container aspect again.
            d_video->initial_decoder_aspect = -1;
            use_container = false;
        }
        break;
    case 1:
        use_container = false;
        break;
    }

    if (use_container && c->par_w > 0 && c->par_h) {
        MP_VERBOSE(d_video, "Using container aspect ratio.\n");
        p.p_w = c->par_w;
        p.p_h = c->par_h;
    }

    if (opts->movie_aspect >= 0) {
        MP_VERBOSE(d_video, "Forcing user-set aspect ratio.\n");
        if (opts->movie_aspect == 0) {
            p.p_w = p.p_h = 1;
        } else {
            AVRational a = av_d2q(opts->movie_aspect, INT_MAX);
            mp_image_params_set_dsize(&p, a.num, a.den);
        }
    }

    // Assume square pixels if no aspect ratio is set at all.
    if (p.p_w <= 0 || p.p_h <= 0)
        p.p_w = p.p_h = 1;

    // Detect colorspace from resolution.
    mp_image_params_guess_csp(&p);

    d_video->last_format = *params;
    d_video->fixed_format = p;
}

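// Feed one packet to the decoder driver and post-process the returned image:
// track PTS/DTS problems, fall back to DTS or fps-based timestamps if needed,
// and apply the fixed-up image parameters. Returns NULL if no frame came out.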
static struct mp_image *decode_packet(struct dec_video *d_video,
                                      struct demux_packet *packet,
                                      int drop_frame)
{
    struct MPOpts *opts = d_video->opts;

    if (!d_video->vd_driver)
        return NULL;

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    if (pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE && d_video->first_packet_pdts == MP_NOPTS_VALUE)
        d_video->first_packet_pdts = pkt_pdts;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    // Error, discarded frame, dropped frame, or initial codec delay.
    if (!mpi || drop_frame) {
        talloc_free(mpi);
        return NULL;
    }

    if (opts->field_dominance == 0) {
        mpi->fields |= MP_IMGFIELD_TOP_FIRST | MP_IMGFIELD_INTERLACED;
    } else if (opts->field_dominance == 1) {
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    }

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = mpi->pts;
    double dts = mpi->dts;

    if (pts != MP_NOPTS_VALUE) {
        if (pts < d_video->codec_pts)
            d_video->num_codec_pts_problems++;
        d_video->codec_pts = mpi->pts;
    }

    if (dts != MP_NOPTS_VALUE) {
        if (dts <= d_video->codec_dts)
            d_video->num_codec_dts_problems++;
        d_video->codec_dts = mpi->dts;
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems)
        d_video->has_broken_packet_pts = 1;

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts && !d_video->header->missing_timestamps)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->first_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE) {
            pts = base == MP_NOPTS_VALUE ? 0 : base;
        } else {
            pts += frame_time;
        }
    }

    if (!mp_image_params_equal(&d_video->last_format, &mpi->params))
        fix_image_params(d_video, &mpi->params);

    mpi->params = d_video->fixed_format;

    mpi->pts = pts;
    d_video->decoded_pts = pts;

    // Compensate for incorrectly using mpeg-style DTS for avi timestamps.
    if (d_video->codec->avi_dts && opts->correct_pts &&
        mpi->pts != MP_NOPTS_VALUE && d_video->fps > 0)
    {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_GET_BFRAMES, &delay);
        mpi->pts -= MPMAX(delay, 0) / d_video->fps;
    }

    return mpi;
}

void video_reset_aspect(struct dec_video *d_video)
{
    d_video->last_format = (struct mp_image_params){0};
}

void video_set_framedrop(struct dec_video *d_video, bool enabled)
{
    d_video->framedrop_enabled = enabled;
}

// Frames before the start timestamp can be dropped. (Used for hr-seek.)
void video_set_start(struct dec_video *d_video, double start_pts)
{
    d_video->start_pts = start_pts;
}

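// Do one unit of decoding work: read a packet if necessary, decode it, handle
// segment switches, and update current_state/current_mpi. Results are fetched
// with video_get_frame().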
void video_work(struct dec_video *d_video)
{
    if (d_video->current_mpi)
        return;

    if (d_video->header->attached_picture) {
        if (d_video->current_state == DATA_AGAIN && !d_video->cover_art_mpi) {
            d_video->cover_art_mpi =
                decode_packet(d_video, d_video->header->attached_picture, 0);
            // Might need flush.
            if (!d_video->cover_art_mpi)
                d_video->cover_art_mpi = decode_packet(d_video, NULL, 0);
            d_video->current_state = DATA_OK;
        }
        if (d_video->current_state == DATA_OK)
            d_video->current_mpi = mp_image_new_ref(d_video->cover_art_mpi);
        // (DATA_OK is returned the first time, when current_mpi is still set)
        d_video->current_state = DATA_EOF;
        return;
    }

    if (!d_video->packet && !d_video->new_segment &&
        demux_read_packet_async(d_video->header, &d_video->packet) == 0)
    {
        d_video->current_state = DATA_WAIT;
        return;
    }

    if (d_video->packet) {
        if (d_video->packet->dts == MP_NOPTS_VALUE && !d_video->codec->avi_dts)
            d_video->packet->dts = d_video->packet->pts;
    }

    if (d_video->packet && d_video->packet->new_segment) {
        assert(!d_video->new_segment);
        d_video->new_segment = d_video->packet;
        d_video->packet = NULL;
    }

    bool had_input_packet = !!d_video->packet;
    bool had_packet = had_input_packet || d_video->new_segment;

    double start_pts = d_video->start_pts;
    if (d_video->start != MP_NOPTS_VALUE && (start_pts == MP_NOPTS_VALUE ||
                                             d_video->start > start_pts))
        start_pts = d_video->start;

    int framedrop_type = d_video->framedrop_enabled ? 1 : 0;
    if (start_pts != MP_NOPTS_VALUE && d_video->packet &&
        d_video->packet->pts < start_pts - .005 &&
        !d_video->has_broken_packet_pts)
    {
        framedrop_type = 2;
    }
    d_video->current_mpi = decode_packet(d_video, d_video->packet, framedrop_type);
    if (d_video->packet && d_video->packet->len == 0) {
        talloc_free(d_video->packet);
        d_video->packet = NULL;
    }

    d_video->current_state = DATA_OK;
    if (!d_video->current_mpi) {
        d_video->current_state = DATA_EOF;
        if (had_packet) {
            if (framedrop_type == 1)
                d_video->dropped_frames += 1;
            d_video->current_state = DATA_AGAIN;
        }
    }

    bool segment_ended = !d_video->current_mpi && !had_input_packet;

    if (d_video->current_mpi && d_video->current_mpi->pts != MP_NOPTS_VALUE) {
        double vpts = d_video->current_mpi->pts;
        segment_ended = d_video->end != MP_NOPTS_VALUE && vpts >= d_video->end;
        if ((d_video->start != MP_NOPTS_VALUE && vpts < d_video->start)
            || segment_ended)
        {
            talloc_free(d_video->current_mpi);
            d_video->current_mpi = NULL;
        }
    }

    // If there's a new segment, start it as soon as we're drained/finished.
    if (segment_ended && d_video->new_segment) {
        struct demux_packet *new_segment = d_video->new_segment;
        d_video->new_segment = NULL;

        // Could avoid decoder reinit; would still need flush.
        d_video->codec = new_segment->codec;
        if (d_video->vd_driver)
            d_video->vd_driver->uninit(d_video);
        d_video->vd_driver = NULL;
        video_init_best_codec(d_video);

        d_video->start = new_segment->start;
        d_video->end = new_segment->end;

        new_segment->new_segment = false;

        d_video->packet = new_segment;
        d_video->current_state = DATA_AGAIN;
    }
}

// Fetch an image decoded with video_work(). Returns one of:
//  DATA_OK:    *out_mpi is set to a new image
//  DATA_WAIT:  waiting for demuxer; will receive a wakeup signal
//  DATA_EOF:   end of file, no more frames to be expected
//  DATA_AGAIN: dropped frame or something similar
int video_get_frame(struct dec_video *d_video, struct mp_image **out_mpi)
{
    *out_mpi = NULL;
    if (d_video->current_mpi) {
        *out_mpi = d_video->current_mpi;
        d_video->current_mpi = NULL;
        return DATA_OK;
    }
    if (d_video->current_state == DATA_OK)
        return DATA_AGAIN;
    return d_video->current_state;
}
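The DATA_* contract documented above lends itself to a simple pull loop. The sketch below is purely illustrative and not part of the original source; fetch_one_frame() is a hypothetical helper, and it assumes the caller returns to its event loop on DATA_WAIT and is woken up by the demuxer, as the comment on video_get_frame() describes.

// Hypothetical caller sketch built on the documented DATA_* contract: run the
// decoder until it produces a frame or reaches EOF/a wait condition.
static struct mp_image *fetch_one_frame(struct dec_video *d_video, int *state)
{
    for (;;) {
        video_work(d_video);
        struct mp_image *mpi = NULL;
        *state = video_get_frame(d_video, &mpi);
        switch (*state) {
        case DATA_OK:
            return mpi;     // the caller now owns the image
        case DATA_AGAIN:
            break;          // dropped frame or similar; just retry
        default:
            return NULL;    // DATA_WAIT (wait for wakeup) or DATA_EOF
        }
    }
}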