Code example #1
static void execute_task(hs_worker *worker, _task_t task)
{
	hs_task *_task = task->task;
	task_timing *_task_timing = _task->timing;
	worker_timing *_worker_timing = worker->timing;

	//printf("GPU worker executes task\n");

	if(worker->exectued == 0)
	{
		//printf("GPU zero taks\n");
		get_relative_time(&worker->timing->start_time);
		worker->exectued = 1;
	}

	get_relative_time(&_task_timing->start_time);

	_task->cuda_func(_task->data_interface);

	get_relative_time(&_task_timing->end_time);

	update_worker_exec_status(_worker_timing, &_task_timing->start_time, &_task_timing->end_time);

	fifo_push_task(worker->finished_tasks, task);

	dec_nsubmitted_tasks();
}
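
Example #1 records per-worker and per-task timestamps by passing a pointer to get_relative_time. The function itself is not shown; below is a minimal sketch of what such a pointer-filling variant might look like, assuming the timestamp fields are struct timespec (both the field type and this definition are assumptions, not taken from the project).

#include <time.h>

/* Hypothetical sketch: fill *ts with a monotonic timestamp.
 * The real definition and the timestamp type are not shown above. */
static void get_relative_time(struct timespec *ts)
{
	clock_gettime(CLOCK_MONOTONIC, ts);
}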
Code example #2
File: timer.c  Project: Bremma/pinmame
void timer_adjust(mame_timer *which, double duration, int param, double period)
{
	double time = get_relative_time();

	/* if this is the callback timer, mark it modified */
	if (which == callback_timer)
		callback_timer_modified = 1;

	/* compute the time of the next firing and insert into the list */
	which->callback_param = param;
	which->enabled = 1;

	/* set the start and expire times */
	which->start = time;
	which->expire = time + duration;
	which->period = period;

	/* remove and re-insert the timer in its new order */
	timer_list_remove(which);
	timer_list_insert(which);

	LOG(("timer_adjust %08X to expire @ %.9f\n", (UINT32)which, which->expire));

	/* if this was inserted as the head, abort the current timeslice and resync */
	if (which == timer_head && cpu_getexecutingcpu() >= 0)
		activecpu_abort_timeslice();
}
Code example #3
File: timer.c  Project: Bremma/pinmame
mame_timer *timer_alloc(void (*callback)(int))
{
	double time = get_relative_time();
	mame_timer *timer = timer_new();

	/* fail if we can't allocate a new entry */
	if (!timer)
		return NULL;

	/* fill in the record */
	timer->callback = callback;
	timer->callback_param = 0;
	timer->enabled = 0;
	timer->temporary = 0;
	timer->tag = get_resource_tag();
	timer->period = 0;

	/* compute the time of the next firing and insert into the list */
	timer->start = time;
	timer->expire = TIME_NEVER;
	timer_list_insert(timer);

	/* return a handle */
	return timer;
}
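
Examples #2 and #3 pair naturally: a timer is allocated once and then armed with timer_adjust. A minimal usage sketch follows, under the assumption that the usual old-MAME time-conversion macro TIME_IN_MSEC is available; the callback and this snippet are illustrative, not taken from the project.

/* Sketch: allocate a timer and arm it to fire once, 10 ms from now.
 * my_callback and the use of TIME_IN_MSEC are assumptions. */
static void my_callback(int param)
{
	(void)param; /* handle the timer firing here */
}

static void arm_example_timer(void)
{
	mame_timer *t = timer_alloc(my_callback);
	if (t != NULL)
		timer_adjust(t, TIME_IN_MSEC(10), 0, 0); /* duration, param, no period */
}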
Code example #4
File: source_timer.c  Project: suzp1984/genesis
static int source_timer_check(Source* thiz)
{
    DECLES_PRIV(priv, thiz);

    int t = priv->next_time - get_relative_time();
    t = t < 0 ? 0 : t;

    return t;
}
Code example #5
File: source_timer.c  Project: suzp1984/genesis
Ret source_timer_modify(Source* thiz, int interval)
{
    DECLES_PRIV(priv, thiz);

    priv->interval = interval;
    priv->next_time = get_relative_time() + priv->interval;

    return RET_OK;
}
Code example #6
File: video.c  Project: jeremiejig/mpv
static double timing_sleep(struct MPContext *mpctx, double time_frame)
{
    // assume kernel HZ=100 for softsleep, works with larger HZ but with
    // unnecessarily high CPU usage
    struct MPOpts *opts = mpctx->opts;
    double margin = opts->softsleep ? 0.011 : 0;
    while (time_frame > margin) {
        mp_sleep_us(1000000 * (time_frame - margin));
        time_frame -= get_relative_time(mpctx);
    }
    if (opts->softsleep) {
        if (time_frame < 0)
            MP_WARN(mpctx, "Warning! Softsleep underflow!\n");
        while (time_frame > 0)
            time_frame -= get_relative_time(mpctx);  // burn the CPU
    }
    return time_frame;
}
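
In the mpv examples, get_relative_time(mpctx) is repeatedly subtracted from a remaining-time value, which suggests it returns the wall-clock time (in seconds) elapsed since its previous call. Below is a minimal sketch of a helper with that behaviour; the struct, field name, and implementation are assumptions used only to illustrate the pattern.

#include <stdint.h>

/* Hypothetical delta-clock: each call returns seconds elapsed since the
 * previous call, mirroring how the mpv snippets appear to use
 * get_relative_time(mpctx). */
struct delta_clock {
    int64_t last_time_us; /* timestamp of the previous call, in microseconds */
};

static double delta_clock_update(struct delta_clock *c, int64_t now_us)
{
    double elapsed = (now_us - c->last_time_us) * 1e-6;
    c->last_time_us = now_us;
    return elapsed;
}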
Code example #7
File: source_timer.c  Project: suzp1984/genesis
Ret source_timer_reset(Source* thiz)
{
    DECLES_PRIV(priv, thiz);

    thiz->disable = 0;
    priv->next_time = get_relative_time() + priv->interval;

    return RET_OK;
}
Code example #8
File: source_timer.c  Project: suzp1984/genesis
static Ret source_timer_dispatch(Source* thiz)
{
    DECLES_PRIV(priv, thiz);
    Ret ret = RET_FAIL;

    if (thiz->disable <= 0)
    {
        ret = priv->action(priv->user_data);
    }

    priv->next_time = get_relative_time() + priv->interval;

    return ret;
}
Code example #9
void
procman_debug_real(const char *file, int line, const char *func,
                   const char *format, ...)
{
    va_list args;
    char *msg;

    if (G_LIKELY(!is_debug_enabled()))
        return;

    va_start(args, format);
    msg = g_strdup_vprintf(format, args);
    va_end(args);

    g_debug("[%.3f %s:%d %s] %s", get_relative_time(), file, line, func, msg);

    g_free(msg);
}
Code example #10
File: video.c  Project: xnoreq/mpv
// Update the A/V sync difference after a video frame has been shown.
static void update_avsync_after_frame(struct MPContext *mpctx)
{
    mpctx->time_frame -= get_relative_time(mpctx);
    mpctx->last_av_difference = 0;

    if (mpctx->audio_status != STATUS_PLAYING ||
        mpctx->video_status != STATUS_PLAYING)
        return;

    double a_pos = playing_audio_pts(mpctx);

    mpctx->last_av_difference = a_pos - mpctx->video_pts + mpctx->audio_delay;
    if (mpctx->time_frame > 0)
        mpctx->last_av_difference +=
                mpctx->time_frame * mpctx->opts->playback_speed;
    if (a_pos == MP_NOPTS_VALUE || mpctx->video_pts == MP_NOPTS_VALUE)
        mpctx->last_av_difference = MP_NOPTS_VALUE;
    if (mpctx->last_av_difference > 0.5 && !mpctx->drop_message_shown) {
        MP_WARN(mpctx, "%s", av_desync_help_text);
        mpctx->drop_message_shown = true;
    }
}
Code example #11
File: source_timer.c  Project: suzp1984/genesis
Source* source_timer_create(int interval, TimerAction action, void* user_data)
{
    Source* thiz = (Source*)malloc(sizeof(Source) + sizeof(PrivInfo));
    
    if (thiz != NULL) {
        DECLES_PRIV(priv, thiz);
        thiz->getfd = source_timer_getfd;
        thiz->check = source_timer_check;
        thiz->dispatch = source_timer_dispatch;
        thiz->destroy = source_timer_destroy;

        thiz->ref = 1;
        thiz->source_type = SOURCE_NORMAL;
        thiz->disable = 0;

        priv->interval = interval;
        priv->action = action;
        priv->user_data = user_data;
        priv->next_time = get_relative_time() + interval;
    }

    return thiz;
}
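
Taken together, the source_timer_* functions above suggest a simple polling contract: check returns the milliseconds left until the timer is due, and dispatch runs the action and reschedules it. The driver-loop sketch below illustrates that contract; the wrapper function and the sleep_ms helper are hypothetical, not part of the project.

/* Hypothetical driver: wait until the Source timer is due, then dispatch it.
 * Assumes check() returns milliseconds until the next firing and dispatch()
 * runs the action and reschedules the timer, as in the examples above. */
static void run_source_timer_once(Source* timer)
{
    int wait_ms = timer->check(timer);   /* source_timer_check */
    if (wait_ms > 0)
        sleep_ms(wait_ms);               /* hypothetical sleep helper */
    timer->dispatch(timer);              /* source_timer_dispatch */
}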
Code example #12
File: timer.c  Project: Bremma/pinmame
double timer_get_time(void)
{
	return global_offset + get_relative_time();
}
Code example #13
File: timer.c  Project: Bremma/pinmame
double timer_timeleft(mame_timer *which)
{
	double time = get_relative_time();
	return which->expire - time;
}
Code example #14
File: timer.c  Project: Bremma/pinmame
double timer_timeelapsed(mame_timer *which)
{
	double time = get_relative_time();
	return time - which->start;
}
Code example #15
File: timer.c  Project: Bremma/pinmame
double timer_time_until_next_timer(void)
{
	double time = get_relative_time();
	return timer_head->expire - time;
}
Code example #16
File: video.c  Project: jeremiejig/mpv
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    update_fps(mpctx);

    // Whether there's still at least 1 video frame that can be shown.
    // If false, it means we can reconfig the VO if needed (normally, this
    // would disrupt playback, so only do it on !still_playing).
    bool still_playing = vo_has_next_frame(vo, true);
    // For the last frame case (frame is being displayed).
    still_playing |= mpctx->playing_last_frame;
    still_playing |= mpctx->last_frame_duration > 0;

    double frame_time = 0;
    int r = update_video(mpctx, endpts, !still_playing, &frame_time);
    MP_TRACE(mpctx, "update_video: %d (still_playing=%d)\n", r, still_playing);

    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
        return;

    if (r < 0) {
        MP_FATAL(mpctx, "Could not initialize video chain.\n");
        int uninit = INITIALIZED_VCODEC;
        if (!opts->force_vo)
            uninit |= INITIALIZED_VO;
        uninit_player(mpctx, uninit);
        if (!mpctx->current_track[STREAM_AUDIO])
            mpctx->stop_play = PT_NEXT_ENTRY;
        mpctx->error_playing = true;
        handle_force_window(mpctx, true);
        return; // restart loop
    }

    if (r == VD_EOF) {
        if (!mpctx->playing_last_frame && mpctx->last_frame_duration > 0) {
            mpctx->time_frame += mpctx->last_frame_duration;
            mpctx->last_frame_duration = 0;
            mpctx->playing_last_frame = true;
            MP_VERBOSE(mpctx, "showing last frame\n");
        }
    }

    if (r == VD_NEW_FRAME) {
        MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);

        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        if (mpctx->video_status >= STATUS_READY) {
            mpctx->time_frame += frame_time / opts->playback_speed;
            adjust_sync(mpctx, frame_time);
        }
    } else if (r == VD_EOF && mpctx->playing_last_frame) {
        // Let video timing code continue displaying.
        mpctx->video_status = STATUS_DRAINING;
        MP_VERBOSE(mpctx, "still showing last frame\n");
    } else if (r <= 0) {
        // EOF or error
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        mpctx->video_status = STATUS_EOF;
        MP_VERBOSE(mpctx, "video EOF\n");
        return;
    } else {
        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        // Decode more in next iteration.
        mpctx->sleeptime = 0;
        MP_TRACE(mpctx, "filtering more video\n");
    }

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    mpctx->time_frame -= get_relative_time(mpctx);
    double audio_pts = playing_audio_pts(mpctx);
    if (!mpctx->sync_audio_to_video || mpctx->video_status < STATUS_READY) {
        mpctx->time_frame = 0;
    } else if (mpctx->audio_status == STATUS_PLAYING &&
               mpctx->video_status == STATUS_PLAYING)
    {
        double buffered_audio = ao_get_delay(mpctx->ao);
        MP_TRACE(mpctx, "audio delay=%f\n", buffered_audio);

        if (opts->autosync) {
            /* Smooth reported playback position from AO by averaging
             * it with the value expected based on the previous value and
             * time elapsed since then. May help smooth video timing
             * with audio outputs that have inaccurate position reporting.
             * This is badly implemented; the behavior of the smoothing
             * now undesirably depends on how often this code runs
             * (mainly depends on video frame rate). */
            float predicted = (mpctx->delay / opts->playback_speed +
                                mpctx->time_frame);
            float difference = buffered_audio - predicted;
            buffered_audio = predicted + difference / opts->autosync;
        }

        mpctx->time_frame = (buffered_audio -
                                mpctx->delay / opts->playback_speed);
    } else {
        /* If we're more than 200 ms behind the right playback
         * position, don't try to speed up display of following
         * frames to catch up; continue with default speed from
         * the current frame instead.
         * If untimed is set always output frames immediately
         * without sleeping.
         */
        if (mpctx->time_frame < -0.2 || opts->untimed || vo->untimed)
            mpctx->time_frame = 0;
    }

    double vsleep = mpctx->time_frame - vo->flip_queue_offset;
    if (vsleep > 0.050) {
        mpctx->sleeptime = MPMIN(mpctx->sleeptime, vsleep - 0.040);
        return;
    }
    mpctx->sleeptime = 0;
    mpctx->playing_last_frame = false;

    // last frame case
    if (r != VD_NEW_FRAME)
        return;

    //=================== FLIP PAGE (VIDEO BLT): ======================


    mpctx->video_pts = mpctx->video_next_pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    update_subtitles(mpctx);
    update_osd_msg(mpctx);

    MP_STATS(mpctx, "vo draw frame");

    vo_new_frame_imminent(vo);

    MP_STATS(mpctx, "vo sleep");

    mpctx->time_frame -= get_relative_time(mpctx);
    mpctx->time_frame -= vo->flip_queue_offset;
    if (mpctx->time_frame > 0.001)
        mpctx->time_frame = timing_sleep(mpctx, mpctx->time_frame);
    mpctx->time_frame += vo->flip_queue_offset;

    int64_t t2 = mp_time_us();
    /* Playing with playback speed it's possible to get pathological
     * cases with mpctx->time_frame negative enough to cause an
     * overflow in pts_us calculation, thus the MPMAX. */
    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts_us = mpctx->last_time + time_frame * 1e6;
    int duration = -1;
    double pts2 = vo_get_next_pts(vo, 0); // this is the next frame PTS
    if (mpctx->video_pts != MP_NOPTS_VALUE && pts2 == MP_NOPTS_VALUE) {
        // Make up a frame duration. Using the frame rate is not a good
        // choice, since the frame rate could be unset/broken/random.
        float fps = mpctx->d_video->fps;
        double frame_duration = fps > 0 ? 1.0 / fps : 0;
        pts2 = mpctx->video_pts + MPCLAMP(frame_duration, 0.0, 5.0);
    }
    if (pts2 != MP_NOPTS_VALUE) {
        // expected A/V sync correction is ignored
        double diff = (pts2 - mpctx->video_pts);
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        if (diff < 0)
            diff = 0;
        if (diff > 10)
            diff = 10;
        duration = diff * 1e6;
        mpctx->last_frame_duration = diff;
    }
    if (mpctx->video_status != STATUS_PLAYING)
        duration = -1;

    MP_STATS(mpctx, "start flip");
    vo_flip_page(vo, pts_us | 1, duration);
    MP_STATS(mpctx, "end flip");

    if (audio_pts != MP_NOPTS_VALUE)
        MP_STATS(mpctx, "value %f ptsdiff", mpctx->video_pts - audio_pts);

    mpctx->last_vo_flip_duration = (mp_time_us() - t2) * 0.000001;
    if (vo->driver->flip_page_timed) {
        // No need to adjust sync based on flip speed
        mpctx->last_vo_flip_duration = 0;
        // For print_status - VO call finishing early is OK for sync
        mpctx->time_frame -= get_relative_time(mpctx);
    }
    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING)
        mpctx->video_status = STATUS_READY;
    update_avsync(mpctx);
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;
}
Code example #17
File: video.c  Project: Jim-Duke/mpv
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    int r = video_output_image(mpctx, endpts);
    MP_TRACE(mpctx, "video_output_image: %d\n", r);

    if (r < 0)
        goto error;

    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
        return;

    if (r == VD_EOF) {
        mpctx->video_status =
            vo_still_displaying(vo) ? STATUS_DRAINING : STATUS_EOF;
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        MP_DBG(mpctx, "video EOF (status=%d)\n", mpctx->video_status);
        return;
    }

    if (mpctx->video_status > STATUS_PLAYING)
        mpctx->video_status = STATUS_PLAYING;

    if (r != VD_NEW_FRAME) {
        mpctx->sleeptime = 0; // Decode more in next iteration.
        return;
    }

    // Filter output is different from VO input?
    struct mp_image_params p = mpctx->next_frames[0]->params;
    if (!vo->params || !mp_image_params_equal(&p, vo->params)) {
        // Changing config deletes the current frame; wait until it's finished.
        if (vo_still_displaying(vo))
            return;

        const struct vo_driver *info = mpctx->video_out->driver;
        char extra[20] = {0};
        if (p.w != p.d_w || p.h != p.d_h)
            snprintf(extra, sizeof(extra), " => %dx%d", p.d_w, p.d_h);
        MP_INFO(mpctx, "VO: [%s] %dx%d%s %s\n",
                info->name, p.w, p.h, extra, vo_format_name(p.imgfmt));
        MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

        int vo_r = vo_reconfig(vo, &p, 0);
        if (vo_r < 0) {
            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
            goto error;
        }
        init_vo(mpctx);
    }

    mpctx->time_frame -= get_relative_time(mpctx);
    update_avsync_before_frame(mpctx);

    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);

    // wait until VO wakes us up to get more frames
    if (!vo_is_ready_for_frame(vo, pts)) {
        if (video_feed_async_filter(mpctx) < 0)
            goto error;
        return;
    }

    assert(mpctx->num_next_frames >= 1);
    struct vo_frame dummy = {
        .pts = pts,
        .duration = -1,
        .num_frames = mpctx->num_next_frames,
    };
    for (int n = 0; n < dummy.num_frames; n++)
        dummy.frames[n] = mpctx->next_frames[n];
    struct vo_frame *frame = vo_frame_ref(&dummy);

    double diff = -1;
    double vpts0 = mpctx->next_frames[0]->pts;
    double vpts1 = MP_NOPTS_VALUE;
    if (mpctx->num_next_frames >= 2)
        vpts1 = mpctx->next_frames[1]->pts;
    if (vpts0 != MP_NOPTS_VALUE && vpts1 != MP_NOPTS_VALUE)
        diff = vpts1 - vpts0;
    if (diff < 0 && mpctx->d_video->fps > 0)
        diff = 1.0 / mpctx->d_video->fps; // fallback to demuxer-reported fps
    if (opts->untimed || vo->driver->untimed)
        diff = -1; // disable frame dropping and aspects of frame timing
    if (diff >= 0) {
        // expected A/V sync correction is ignored
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        frame->duration = MPCLAMP(diff, 0, 10) * 1e6;
    }

    mpctx->video_pts = mpctx->next_frames[0]->pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    update_avsync_after_frame(mpctx);

    mpctx->osd_force_update = true;
    update_osd_msg(mpctx);
    update_subtitles(mpctx);

    vo_queue_frame(vo, frame);

    shift_frames(mpctx);

    // The frames were shifted down; "initialize" the new first entry.
    if (mpctx->num_next_frames >= 1)
        handle_new_frame(mpctx);

    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING) {
        mpctx->video_status = STATUS_READY;
        // After a seek, make sure to wait until the first frame is visible.
        vo_wait_frame(vo);
        MP_VERBOSE(mpctx, "first video frame after restart shown\n");
    }
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;

    if (mpctx->video_status != STATUS_EOF) {
        if (mpctx->step_frames > 0) {
            mpctx->step_frames--;
            if (!mpctx->step_frames && !opts->pause)
                pause_player(mpctx);
        }
        if (mpctx->max_frames == 0 && !mpctx->stop_play)
            mpctx->stop_play = AT_END_OF_FILE;
        if (mpctx->max_frames > 0)
            mpctx->max_frames--;
    }

    mpctx->sleeptime = 0;
    return;

error:
    MP_FATAL(mpctx, "Could not initialize video chain.\n");
    uninit_video_chain(mpctx);
    error_on_track(mpctx, mpctx->current_track[STREAM_VIDEO][0]);
    handle_force_window(mpctx, true);
    mpctx->sleeptime = 0;
}
Code example #18
File: video.c  Project: bagobor/mpv
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    update_fps(mpctx);

    int r = video_output_image(mpctx, endpts);
    MP_TRACE(mpctx, "video_output_image: %d\n", r);

    if (r < 0)
        goto error;

    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
        return;

    if (r == VD_EOF) {
        mpctx->video_status =
            vo_still_displaying(vo) ? STATUS_DRAINING : STATUS_EOF;
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        MP_VERBOSE(mpctx, "video EOF (status=%d)\n", mpctx->video_status);
        return;
    }

    if (mpctx->video_status > STATUS_PLAYING)
        mpctx->video_status = STATUS_PLAYING;

    mpctx->time_frame -= get_relative_time(mpctx);
    update_avsync_before_frame(mpctx);

    if (r != VD_NEW_FRAME) {
        mpctx->sleeptime = 0; // Decode more in next iteration.
        return;
    }

    // Filter output is different from VO input?
    struct mp_image_params p = mpctx->next_frame[0]->params;
    if (!vo->params || !mp_image_params_equal(&p, vo->params)) {
        // Changing config deletes the current frame; wait until it's finished.
        if (vo_still_displaying(vo))
            return;

        const struct vo_driver *info = mpctx->video_out->driver;
        MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
                info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
        MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

        int vo_r = vo_reconfig(vo, &p, 0);
        if (vo_r < 0)
            goto error;
        init_vo(mpctx);
        mpctx->time_frame = 0; // display immediately
    }

    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);

    if (!vo_is_ready_for_frame(vo, pts))
        return; // wait until VO wakes us up to get more frames

    int64_t duration = -1;
    double diff = -1;
    double vpts0 = mpctx->next_frame[0] ? mpctx->next_frame[0]->pts : MP_NOPTS_VALUE;
    double vpts1 = mpctx->next_frame[1] ? mpctx->next_frame[1]->pts : MP_NOPTS_VALUE;
    if (vpts0 != MP_NOPTS_VALUE && vpts1 != MP_NOPTS_VALUE)
        diff = vpts1 - vpts0;
    if (diff < 0 && mpctx->d_video->fps > 0)
        diff = 1.0 / mpctx->d_video->fps; // fallback to demuxer-reported fps
    if (diff >= 0) {
        // expected A/V sync correction is ignored
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        duration = MPCLAMP(diff, 0, 10) * 1e6;
    }

    mpctx->video_pts = mpctx->next_frame[0]->pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    mpctx->osd_force_update = true;
    update_osd_msg(mpctx);
    update_subtitles(mpctx);

    vo_queue_frame(vo, mpctx->next_frame[0], pts, duration);
    mpctx->next_frame[0] = NULL;

    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING) {
        mpctx->video_status = STATUS_READY;
        // After a seek, make sure to wait until the first frame is visible.
        vo_wait_frame(vo);
    }
    update_avsync_after_frame(mpctx);
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;

    if (mpctx->video_status != STATUS_EOF) {
        if (mpctx->step_frames > 0) {
            mpctx->step_frames--;
            if (!mpctx->step_frames && !opts->pause)
                pause_player(mpctx);
        }
        if (mpctx->max_frames == 0)
            mpctx->stop_play = PT_NEXT_ENTRY;
        if (mpctx->max_frames > 0)
            mpctx->max_frames--;
    }

    mpctx->sleeptime = 0;
    return;

error:
    MP_FATAL(mpctx, "Could not initialize video chain.\n");
    int uninit = INITIALIZED_VCODEC;
    if (!opts->force_vo)
        uninit |= INITIALIZED_VO;
    uninit_player(mpctx, uninit);
    if (!mpctx->current_track[STREAM_AUDIO])
        mpctx->stop_play = PT_NEXT_ENTRY;
    mpctx->error_playing = true;
    handle_force_window(mpctx, true);
    mpctx->sleeptime = 0;
}