Example #1
static double update_video_nocorrect_pts(struct MPContext *mpctx)
{
    struct sh_video *sh_video = mpctx->sh_video;
    double frame_time = 0;
    while (1) {
        // In nocorrect-pts mode there is no way to properly time these frames
        if (load_next_vo_frame(mpctx, false))
            break;
        frame_time = sh_video->next_frame_time;
        if (mpctx->restart_playback)
            frame_time = 0;
        struct demux_packet *pkt = video_read_frame(mpctx);
        if (!pkt)
            return -1;
        if (mpctx->sh_audio)
            mpctx->delay -= frame_time;
        // video_read_frame can change fps (e.g. for ASF video)
        update_fps(mpctx);
        int framedrop_type = check_framedrop(mpctx, frame_time);

        void *decoded_frame = decode_video(sh_video, pkt, framedrop_type,
                                           sh_video->pts);
        talloc_free(pkt);
        if (decoded_frame) {
            filter_video(mpctx, decoded_frame);
        }
        break;
    }
    return frame_time;
}
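
/* Note (an assumption about code not shown here): frame_time is taken from
 * sh_video->next_frame_time, the nominal frame duration reported by the
 * demuxer (roughly 1/fps for fixed-fps streams), rather than from decoded
 * pts values, which is why these frames cannot be timed precisely. */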
Example #2
// Read a packet, store decoded image into d_video->waiting_decoded_mpi
// returns VD_* code
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi =
                    video_decode(d_video, d_video->header->attached_picture, 0);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         1 : check_framedrop(mpctx, -1);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    return had_packet ? VD_PROGRESS : VD_EOF;
}
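
/* Note (derived from the code above): VD_WAIT means the async packet read has
 * produced nothing yet, VD_EOF means either the attached picture was decoded
 * (done exactly once) or the demuxer returned no packet, and VD_PROGRESS means
 * a packet was fed to the decoder, whether or not it produced an image. */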
Example #3
int reinit_video_chain(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    assert(!mpctx->d_video);
    struct track *track = mpctx->current_track[0][STREAM_VIDEO];
    struct sh_stream *sh = track ? track->stream : NULL;
    if (!sh)
        goto no_video;

    MP_VERBOSE(mpctx, "[V] fourcc:0x%X  size:%dx%d  fps:%5.3f\n",
               sh->format,
               sh->video->disp_w, sh->video->disp_h,
               sh->video->fps);

    //================== Init VIDEO (codec & libvo) ==========================
    if (!opts->fixed_vo || !mpctx->video_out) {
        struct vo_extra ex = {
            .input_ctx = mpctx->input,
            .osd = mpctx->osd,
            .encode_lavc_ctx = mpctx->encode_lavc_ctx,
            .opengl_cb_context = mpctx->gl_cb_ctx,
        };
        mpctx->video_out = init_best_video_out(mpctx->global, &ex);
        if (!mpctx->video_out) {
            MP_FATAL(mpctx, "Error opening/initializing "
                    "the selected video_out (-vo) device.\n");
            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
            goto err_out;
        }
        mpctx->mouse_cursor_visible = true;
    }

    update_window_title(mpctx, true);

    struct dec_video *d_video = talloc_zero(NULL, struct dec_video);
    mpctx->d_video = d_video;
    d_video->global = mpctx->global;
    d_video->log = mp_log_new(d_video, mpctx->log, "!vd");
    d_video->opts = mpctx->opts;
    d_video->header = sh;
    d_video->fps = sh->video->fps;
    d_video->vo = mpctx->video_out;

    if (opts->force_fps) {
        d_video->fps = opts->force_fps;
        MP_INFO(mpctx, "FPS forced to %5.3f.\n", d_video->fps);
        MP_INFO(mpctx, "Use --no-correct-pts to force FPS based timing.\n");
    }
    update_fps(mpctx);

    vo_control(mpctx->video_out, VOCTRL_GET_HWDEC_INFO, &d_video->hwdec_info);

    recreate_video_filters(mpctx);

    if (!video_init_best_codec(d_video, opts->video_decoders))
        goto err_out;

    bool saver_state = opts->pause || !opts->stop_screensaver;
    vo_control(mpctx->video_out, saver_state ? VOCTRL_RESTORE_SCREENSAVER
                                             : VOCTRL_KILL_SCREENSAVER, NULL);

    vo_set_paused(mpctx->video_out, mpctx->paused);

    mpctx->sync_audio_to_video = !sh->attached_picture;
    mpctx->vo_pts_history_seek_ts++;

    // If we switch on video again, ensure audio position matches up.
    if (mpctx->d_audio)
        mpctx->audio_status = STATUS_SYNCING;

    reset_video_state(mpctx);
    reset_subtitle_state(mpctx);

    return 1;

err_out:
no_video:
    uninit_video_chain(mpctx);
    if (track)
        error_on_track(mpctx, track);
    handle_force_window(mpctx, true);
    return 0;
}

// Try to refresh the video by doing a precise seek to the currently displayed
// frame. This can go wrong in all sorts of ways, so use sparingly.
void mp_force_video_refresh(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    if (!d_video || !d_video->decoder_output.imgfmt)
        return;

    // If not paused, the next frame should come soon enough.
    if (opts->pause && mpctx->last_vo_pts != MP_NOPTS_VALUE)
        queue_seek(mpctx, MPSEEK_ABSOLUTE, mpctx->last_vo_pts, 2, true);
}

static int check_framedrop(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    // check for frame-drop:
    if (mpctx->video_status == STATUS_PLAYING && !mpctx->paused &&
        mpctx->audio_status == STATUS_PLAYING && !ao_untimed(mpctx->ao))
    {
        float fps = mpctx->d_video->fps;
        double frame_time = fps > 0 ? 1.0 / fps : 0;
        // Avoid dropping too many frames in sequence unless we are far
        // behind; allow up to 100 ms of A-V delay here:
        if (mpctx->last_av_difference - 0.100 > mpctx->dropped_frames * frame_time)
            return !!(opts->frame_dropping & 2);
    }
    return 0;
}
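
/* Worked example (illustrative, numbers assumed): at fps = 25 the nominal
 * frame_time is 0.04 s. With last_av_difference = 0.30 s and 2 frames already
 * dropped, the test is 0.30 - 0.100 = 0.20 > 2 * 0.04 = 0.08, so another frame
 * may be dropped, provided the corresponding frame-dropping mode
 * (opts->frame_dropping & 2) is enabled. Once dropped_frames * frame_time
 * catches up with the remaining A-V lag, the function returns 0 again. */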

// Read a packet, store decoded image into d_video->waiting_decoded_mpi
// returns VD_* code
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi =
                    video_decode(d_video, d_video->header->attached_picture, 0);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         2 : check_framedrop(mpctx);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    if (had_packet && !d_video->waiting_decoded_mpi &&
        mpctx->video_status == STATUS_PLAYING)
    {
        mpctx->dropped_frames_total++;
        mpctx->dropped_frames++;
    }

    return had_packet ? VD_PROGRESS : VD_EOF;
}


// Called after video reinit. This can be generally used to try to insert more
// filters using the filter chain edit functionality in command.c.
static void init_filter_params(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    // Note that the filter chain is already initialized. This code might
    // recreate the chain a second time, which is not very elegant, but allows
    // us to test whether enabling deinterlacing works with the current video
    // format and other filters.
    if (opts->deinterlace >= 0)
        mp_property_do("deinterlace", M_PROPERTY_SET, &opts->deinterlace, mpctx);
}

// Feed newly decoded frames to the filter, take care of format changes.
// If eof=true, drain the filter chain, and return VD_EOF if empty.
static int video_filter(struct MPContext *mpctx, bool eof)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    // There is already a filtered frame available.
    // If vf_needs_input() returns > 0, the filter wants input anyway.
    if (vf_output_frame(vf, eof) > 0 && vf_needs_input(vf) < 1)
        return VD_PROGRESS;

    // Decoder output is different from filter input?
    bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
        !mp_image_params_equal(&d_video->decoder_output, &vf->input_params);

    // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
    if (need_vf_reconfig && d_video->decoder_output.imgfmt) {
        // Drain the filter chain.
        if (vf_output_frame(vf, true) > 0)
            return VD_PROGRESS;

        // The filter chain is drained; execute the filter format change.
        filter_reconfig(mpctx, false);
        if (vf->initialized == 0)
            return VD_PROGRESS; // hw decoding fallback; try again
        if (vf->initialized < 1)
            return VD_ERROR;
        init_filter_params(mpctx);
        return VD_RECONFIG;
    }

    // If something was decoded, and the filter chain is ready, filter it.
    if (!need_vf_reconfig && d_video->waiting_decoded_mpi) {
        vf_filter_frame(vf, d_video->waiting_decoded_mpi);
        d_video->waiting_decoded_mpi = NULL;
        return VD_PROGRESS;
    }

    return eof ? VD_EOF : VD_PROGRESS;
}
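
// A minimal drain-loop sketch (an assumption, not code from this file): the
// "drain the filter chain, return VD_EOF if empty" contract above could be
// used like this at end of stream. Only video_filter() and the VD_* codes come
// from the surrounding code; the helper itself is hypothetical.
static void drain_video_filters(struct MPContext *mpctx)
{
    while (1) {
        int r = video_filter(mpctx, true); // eof=true: flush buffered frames
        if (r == VD_EOF || r < 0)
            break; // chain is empty, or a hard error occurred
        // VD_PROGRESS / VD_RECONFIG: another buffered frame was pushed out
        // (or the chain was reconfigured); keep flushing.
    }
}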

// Make sure at least 1 filtered image is available, decode new video if needed.
// returns VD_* code
// A return value of VD_PROGRESS doesn't necessarily mean a frame was output;
// it only promises that calling this function again will eventually do
// something.
static int video_decode_and_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    int r = video_filter(mpctx, false);
    if (r < 0)
        return r;

    if (!d_video->waiting_decoded_mpi) {
        // Decode a new image, or at least feed the decoder a packet.
        r = decode_image(mpctx);
        if (r == VD_WAIT)
            return r;
        if (d_video->waiting_decoded_mpi)
            d_video->decoder_output = d_video->waiting_decoded_mpi->params;
    }

    bool eof = !d_video->waiting_decoded_mpi && (r == VD_EOF || r < 0);
    r = video_filter(mpctx, eof);
    if (r == VD_RECONFIG) // retry feeding decoded image
        r = video_filter(mpctx, eof);
    return r;
}

static int video_feed_async_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    if (vf_needs_input(vf) < 1)
        return 0;
    mpctx->sleeptime = 0; // retry until done
    return video_decode_and_filter(mpctx);
}

/* Modify video timing to match the audio timeline. There are two main
 * reasons this is needed. First, video and audio can start from different
 * positions at beginning of file or after a seek (MPlayer starts both
 * immediately even if they have different pts). Second, the file can have
 * audio timestamps that are inconsistent with the duration of the audio
 * packets, for example two consecutive timestamp values differing by
 * one second but only a packet with enough samples for half a second
 * of playback between them.
 */
static void adjust_sync(struct MPContext *mpctx, double v_pts, double frame_time)
{
    struct MPOpts *opts = mpctx->opts;

    if (mpctx->audio_status != STATUS_PLAYING)
        return;

    double a_pts = written_audio_pts(mpctx) + mpctx->audio_delay - mpctx->delay;
    double av_delay = a_pts - v_pts;

    double change = av_delay * 0.1;
    double max_change = opts->default_max_pts_correction >= 0 ?
                        opts->default_max_pts_correction : frame_time * 0.1;
    if (change < -max_change)
        change = -max_change;
    else if (change > max_change)
        change = max_change;
    mpctx->delay += change;
    mpctx->total_avsync_change += change;
}
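
/* Worked example (illustrative, numbers assumed): if a_pts - v_pts = 0.20 s,
 * the raw correction would be change = 0.02 s. At 25 fps (frame_time 0.04 s)
 * and with default_max_pts_correction < 0, the correction is clamped to
 * frame_time * 0.1 = 0.004 s per call, so the offset is absorbed gradually
 * over many frames instead of as one visible jump. */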

// Enough video filtered already to push one frame to the VO?
static bool have_new_frame(struct MPContext *mpctx)
{
    bool need_2nd = !!(mpctx->opts->frame_dropping & 1) // we need the duration
        && mpctx->video_pts != MP_NOPTS_VALUE; // ...except for the 1st frame

    return mpctx->next_frame[0] && (!need_2nd || mpctx->next_frame[1]);
}
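
/* Note (an assumption about the surrounding player code): with
 * (opts->frame_dropping & 1) the VO needs the display duration of the frame it
 * is about to show, which is derived from the pts of the following frame,
 * hence the requirement for a second queued frame above. The very first frame
 * (video_pts still unset) is exempt, since there is nothing to time it against
 * yet. */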
Example #4
double update_video(struct MPContext *mpctx, double endpts)
{
    struct sh_video *sh_video = mpctx->sh_video;
    struct vo *video_out = mpctx->video_out;
    sh_video->vfilter->control(sh_video->vfilter, VFCTRL_SET_OSD_OBJ,
                               mpctx->osd); // for vf_sub
    if (!mpctx->opts->correct_pts)
        return update_video_nocorrect_pts(mpctx);

    if (sh_video->gsh->attached_picture)
        return update_video_attached_pic(mpctx);

    double pts;

    while (1) {
        if (load_next_vo_frame(mpctx, false))
            break;
        pts = MP_NOPTS_VALUE;
        struct demux_packet *pkt = NULL;
        while (1) {
            pkt = demux_read_packet(mpctx->sh_video->gsh);
            if (!pkt || pkt->len)
                break;
            /* Packets with size 0 are assumed to not correspond to frames,
             * but to indicate the absence of a frame in formats like AVI
             * that must have packets at fixed timecode intervals. */
            talloc_free(pkt);
        }
        if (pkt)
            pts = pkt->pts;
        if (pts != MP_NOPTS_VALUE)
            pts += mpctx->video_offset;
        if (pts >= mpctx->hrseek_pts - .005)
            mpctx->hrseek_framedrop = false;
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            decode_video(sh_video, pkt, framedrop_type, pts);
        talloc_free(pkt);
        if (decoded_frame) {
            determine_frame_pts(mpctx);
            filter_video(mpctx, decoded_frame);
        } else if (!pkt) {
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
        break;
    }

    if (!video_out->frame_loaded)
        return 0;

    pts = video_out->next_pts;
    if (pts == MP_NOPTS_VALUE) {
        MP_ERR(mpctx, "Video pts after filters MISSING\n");
        // Try to use decoder pts from before filters
        pts = sh_video->pts;
        if (pts == MP_NOPTS_VALUE)
            pts = sh_video->last_pts;
    }
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false;
    sh_video->pts = pts;
    if (sh_video->last_pts == MP_NOPTS_VALUE)
        sh_video->last_pts = sh_video->pts;
    else if (sh_video->last_pts > sh_video->pts) {
        MP_WARN(mpctx, "Decreasing video pts: %f < %f\n",
                sh_video->pts, sh_video->last_pts);
        /* If the difference in pts is small treat it as jitter around the
         * right value (possibly caused by incorrect timestamp ordering) and
         * just show this frame immediately after the last one.
         * Treat bigger differences as timestamp resets and start counting
         * timing of later frames from the position of this one. */
        if (sh_video->last_pts - sh_video->pts > 0.5)
            sh_video->last_pts = sh_video->pts;
        else
            sh_video->pts = sh_video->last_pts;
    } else if (sh_video->pts >= sh_video->last_pts + 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n",
                sh_video->last_pts, sh_video->pts);
        sh_video->last_pts = sh_video->pts;
    }
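    /* Worked example (illustrative): with last_pts = 10.000 and a new pts of
     * 9.998, the 2 ms backward step is treated as jitter and this frame reuses
     * last_pts (frame_time below becomes 0). With a new pts of 2.0, the 8 s
     * backward jump is treated as a reset: last_pts moves to 2.0 and later
     * frames are timed from there. */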
    double frame_time = sh_video->pts - sh_video->last_pts;
    sh_video->last_pts = sh_video->pts;
    if (mpctx->sh_audio)
        mpctx->delay -= frame_time;
    return frame_time;
}
Example #5
double update_video(struct MPContext *mpctx, double endpts)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vo *video_out = mpctx->video_out;

    if (d_video->header->attached_picture)
        return update_video_attached_pic(mpctx);

    if (load_next_vo_frame(mpctx, false)) {
        // Use currently queued VO frame
    } else if (d_video->waiting_decoded_mpi) {
        // Draining on reconfig
        if (!load_next_vo_frame(mpctx, true))
            return -1;
    } else {
        // Decode a new frame
        struct demux_packet *pkt = demux_read_packet(d_video->header);
        if (pkt && pkt->pts != MP_NOPTS_VALUE)
            pkt->pts += mpctx->video_offset;
        if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
                d_video->has_broken_packet_pts)
        {
            mpctx->hrseek_framedrop = false;
        }
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            video_decode(d_video, pkt, framedrop_type);
        talloc_free(pkt);
        if (decoded_frame) {
            filter_video(mpctx, decoded_frame, false);
        } else if (!pkt) {
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
    }

    // Whether the VO has an image queued.
    // If it does, it will be used to time and display the next frame.
    if (!video_out->frame_loaded)
        return 0;

    double pts = video_out->next_pts;
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false;
    double last_pts = mpctx->video_next_pts;
    if (last_pts == MP_NOPTS_VALUE)
        last_pts = pts;
    double frame_time = pts - last_pts;
    if (frame_time < 0 || frame_time >= 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n", last_pts, pts);
        frame_time = 0;
    }
    mpctx->video_next_pts = pts;
    if (mpctx->d_audio)
        mpctx->delay -= frame_time;
    return frame_time;
}