Example No. 1
/* Try to get at least minsamples decoded+filtered samples in outbuf
 * (total length including possible existing data).
 * Return 0 on success, -1 on error/EOF (not distinguished).
 * In the former case outbuf has at least minsamples buffered on return.
 * In case of EOF/error it might or might not be. */
int audio_decode(struct dec_audio *d_audio, struct mp_audio_buffer *outbuf,
                 int minsamples)
{
    // Indicates that a filter seems to be buffering large amounts of data
    int huge_filter_buffer = 0;
    // Decoded audio must be cut at boundaries of this many samples
    // (Note: the reason for this is unknown, possibly a refactoring artifact)
    int unitsize = 16;

    /* Filter output size will be about filter_multiplier times input size.
     * If some filter buffers audio in big blocks this might only hold
     * as average over time. */
    double filter_multiplier = af_calc_filter_multiplier(d_audio->afilter);

    int prev_buffered = -1;
    int res = 0;
    MP_STATS(d_audio, "start audio");
    while (res >= 0 && minsamples >= 0) {
        int buffered = mp_audio_buffer_samples(outbuf);
        if (minsamples < buffered || buffered == prev_buffered)
            break;
        prev_buffered = buffered;

        int decsamples = (minsamples - buffered) / filter_multiplier;
        // + some extra for possible filter buffering
        decsamples += unitsize << 5;

        if (huge_filter_buffer) {
            /* Some filter must be doing significant buffering if the estimated
             * input length didn't produce enough output from filters.
             * Feed the filters 250 samples at a time until we have enough
             * output. Very small amounts could make filtering inefficient while
             * large amounts can make mpv demux the file unnecessarily far ahead
             * to get audio data and buffer video frames in memory while doing
             * so. However the performance impact of either is probably not too
             * significant as long as the value is not completely insane. */
            decsamples = 250;
        }

        /* if this iteration does not fill buffer, we must have lots
         * of buffering in filters */
        huge_filter_buffer = 1;

        res = filter_n_bytes(d_audio, outbuf, decsamples);
    }
    MP_STATS(d_audio, "end audio");
    return res;
}
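
A minimal sketch of how a caller might use the contract described in the header comment above: request a minimum number of buffered samples and treat a negative return as EOF/error. The helper name and the surrounding usage are assumptions for illustration, not mpv code.

/* Hypothetical caller: keep at least `need` decoded+filtered samples buffered. */
static int fill_audio_buffer(struct dec_audio *d_audio,
                             struct mp_audio_buffer *outbuf, int need)
{
    if (audio_decode(d_audio, outbuf, need) < 0)
        return -1;  // EOF or error; outbuf may hold fewer than `need` samples
    // On success audio_decode() guarantees at least `need` samples are buffered.
    return mp_audio_buffer_samples(outbuf);
}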
Example No. 2
int mpv_opengl_cb_report_flip(mpv_opengl_cb_context *ctx, int64_t time)
{
    MP_STATS(ctx, "glcb-reportflip");

    pthread_mutex_lock(&ctx->lock);
    ctx->flip_count += 1;
    pthread_cond_signal(&ctx->wakeup);
    pthread_mutex_unlock(&ctx->lock);

    return 0;
}
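
For context, a host application embedding mpv through the (now historical) opengl-cb API renders a frame with mpv_opengl_cb_draw() and then reports its buffer swap so mpv can time subsequent frames. A minimal sketch of that sequence, assuming an SDL2 window; the SDL calls and the helper name are illustrative and not part of mpv:

#include <SDL.h>
#include <mpv/opengl_cb.h>

/* Hypothetical embedding code: render into the default framebuffer, swap,
 * then report the flip to the renderer. */
static void render_and_flip(mpv_opengl_cb_context *mpv_gl, SDL_Window *window,
                            int w, int h)
{
    mpv_opengl_cb_draw(mpv_gl, 0, w, -h);  // negative height flips the image
    SDL_GL_SwapWindow(window);
    mpv_opengl_cb_report_flip(mpv_gl, 0);  // pass 0 if the swap time is unknown
}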
Example No. 3
File: video.c Project: Jim-Duke/mpv
/* Update avsync before a new video frame is displayed. Actually, this can be
 * called arbitrarily often before the actual display.
 * This adjusts the time of the next video frame */
static void update_avsync_before_frame(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->sync_audio_to_video || mpctx->video_status < STATUS_READY) {
        mpctx->time_frame = 0;
    } else if (mpctx->audio_status == STATUS_PLAYING &&
               mpctx->video_status == STATUS_PLAYING &&
               !ao_untimed(mpctx->ao))
    {
        double buffered_audio = ao_get_delay(mpctx->ao);

        double predicted = mpctx->delay / opts->playback_speed +
                           mpctx->time_frame;
        double difference = buffered_audio - predicted;
        MP_STATS(mpctx, "value %f audio-diff", difference);

        if (opts->autosync) {
            /* Smooth reported playback position from AO by averaging
             * it with the value expected based on the previous value and
             * time elapsed since then. May help smooth video timing
             * with audio outputs that have inaccurate position reporting.
             * This is badly implemented; the behavior of the smoothing
             * now undesirably depends on how often this code runs
             * (mainly depends on video frame rate). */
            buffered_audio = predicted + difference / opts->autosync;
        }

        mpctx->time_frame = buffered_audio - mpctx->delay / opts->playback_speed;
    } else {
        /* If we're more than 200 ms behind the right playback
         * position, don't try to speed up display of following
         * frames to catch up; continue with default speed from
         * the current frame instead.
         * If untimed is set always output frames immediately
         * without sleeping.
         */
        if (mpctx->time_frame < -0.2 || opts->untimed || vo->driver->untimed)
            mpctx->time_frame = 0;
    }
}
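
The autosync branch above is a simple exponential smoother: the delay reported by the AO is pulled toward the value predicted from the previous state, with 1/autosync as the blend factor. A stand-alone sketch of just that arithmetic; the helper name is illustrative:

/* Hypothetical helper: blend a measured audio delay toward a predicted one.
 * autosync == 1 uses the measurement as-is; larger values trust the
 * prediction more and react to measurement jitter more slowly. */
static double smooth_audio_delay(double measured, double predicted, int autosync)
{
    double difference = measured - predicted;
    return predicted + difference / autosync;
}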
Example No. 4
static void update_osd(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (!p->enable_osd)
        return;

    mpgl_osd_generate(p->osd, p->osd_res, p->osd_pts, 0, 0);

    int64_t osd_change_counter = mpgl_get_change_counter(p->osd);
    if (p->osd_change_counter == osd_change_counter) {
        p->skip_osd = true;
        return;
    }
    p->osd_change_counter = osd_change_counter;

    MP_STATS(vo, "start rpi_osd");

    p->egl.gl->ClearColor(0, 0, 0, 0);
    p->egl.gl->Clear(GL_COLOR_BUFFER_BIT);

    for (int n = 0; n < MAX_OSD_PARTS; n++) {
        enum sub_bitmap_format fmt = mpgl_osd_get_part_format(p->osd, n);
        if (!fmt)
            continue;
        gl_sc_uniform_sampler(p->sc, "osdtex", GL_TEXTURE_2D, 0);
        switch (fmt) {
        case SUBBITMAP_RGBA: {
            GLSLF("// OSD (RGBA)\n");
            GLSL(color = texture(osdtex, texcoord).bgra;)
            break;
        }
        case SUBBITMAP_LIBASS: {
            GLSLF("// OSD (libass)\n");
            GLSL(color =
                vec4(ass_color.rgb, ass_color.a * texture(osdtex, texcoord).r);)
            break;
        }
        default:
            abort();
        }
Example No. 5
static struct mp_image *decode_packet(struct dec_video *d_video,
                                      struct demux_packet *packet,
                                      int drop_frame)
{
    struct MPOpts *opts = d_video->opts;
    bool avi_pts = d_video->header->codec->avi_dts && opts->correct_pts;

    struct demux_packet packet_copy;
    if (packet && packet->dts == MP_NOPTS_VALUE) {
        packet_copy = *packet;
        packet = &packet_copy;
        packet->dts = packet->pts;
    }

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE && d_video->first_packet_pdts == MP_NOPTS_VALUE)
        d_video->first_packet_pdts = pkt_pdts;

    if (avi_pts)
        add_avi_pts(d_video, pkt_pdts);

    if (d_video->header->codec->avi_dts)
        drop_frame = 0;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    // Error, discarded frame, dropped frame, or initial codec delay.
    if (!mpi || drop_frame) {
        // If we already had output, this must be a dropped frame.
        if (d_video->decoded_pts != MP_NOPTS_VALUE && d_video->num_buffered_pts)
            d_video->num_buffered_pts--;
        talloc_free(mpi);
        return NULL;
    }

    if (opts->field_dominance == 0) {
        mpi->fields |= MP_IMGFIELD_TOP_FIRST | MP_IMGFIELD_INTERLACED;
    } else if (opts->field_dominance == 1) {
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    }

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = mpi->pts;
    double dts = mpi->dts;

    if (pts != MP_NOPTS_VALUE) {
        if (pts < d_video->codec_pts)
            d_video->num_codec_pts_problems++;
        d_video->codec_pts = mpi->pts;
    }

    if (dts != MP_NOPTS_VALUE) {
        if (dts <= d_video->codec_dts)
            d_video->num_codec_dts_problems++;
        d_video->codec_dts = mpi->dts;
    }

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    // Alternative PTS determination methods
    if (avi_pts)
        pts = retrieve_avi_pts(d_video, pts);

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts && !d_video->header->missing_timestamps)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->first_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE) {
            pts = base == MP_NOPTS_VALUE ? 0 : base;
        } else {
            pts += frame_time;
        }
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems || pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    if (!mp_image_params_equal(&d_video->last_format, &mpi->params))
        fix_image_params(d_video, &mpi->params);

    mpi->params = d_video->fixed_format;

    mpi->pts = pts;
    d_video->decoded_pts = pts;
    return mpi;
}
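
The PTS selection above boils down to a small heuristic: prefer the reordered PTS, but fall back to DTS when the PTS is missing or has produced more monotonicity violations than the DTS. A minimal sketch of just that decision, with the problem counters passed in explicitly; the helper itself is illustrative:

/* Hypothetical helper: pick the timestamp the code above would use. */
static double pick_frame_pts(double pts, double dts,
                             int pts_problems, int dts_problems)
{
    if ((pts_problems > dts_problems || pts == MP_NOPTS_VALUE) &&
        dts != MP_NOPTS_VALUE)
        return dts;  // DTS looks more trustworthy, or PTS is unset
    return pts;      // may still be MP_NOPTS_VALUE; the caller makes something up
}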
Example No. 6
int mpv_opengl_cb_draw(mpv_opengl_cb_context *ctx, int fbo, int vp_w, int vp_h)
{
    assert(ctx->renderer);

    gl_video_set_gl_state(ctx->renderer);

    pthread_mutex_lock(&ctx->lock);

    struct vo *vo = ctx->active;

    ctx->force_update |= ctx->reconfigured;

    if (ctx->vp_w != vp_w || ctx->vp_h != vp_h)
        ctx->force_update = true;

    if (ctx->force_update && vo) {
        ctx->force_update = false;
        ctx->vp_w = vp_w;
        ctx->vp_h = vp_h;

        struct mp_rect src, dst;
        struct mp_osd_res osd;
        mp_get_src_dst_rects(ctx->log, &ctx->vo_opts, vo->driver->caps,
                             &ctx->img_params, vp_w, abs(vp_h),
                             1.0, &src, &dst, &osd);

        gl_video_resize(ctx->renderer, vp_w, vp_h, &src, &dst, &osd);
    }

    if (ctx->reconfigured) {
        gl_video_set_osd_source(ctx->renderer, vo ? vo->osd : NULL);
        gl_video_config(ctx->renderer, &ctx->img_params);
        ctx->eq_changed = true;
    }
    if (ctx->update_new_opts) {
        struct vo_priv *p = vo ? vo->priv : NULL;
        struct vo_priv *opts = ctx->new_opts ? ctx->new_opts : p;
        if (opts) {
            gl_video_set_options(ctx->renderer, opts->renderer_opts);
            if (vo)
                gl_video_configure_queue(ctx->renderer, vo);
            ctx->gl->debug_context = opts->use_gl_debug;
            gl_video_set_debug(ctx->renderer, opts->use_gl_debug);
        }
    }
    ctx->reconfigured = false;
    ctx->update_new_opts = false;

    if (ctx->reset) {
        gl_video_reset(ctx->renderer);
        ctx->reset = false;
        if (ctx->cur_frame)
            ctx->cur_frame->still = true;
    }

    struct mp_csp_equalizer *eq = gl_video_eq_ptr(ctx->renderer);
    if (ctx->eq_changed) {
        memcpy(eq->values, ctx->eq.values, sizeof(eq->values));
        gl_video_eq_update(ctx->renderer);
    }
    ctx->eq_changed = false;

    struct vo_frame *frame = ctx->next_frame;
    int64_t wait_present_count = ctx->present_count;
    if (frame) {
        ctx->next_frame = NULL;
        wait_present_count += 1;
        pthread_cond_signal(&ctx->wakeup);
        talloc_free(ctx->cur_frame);
        ctx->cur_frame = vo_frame_ref(frame);
    } else {
        frame = vo_frame_ref(ctx->cur_frame);
        if (frame)
            frame->redraw = true;
        MP_STATS(ctx, "glcb-noframe");
    }
    struct vo_frame dummy = {0};
    if (!frame)
        frame = &dummy;

    pthread_mutex_unlock(&ctx->lock);

    MP_STATS(ctx, "glcb-render");
    gl_video_render_frame(ctx->renderer, frame, fbo);

    gl_video_unset_gl_state(ctx->renderer);

    if (frame != &dummy)
        talloc_free(frame);

    pthread_mutex_lock(&ctx->lock);
    while (wait_present_count > ctx->present_count)
        pthread_cond_wait(&ctx->wakeup, &ctx->lock);
    pthread_mutex_unlock(&ctx->lock);

    return 0;
}
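
The present_count/wait_present_count pair at the end implements a condition-variable handshake: the draw call publishes a frame, signals the waiting producer, and finally blocks until the other side reports the frame as presented. A generic sketch of the reporting half of such a handshake; this is purely illustrative and not mpv code:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical consumer side: mark one frame as presented and wake the waiter. */
static void report_frame_presented(pthread_mutex_t *lock, pthread_cond_t *wakeup,
                                   int64_t *present_count)
{
    pthread_mutex_lock(lock);
    *present_count += 1;
    pthread_cond_signal(wakeup);
    pthread_mutex_unlock(lock);
}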
Example No. 7
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    update_fps(mpctx);

    // Whether there's still at least 1 video frame that can be shown.
    // If false, it means we can reconfig the VO if needed (normally, this
    // would disrupt playback, so only do it on !still_playing).
    bool still_playing = vo_has_next_frame(vo, true);
    // For the last frame case (frame is being displayed).
    still_playing |= mpctx->playing_last_frame;
    still_playing |= mpctx->last_frame_duration > 0;

    double frame_time = 0;
    int r = update_video(mpctx, endpts, !still_playing, &frame_time);
    MP_TRACE(mpctx, "update_video: %d (still_playing=%d)\n", r, still_playing);

    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
        return;

    if (r < 0) {
        MP_FATAL(mpctx, "Could not initialize video chain.\n");
        int uninit = INITIALIZED_VCODEC;
        if (!opts->force_vo)
            uninit |= INITIALIZED_VO;
        uninit_player(mpctx, uninit);
        if (!mpctx->current_track[STREAM_AUDIO])
            mpctx->stop_play = PT_NEXT_ENTRY;
        mpctx->error_playing = true;
        handle_force_window(mpctx, true);
        return; // restart loop
    }

    if (r == VD_EOF) {
        if (!mpctx->playing_last_frame && mpctx->last_frame_duration > 0) {
            mpctx->time_frame += mpctx->last_frame_duration;
            mpctx->last_frame_duration = 0;
            mpctx->playing_last_frame = true;
            MP_VERBOSE(mpctx, "showing last frame\n");
        }
    }

    if (r == VD_NEW_FRAME) {
        MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);

        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        if (mpctx->video_status >= STATUS_READY) {
            mpctx->time_frame += frame_time / opts->playback_speed;
            adjust_sync(mpctx, frame_time);
        }
    } else if (r == VD_EOF && mpctx->playing_last_frame) {
        // Let video timing code continue displaying.
        mpctx->video_status = STATUS_DRAINING;
        MP_VERBOSE(mpctx, "still showing last frame\n");
    } else if (r <= 0) {
        // EOF or error
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        mpctx->video_status = STATUS_EOF;
        MP_VERBOSE(mpctx, "video EOF\n");
        return;
    } else {
        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        // Decode more in next iteration.
        mpctx->sleeptime = 0;
        MP_TRACE(mpctx, "filtering more video\n");
    }

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    mpctx->time_frame -= get_relative_time(mpctx);
    double audio_pts = playing_audio_pts(mpctx);
    if (!mpctx->sync_audio_to_video || mpctx->video_status < STATUS_READY) {
        mpctx->time_frame = 0;
    } else if (mpctx->audio_status == STATUS_PLAYING &&
               mpctx->video_status == STATUS_PLAYING)
    {
        double buffered_audio = ao_get_delay(mpctx->ao);
        MP_TRACE(mpctx, "audio delay=%f\n", buffered_audio);

        if (opts->autosync) {
            /* Smooth reported playback position from AO by averaging
             * it with the value expected based on the previous value and
             * time elapsed since then. May help smooth video timing
             * with audio outputs that have inaccurate position reporting.
             * This is badly implemented; the behavior of the smoothing
             * now undesirably depends on how often this code runs
             * (mainly depends on video frame rate). */
            float predicted = (mpctx->delay / opts->playback_speed +
                                mpctx->time_frame);
            float difference = buffered_audio - predicted;
            buffered_audio = predicted + difference / opts->autosync;
        }

        mpctx->time_frame = (buffered_audio -
                                mpctx->delay / opts->playback_speed);
    } else {
        /* If we're more than 200 ms behind the right playback
         * position, don't try to speed up display of following
         * frames to catch up; continue with default speed from
         * the current frame instead.
         * If untimed is set always output frames immediately
         * without sleeping.
         */
        if (mpctx->time_frame < -0.2 || opts->untimed || vo->untimed)
            mpctx->time_frame = 0;
    }

    double vsleep = mpctx->time_frame - vo->flip_queue_offset;
    if (vsleep > 0.050) {
        mpctx->sleeptime = MPMIN(mpctx->sleeptime, vsleep - 0.040);
        return;
    }
    mpctx->sleeptime = 0;
    mpctx->playing_last_frame = false;

    // last frame case
    if (r != VD_NEW_FRAME)
        return;

    //=================== FLIP PAGE (VIDEO BLT): ======================


    mpctx->video_pts = mpctx->video_next_pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    update_subtitles(mpctx);
    update_osd_msg(mpctx);

    MP_STATS(mpctx, "vo draw frame");

    vo_new_frame_imminent(vo);

    MP_STATS(mpctx, "vo sleep");

    mpctx->time_frame -= get_relative_time(mpctx);
    mpctx->time_frame -= vo->flip_queue_offset;
    if (mpctx->time_frame > 0.001)
        mpctx->time_frame = timing_sleep(mpctx, mpctx->time_frame);
    mpctx->time_frame += vo->flip_queue_offset;

    int64_t t2 = mp_time_us();
    /* Playing with playback speed it's possible to get pathological
     * cases with mpctx->time_frame negative enough to cause an
     * overflow in pts_us calculation, thus the MPMAX. */
    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts_us = mpctx->last_time + time_frame * 1e6;
    int duration = -1;
    double pts2 = vo_get_next_pts(vo, 0); // this is the next frame PTS
    if (mpctx->video_pts != MP_NOPTS_VALUE && pts2 == MP_NOPTS_VALUE) {
        // Make up a frame duration. Using the frame rate is not a good
        // choice, since the frame rate could be unset/broken/random.
        float fps = mpctx->d_video->fps;
        double frame_duration = fps > 0 ? 1.0 / fps : 0;
        pts2 = mpctx->video_pts + MPCLAMP(frame_duration, 0.0, 5.0);
    }
    if (pts2 != MP_NOPTS_VALUE) {
        // expected A/V sync correction is ignored
        double diff = (pts2 - mpctx->video_pts);
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        if (diff < 0)
            diff = 0;
        if (diff > 10)
            diff = 10;
        duration = diff * 1e6;
        mpctx->last_frame_duration = diff;
    }
    if (mpctx->video_status != STATUS_PLAYING)
        duration = -1;

    MP_STATS(mpctx, "start flip");
    vo_flip_page(vo, pts_us | 1, duration);
    MP_STATS(mpctx, "end flip");

    if (audio_pts != MP_NOPTS_VALUE)
        MP_STATS(mpctx, "value %f ptsdiff", mpctx->video_pts - audio_pts);

    mpctx->last_vo_flip_duration = (mp_time_us() - t2) * 0.000001;
    if (vo->driver->flip_page_timed) {
        // No need to adjust sync based on flip speed
        mpctx->last_vo_flip_duration = 0;
        // For print_status - VO call finishing early is OK for sync
        mpctx->time_frame -= get_relative_time(mpctx);
    }
    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING)
        mpctx->video_status = STATUS_READY;
    update_avsync(mpctx);
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;
}
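
The duration passed to vo_flip_page() is derived from the next frame's PTS: the PTS difference is scaled by playback speed, reduced if we are already late, clamped to a sane range, and converted to microseconds. A minimal sketch of that arithmetic in isolation; the helper name is illustrative:

/* Hypothetical helper: estimate the display duration (in microseconds) of the
 * current frame from the next frame's PTS. Returns -1 if it cannot be known. */
static int estimate_duration_us(double cur_pts, double next_pts,
                                double playback_speed, double time_frame)
{
    if (cur_pts == MP_NOPTS_VALUE || next_pts == MP_NOPTS_VALUE)
        return -1;
    double diff = (next_pts - cur_pts) / playback_speed;
    if (time_frame < 0)
        diff += time_frame;        // we are late; shorten the remaining duration
    diff = MPCLAMP(diff, 0, 10);   // guard against broken or absurd timestamps
    return diff * 1e6;
}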
Example No. 8
struct mp_image *video_decode(struct dec_video *d_video,
                              struct demux_packet *packet,
                              int drop_frame)
{
    struct MPOpts *opts = d_video->opts;
    bool sort_pts =
        (opts->user_pts_assoc_mode != 1 || d_video->header->video->avi_dts)
        && opts->correct_pts;

    struct demux_packet packet_copy;
    if (packet && packet->dts == MP_NOPTS_VALUE) {
        packet_copy = *packet;
        packet = &packet_copy;
        packet->dts = packet->pts;
    }

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE)
        d_video->last_packet_pdts = pkt_pdts;

    if (sort_pts)
        add_pts_to_sort(d_video, pkt_pdts);

    double prev_codec_pts = d_video->codec_pts;
    double prev_codec_dts = d_video->codec_dts;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    if (!mpi || drop_frame) {
        talloc_free(mpi);
        return NULL;            // error / skipped frame
    }

    if (opts->field_dominance == 0)
        mpi->fields |= MP_IMGFIELD_TOP_FIRST;
    else if (opts->field_dominance == 1)
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = d_video->codec_pts;
    double dts = d_video->codec_dts;

    if (pts == MP_NOPTS_VALUE) {
        d_video->codec_pts = prev_codec_pts;
    } else if (pts < prev_codec_pts) {
        d_video->num_codec_pts_problems++;
    }

    if (dts == MP_NOPTS_VALUE) {
        d_video->codec_dts = prev_codec_dts;
    } else if (dts <= prev_codec_dts) {
        d_video->num_codec_dts_problems++;
    }

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    // Alternative PTS determination methods
    if (sort_pts)
        pts = retrieve_sorted_pts(d_video, pts);

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->last_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE)
            pts = base == MP_NOPTS_VALUE ? 0 : base;

        pts += frame_time;
    }

    if (d_video->decoded_pts != MP_NOPTS_VALUE && pts <= d_video->decoded_pts) {
        MP_WARN(d_video, "Non-monotonic video pts: %f <= %f\n",
                pts, d_video->decoded_pts);
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems || pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    mpi->pts = pts;
    d_video->decoded_pts = pts;
    return mpi;
}
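
When correct-pts is disabled or no usable PTS survives, both decode paths above synthesize timestamps by stepping the previously decoded PTS by one nominal frame duration, starting from a packet timestamp if nothing was decoded yet. A minimal sketch of one variant of that fallback; the helper name is illustrative:

/* Hypothetical helper: make up the next PTS from the previous one. */
static double synthesize_pts(double prev_pts, double base_pts, double fps)
{
    double frame_time = 1.0 / (fps > 0 ? fps : 25);  // assume 25 fps if unknown
    if (prev_pts == MP_NOPTS_VALUE)
        return base_pts == MP_NOPTS_VALUE ? 0 : base_pts;
    return prev_pts + frame_time;
}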
Example No. 9
void video_reset(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->start_pts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
    d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
    d_video->dropped_frames = 0;
    d_video->current_state = DATA_AGAIN;
    mp_image_unrefp(&d_video->current_mpi);
    talloc_free(d_video->packet);
    d_video->packet = NULL;
    talloc_free(d_video->new_segment);
    d_video->new_segment = NULL;
    d_video->start = d_video->end = MP_NOPTS_VALUE;
}

int video_vd_control(struct dec_video *d_video, int cmd, void *arg)
{
    const struct vd_functions *vd = d_video->vd_driver;
    if (vd)
        return vd->control(d_video, cmd, arg);
    return CONTROL_UNKNOWN;
}

void video_uninit(struct dec_video *d_video)
{
    if (!d_video)
        return;
    mp_image_unrefp(&d_video->current_mpi);
    mp_image_unrefp(&d_video->cover_art_mpi);
    if (d_video->vd_driver) {
        MP_VERBOSE(d_video, "Uninit video.\n");
        d_video->vd_driver->uninit(d_video);
    }
    talloc_free(d_video->packet);
    talloc_free(d_video->new_segment);
    talloc_free(d_video);
}

static int init_video_codec(struct dec_video *d_video, const char *decoder)
{
    if (!d_video->vd_driver->init(d_video, decoder)) {
        MP_VERBOSE(d_video, "Video decoder init failed.\n");
        return 0;
    }
    return 1;
}

struct mp_decoder_list *video_decoder_list(void)
{
    struct mp_decoder_list *list = talloc_zero(NULL, struct mp_decoder_list);
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++)
        mpcodecs_vd_drivers[i]->add_decoders(list);
    return list;
}

static struct mp_decoder_list *mp_select_video_decoders(const char *codec,
                                                        char *selection)
{
    struct mp_decoder_list *list = video_decoder_list();
    struct mp_decoder_list *new = mp_select_decoders(list, codec, selection);
    talloc_free(list);
    return new;
}

static const struct vd_functions *find_driver(const char *name)
{
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++) {
        if (strcmp(mpcodecs_vd_drivers[i]->name, name) == 0)
            return mpcodecs_vd_drivers[i];
    }
    return NULL;
}

bool video_init_best_codec(struct dec_video *d_video)
{
    struct MPOpts *opts = d_video->opts;

    assert(!d_video->vd_driver);
    video_reset(d_video);
    d_video->has_broken_packet_pts = -10; // needs 10 packets to reach decision

    struct mp_decoder_entry *decoder = NULL;
    struct mp_decoder_list *list =
        mp_select_video_decoders(d_video->codec->codec, opts->video_decoders);

    mp_print_decoders(d_video->log, MSGL_V, "Codec list:", list);

    for (int n = 0; n < list->num_entries; n++) {
        struct mp_decoder_entry *sel = &list->entries[n];
        const struct vd_functions *driver = find_driver(sel->family);
        if (!driver)
            continue;
        MP_VERBOSE(d_video, "Opening video decoder %s:%s\n",
                   sel->family, sel->decoder);
        d_video->vd_driver = driver;
        if (init_video_codec(d_video, sel->decoder)) {
            decoder = sel;
            break;
        }
        d_video->vd_driver = NULL;
        MP_WARN(d_video, "Video decoder init failed for "
                "%s:%s\n", sel->family, sel->decoder);
    }

    if (d_video->vd_driver) {
        d_video->decoder_desc =
            talloc_asprintf(d_video, "%s [%s:%s]", decoder->desc, decoder->family,
                            decoder->decoder);
        MP_VERBOSE(d_video, "Selected video codec: %s\n", d_video->decoder_desc);
    } else {
        MP_ERR(d_video, "Failed to initialize a video decoder for codec '%s'.\n",
               d_video->codec->codec);
    }

    if (d_video->header->missing_timestamps) {
        MP_WARN(d_video, "This stream has no timestamps!\n");
        MP_WARN(d_video, "Making up playback time using %f FPS.\n", d_video->fps);
        MP_WARN(d_video, "Seeking will probably fail badly.\n");
    }

    talloc_free(list);
    return !!d_video->vd_driver;
}

static void fix_image_params(struct dec_video *d_video,
                             struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct mp_codec_params *c = d_video->codec;

    MP_VERBOSE(d_video, "Decoder format: %s\n", mp_image_params_to_str(params));

    // While mp_image_params normally always has d_w/d_h set, the decoder
    // signals an unknown bitstream aspect ratio by leaving both at 0.
    float dec_aspect = p.p_w > 0 && p.p_h > 0 ? p.p_w / (float)p.p_h : 0;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = dec_aspect;

    bool use_container = true;
    switch (opts->aspect_method) {
    case 0:
        // We normally prefer the container aspect, unless the decoder aspect
        // changes at least once.
        if (dec_aspect > 0 && d_video->initial_decoder_aspect != dec_aspect) {
            MP_VERBOSE(d_video, "Using bitstream aspect ratio.\n");
            // Even if the aspect switches back, don't use container aspect again.
            d_video->initial_decoder_aspect = -1;
            use_container = false;
        }
        break;
    case 1:
        use_container = false;
        break;
    }

    if (use_container && c->par_w > 0 && c->par_h) {
        MP_VERBOSE(d_video, "Using container aspect ratio.\n");
        p.p_w = c->par_w;
        p.p_h = c->par_h;
    }

    if (opts->movie_aspect >= 0) {
        MP_VERBOSE(d_video, "Forcing user-set aspect ratio.\n");
        if (opts->movie_aspect == 0) {
            p.p_w = p.p_h = 1;
        } else {
            AVRational a = av_d2q(opts->movie_aspect, INT_MAX);
            mp_image_params_set_dsize(&p, a.num, a.den);
        }
    }

    // Assume square pixels if no aspect ratio is set at all.
    if (p.p_w <= 0 || p.p_h <= 0)
        p.p_w = p.p_h = 1;

    // Detect colorspace from resolution.
    mp_image_params_guess_csp(&p);

    d_video->last_format = *params;
    d_video->fixed_format = p;
}
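
Forcing a user-set aspect ratio above goes through FFmpeg's av_d2q(), which converts the floating-point aspect value (opts->movie_aspect) into an AVRational approximation before the display size is recomputed. A tiny stand-alone illustration of that call; this is not mpv code, and the printed result is simply whichever rational approximation av_d2q() picks:

#include <limits.h>
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    double movie_aspect = 16.0 / 9.0;              // e.g. a user-forced aspect
    AVRational a = av_d2q(movie_aspect, INT_MAX);  // num/den limited to INT_MAX
    printf("aspect %f -> %d/%d\n", movie_aspect, a.num, a.den);
    return 0;
}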

static struct mp_image *decode_packet(struct dec_video *d_video,
                                      struct demux_packet *packet,
                                      int drop_frame)
{
    struct MPOpts *opts = d_video->opts;

    if (!d_video->vd_driver)
        return NULL;

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    if (pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE && d_video->first_packet_pdts == MP_NOPTS_VALUE)
        d_video->first_packet_pdts = pkt_pdts;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    // Error, discarded frame, dropped frame, or initial codec delay.
    if (!mpi || drop_frame) {
        talloc_free(mpi);
        return NULL;
    }

    if (opts->field_dominance == 0) {
        mpi->fields |= MP_IMGFIELD_TOP_FIRST | MP_IMGFIELD_INTERLACED;
    } else if (opts->field_dominance == 1) {
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    }

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = mpi->pts;
    double dts = mpi->dts;

    if (pts != MP_NOPTS_VALUE) {
        if (pts < d_video->codec_pts)
            d_video->num_codec_pts_problems++;
        d_video->codec_pts = mpi->pts;
    }

    if (dts != MP_NOPTS_VALUE) {
        if (dts <= d_video->codec_dts)
            d_video->num_codec_dts_problems++;
        d_video->codec_dts = mpi->dts;
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems)
        d_video->has_broken_packet_pts = 1;

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts && !d_video->header->missing_timestamps)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->first_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE) {
            pts = base == MP_NOPTS_VALUE ? 0 : base;
        } else {
            pts += frame_time;
        }
    }

    if (!mp_image_params_equal(&d_video->last_format, &mpi->params))
        fix_image_params(d_video, &mpi->params);

    mpi->params = d_video->fixed_format;

    mpi->pts = pts;
    d_video->decoded_pts = pts;

    // Compensate for incorrectly using mpeg-style DTS for avi timestamps.
    if (d_video->codec->avi_dts && opts->correct_pts &&
        mpi->pts != MP_NOPTS_VALUE && d_video->fps > 0)
    {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_GET_BFRAMES, &delay);
        mpi->pts -= MPMAX(delay, 0) / d_video->fps;
    }

    return mpi;
}

void video_reset_aspect(struct dec_video *d_video)
{
    d_video->last_format = (struct mp_image_params){0};
}

void video_set_framedrop(struct dec_video *d_video, bool enabled)
{
    d_video->framedrop_enabled = enabled;
}

// Frames before the start timestamp can be dropped. (Used for hr-seek.)
void video_set_start(struct dec_video *d_video, double start_pts)
{
    d_video->start_pts = start_pts;
}

void video_work(struct dec_video *d_video)
{
    if (d_video->current_mpi)
        return;

    if (d_video->header->attached_picture) {
        if (d_video->current_state == DATA_AGAIN && !d_video->cover_art_mpi) {
            d_video->cover_art_mpi =
                decode_packet(d_video, d_video->header->attached_picture, 0);
            // Might need flush.
            if (!d_video->cover_art_mpi)
                d_video->cover_art_mpi = decode_packet(d_video, NULL, 0);
            d_video->current_state = DATA_OK;
        }
        if (d_video->current_state == DATA_OK)
            d_video->current_mpi = mp_image_new_ref(d_video->cover_art_mpi);
        // (DATA_OK is returned the first time, when current_mpi is still set)
        d_video->current_state = DATA_EOF;
        return;
    }

    if (!d_video->packet && !d_video->new_segment &&
        demux_read_packet_async(d_video->header, &d_video->packet) == 0)
    {
        d_video->current_state = DATA_WAIT;
        return;
    }

    if (d_video->packet) {
        if (d_video->packet->dts == MP_NOPTS_VALUE && !d_video->codec->avi_dts)
            d_video->packet->dts = d_video->packet->pts;
    }

    if (d_video->packet && d_video->packet->new_segment) {
        assert(!d_video->new_segment);
        d_video->new_segment = d_video->packet;
        d_video->packet = NULL;
    }

    bool had_input_packet = !!d_video->packet;
    bool had_packet = had_input_packet || d_video->new_segment;

    double start_pts = d_video->start_pts;
    if (d_video->start != MP_NOPTS_VALUE && (start_pts == MP_NOPTS_VALUE ||
                                             d_video->start > start_pts))
        start_pts = d_video->start;

    int framedrop_type = d_video->framedrop_enabled ? 1 : 0;
    if (start_pts != MP_NOPTS_VALUE && d_video->packet &&
        d_video->packet->pts < start_pts - .005 &&
        !d_video->has_broken_packet_pts)
    {
        framedrop_type = 2;
    }
    d_video->current_mpi = decode_packet(d_video, d_video->packet, framedrop_type);
    if (d_video->packet && d_video->packet->len == 0) {
        talloc_free(d_video->packet);
        d_video->packet = NULL;
    }

    d_video->current_state = DATA_OK;
    if (!d_video->current_mpi) {
        d_video->current_state = DATA_EOF;
        if (had_packet) {
            if (framedrop_type == 1)
                d_video->dropped_frames += 1;
            d_video->current_state = DATA_AGAIN;
        }
    }

    bool segment_ended = !d_video->current_mpi && !had_input_packet;

    if (d_video->current_mpi && d_video->current_mpi->pts != MP_NOPTS_VALUE) {
        double vpts = d_video->current_mpi->pts;
        segment_ended = d_video->end != MP_NOPTS_VALUE && vpts >= d_video->end;
        if ((d_video->start != MP_NOPTS_VALUE && vpts < d_video->start)
            || segment_ended)
        {
            talloc_free(d_video->current_mpi);
            d_video->current_mpi = NULL;
        }
    }

    // If there's a new segment, start it as soon as we're drained/finished.
    if (segment_ended && d_video->new_segment) {
        struct demux_packet *new_segment = d_video->new_segment;
        d_video->new_segment = NULL;

        // Could avoid decoder reinit; would still need flush.
        d_video->codec = new_segment->codec;
        if (d_video->vd_driver)
            d_video->vd_driver->uninit(d_video);
        d_video->vd_driver = NULL;
        video_init_best_codec(d_video);

        d_video->start = new_segment->start;
        d_video->end = new_segment->end;

        new_segment->new_segment = false;

        d_video->packet = new_segment;
        d_video->current_state = DATA_AGAIN;
    }
}

// Fetch an image decoded with video_work(). Returns one of:
//  DATA_OK:    *out_mpi is set to a new image
//  DATA_WAIT:  waiting for demuxer; will receive a wakeup signal
//  DATA_EOF:   end of file, no more frames to be expected
//  DATA_AGAIN: dropped frame or something similar
int video_get_frame(struct dec_video *d_video, struct mp_image **out_mpi)
{
    *out_mpi = NULL;
    if (d_video->current_mpi) {
        *out_mpi = d_video->current_mpi;
        d_video->current_mpi = NULL;
        return DATA_OK;
    }
    if (d_video->current_state == DATA_OK)
        return DATA_AGAIN;
    return d_video->current_state;
}
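
A hypothetical caller of this pair alternates video_work() and video_get_frame(), returning to its event loop on DATA_WAIT and stopping on DATA_EOF. A minimal sketch under those assumptions; the driver loop below is illustrative and not mpv's actual player loop:

/* Hypothetical driver loop around the decode state machine above. */
static void drain_video(struct dec_video *d_video)
{
    for (;;) {
        video_work(d_video);
        struct mp_image *mpi = NULL;
        int r = video_get_frame(d_video, &mpi);
        if (r == DATA_OK) {
            /* ... hand mpi to the filter/VO chain ... */
            talloc_free(mpi);
        } else if (r == DATA_WAIT || r == DATA_EOF) {
            break;  // wait for a demuxer wakeup, or stop at end of stream
        }
        // DATA_AGAIN: simply loop and call video_work() again
    }
}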