Example #1
struct mp_image *convert_image(struct mp_image *image, int destfmt,
                               struct mp_log *log)
{
    int d_w, d_h;
    mp_image_params_get_dsize(&image->params, &d_w, &d_h);
    bool is_anamorphic = image->w != d_w || image->h != d_h;

    // Caveat: no colorspace/levels conversion is done if the pixel formats
    //         are equal; it's unclear what colorspace/levels the target wants.
    if (image->imgfmt == destfmt && !is_anamorphic)
        return mp_image_new_ref(image);

    struct mp_image *dst = mp_image_alloc(destfmt, d_w, d_h);
    if (!dst) {
        mp_err(log, "Out of memory.\n");
        return NULL;
    }
    mp_image_copy_attributes(dst, image);

    if (mp_image_swscale(dst, image, mp_sws_hq_flags) < 0) {
        mp_err(log, "Error when converting image.\n");
        talloc_free(dst);
        return NULL;
    }

    return dst;
}
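
A minimal caller sketch (hypothetical, not taken from the original source): the returned image is a fresh allocation, or a new reference when no conversion is needed, so the caller releases it with talloc_free(). IMGFMT_RGB24 is just an illustrative target format.

struct mp_image *rgb = convert_image(image, IMGFMT_RGB24, log);
if (rgb) {
    // ... encode or save the converted image ...
    talloc_free(rgb);   // drop the returned image/reference
}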
Example #2
static mp_image_t *get_screenshot(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    if (!ctx->original_image)
        return NULL;

    return mp_image_new_ref(ctx->original_image);
}
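
For context, a sketch of how a get_screenshot() helper like this is typically handed out through the VO's control callback, modeled on Example #6; the request name, constants, and handler below are assumptions, and the requester owns (and must talloc_free) the returned reference.

static int control(struct vo *vo, uint32_t request, void *data)
{
    switch (request) {
    case VOCTRL_SCREENSHOT: {
        struct voctrl_screenshot_args *args = data;
        args->out_image = get_screenshot(vo);  // new reference, owned by the requester
        return VO_TRUE;
    }
    }
    return VO_NOTIMPL;
}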
Example #3
static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
{
    // Replace the stored reference with a new ref of the current frame;
    // talloc_steal ties its lifetime to the filter instance. The input
    // image itself is passed through unchanged.
    mp_image_unrefp(&vf->priv->current);
    vf->priv->current = talloc_steal(vf, mp_image_new_ref(mpi));
    mp_image_set_display_size(vf->priv->current, vf->priv->display_w,
                              vf->priv->display_h);
    return mpi;
}
Example #4
VideoFrame::VideoFrame::Data::Data(bool free, mp_image *mpi, const VideoFormat &format, double pts, int field)
: format(format), pts(pts), field(field) {
	data[0] = mpi->planes[0];
	data[1] = mpi->planes[1];
	data[2] = mpi->planes[2];
	data[3] = mpi->planes[3];
	this->mpi = free ? mpi : mp_image_new_ref(mpi);
}
Example #5
static VdpVideoSurface ref_field(struct priv *p,
                                 struct mp_vdpau_mixer_frame *frame, int pos)
{
    // Take a new reference to the requested field and attach it to the
    // mixer frame, so the underlying surface stays valid as long as the
    // frame does.
    struct mp_image *mpi = mp_image_new_ref(mp_refqueue_get_field(p->queue, pos));
    if (!mpi)
        return VDP_INVALID_HANDLE;
    talloc_steal(frame, mpi);
    // The VdpVideoSurface handle is carried in planes[3] of the hw image.
    return (uintptr_t)mpi->planes[3];
}
Example #6
static int control (vf_instance_t *vf, int request, void *data)
{
    if (request == VFCTRL_SCREENSHOT && vf->priv->current) {
        struct voctrl_screenshot_args *args = data;
        args->out_image = mp_image_new_ref(vf->priv->current);
        return CONTROL_TRUE;
    }
    return vf_next_control (vf, request, data);
}
Example #7
static void draw_frame(struct vo *vo, struct vo_frame *frame)
{
    struct priv *p = vo->priv;

    // Only take a new reference for genuinely new frames; redraw/repeat
    // requests don't queue a new image.
    mp_image_t *mpi = NULL;
    if (!frame->redraw && !frame->repeat)
        mpi = mp_image_new_ref(frame->current);

    // Drop the previously queued image and store the new reference (if any).
    talloc_free(p->next_image);
    p->next_image = mpi;
}
Example #8
VideoFrame::VideoFrame::Data::Data(const Data &other)
: QSharedData(other), image(other.image), format(other.format) {
	field = other.field;
	pts = other.pts;
	data[0] = other.data[0];
	data[1] = other.data[1];
	data[2] = other.data[2];
	data[3] = other.data[3];
	if (other.mpi)
		mpi = mp_image_new_ref(other.mpi);
	if (!other.buffer.isEmpty()) {
		int offset = 0;
		buffer.resize(other.buffer.size());
		for (int i=0; i<format.planes(); ++i) {
			data[i] = (uchar*)buffer.data() + offset;
			offset += format.bytesPerPlain(i);
		}
		memcpy(buffer.data(), other.buffer.data(), buffer.size());
	}
}
Example #9
// Read a packet and store the decoded image into d_video->waiting_decoded_mpi.
// Returns a VD_* code.
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi = mp_image_new_ref(d_video->cover_art_mpi);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if (pkt && pkt->dts != MP_NOPTS_VALUE)
        pkt->dts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         2 : check_framedrop(mpctx);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    if (had_packet && !d_video->waiting_decoded_mpi &&
        mpctx->video_status == STATUS_PLAYING &&
        (mpctx->opts->frame_dropping & 2))
    {
        mpctx->dropped_frames_total++;
        mpctx->dropped_frames++;
    }

    return had_packet ? VD_PROGRESS : VD_EOF;
}
Example #10
static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct xvctx *ctx = vo->priv;

    struct mp_image img = get_xv_buffer(vo, ctx->current_buf);

    struct mp_osd_res res = {
        .w = ctx->image_width,
        .h = ctx->image_height,
        .display_par = 1.0 / vo->aspdat.par,
    };

    osd_draw_on_image(osd, res, osd->vo_pts, 0, &img);
}

static void wait_for_completion(struct vo *vo, int max_outstanding)
{
#if HAVE_SHM
    struct xvctx *ctx = vo->priv;
    struct vo_x11_state *x11 = vo->x11;
    if (ctx->Shmem_Flag) {
        while (x11->ShmCompletionWaitCount > max_outstanding) {
            if (!ctx->Shm_Warned_Slow) {
                MP_WARN(vo, "X11 can't keep up! Waiting"
                        " for XShm completion events...\n");
                ctx->Shm_Warned_Slow = 1;
            }
            mp_sleep_us(1000);
            vo_x11_check_events(vo);
        }
    }
#endif
}

static void flip_page(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    put_xvimage(vo, ctx->xvimage[ctx->current_buf]);

    /* remember the currently visible buffer */
    ctx->current_buf = (ctx->current_buf + 1) % ctx->num_buffers;

    if (!ctx->Shmem_Flag)
        XSync(vo->x11->display, False);
}

static mp_image_t *get_screenshot(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    if (!ctx->original_image)
        return NULL;

    return mp_image_new_ref(ctx->original_image);
}

// Note: redraw_frame() can call this with NULL.
static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct xvctx *ctx = vo->priv;

    wait_for_completion(vo, ctx->num_buffers - 1);

    struct mp_image xv_buffer = get_xv_buffer(vo, ctx->current_buf);
    if (mpi) {
        mp_image_copy(&xv_buffer, mpi);
    } else {
        mp_image_clear(&xv_buffer, 0, 0, xv_buffer.w, xv_buffer.h);
    }

    mp_image_setrefp(&ctx->original_image, mpi);
}

static int redraw_frame(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;

    draw_image(vo, ctx->original_image);
    return true;
}

static int query_format(struct vo *vo, uint32_t format)
{
    struct xvctx *ctx = vo->priv;
    uint32_t i;
    int flag = VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW;

    int fourcc = find_xv_format(format);
    if (fourcc) {
        for (i = 0; i < ctx->formats; i++) {
            if (ctx->fo[i].id == fourcc)
                return flag;
        }
    }
    return 0;
}

static void uninit(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    int i;

    talloc_free(ctx->original_image);

    if (ctx->ai)
        XvFreeAdaptorInfo(ctx->ai);
    ctx->ai = NULL;
    if (ctx->fo) {
        XFree(ctx->fo);
        ctx->fo = NULL;
    }
    for (i = 0; i < ctx->num_buffers; i++)
        deallocate_xvimage(vo, i);
    // uninit() shouldn't get called unless initialization went past vo_init()
    vo_x11_uninit(vo);
}

static int preinit(struct vo *vo)
{
    XvPortID xv_p;
    int busy_ports = 0;
    unsigned int i;
    struct xvctx *ctx = vo->priv;
    int xv_adaptor = ctx->cfg_xv_adaptor;

    if (!vo_x11_init(vo))
        return -1;

    struct vo_x11_state *x11 = vo->x11;

    /* check for Xvideo extension */
    unsigned int ver, rel, req, ev, err;
    if (Success != XvQueryExtension(x11->display, &ver, &rel, &req, &ev, &err)) {
        MP_ERR(vo, "Xv not supported by this X11 version/driver\n");
        goto error;
    }

    /* check for Xvideo support */
    if (Success !=
        XvQueryAdaptors(x11->display, DefaultRootWindow(x11->display),
                        &ctx->adaptors, &ctx->ai)) {
        MP_ERR(vo, "XvQueryAdaptors failed.\n");
        goto error;
    }

    /* check adaptors */
    if (ctx->xv_port) {
        int port_found;

        for (port_found = 0, i = 0; !port_found && i < ctx->adaptors; i++) {
            if ((ctx->ai[i].type & XvInputMask)
                && (ctx->ai[i].type & XvImageMask)) {
                for (xv_p = ctx->ai[i].base_id;
                     xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports;
                     ++xv_p) {
                    if (xv_p == ctx->xv_port) {
                        port_found = 1;
                        break;
                    }
                }
            }
        }
        if (port_found) {
            if (XvGrabPort(x11->display, ctx->xv_port, CurrentTime))
                ctx->xv_port = 0;
        } else {
            MP_WARN(vo, "Invalid port parameter, overriding with port 0.\n");
            ctx->xv_port = 0;
        }
    }

    for (i = 0; i < ctx->adaptors && ctx->xv_port == 0; i++) {
        /* check if adaptor number has been specified */
        if (xv_adaptor != -1 && xv_adaptor != i)
            continue;

        if ((ctx->ai[i].type & XvInputMask) && (ctx->ai[i].type & XvImageMask)) {
            for (xv_p = ctx->ai[i].base_id;
                 xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports; ++xv_p)
                if (!XvGrabPort(x11->display, xv_p, CurrentTime)) {
                    ctx->xv_port = xv_p;
                    MP_VERBOSE(vo, "Using Xv Adapter #%d (%s)\n",
                               i, ctx->ai[i].name);
                    break;
                } else {
                    MP_WARN(vo, "Could not grab port %i.\n", (int) xv_p);
                    ++busy_ports;
                }
        }
    }
    if (!ctx->xv_port) {
        if (busy_ports)
            MP_ERR(vo,
                   "Could not find free Xvideo port - maybe another process is already\n"\
                   "using it. Close all video applications, and try again. If that does\n"\
                   "not help, see 'mpv -vo help' for other (non-xv) video out drivers.\n");
        else
            MP_ERR(vo,
                   "It seems there is no Xvideo support for your video card available.\n"\
                   "Run 'xvinfo' to verify its Xv support and read\n"\
                   "DOCS/HTML/en/video.html#xv!\n"\
                   "See 'mpv -vo help' for other (non-xv) video out drivers.\n"\
                   "Try -vo x11.\n");
        goto error;
    }

    if (!xv_init_colorkey(vo)) {
        goto error;             // bail out, colorkey setup failed
    }
    xv_enable_vsync(vo);
    xv_get_max_img_dim(vo, &ctx->max_width, &ctx->max_height);

    ctx->fo = XvListImageFormats(x11->display, ctx->xv_port,
                                 (int *) &ctx->formats);

    return 0;

  error:
    uninit(vo);                 // free resources
    return -1;
}
Example #11
static int recreate_video_proc(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;
    HRESULT hr;

    destroy_video_proc(vf);

    D3D11_VIDEO_PROCESSOR_CONTENT_DESC vpdesc = {
        .InputFrameFormat = p->d3d_frame_format,
        .InputWidth = p->c_w,
        .InputHeight = p->c_h,
        .OutputWidth = p->params.w,
        .OutputHeight = p->params.h,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(p->video_dev, &vpdesc,
                                                          &p->vp_enum);
    if (FAILED(hr))
        goto fail;

    D3D11_VIDEO_PROCESSOR_CAPS caps;
    hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(p->vp_enum, &caps);
    if (FAILED(hr))
        goto fail;

    MP_VERBOSE(vf, "Found %d rate conversion caps. Looking for caps=0x%x.\n",
               (int)caps.RateConversionCapsCount, p->mode);

    int rindex = -1;
    for (int n = 0; n < caps.RateConversionCapsCount; n++) {
        D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
        hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps
                (p->vp_enum, n, &rcaps);
        if (FAILED(hr))
            goto fail;
        MP_VERBOSE(vf, "  - %d: 0x%08x\n", n, (unsigned)rcaps.ProcessorCaps);
        if (rcaps.ProcessorCaps & p->mode) {
            MP_VERBOSE(vf, "       (matching)\n");
            if (rindex < 0)
                rindex = n;
        }
    }

    if (rindex < 0) {
        MP_WARN(vf, "No fitting video processor found, picking #0.\n");
        rindex = 0;
    }

    // TODO: so, how do we select which rate conversion mode the processor uses?

    hr = ID3D11VideoDevice_CreateVideoProcessor(p->video_dev, p->vp_enum, rindex,
                                                &p->video_proc);
    if (FAILED(hr)) {
        MP_ERR(vf, "Failed to create D3D11 video processor.\n");
        goto fail;
    }

    // Note: libavcodec does not support cropping left/top with hwaccel.
    RECT src_rc = {
        .right = p->params.w,
        .bottom = p->params.h,
    };
    ID3D11VideoContext_VideoProcessorSetStreamSourceRect(p->video_ctx,
                                                         p->video_proc,
                                                         0, TRUE, &src_rc);

    // This is supposed to stop drivers from f*****g up the video quality.
    ID3D11VideoContext_VideoProcessorSetStreamAutoProcessingMode(p->video_ctx,
                                                                 p->video_proc,
                                                                 0, FALSE);

    ID3D11VideoContext_VideoProcessorSetStreamOutputRate(p->video_ctx,
                                                         p->video_proc,
                                                         0,
                                                         D3D11_VIDEO_PROCESSOR_OUTPUT_RATE_NORMAL,
                                                         FALSE, 0);

    D3D11_VIDEO_PROCESSOR_COLOR_SPACE csp = {
        .YCbCr_Matrix = p->params.color.space != MP_CSP_BT_601,
        .Nominal_Range = p->params.color.levels == MP_CSP_LEVELS_TV ? 1 : 2,
    };
    ID3D11VideoContext_VideoProcessorSetStreamColorSpace(p->video_ctx,
                                                         p->video_proc,
                                                         0, &csp);
    if (p->out_rgb) {
        if (p->params.color.space != MP_CSP_BT_601 &&
            p->params.color.space != MP_CSP_BT_709)
        {
            MP_WARN(vf, "Unsupported video colorspace (%s/%s). Consider "
                    "disabling hardware decoding, or using "
                    "--hwdec=d3d11va-copy to get correct output.\n",
                    m_opt_choice_str(mp_csp_names, p->params.color.space),
                    m_opt_choice_str(mp_csp_levels_names, p->params.color.levels));
        }
    } else {
        ID3D11VideoContext_VideoProcessorSetOutputColorSpace(p->video_ctx,
                                                             p->video_proc,
                                                             &csp);
    }

    return 0;
fail:
    destroy_video_proc(vf);
    return -1;
}

static int render(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;
    int res = -1;
    HRESULT hr;
    ID3D11VideoProcessorInputView *in_view = NULL;
    ID3D11VideoProcessorOutputView *out_view = NULL;
    struct mp_image *in = NULL, *out = NULL;
    out = mp_image_pool_get(p->pool, p->out_params.imgfmt, p->params.w, p->params.h);
    if (!out)
        goto cleanup;

    ID3D11Texture2D *d3d_out_tex = (void *)out->planes[1];

    in = mp_refqueue_get(p->queue, 0);
    if (!in)
        goto cleanup;
    ID3D11Texture2D *d3d_tex = (void *)in->planes[1];
    int d3d_subindex = (intptr_t)in->planes[2];

    mp_image_copy_attributes(out, in);

    D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
    if (!mp_refqueue_should_deint(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_top_field_first(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    D3D11_TEXTURE2D_DESC texdesc;
    ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
    if (!p->video_proc || p->c_w != texdesc.Width || p->c_h != texdesc.Height ||
        p->d3d_frame_format != d3d_frame_format)
    {
        p->c_w = texdesc.Width;
        p->c_h = texdesc.Height;
        p->d3d_frame_format = d3d_frame_format;
        if (recreate_video_proc(vf) < 0)
            goto cleanup;
    }

    if (!mp_refqueue_should_deint(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_is_top_field(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    ID3D11VideoContext_VideoProcessorSetStreamFrameFormat(p->video_ctx,
                                                          p->video_proc,
                                                          0, d3d_frame_format);

    D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC indesc = {
        .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
        .Texture2D = {
            .ArraySlice = d3d_subindex,
        },
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorInputView(p->video_dev,
                                                         (ID3D11Resource *)d3d_tex,
                                                         p->vp_enum, &indesc,
                                                         &in_view);
    if (FAILED(hr)) {
        MP_ERR(vf, "Could not create ID3D11VideoProcessorInputView\n");
        goto cleanup;
    }

    D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outdesc = {
        .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorOutputView(p->video_dev,
                                                          (ID3D11Resource *)d3d_out_tex,
                                                          p->vp_enum, &outdesc,
                                                          &out_view);
    if (FAILED(hr))
        goto cleanup;

    D3D11_VIDEO_PROCESSOR_STREAM stream = {
        .Enable = TRUE,
        .pInputSurface = in_view,
    };
    int frame = mp_refqueue_is_second_field(p->queue);
    hr = ID3D11VideoContext_VideoProcessorBlt(p->video_ctx, p->video_proc,
                                              out_view, frame, 1, &stream);
    if (FAILED(hr)) {
        MP_ERR(vf, "VideoProcessorBlt failed.\n");
        goto cleanup;
    }

    res = 0;
cleanup:
    if (in_view)
        ID3D11VideoProcessorInputView_Release(in_view);
    if (out_view)
        ID3D11VideoProcessorOutputView_Release(out_view);
    if (res >= 0) {
        vf_add_output_frame(vf, out);
    } else {
        talloc_free(out);
    }
    mp_refqueue_next_field(p->queue);
    return res;
}

static int filter_out(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;

    if (!mp_refqueue_has_output(p->queue))
        return 0;

    // no filtering
    if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
        struct mp_image *in = mp_image_new_ref(mp_refqueue_get(p->queue, 0));
        if (!in)
            return -1;
        mp_image_set_params(in, &p->out_params);
        vf_add_output_frame(vf, in);
        mp_refqueue_next(p->queue);
        return 0;
    }

    return render(vf);
}

static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
                    struct mp_image_params *out)
{
    struct vf_priv_s *p = vf->priv;

    flush_frames(vf);
    talloc_free(p->pool);
    p->pool = NULL;

    destroy_video_proc(vf);

    *out = *in;

    if (vf_next_query_format(vf, IMGFMT_D3D11VA) ||
        vf_next_query_format(vf, IMGFMT_D3D11NV12))
    {
        out->imgfmt = vf_next_query_format(vf, IMGFMT_D3D11VA)
                    ? IMGFMT_D3D11VA : IMGFMT_D3D11NV12;
        out->hw_subfmt = IMGFMT_NV12;
        p->out_format = DXGI_FORMAT_NV12;
        p->out_shared = false;
        p->out_rgb = false;
    } else {
        out->imgfmt = IMGFMT_D3D11RGB;
        out->hw_subfmt = IMGFMT_RGB0;
        p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
        p->out_shared = true;
        p->out_rgb = true;
    }

    p->require_filtering = in->hw_subfmt != out->hw_subfmt;

    p->params = *in;
    p->out_params = *out;

    p->pool = mp_image_pool_new(20);
    mp_image_pool_set_allocator(p->pool, alloc_pool, vf);
    mp_image_pool_set_lru(p->pool);

    return 0;
}

static void uninit(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;

    destroy_video_proc(vf);

    flush_frames(vf);
    mp_refqueue_free(p->queue);
    talloc_free(p->pool);

    if (p->video_ctx)
        ID3D11VideoContext_Release(p->video_ctx);

    if (p->video_dev)
        ID3D11VideoDevice_Release(p->video_dev);

    if (p->device_ctx)
        ID3D11DeviceContext_Release(p->device_ctx);

    if (p->vo_dev)
        ID3D11Device_Release(p->vo_dev);
}

static int query_format(struct vf_instance *vf, unsigned int imgfmt)
{
    if (imgfmt == IMGFMT_D3D11VA ||
        imgfmt == IMGFMT_D3D11NV12 ||
        imgfmt == IMGFMT_D3D11RGB)
    {
        return vf_next_query_format(vf, IMGFMT_D3D11VA) ||
               vf_next_query_format(vf, IMGFMT_D3D11NV12) ||
               vf_next_query_format(vf, IMGFMT_D3D11RGB);
    }
    return 0;
}

static bool test_conversion(int in, int out)
{
    return (in == IMGFMT_D3D11VA ||
            in == IMGFMT_D3D11NV12 ||
            in == IMGFMT_D3D11RGB) &&
           (out == IMGFMT_D3D11VA ||
            out == IMGFMT_D3D11NV12 ||
            out == IMGFMT_D3D11RGB);
}

static int control(struct vf_instance *vf, int request, void* data)
{
    struct vf_priv_s *p = vf->priv;
    switch (request){
    case VFCTRL_GET_DEINTERLACE:
        *(int*)data = !!p->deint_enabled;
        return true;
    case VFCTRL_SET_DEINTERLACE:
        p->deint_enabled = !!*(int*)data;
        return true;
    case VFCTRL_SEEK_RESET:
        flush_frames(vf);
        return true;
    default:
        return CONTROL_UNKNOWN;
    }
}

static int vf_open(vf_instance_t *vf)
{
    struct vf_priv_s *p = vf->priv;

    vf->reconfig = reconfig;
    vf->filter_ext = filter_ext;
    vf->filter_out = filter_out;
    vf->query_format = query_format;
    vf->uninit = uninit;
    vf->control = control;

    p->queue = mp_refqueue_alloc();

    p->vo_dev = hwdec_devices_load(vf->hwdec_devs, HWDEC_D3D11VA);
    if (!p->vo_dev)
        return 0;

    ID3D11Device_AddRef(p->vo_dev);

    HRESULT hr;

    hr = ID3D11Device_QueryInterface(p->vo_dev, &IID_ID3D11VideoDevice,
                                     (void **)&p->video_dev);
    if (FAILED(hr))
        goto fail;

    ID3D11Device_GetImmediateContext(p->vo_dev, &p->device_ctx);
    if (!p->device_ctx)
        goto fail;
    hr = ID3D11DeviceContext_QueryInterface(p->device_ctx, &IID_ID3D11VideoContext,
                                            (void **)&p->video_ctx);
    if (FAILED(hr))
        goto fail;

    return 1;

fail:
    uninit(vf);
    return 0;
}

#define OPT_BASE_STRUCT struct vf_priv_s
static const m_option_t vf_opts_fields[] = {
    OPT_FLAG("deint", deint_enabled, 0),
    OPT_FLAG("interlaced-only", interlaced_only, 0),
    OPT_CHOICE("mode", mode, 0,
        ({"blend", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BLEND},
         {"bob", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB},
         {"adaptive", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_ADAPTIVE},
         {"mocomp", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_MOTION_COMPENSATION},
Example #12
static int recreate_video_proc(struct mp_filter *vf)
{
    struct priv *p = vf->priv;
    HRESULT hr;

    destroy_video_proc(vf);

    D3D11_VIDEO_PROCESSOR_CONTENT_DESC vpdesc = {
        .InputFrameFormat = p->d3d_frame_format,
        .InputWidth = p->c_w,
        .InputHeight = p->c_h,
        .OutputWidth = p->params.w,
        .OutputHeight = p->params.h,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(p->video_dev, &vpdesc,
                                                          &p->vp_enum);
    if (FAILED(hr))
        goto fail;

    D3D11_VIDEO_PROCESSOR_CAPS caps;
    hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(p->vp_enum, &caps);
    if (FAILED(hr))
        goto fail;

    MP_VERBOSE(vf, "Found %d rate conversion caps. Looking for caps=0x%x.\n",
               (int)caps.RateConversionCapsCount, p->opts->mode);

    int rindex = -1;
    for (int n = 0; n < caps.RateConversionCapsCount; n++) {
        D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
        hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps
                (p->vp_enum, n, &rcaps);
        if (FAILED(hr))
            goto fail;
        MP_VERBOSE(vf, "  - %d: 0x%08x\n", n, (unsigned)rcaps.ProcessorCaps);
        if (rcaps.ProcessorCaps & p->opts->mode) {
            MP_VERBOSE(vf, "       (matching)\n");
            if (rindex < 0)
                rindex = n;
        }
    }

    if (rindex < 0) {
        MP_WARN(vf, "No fitting video processor found, picking #0.\n");
        rindex = 0;
    }

    // TODO: so, how do we select which rate conversion mode the processor uses?

    hr = ID3D11VideoDevice_CreateVideoProcessor(p->video_dev, p->vp_enum, rindex,
                                                &p->video_proc);
    if (FAILED(hr)) {
        MP_ERR(vf, "Failed to create D3D11 video processor.\n");
        goto fail;
    }

    // Note: libavcodec does not support cropping left/top with hwaccel.
    RECT src_rc = {
        .right = p->params.w,
        .bottom = p->params.h,
    };
    ID3D11VideoContext_VideoProcessorSetStreamSourceRect(p->video_ctx,
                                                         p->video_proc,
                                                         0, TRUE, &src_rc);

    // This is supposed to stop drivers from f*****g up the video quality.
    ID3D11VideoContext_VideoProcessorSetStreamAutoProcessingMode(p->video_ctx,
                                                                 p->video_proc,
                                                                 0, FALSE);

    ID3D11VideoContext_VideoProcessorSetStreamOutputRate(p->video_ctx,
                                                         p->video_proc,
                                                         0,
                                                         D3D11_VIDEO_PROCESSOR_OUTPUT_RATE_NORMAL,
                                                         FALSE, 0);

    D3D11_VIDEO_PROCESSOR_COLOR_SPACE csp = {
        .YCbCr_Matrix = p->params.color.space != MP_CSP_BT_601,
        .Nominal_Range = p->params.color.levels == MP_CSP_LEVELS_TV ? 1 : 2,
    };
    ID3D11VideoContext_VideoProcessorSetStreamColorSpace(p->video_ctx,
                                                         p->video_proc,
                                                         0, &csp);
    if (p->out_rgb) {
        if (p->params.color.space != MP_CSP_BT_601 &&
            p->params.color.space != MP_CSP_BT_709)
        {
            MP_WARN(vf, "Unsupported video colorspace (%s/%s). Consider "
                    "disabling hardware decoding, or using "
                    "--hwdec=d3d11va-copy to get correct output.\n",
                    m_opt_choice_str(mp_csp_names, p->params.color.space),
                    m_opt_choice_str(mp_csp_levels_names, p->params.color.levels));
        }
    } else {
        ID3D11VideoContext_VideoProcessorSetOutputColorSpace(p->video_ctx,
                                                             p->video_proc,
                                                             &csp);
    }

    return 0;
fail:
    destroy_video_proc(vf);
    return -1;
}

static struct mp_image *render(struct mp_filter *vf)
{
    struct priv *p = vf->priv;
    int res = -1;
    HRESULT hr;
    ID3D11VideoProcessorInputView *in_view = NULL;
    ID3D11VideoProcessorOutputView *out_view = NULL;
    struct mp_image *in = NULL, *out = NULL;
    out = mp_image_pool_get(p->pool, IMGFMT_D3D11, p->params.w, p->params.h);
    if (!out) {
        MP_WARN(vf, "failed to allocate frame\n");
        goto cleanup;
    }

    ID3D11Texture2D *d3d_out_tex = (void *)out->planes[0];

    in = mp_refqueue_get(p->queue, 0);
    if (!in)
        goto cleanup;
    ID3D11Texture2D *d3d_tex = (void *)in->planes[0];
    int d3d_subindex = (intptr_t)in->planes[1];

    mp_image_copy_attributes(out, in);

    D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
    if (!mp_refqueue_should_deint(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_top_field_first(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    D3D11_TEXTURE2D_DESC texdesc;
    ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
    if (!p->video_proc || p->c_w != texdesc.Width || p->c_h != texdesc.Height ||
        p->d3d_frame_format != d3d_frame_format)
    {
        p->c_w = texdesc.Width;
        p->c_h = texdesc.Height;
        p->d3d_frame_format = d3d_frame_format;
        if (recreate_video_proc(vf) < 0)
            goto cleanup;
    }

    if (!mp_refqueue_should_deint(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_is_top_field(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    ID3D11VideoContext_VideoProcessorSetStreamFrameFormat(p->video_ctx,
                                                          p->video_proc,
                                                          0, d3d_frame_format);

    D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC indesc = {
        .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
        .Texture2D = {
            .ArraySlice = d3d_subindex,
        },
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorInputView(p->video_dev,
                                                         (ID3D11Resource *)d3d_tex,
                                                         p->vp_enum, &indesc,
                                                         &in_view);
    if (FAILED(hr)) {
        MP_ERR(vf, "Could not create ID3D11VideoProcessorInputView\n");
        goto cleanup;
    }

    D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outdesc = {
        .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorOutputView(p->video_dev,
                                                          (ID3D11Resource *)d3d_out_tex,
                                                          p->vp_enum, &outdesc,
                                                          &out_view);
    if (FAILED(hr)) {
        MP_ERR(vf, "Could not create ID3D11VideoProcessorOutputView\n");
        goto cleanup;
    }

    D3D11_VIDEO_PROCESSOR_STREAM stream = {
        .Enable = TRUE,
        .pInputSurface = in_view,
    };
    int frame = mp_refqueue_is_second_field(p->queue);
    hr = ID3D11VideoContext_VideoProcessorBlt(p->video_ctx, p->video_proc,
                                              out_view, frame, 1, &stream);
    if (FAILED(hr)) {
        MP_ERR(vf, "VideoProcessorBlt failed.\n");
        goto cleanup;
    }

    res = 0;
cleanup:
    if (in_view)
        ID3D11VideoProcessorInputView_Release(in_view);
    if (out_view)
        ID3D11VideoProcessorOutputView_Release(out_view);
    if (res < 0)
        TA_FREEP(&out);
    return out;
}

static bool vo_supports(struct priv *p, int subfmt)
{
    for (int n = 0; p->vo_formats && p->vo_formats[n]; n++) {
        if (p->vo_formats[n] == subfmt)
            return true;
    }
    return false;
}

static void vf_d3d11vpp_process(struct mp_filter *vf)
{
    struct priv *p = vf->priv;

    struct mp_image *in_fmt = mp_refqueue_execute_reinit(p->queue);
    if (in_fmt) {
        mp_image_pool_clear(p->pool);

        destroy_video_proc(vf);

        p->params = in_fmt->params;
        p->out_params = p->params;

        if (vo_supports(p, IMGFMT_NV12)) {
            p->out_params.hw_subfmt = IMGFMT_NV12;
            p->out_format = DXGI_FORMAT_NV12;
            p->out_shared = false;
            p->out_rgb = false;
        } else {
            p->out_params.hw_subfmt = IMGFMT_RGB0;
            p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
            p->out_shared = true;
            p->out_rgb = true;
        }
        p->out_params.hw_flags = 0;

        p->require_filtering = p->params.hw_subfmt != p->out_params.hw_subfmt;
    }

    if (!mp_refqueue_can_output(p->queue))
        return;

    if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
        // no filtering
        struct mp_image *in = mp_image_new_ref(mp_refqueue_get(p->queue, 0));
        if (!in) {
            mp_filter_internal_mark_failed(vf);
            return;
        }
        mp_refqueue_write_out_pin(p->queue, in);
    } else {
        mp_refqueue_write_out_pin(p->queue, render(vf));
    }
}

static void uninit(struct mp_filter *vf)
{
    struct priv *p = vf->priv;

    destroy_video_proc(vf);

    flush_frames(vf);
    talloc_free(p->queue);
    talloc_free(p->pool);

    if (p->video_ctx)
        ID3D11VideoContext_Release(p->video_ctx);

    if (p->video_dev)
        ID3D11VideoDevice_Release(p->video_dev);

    if (p->device_ctx)
        ID3D11DeviceContext_Release(p->device_ctx);

    if (p->vo_dev)
        ID3D11Device_Release(p->vo_dev);
}

static const struct mp_filter_info vf_d3d11vpp_filter = {
    .name = "d3d11vpp",
    .process = vf_d3d11vpp_process,
    .reset = flush_frames,
    .destroy = uninit,
    .priv_size = sizeof(struct priv),
};

static struct mp_filter *vf_d3d11vpp_create(struct mp_filter *parent,
                                            void *options)
{
    struct mp_filter *f = mp_filter_create(parent, &vf_d3d11vpp_filter);
    if (!f) {
        talloc_free(options);
        return NULL;
    }

    mp_filter_add_pin(f, MP_PIN_IN, "in");
    mp_filter_add_pin(f, MP_PIN_OUT, "out");

    struct priv *p = f->priv;
    p->opts = talloc_steal(p, options);

    // Special path for vf_d3d11_create_outconv(): disable all processing except
    // possibly surface format conversions.
    if (!p->opts) {
        static const struct opts opts = {0};
        p->opts = (struct opts *)&opts;
    }

    p->queue = mp_refqueue_alloc(f);

    struct mp_stream_info *info = mp_filter_find_stream_info(f);
    if (!info || !info->hwdec_devs)
        goto fail;

    hwdec_devices_request_all(info->hwdec_devs);

    struct mp_hwdec_ctx *hwctx =
        hwdec_devices_get_by_lavc(info->hwdec_devs, AV_HWDEVICE_TYPE_D3D11VA);
    if (!hwctx || !hwctx->av_device_ref)
        goto fail;
    AVHWDeviceContext *avhwctx = (void *)hwctx->av_device_ref->data;
    AVD3D11VADeviceContext *d3dctx = avhwctx->hwctx;

    p->vo_dev = d3dctx->device;
    ID3D11Device_AddRef(p->vo_dev);

    p->vo_formats = hwctx->supported_formats;

    HRESULT hr;

    hr = ID3D11Device_QueryInterface(p->vo_dev, &IID_ID3D11VideoDevice,
                                     (void **)&p->video_dev);
    if (FAILED(hr))
        goto fail;

    ID3D11Device_GetImmediateContext(p->vo_dev, &p->device_ctx);
    if (!p->device_ctx)
        goto fail;
    hr = ID3D11DeviceContext_QueryInterface(p->device_ctx, &IID_ID3D11VideoContext,
                                            (void **)&p->video_ctx);
    if (FAILED(hr))
        goto fail;

    p->pool = mp_image_pool_new(f);
    mp_image_pool_set_allocator(p->pool, alloc_pool, f);
    mp_image_pool_set_lru(p->pool);

    mp_refqueue_add_in_format(p->queue, IMGFMT_D3D11, 0);

    mp_refqueue_set_refs(p->queue, 0, 0);
    mp_refqueue_set_mode(p->queue,
        (p->opts->deint_enabled ? MP_MODE_DEINT : 0) |
        MP_MODE_OUTPUT_FIELDS |
        (p->opts->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));

    return f;

fail:
    talloc_free(f);
    return NULL;
}

#define OPT_BASE_STRUCT struct opts
static const m_option_t vf_opts_fields[] = {
    OPT_FLAG("deint", deint_enabled, 0),
    OPT_FLAG("interlaced-only", interlaced_only, 0),
    OPT_CHOICE("mode", mode, 0,
        ({"blend", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BLEND},
         {"bob", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB},
         {"adaptive", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_ADAPTIVE},
         {"mocomp", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_MOTION_COMPENSATION},
         {"ivctc", D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_INVERSE_TELECINE},
Example #13
struct mp_image *convert_image(struct mp_image *image, int destfmt,
                               struct mp_log *log)
{
    int d_w, d_h;
    mp_image_params_get_dsize(&image->params, &d_w, &d_h);

    struct mp_image_params p = {
        .imgfmt = destfmt,
        .w = d_w,
        .h = d_h,
        .p_w = 1,
        .p_h = 1,
    };
    mp_image_params_guess_csp(&p);

    // If RGB, just assume everything is correct.
    if (p.color.space != MP_CSP_RGB) {
        // Currently, assume what FFmpeg's jpg encoder needs.
        // Of course this works only for non-HDR (no HDR support in libswscale).
        p.color.levels = MP_CSP_LEVELS_PC;
        p.color.space = MP_CSP_BT_601;
        p.chroma_location = MP_CHROMA_CENTER;
        mp_image_params_guess_csp(&p);
    }

    if (mp_image_params_equal(&p, &image->params))
        return mp_image_new_ref(image);

    struct mp_image *dst = mp_image_alloc(p.imgfmt, p.w, p.h);
    if (!dst) {
        mp_err(log, "Out of memory.\n");
        return NULL;
    }
    mp_image_copy_attributes(dst, image);

    dst->params = p;

    if (mp_image_swscale(dst, image, mp_sws_hq_flags) < 0) {
        mp_err(log, "Error when converting image.\n");
        talloc_free(dst);
        return NULL;
    }

    return dst;
}

bool write_image(struct mp_image *image, const struct image_writer_opts *opts,
                const char *filename, struct mp_log *log)
{
    struct image_writer_opts defs = image_writer_opts_defaults;
    if (!opts)
        opts = &defs;

    struct image_writer_ctx ctx = { log, opts, image->fmt };
    bool (*write)(struct image_writer_ctx *, mp_image_t *, FILE *) = write_lavc;
    int destfmt = 0;

#if HAVE_JPEG
    if (opts->format == AV_CODEC_ID_MJPEG) {
        write = write_jpeg;
        destfmt = IMGFMT_RGB24;
    }
#endif

    if (!destfmt)
        destfmt = get_target_format(&ctx);

    struct mp_image *dst = convert_image(image, destfmt, log);
    if (!dst)
        return false;

    FILE *fp = fopen(filename, "wb");
    bool success = false;
    if (fp == NULL) {
        mp_err(log, "Error opening '%s' for writing!\n", filename);
    } else {
        success = write(&ctx, dst, fp);
        success = !fclose(fp) && success;
        if (!success)
            mp_err(log, "Error writing file '%s'!\n", filename);
    }

    talloc_free(dst);
    return success;
}

void dump_png(struct mp_image *image, const char *filename, struct mp_log *log)
{
    struct image_writer_opts opts = image_writer_opts_defaults;
    opts.format = AV_CODEC_ID_PNG;
    write_image(image, &opts, filename, log);
}
Example #14
void video_reset(struct dec_video *d_video)
{
    video_vd_control(d_video, VDCTRL_RESET, NULL);
    d_video->first_packet_pdts = MP_NOPTS_VALUE;
    d_video->start_pts = MP_NOPTS_VALUE;
    d_video->decoded_pts = MP_NOPTS_VALUE;
    d_video->codec_pts = MP_NOPTS_VALUE;
    d_video->codec_dts = MP_NOPTS_VALUE;
    d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
    d_video->dropped_frames = 0;
    d_video->current_state = DATA_AGAIN;
    mp_image_unrefp(&d_video->current_mpi);
    talloc_free(d_video->packet);
    d_video->packet = NULL;
    talloc_free(d_video->new_segment);
    d_video->new_segment = NULL;
    d_video->start = d_video->end = MP_NOPTS_VALUE;
}

int video_vd_control(struct dec_video *d_video, int cmd, void *arg)
{
    const struct vd_functions *vd = d_video->vd_driver;
    if (vd)
        return vd->control(d_video, cmd, arg);
    return CONTROL_UNKNOWN;
}

void video_uninit(struct dec_video *d_video)
{
    if (!d_video)
        return;
    mp_image_unrefp(&d_video->current_mpi);
    mp_image_unrefp(&d_video->cover_art_mpi);
    if (d_video->vd_driver) {
        MP_VERBOSE(d_video, "Uninit video.\n");
        d_video->vd_driver->uninit(d_video);
    }
    talloc_free(d_video->packet);
    talloc_free(d_video->new_segment);
    talloc_free(d_video);
}

static int init_video_codec(struct dec_video *d_video, const char *decoder)
{
    if (!d_video->vd_driver->init(d_video, decoder)) {
        MP_VERBOSE(d_video, "Video decoder init failed.\n");
        return 0;
    }
    return 1;
}

struct mp_decoder_list *video_decoder_list(void)
{
    struct mp_decoder_list *list = talloc_zero(NULL, struct mp_decoder_list);
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++)
        mpcodecs_vd_drivers[i]->add_decoders(list);
    return list;
}

static struct mp_decoder_list *mp_select_video_decoders(const char *codec,
                                                        char *selection)
{
    struct mp_decoder_list *list = video_decoder_list();
    struct mp_decoder_list *new = mp_select_decoders(list, codec, selection);
    talloc_free(list);
    return new;
}

static const struct vd_functions *find_driver(const char *name)
{
    for (int i = 0; mpcodecs_vd_drivers[i] != NULL; i++) {
        if (strcmp(mpcodecs_vd_drivers[i]->name, name) == 0)
            return mpcodecs_vd_drivers[i];
    }
    return NULL;
}

bool video_init_best_codec(struct dec_video *d_video)
{
    struct MPOpts *opts = d_video->opts;

    assert(!d_video->vd_driver);
    video_reset(d_video);
    d_video->has_broken_packet_pts = -10; // needs 10 packets to reach decision

    struct mp_decoder_entry *decoder = NULL;
    struct mp_decoder_list *list =
        mp_select_video_decoders(d_video->codec->codec, opts->video_decoders);

    mp_print_decoders(d_video->log, MSGL_V, "Codec list:", list);

    for (int n = 0; n < list->num_entries; n++) {
        struct mp_decoder_entry *sel = &list->entries[n];
        const struct vd_functions *driver = find_driver(sel->family);
        if (!driver)
            continue;
        MP_VERBOSE(d_video, "Opening video decoder %s:%s\n",
                   sel->family, sel->decoder);
        d_video->vd_driver = driver;
        if (init_video_codec(d_video, sel->decoder)) {
            decoder = sel;
            break;
        }
        d_video->vd_driver = NULL;
        MP_WARN(d_video, "Video decoder init failed for "
                "%s:%s\n", sel->family, sel->decoder);
    }

    if (d_video->vd_driver) {
        d_video->decoder_desc =
            talloc_asprintf(d_video, "%s [%s:%s]", decoder->desc, decoder->family,
                            decoder->decoder);
        MP_VERBOSE(d_video, "Selected video codec: %s\n", d_video->decoder_desc);
    } else {
        MP_ERR(d_video, "Failed to initialize a video decoder for codec '%s'.\n",
               d_video->codec->codec);
    }

    if (d_video->header->missing_timestamps) {
        MP_WARN(d_video, "This stream has no timestamps!\n");
        MP_WARN(d_video, "Making up playback time using %f FPS.\n", d_video->fps);
        MP_WARN(d_video, "Seeking will probably fail badly.\n");
    }

    talloc_free(list);
    return !!d_video->vd_driver;
}

static void fix_image_params(struct dec_video *d_video,
                             struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct mp_codec_params *c = d_video->codec;

    MP_VERBOSE(d_video, "Decoder format: %s\n", mp_image_params_to_str(params));

    // While mp_image_params normally must have d_w/d_h set, the decoder
    // signals an unknown bitstream aspect ratio with both set to 0.
    float dec_aspect = p.p_w > 0 && p.p_h > 0 ? p.p_w / (float)p.p_h : 0;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = dec_aspect;

    bool use_container = true;
    switch (opts->aspect_method) {
    case 0:
        // We normally prefer the container aspect, unless the decoder aspect
        // changes at least once.
        if (dec_aspect > 0 && d_video->initial_decoder_aspect != dec_aspect) {
            MP_VERBOSE(d_video, "Using bitstream aspect ratio.\n");
            // Even if the aspect switches back, don't use container aspect again.
            d_video->initial_decoder_aspect = -1;
            use_container = false;
        }
        break;
    case 1:
        use_container = false;
        break;
    }

    if (use_container && c->par_w > 0 && c->par_h) {
        MP_VERBOSE(d_video, "Using container aspect ratio.\n");
        p.p_w = c->par_w;
        p.p_h = c->par_h;
    }

    if (opts->movie_aspect >= 0) {
        MP_VERBOSE(d_video, "Forcing user-set aspect ratio.\n");
        if (opts->movie_aspect == 0) {
            p.p_w = p.p_h = 1;
        } else {
            AVRational a = av_d2q(opts->movie_aspect, INT_MAX);
            mp_image_params_set_dsize(&p, a.num, a.den);
        }
    }

    // Assume square pixels if no aspect ratio is set at all.
    if (p.p_w <= 0 || p.p_h <= 0)
        p.p_w = p.p_h = 1;

    // Detect colorspace from resolution.
    mp_image_params_guess_csp(&p);

    d_video->last_format = *params;
    d_video->fixed_format = p;
}

static struct mp_image *decode_packet(struct dec_video *d_video,
                                      struct demux_packet *packet,
                                      int drop_frame)
{
    struct MPOpts *opts = d_video->opts;

    if (!d_video->vd_driver)
        return NULL;

    double pkt_pts = packet ? packet->pts : MP_NOPTS_VALUE;
    double pkt_dts = packet ? packet->dts : MP_NOPTS_VALUE;

    if (pkt_pts == MP_NOPTS_VALUE)
        d_video->has_broken_packet_pts = 1;

    double pkt_pdts = pkt_pts == MP_NOPTS_VALUE ? pkt_dts : pkt_pts;
    if (pkt_pdts != MP_NOPTS_VALUE && d_video->first_packet_pdts == MP_NOPTS_VALUE)
        d_video->first_packet_pdts = pkt_pdts;

    MP_STATS(d_video, "start decode video");

    struct mp_image *mpi = d_video->vd_driver->decode(d_video, packet, drop_frame);

    MP_STATS(d_video, "end decode video");

    // Error, discarded frame, dropped frame, or initial codec delay.
    if (!mpi || drop_frame) {
        talloc_free(mpi);
        return NULL;
    }

    if (opts->field_dominance == 0) {
        mpi->fields |= MP_IMGFIELD_TOP_FIRST | MP_IMGFIELD_INTERLACED;
    } else if (opts->field_dominance == 1) {
        mpi->fields &= ~MP_IMGFIELD_TOP_FIRST;
        mpi->fields |= MP_IMGFIELD_INTERLACED;
    }

    // Note: the PTS is reordered, but the DTS is not. Both should be monotonic.
    double pts = mpi->pts;
    double dts = mpi->dts;

    if (pts != MP_NOPTS_VALUE) {
        if (pts < d_video->codec_pts)
            d_video->num_codec_pts_problems++;
        d_video->codec_pts = mpi->pts;
    }

    if (dts != MP_NOPTS_VALUE) {
        if (dts <= d_video->codec_dts)
            d_video->num_codec_dts_problems++;
        d_video->codec_dts = mpi->dts;
    }

    if (d_video->has_broken_packet_pts < 0)
        d_video->has_broken_packet_pts++;
    if (d_video->num_codec_pts_problems)
        d_video->has_broken_packet_pts = 1;

    // If PTS is unset, or non-monotonic, fall back to DTS.
    if ((d_video->num_codec_pts_problems > d_video->num_codec_dts_problems ||
         pts == MP_NOPTS_VALUE) && dts != MP_NOPTS_VALUE)
        pts = dts;

    if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
        if (opts->correct_pts && !d_video->header->missing_timestamps)
            MP_WARN(d_video, "No video PTS! Making something up.\n");

        double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
        double base = d_video->first_packet_pdts;
        pts = d_video->decoded_pts;
        if (pts == MP_NOPTS_VALUE) {
            pts = base == MP_NOPTS_VALUE ? 0 : base;
        } else {
            pts += frame_time;
        }
    }

    if (!mp_image_params_equal(&d_video->last_format, &mpi->params))
        fix_image_params(d_video, &mpi->params);

    mpi->params = d_video->fixed_format;

    mpi->pts = pts;
    d_video->decoded_pts = pts;

    // Compensate for incorrectly using mpeg-style DTS for avi timestamps.
    if (d_video->codec->avi_dts && opts->correct_pts &&
        mpi->pts != MP_NOPTS_VALUE && d_video->fps > 0)
    {
        int delay = -1;
        video_vd_control(d_video, VDCTRL_GET_BFRAMES, &delay);
        mpi->pts -= MPMAX(delay, 0) / d_video->fps;
    }

    return mpi;
}

void video_reset_aspect(struct dec_video *d_video)
{
    d_video->last_format = (struct mp_image_params){0};
}

void video_set_framedrop(struct dec_video *d_video, bool enabled)
{
    d_video->framedrop_enabled = enabled;
}

// Frames before the start timestamp can be dropped. (Used for hr-seek.)
void video_set_start(struct dec_video *d_video, double start_pts)
{
    d_video->start_pts = start_pts;
}

void video_work(struct dec_video *d_video)
{
    if (d_video->current_mpi)
        return;

    if (d_video->header->attached_picture) {
        if (d_video->current_state == DATA_AGAIN && !d_video->cover_art_mpi) {
            d_video->cover_art_mpi =
                decode_packet(d_video, d_video->header->attached_picture, 0);
            // Might need flush.
            if (!d_video->cover_art_mpi)
                d_video->cover_art_mpi = decode_packet(d_video, NULL, 0);
            d_video->current_state = DATA_OK;
        }
        if (d_video->current_state == DATA_OK)
            d_video->current_mpi = mp_image_new_ref(d_video->cover_art_mpi);
        // (DATA_OK is returned the first time, when current_mpi is still set)
        d_video->current_state = DATA_EOF;
        return;
    }

    if (!d_video->packet && !d_video->new_segment &&
        demux_read_packet_async(d_video->header, &d_video->packet) == 0)
    {
        d_video->current_state = DATA_WAIT;
        return;
    }

    if (d_video->packet) {
        if (d_video->packet->dts == MP_NOPTS_VALUE && !d_video->codec->avi_dts)
            d_video->packet->dts = d_video->packet->pts;
    }

    if (d_video->packet && d_video->packet->new_segment) {
        assert(!d_video->new_segment);
        d_video->new_segment = d_video->packet;
        d_video->packet = NULL;
    }

    bool had_input_packet = !!d_video->packet;
    bool had_packet = had_input_packet || d_video->new_segment;

    double start_pts = d_video->start_pts;
    if (d_video->start != MP_NOPTS_VALUE && (start_pts == MP_NOPTS_VALUE ||
                                             d_video->start > start_pts))
        start_pts = d_video->start;

    int framedrop_type = d_video->framedrop_enabled ? 1 : 0;
    if (start_pts != MP_NOPTS_VALUE && d_video->packet &&
        d_video->packet->pts < start_pts - .005 &&
        !d_video->has_broken_packet_pts)
    {
        framedrop_type = 2;
    }
    d_video->current_mpi = decode_packet(d_video, d_video->packet, framedrop_type);
    if (d_video->packet && d_video->packet->len == 0) {
        talloc_free(d_video->packet);
        d_video->packet = NULL;
    }

    d_video->current_state = DATA_OK;
    if (!d_video->current_mpi) {
        d_video->current_state = DATA_EOF;
        if (had_packet) {
            if (framedrop_type == 1)
                d_video->dropped_frames += 1;
            d_video->current_state = DATA_AGAIN;
        }
    }

    bool segment_ended = !d_video->current_mpi && !had_input_packet;

    if (d_video->current_mpi && d_video->current_mpi->pts != MP_NOPTS_VALUE) {
        double vpts = d_video->current_mpi->pts;
        segment_ended = d_video->end != MP_NOPTS_VALUE && vpts >= d_video->end;
        if ((d_video->start != MP_NOPTS_VALUE && vpts < d_video->start)
            || segment_ended)
        {
            talloc_free(d_video->current_mpi);
            d_video->current_mpi = NULL;
        }
    }

    // If there's a new segment, start it as soon as we're drained/finished.
    if (segment_ended && d_video->new_segment) {
        struct demux_packet *new_segment = d_video->new_segment;
        d_video->new_segment = NULL;

        // Could avoid decoder reinit; would still need flush.
        d_video->codec = new_segment->codec;
        if (d_video->vd_driver)
            d_video->vd_driver->uninit(d_video);
        d_video->vd_driver = NULL;
        video_init_best_codec(d_video);

        d_video->start = new_segment->start;
        d_video->end = new_segment->end;

        new_segment->new_segment = false;

        d_video->packet = new_segment;
        d_video->current_state = DATA_AGAIN;
    }
}

// Fetch an image decoded with video_work(). Returns one of:
//  DATA_OK:    *out_mpi is set to a new image
//  DATA_WAIT:  waiting for demuxer; will receive a wakeup signal
//  DATA_EOF:   end of file, no more frames to be expected
//  DATA_AGAIN: dropped frame or something similar
int video_get_frame(struct dec_video *d_video, struct mp_image **out_mpi)
{
    *out_mpi = NULL;
    if (d_video->current_mpi) {
        *out_mpi = d_video->current_mpi;
        d_video->current_mpi = NULL;
        return DATA_OK;
    }
    if (d_video->current_state == DATA_OK)
        return DATA_AGAIN;
    return d_video->current_state;
}
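
A hypothetical caller sketch built on the DATA_* contract documented above video_get_frame(): drive the decoder with video_work(), fetch the result, and release the image when done. The wrapper function and its control flow are assumptions for illustration; only video_work(), video_get_frame(), the DATA_* values, and talloc_free() come from the code above.

static void example_poll_frame(struct dec_video *d_video)
{
    video_work(d_video);    // decode/feed as needed

    struct mp_image *mpi = NULL;
    switch (video_get_frame(d_video, &mpi)) {
    case DATA_OK:
        // ... use mpi (display, convert, ...) ...
        talloc_free(mpi);   // release the reference handed out by the decoder
        break;
    case DATA_WAIT:
        // demuxer has no data yet; a wakeup will follow, try again later
        break;
    case DATA_AGAIN:
        // dropped frame or similar; call again
        break;
    case DATA_EOF:
        // no more frames
        break;
    }
}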