Example #1
static int vaapi_encode_wait(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;

    av_assert0(pic->encode_issued);

    if (pic->encode_complete) {
        // Already waited for this picture.
        return 0;
    }

    av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
           "(recon surface %#x).\n", pic->display_order,
           pic->encode_order, pic->recon_surface);

    vas = vaSyncSurface(ctx->hwctx->display, pic->recon_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    // Input is definitely finished with now.
    av_frame_free(&pic->input_image);

    pic->encode_complete = 1;
    return 0;
}
Example #2
bool GLXInteropResource::map(const surface_ptr& surface, GLuint tex, int w, int h, int)
{
    Q_UNUSED(w);
    Q_UNUSED(h);

    surface_glx_ptr glx = glx_surfaces[tex];

    if(!surface.isNull() && (surface->width() != m_width || surface->height() != m_height)) {
        m_width = surface->width();
        m_height = surface->height();
        m_regenerateGlx = true;
    }

    if(m_regenerateGlx || !glx) {

        if (glx) { glx.clear(); }

        glx = surface_glx_ptr(new surface_glx_t(surface->display()));

        if (!glx->create(tex))
            glx = surface_glx_ptr();
        glx_surfaces[tex] = glx;
    }

    if (!glx) {
        qWarning("Fail to create vaapi glx surface");
        return false;
    }
    if (!glx->copy(surface))
        return false;
    VAWARN(vaSyncSurface(surface->vadisplay(), surface->get()));
    return true;
}
Example #3
bool VaApiMixer::upload(const VideoFrame &frame, bool deint) {
	if (!m_glSurface)
		return false;
	static const int specs[MP_CSP_COUNT] = {
		0,					//MP_CSP_AUTO,
		VA_SRC_BT601,		//MP_CSP_BT_601,
		VA_SRC_BT709,		//MP_CSP_BT_709,
		VA_SRC_SMPTE_240,	//MP_CSP_SMPTE_240M,
		0,					//MP_CSP_RGB,
		0,					//MP_CSP_XYZ,
		0,					//MP_CSP_YCGCO,
	};
	static const int field[] = {
		// Picture = 0,   Top = 1,      Bottom = 2
		VA_FRAME_PICTURE, VA_TOP_FIELD, VA_BOTTOM_FIELD, VA_FRAME_PICTURE
	};
	const auto id = (VASurfaceID)(quintptr)frame.data(3);
	int flags = specs[frame.format().colorspace()];
	if (deint)
		flags |= field[frame.field() & VideoFrame::Interlaced];
	if (!check(vaCopySurfaceGLX(VaApi::glx(), m_glSurface, id,  flags), "Cannot copy OpenGL surface."))
		return false;
	if (!check(vaSyncSurface(VaApi::glx(), id), "Cannot sync video surface."))
		return false;
	return true;
}
Example #4
bool VaapiSurfaceGLXImpl::update(boost::shared_ptr<VaapiSurface> surface)
{
    GNASH_REPORT_FUNCTION;

    if (!this->surface()) {
        return false;
    }

    VaapiGlobalContext * gvactx = VaapiGlobalContext::get();
    if (!gvactx) {
        return false;
    }

    VAStatus status;
    status = vaSyncSurface(gvactx->display(), surface->get());
    if (!vaapi_check_status(status, "vaSyncSurface()"))
        return false;

    status = vaCopySurfaceGLX(gvactx->display(), this->surface(),
                              surface->get(), VA_FRAME_PICTURE);
    if (!vaapi_check_status(status, "vaCopySurfaceGLX()")) {
        return false;
    }

    return true;
}
Example #5
bool VaapiSurface::sync()
{
    VAStatus status;

    status = vaSyncSurface((VADisplay) m_display->getID(), (VASurfaceID) m_ID);

    if (!checkVaapiStatus(status, "vaSyncSurface()"))
        return false;

    return true;
}
Example #6
Decode_Status DecodeOutputNull::renderOneFrame(bool drain)
{
    m_frame.memoryType = VIDEO_DATA_MEMORY_TYPE_SURFACE_ID;
    m_frame.width = 0;
    m_frame.height = 0;
    m_frame.fourcc = 0;

    Decode_Status status = m_decoder->getOutput(&m_frame, drain);
    if (status == RENDER_SUCCESS)
        vaSyncSurface((void*)(m_frame.handle), m_frame.internalID);
    return status;
}
Example #7
void VAApiWriter::draw( VASurfaceID _id, int _field )
{
	if ( _id != VA_INVALID_SURFACE && _field > -1 )
	{
		if ( id != _id || _field == field )
			vaSyncSurface( VADisp, _id );
		id = _id;
		field = _field;
	}
	if ( id == VA_INVALID_SURFACE )
		return;

	bool associated = false;

	osd_mutex.lock();
	if ( !osd_list.isEmpty() )
	{
		QRect bounds;
		const qreal scaleW = ( qreal )W / outW, scaleH = ( qreal )H / outH;
		bool mustRepaint = Functions::mustRepaintOSD( osd_list, osd_checksums, &scaleW, &scaleH, &bounds );
		if ( !mustRepaint )
			mustRepaint = vaImgSize != bounds.size();
		bool canAssociate = !mustRepaint;
		if ( mustRepaint )
		{
			if ( vaImgSize != bounds.size() )
			{
				clearRGBImage();
				vaImgSize = QSize();
				if ( vaCreateImage( VADisp, rgbImgFmt, bounds.width(), bounds.height(), &vaImg ) == VA_STATUS_SUCCESS )
				{
					if ( vaCreateSubpicture( VADisp, vaImg.image_id, &vaSubpicID ) == VA_STATUS_SUCCESS )
						vaImgSize = bounds.size();
					else
						clearRGBImage();
				}
			}
			if ( vaSubpicID )
			{
				quint8 *buff;
				if ( vaMapBuffer( VADisp, vaImg.buf, ( void ** )&buff ) == VA_STATUS_SUCCESS )
				{
					QImage osdImg( buff += vaImg.offsets[ 0 ], vaImg.pitches[ 0 ] >> 2, bounds.height(), QImage::Format_ARGB32 );
					osdImg.fill( 0 );
					QPainter p( &osdImg );
					p.translate( -bounds.topLeft() );
					Functions::paintOSD( osd_list, scaleW, scaleH, p, &osd_checksums );
					vaUnmapBuffer( VADisp, vaImg.buf );
					canAssociate = true;
				}
			}
		}
Example #8
uint8_t* VAAPIContext::GetSurfaceIDPointer(void* buf)
{
    if (!buf)
        return NULL;

    const vaapi_surface *surf = (vaapi_surface*)buf;
    if (!surf->m_id)
        return NULL;

    INIT_ST;
    va_status = vaSyncSurface(m_ctx.display, surf->m_id);
    CHECK_ST;
    return (uint8_t*)(uintptr_t)surf->m_id;
}
Example #9
	mp_image *render(const VideoFrame &frame, int flags) {
		if (m_rebuild)
			update();
		auto in = VaApiSurfacePool::getSurface(frame.mpi());
		if (!in)
			return nullptr;
		m_pool.create(5, frame.width(), frame.height(), in->format());
		auto ret = m_pool.getMpImage();
		if (!ret)
			return nullptr;
		auto out = VaApiSurfacePool::getSurface(ret);
		if (!out)
			return nullptr;
		VAProcPipelineParameterBuffer *param = nullptr;
		VABufferID buffer = BufferMan::create(m_context, VAProcPipelineParameterBufferType, param, 1);
		if (buffer == VA_INVALID_ID)
			return nullptr;
		enum {Begun = 1, Rendered = 2};
		int state = 0;
		auto pass = [this, out, &ret, &buffer, &state, &frame] () -> mp_image* {
			if (state & Begun)
				vaEndPicture(VaApi::glx(), m_context);
			if (state & Rendered) {
				mp_image_copy_attributes(ret, frame.mpi());
			} else
				mp_image_unrefp(&ret);
			vaDestroyBuffer(VaApi::glx(), buffer);
			vaSyncSurface(VaApi::glx(), out->id());
			return ret;
		};
		if (!isSuccess(vaBeginPicture(VaApi::glx(), m_context, out->id())))
			return pass();
		state |= Begun;
		if (!BufferMan::map(buffer, param))
			return pass();
		memset(param, 0, sizeof(*param));
		param->surface = in->id();
		param->filter_flags = flags;
		param->filters = &m_buffers.first();
		param->num_filters = m_buffers.size();
		param->forward_references = m_forward_refs.data();
		param->backward_references = m_backward_refs.data();
		param->num_forward_references = m_caps.num_forward_references;
		param->num_backward_references = m_caps.num_backward_references;
		BufferMan::unmap(buffer);
		if (!isSuccess(vaRenderPicture(VaApi::glx(), m_context, &buffer, 1)))
			return pass();
		state |= Rendered;
		return pass();
	}
Example #10
bool GLXInteropResource::map(const surface_ptr& surface, GLuint tex, int w, int h, int)
{
    Q_UNUSED(w);
    Q_UNUSED(h);
    surface_glx_ptr glx = surfaceGLX(surface->display(), tex);
    if (!glx) {
        qWarning("Fail to create vaapi glx surface");
        return false;
    }
    if (!glx->copy(surface))
        return false;
    VAWARN(vaSyncSurface(surface->vadisplay(), surface->get()));
    return true;
}
Example #11
quint8 *VAApiWriter::getImage( VAImage &image, VASurfaceID surfaceID, VAImageFormat *img_fmt ) const
{
	if ( vaCreateImage( VADisp, img_fmt, outW, outH, &image ) == VA_STATUS_SUCCESS )
	{
		quint8 *data;
		if
		(
			vaSyncSurface( VADisp, surfaceID ) == VA_STATUS_SUCCESS &&
			vaGetImage( VADisp, surfaceID, 0, 0, outW, outH, image.image_id ) == VA_STATUS_SUCCESS &&
			vaMapBuffer( VADisp, image.buf, ( void ** )&data ) == VA_STATUS_SUCCESS
		) return data;
		vaDestroyImage( VADisp, image.image_id );
	}
	return NULL;
}
Example #12
/**
 * gst_vaapi_surface_sync:
 * @surface: a #GstVaapiSurface
 *
 * Blocks until all pending operations on the @surface have been
 * completed.
 *
 * Return value: %TRUE on success
 */
gboolean
gst_vaapi_surface_sync (GstVaapiSurface * surface)
{
  GstVaapiDisplay *display;
  VAStatus status;

  g_return_val_if_fail (surface != NULL, FALSE);

  display = GST_VAAPI_OBJECT_DISPLAY (surface);
  if (!display)
    return FALSE;

  GST_VAAPI_DISPLAY_LOCK (display);
  status = vaSyncSurface (GST_VAAPI_DISPLAY_VADISPLAY (display),
      GST_VAAPI_OBJECT_ID (surface));
  GST_VAAPI_DISPLAY_UNLOCK (display);
  if (!vaapi_check_status (status, "vaSyncSurface()"))
    return FALSE;

  return TRUE;
}
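
The doc comment above captures the whole contract: nothing read back from a surface is valid until the sync call returns. Below is a minimal raw-libva sketch of the same pattern, independent of the GStreamer wrapper; the display, surface, dimensions and a pre-created VAImage are assumed to come from the caller.

#include <va/va.h>

/* Hypothetical helper: block until all work queued on `surface` has finished,
 * then read its pixels back into a caller-created VAImage. */
static int read_back_surface(VADisplay dpy, VASurfaceID surface,
                             unsigned int width, unsigned int height,
                             VAImage *image)
{
    /* Blocks like gst_vaapi_surface_sync() above, minus the display lock. */
    VAStatus status = vaSyncSurface(dpy, surface);
    if (status != VA_STATUS_SUCCESS)
        return -1;

    /* Only after the sync is it safe to copy the pixels out. */
    status = vaGetImage(dpy, surface, 0, 0, width, height, image->image_id);
    if (status != VA_STATUS_SUCCESS)
        return -1;

    return 0;
}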
Example #13
VideoFrame VideoDecoderVAAPI::frame()
{
    DPTR_D(VideoDecoderVAAPI);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    VASurfaceID surface_id = (VASurfaceID)(uintptr_t)d.frame->data[3];
    VAStatus status = VA_STATUS_SUCCESS;
    if (display() == GLX) {
        d.surface_interop->setSurface((va_surface_t*)d.frame->opaque, d.surface_width, d.surface_height);
        VideoFrame f(d.surface_width, d.surface_height, VideoFormat::Format_RGB32);
        f.setBytesPerLine(d.surface_width*4); //used by gl to compute texture size
        f.setSurfaceInterop(d.surface_interop);
        return f;
    }
#if VA_CHECK_VERSION(0,31,0)
    if ((status = vaSyncSurface(d.display, surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VASurfaceID:%#x) == %#x", d.display, surface_id, status);
#else
    if (vaSyncSurface(d.display, d.context_id, surface_id)) {
        qWarning("vaSyncSurface(VADisplay:%#x, VAContextID:%#x, VASurfaceID:%#x) == %#x", d.display, d.context_id, surface_id, status);
#endif
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        /*
         * http://web.archiveorange.com/archive/v/OAywENyq88L319OcRnHI
         * vaDeriveImage is faster than vaGetImage. But VAImage is uncached memory and copying from it would be terribly slow
         * TODO: copy from USWC, see vlc and https://github.com/OpenELEC/OpenELEC.tv/pull/2937.diff
         * https://software.intel.com/en-us/articles/increasing-memory-throughput-with-intel-streaming-simd-extensions-4-intel-sse4-streaming-load
         */
        status = vaDeriveImage(d.display, surface_id, &d.image);
        if (status != VA_STATUS_SUCCESS) {
            qWarning("vaDeriveImage(VADisplay:%p, VASurfaceID:%#x, VAImage*:%p) == %#x", d.display, surface_id, &d.image, status);
            return VideoFrame();
        }
    } else {
        status = vaGetImage(d.display, surface_id, 0, 0, d.surface_width, d.surface_height, d.image.image_id);
        if (status != VA_STATUS_SUCCESS) {
            qWarning("vaGetImage(VADisplay:%p, VASurfaceID:%#x, 0,0, %d, %d, VAImageID:%#x) == %#x", d.display, surface_id, d.surface_width, d.surface_height, d.image.image_id, status);
            return VideoFrame();
        }
    }

    void *p_base;
    if ((status = vaMapBuffer(d.display, d.image.buf, &p_base)) != VA_STATUS_SUCCESS) {
        qWarning("vaMapBuffer(VADisplay:%p, VABufferID:%#x, pBuf:%p) == %#x", d.display, d.image.buf, &p_base, status);
        return VideoFrame();
    }

    VideoFormat::PixelFormat pixfmt = VideoFormat::Format_Invalid;
    bool swap_uv = false;
    switch (d.image.format.fourcc) {
    case VA_FOURCC_YV12:
        swap_uv |= d.disable_derive || !d.supports_derive;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_IYUV:
        swap_uv = true;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_NV12:
        pixfmt = VideoFormat::Format_NV12;
        break;
    default:
        break;
    }
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", d.image.format.fourcc);
        return VideoFrame();
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + d.image.offsets[i];
        pitch[i] = d.image.pitches[i];
    }
    if (swap_uv) {
        std::swap(src[1], src[2]);
        std::swap(pitch[1], pitch[2]);
    }
    VideoFrame frame;
    if (d.copy_uswc && d.gpu_mem.isReady()) {
        int yuv_size = 0;
        if (pixfmt == VideoFormat::Format_NV12)
            yuv_size = pitch[0]*d.surface_height*3/2;
        else
            yuv_size = pitch[0]*d.surface_height + pitch[1]*d.surface_height/2 + pitch[2]*d.surface_height/2;
        // additional 15 bytes to ensure 16 bytes aligned
        QByteArray buf(15 + yuv_size, 0);
        const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
        // plane 1, 2... is aligned?
        uchar* plane_ptr = (uchar*)buf.data() + offset_16;
        QVector<uchar*> dst(fmt.planeCount(), 0);
        for (int i = 0; i < dst.size(); ++i) {
            dst[i] = plane_ptr;
            // TODO: add VideoFormat::planeWidth/Height() ?
            const int plane_w = pitch[i];//(i == 0 || pixfmt == VideoFormat::Format_NV12) ? d.surface_width : fmt.chromaWidth(d.surface_width);
            const int plane_h = i == 0 ? d.surface_height : fmt.chromaHeight(d.surface_height);
            plane_ptr += pitch[i] * plane_h;
            d.gpu_mem.copyFrame(src[i], dst[i], plane_w, plane_h, pitch[i]);
        }
        frame = VideoFrame(buf, d.width, d.height, fmt);
        frame.setBits(dst);
        frame.setBytesPerLine(pitch);
    } else {
        frame = VideoFrame(d.width, d.height, fmt);
        frame.setBits(src);
        frame.setBytesPerLine(pitch);
        // TODO: why is clone() faster?
        frame = frame.clone();
    }

    if ((status = vaUnmapBuffer(d.display, d.image.buf)) != VA_STATUS_SUCCESS) {
        qWarning("vaUnmapBuffer(VADisplay:%p, VABufferID:%#x) == %#x", d.display, d.image.buf, status);
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        vaDestroyImage(d.display, d.image.image_id);
        d.image.image_id = VA_INVALID_ID;
    }
    return frame;
}

struct display_names_t {
    VideoDecoderVAAPI::DisplayType display;
    QString name;
};
static const display_names_t display_names[] = {
    { VideoDecoderVAAPI::GLX, "GLX" },
    { VideoDecoderVAAPI::X11, "X11" },
    { VideoDecoderVAAPI::DRM, "DRM" }
};

static VideoDecoderVAAPI::DisplayType displayFromName(QString name) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (name.toUpper().contains(display_names[i].name.toUpper())) {
            return display_names[i].display;
        }
    }
    return VideoDecoderVAAPI::X11;
}

static QString displayToName(VideoDecoderVAAPI::DisplayType t) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (t == display_names[i].display) {
            return display_names[i].name;
        }
    }
    return QString();
}

void VideoDecoderVAAPI::setDisplayPriority(const QStringList &priority)
{
    DPTR_D(VideoDecoderVAAPI);
    d.display_priority.clear();
    foreach (QString disp, priority) {
        d.display_priority.push_back(displayFromName(disp));
    }
}
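
The TODO comment inside VideoDecoderVAAPI::frame() above mentions copying from USWC (uncached, write-combined) memory with SSE4.1 streaming loads, as VLC and the linked OpenELEC patch do. A hedged sketch of what such a per-plane copy could look like follows; it is not QtAV's actual gpu_mem helper, and it assumes 16-byte-aligned row pointers and a width that is a multiple of 16 (round up towards the pitch otherwise).

#include <stdint.h>
#include <emmintrin.h>   /* SSE2: _mm_store_si128 */
#include <smmintrin.h>   /* SSE4.1: _mm_stream_load_si128 */

/* Hypothetical helper: copy one plane of a mapped VAImage (USWC memory) into
 * ordinary cacheable memory using streaming loads, which avoids stalling on
 * uncached reads the way a plain memcpy() would. */
static void copy_plane_from_uswc(uint8_t *dst, int dst_pitch,
                                 const uint8_t *src, int src_pitch,
                                 int width, int height)
{
    for (int y = 0; y < height; ++y) {
        const uint8_t *s = src + y * src_pitch;
        uint8_t *d = dst + y * dst_pitch;
        for (int x = 0; x < width; x += 16) {
            /* The intrinsic takes a non-const pointer, hence the cast. */
            __m128i v = _mm_stream_load_si128((__m128i *)(s + x));
            _mm_store_si128((__m128i *)(d + x), v);
        }
    }
}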
Example #14
void
GtkAggVaapiGlue::render()
{
     VaapiGlobalContext * const gvactx = VaapiGlobalContext::get();
     if (!gvactx)
         return;

     if (!_window_is_setup)
         return;

     if (!_vaapi_image.get() || !_vaapi_surface.get())
         return;

     if (!_vaapi_image->unmap()) {
         printf("ERROR: failed to unmap VA-API image\n");
         return;
     }

     VAStatus status;
     status = vaPutSurface(gvactx->display(),
                           _vaapi_surface->get(),
                           GDK_DRAWABLE_XID(_drawing_area->window),
                           0, 0,
                           _vaapi_surface->width(),
                           _vaapi_surface->height(),
                           0, 0,
                           _window_width,
                           _window_height,
                           NULL, 0,
                           VA_FRAME_PICTURE);
     if (!vaapi_check_status(status, "vaPutSurface() canvas"))
         return;

     Renderer_agg_base::RenderImages::const_iterator img, first_img, last_img;
     first_img = _agg_renderer->getFirstRenderImage();
     last_img  = _agg_renderer->getLastRenderImage();

     if (first_img != last_img) {
         for (img = first_img; img != last_img; ++img) {
             boost::shared_ptr<VaapiSurface> surface = (*img)->surface();

             VaapiRectangle src_rect;
             src_rect.x      = (*img)->x();
             src_rect.y      = (*img)->y();
             src_rect.width  = (*img)->width();
             src_rect.height = (*img)->height();

             VaapiRectangle dst_rect;
             const float xscale = _window_width / (float)_vaapi_image_width;
             const float yscale = _window_height / (float)_vaapi_image_height;
             dst_rect.x      = src_rect.x * xscale;
             dst_rect.y      = src_rect.y * yscale;
             dst_rect.width  = src_rect.width * xscale;
             dst_rect.height = src_rect.height * yscale;

             VaapiVideoWindow *videoWindow;
             videoWindow = getVideoWindow(surface, _drawing_area->window, dst_rect);
             if (!videoWindow) {
                 log_debug(_("ERROR: failed to setup video window for surface 0x%08x."), surface->get());
                 continue;
             }
             videoWindow->moveResize(dst_rect);

             VaapiRectangle pic_rect(surface->width(), surface->height());
             if (!surface->associateSubpicture(_vaapi_subpicture, src_rect, pic_rect)) {
                 log_debug(_("ERROR: failed to associate subpicture to surface 0x%08x."), surface->get());
                 continue;
             }

             status = vaPutSurface(gvactx->display(),
                                   surface->get(),
                                   videoWindow->xid(),
                                   0, 0, surface->width(), surface->height(),
                                   0, 0, dst_rect.width, dst_rect.height,
                                   NULL, 0,
                                   VA_FRAME_PICTURE);
             if (!vaapi_check_status(status, "vaPutSurface() video"))
                 continue;

             surface->deassociateSubpicture(_vaapi_subpicture);
         }

         for (img = first_img; img != last_img; ++img) {
             boost::shared_ptr<VaapiSurface> surface = (*img)->surface();

             status = vaSyncSurface(gvactx->display(), surface->get());
             if (!vaapi_check_status(status, "vaSyncSurface() video"))
                 continue;
         }
     }
}
Example #15
static void h264_decode_frame(int f_width, int f_height, char *framedata, int framesize, int slice_type)
{
    VAStatus va_status;

    DebugLog(("%s: called for frame of %d bytes (%dx%d) slice_type=%d\n", __FUNCTION__, framesize, width, height, slice_type));

    /* Initialize decode pipeline if necessary */
    if ( (f_width > cur_width) || (f_height > cur_height) ) {
        if (va_dpy != NULL)
            h264_cleanup_decoder();
        cur_width = f_width;
        cur_height = f_height;

        h264_init_decoder(f_width, f_height);
        rfbClientLog("%s: decoder initialized\n", __FUNCTION__);
    }

    /* Decode frame */
    static VAPictureH264 va_picture_h264, va_old_picture_h264;

    /* The server should always send an I-frame when a new client connects
     * or when the resolution of the framebuffer changes, but we check
     * just in case.
     */
    if ( (slice_type != SLICE_TYPE_I) && (num_frames == 0) ) {
        rfbClientLog("First frame is not an I frame !!! Skipping!!!\n");
        return;
    }

    DebugLog(("%s: frame_id=%d va_surface_id[%d]=0x%x field_order_count=%d\n", __FUNCTION__, frame_id, sid, va_surface_id[sid], field_order_count));

    va_picture_h264.picture_id = va_surface_id[sid];
    va_picture_h264.frame_idx  = frame_id;
    va_picture_h264.flags = 0;
    va_picture_h264.BottomFieldOrderCnt = field_order_count;
    va_picture_h264.TopFieldOrderCnt = field_order_count;

    /* Set up picture parameter buffer */
    if (va_pic_param_buf_id[sid] == VA_INVALID_ID) {
        va_status = vaCreateBuffer(va_dpy, va_context_id, VAPictureParameterBufferType, sizeof(VAPictureParameterBufferH264), 1, NULL, &va_pic_param_buf_id[sid]);
        CHECK_VASTATUS(va_status, "vaCreateBuffer(PicParam)");
    }
    CHECK_SURF(va_surface_id[sid]);

    VAPictureParameterBufferH264 *pic_param_buf = NULL;
    va_status = vaMapBuffer(va_dpy, va_pic_param_buf_id[sid], (void **)&pic_param_buf);
    CHECK_VASTATUS(va_status, "vaMapBuffer(PicParam)");

    SetVAPictureParameterBufferH264(pic_param_buf, f_width, f_height);
    memcpy(&pic_param_buf->CurrPic, &va_picture_h264, sizeof(VAPictureH264));

    if (slice_type == SLICE_TYPE_P) {
        memcpy(&pic_param_buf->ReferenceFrames[0], &va_old_picture_h264, sizeof(VAPictureH264));
        pic_param_buf->ReferenceFrames[0].flags = 0;
    }
    else if (slice_type != SLICE_TYPE_I) {
        rfbClientLog("Frame type %d not supported!!!\n");
        return;
    }
    pic_param_buf->frame_num = frame_id;

    va_status = vaUnmapBuffer(va_dpy, va_pic_param_buf_id[sid]);
    CHECK_VASTATUS(va_status, "vaUnmapBuffer(PicParam)");

    /* Set up IQ matrix buffer */
    if (va_mat_param_buf_id[sid] == VA_INVALID_ID) {
        va_status = vaCreateBuffer(va_dpy, va_context_id, VAIQMatrixBufferType, sizeof(VAIQMatrixBufferH264), 1, NULL, &va_mat_param_buf_id[sid]);
        CHECK_VASTATUS(va_status, "vaCreateBuffer(IQMatrix)");
    }
    CHECK_SURF(va_surface_id[sid]);

    VAIQMatrixBufferH264 *iq_matrix_buf = NULL;
    va_status = vaMapBuffer(va_dpy, va_mat_param_buf_id[sid], (void **)&iq_matrix_buf);
    CHECK_VASTATUS(va_status, "vaMapBuffer(IQMatrix)");

    static const unsigned char m_MatrixBufferH264[]= {
        /* ScalingList4x4[6][16] */
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
        /* ScalingList8x8[2][64] */
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
    };

    memcpy(iq_matrix_buf, m_MatrixBufferH264, 224);
    va_status = vaUnmapBuffer(va_dpy, va_mat_param_buf_id[sid]);
    CHECK_VASTATUS(va_status, "vaUnmapBuffer(IQMatrix)");

    VABufferID buffer_ids[2];
    buffer_ids[0] = va_pic_param_buf_id[sid];
    buffer_ids[1] = va_mat_param_buf_id[sid];

    CHECK_SURF(va_surface_id[sid]);
    va_status = vaRenderPicture(va_dpy, va_context_id, buffer_ids, 2);
    CHECK_VASTATUS(va_status, "vaRenderPicture");

    /* Set up slice parameter buffer */
    if (va_sp_param_buf_id[sid] == VA_INVALID_ID) {
        va_status = vaCreateBuffer(va_dpy, va_context_id, VASliceParameterBufferType, sizeof(VASliceParameterBufferH264), 1, NULL, &va_sp_param_buf_id[sid]);
        CHECK_VASTATUS(va_status, "vaCreateBuffer(SliceParam)");
    }
    CHECK_SURF(va_surface_id[sid]);

    VASliceParameterBufferH264 *slice_param_buf = NULL;
    va_status = vaMapBuffer(va_dpy, va_sp_param_buf_id[sid], (void **)&slice_param_buf);
    CHECK_VASTATUS(va_status, "vaMapBuffer(SliceParam)");

    static int t2_first = 1;
    if (slice_type == SLICE_TYPE_I) {
        SetVASliceParameterBufferH264_Intra(slice_param_buf, t2_first);
        t2_first = 0;
    } else {
        SetVASliceParameterBufferH264(slice_param_buf);
        memcpy(&slice_param_buf->RefPicList0[0], &va_old_picture_h264, sizeof(VAPictureH264));
        slice_param_buf->RefPicList0[0].flags = 0;
    }
    slice_param_buf->slice_data_bit_offset = 0;
    slice_param_buf->slice_data_size = framesize;

    va_status = vaUnmapBuffer(va_dpy, va_sp_param_buf_id[sid]);
    CHECK_VASTATUS(va_status, "vaUnmapBuffer(SliceParam)");
    CHECK_SURF(va_surface_id[sid]);

    /* Set up slice data buffer and copy H.264 encoded data */
    if (va_d_param_buf_id[sid] == VA_INVALID_ID) {
        /* TODO use estimation matching framebuffer dimensions instead of this large value */
        va_status = vaCreateBuffer(va_dpy, va_context_id, VASliceDataBufferType, 4177920, 1, NULL, &va_d_param_buf_id[sid]); /* 1080p size */
        CHECK_VASTATUS(va_status, "vaCreateBuffer(SliceData)");
    }

    char *slice_data_buf;
    va_status = vaMapBuffer(va_dpy, va_d_param_buf_id[sid], (void **)&slice_data_buf);
    CHECK_VASTATUS(va_status, "vaMapBuffer(SliceData)");
    memcpy(slice_data_buf, framedata, framesize);

    CHECK_SURF(va_surface_id[sid]);
    va_status = vaUnmapBuffer(va_dpy, va_d_param_buf_id[sid]);
    CHECK_VASTATUS(va_status, "vaUnmapBuffer(SliceData)");

    buffer_ids[0] = va_sp_param_buf_id[sid];
    buffer_ids[1] = va_d_param_buf_id[sid];

    CHECK_SURF(va_surface_id[sid]);
    va_status = vaRenderPicture(va_dpy, va_context_id, buffer_ids, 2);
    CHECK_VASTATUS(va_status, "vaRenderPicture");

    va_status = vaEndPicture(va_dpy, va_context_id);
    CHECK_VASTATUS(va_status, "vaEndPicture");

    /* Prepare next one... */
    int sid_new = (sid + 1) % SURFACE_NUM;
    DebugLog(("%s: new Surface ID = %d\n", __FUNCTION__, sid_new));
    va_status = vaBeginPicture(va_dpy, va_context_id, va_surface_id[sid_new]);
    CHECK_VASTATUS(va_status, "vaBeginPicture");

    /* Get decoded data */
    va_status = vaSyncSurface(va_dpy, va_surface_id[sid]);
    CHECK_VASTATUS(va_status, "vaSyncSurface");
    CHECK_SURF(va_surface_id[sid]);

    curr_surface = va_surface_id[sid];

    sid = sid_new;

    field_order_count += 2;
    ++frame_id;
    if (frame_id > 15) {
        frame_id = 0;
    }

    ++num_frames;

    memcpy(&va_old_picture_h264, &va_picture_h264, sizeof(VAPictureH264));
}
Example #16
static int Extract( vlc_va_t *p_external, picture_t *p_picture, AVFrame *p_ff )
{
    vlc_va_vaapi_t *p_va = vlc_va_vaapi_Get(p_external);

    VASurfaceID i_surface_id = (VASurfaceID)(uintptr_t)p_ff->data[3];

#if VA_CHECK_VERSION(0,31,0)
    if( vaSyncSurface( p_va->p_display, i_surface_id ) )
#else
    if( vaSyncSurface( p_va->p_display, p_va->i_context_id, i_surface_id ) )
#endif
        return VLC_EGENERIC;

    if(p_va->b_supports_derive)
    {
        if(vaDeriveImage(p_va->p_display, i_surface_id, &(p_va->image)) != VA_STATUS_SUCCESS)
            return VLC_EGENERIC;
    }
    else
    {
        if( vaGetImage( p_va->p_display, i_surface_id,
                        0, 0, p_va->i_surface_width, p_va->i_surface_height,
                        p_va->image.image_id) )
            return VLC_EGENERIC;
    }

    void *p_base;
    if( vaMapBuffer( p_va->p_display, p_va->image.buf, &p_base ) )
        return VLC_EGENERIC;

    const uint32_t i_fourcc = p_va->image.format.fourcc;
    if( i_fourcc == VA_FOURCC('Y','V','1','2') ||
        i_fourcc == VA_FOURCC('I','4','2','0') )
    {
        bool b_swap_uv = i_fourcc == VA_FOURCC('I','4','2','0');
        uint8_t *pp_plane[3];
        size_t  pi_pitch[3];

        for( int i = 0; i < 3; i++ )
        {
            const int i_src_plane = (b_swap_uv && i != 0) ?  (3 - i) : i;
            pp_plane[i] = (uint8_t*)p_base + p_va->image.offsets[i_src_plane];
            pi_pitch[i] = p_va->image.pitches[i_src_plane];
        }
        CopyFromYv12( p_picture, pp_plane, pi_pitch,
                      p_va->i_surface_width,
                      p_va->i_surface_height,
                      &p_va->image_cache );
    }
    else
    {
        assert( i_fourcc == VA_FOURCC('N','V','1','2') );
        uint8_t *pp_plane[2];
        size_t  pi_pitch[2];

        for( int i = 0; i < 2; i++ )
        {
            pp_plane[i] = (uint8_t*)p_base + p_va->image.offsets[i];
            pi_pitch[i] = p_va->image.pitches[i];
        }
        CopyFromNv12( p_picture, pp_plane, pi_pitch,
                      p_va->i_surface_width,
                      p_va->i_surface_height,
                      &p_va->image_cache );
    }

    if( vaUnmapBuffer( p_va->p_display, p_va->image.buf ) )
        return VLC_EGENERIC;

    if(p_va->b_supports_derive)
    {
        vaDestroyImage( p_va->p_display, p_va->image.image_id );
        p_va->image.image_id = VA_INVALID_ID;
    }

    return VLC_SUCCESS;
}
Example #17
bool VAAPIContext::CopySurfaceToFrame(VideoFrame *frame, const void *buf)
{
    MythXLocker locker(m_display->m_x_disp);

    if (!m_deriveSupport && m_image.image_id == VA_INVALID_ID)
        InitImage(buf);

    if (!frame || !buf || (m_dispType != kVADisplayX11) ||
        (!m_deriveSupport && m_image.image_id == VA_INVALID_ID))
        return false;

    const vaapi_surface *surf = (vaapi_surface*)buf;

    INIT_ST;
    va_status = vaSyncSurface(m_ctx.display, surf->m_id);
    CHECK_ST;

    if (m_deriveSupport)
    {
        va_status = vaDeriveImage(m_ctx.display, surf->m_id, &m_image);
    }
    else
    {
        va_status = vaGetImage(m_ctx.display, surf->m_id, 0, 0,
                               m_size.width(), m_size.height(),
                               m_image.image_id);
    }
    CHECK_ST;

    if (ok)
    {
        VideoFrame src;
        void* source = NULL;

        if (vaMapBuffer(m_ctx.display, m_image.buf, &source))
            return false;

        if (m_image.format.fourcc == VA_FOURCC_NV12)
        {
            init(&src, FMT_NV12, (unsigned char*)source, m_image.width,
                 m_image.height, m_image.data_size, NULL,
                 NULL, frame->aspect, frame->frame_rate);
            for (int i = 0; i < 2; i++)
            {
                src.pitches[i] = m_image.pitches[i];
                src.offsets[i] = m_image.offsets[i];
            }
        }
        else
        {
            // Our VideoFrame YV12 format is really YUV420P/IYUV
            bool swap = m_image.format.fourcc == VA_FOURCC_YV12;
            init(&src, FMT_YV12, (unsigned char*)source, m_image.width,
                 m_image.height, m_image.data_size, NULL,
                 NULL, frame->aspect, frame->frame_rate);
            src.pitches[0] = m_image.pitches[0];
            src.pitches[1] = m_image.pitches[swap ? 2 : 1];
            src.pitches[2] = m_image.pitches[swap ? 1 : 2];
            src.offsets[0] = m_image.offsets[0];
            src.offsets[1] = m_image.offsets[swap ? 2 : 1];
            src.offsets[2] = m_image.offsets[swap ? 1 : 2];
        }
        m_copy->copy(frame, &src);

        if (vaUnmapBuffer(m_ctx.display, m_image.buf))
            return false;

        if (m_deriveSupport)
        {
            vaDestroyImage(m_ctx.display, m_image.image_id );
            m_image.image_id = VA_INVALID_ID;
        }
        return true;
    }

    LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to get image");
    return false;
}
Example #18
static int vaapi_map_frame(AVHWFramesContext *hwfc,
                           AVFrame *dst, const AVFrame *src, int flags)
{
    AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
    VAAPIFramesContext *ctx = hwfc->internal->priv;
    VASurfaceID surface_id;
    VAImageFormat *image_format;
    VAAPISurfaceMap *map;
    VAStatus vas;
    void *address = NULL;
    int err, i;

    surface_id = (VASurfaceID)(uintptr_t)src->data[3];
    av_log(hwfc, AV_LOG_DEBUG, "Map surface %#x.\n", surface_id);

    if (!ctx->derive_works && (flags & VAAPI_MAP_DIRECT)) {
        // Requested direct mapping but it is not possible.
        return AVERROR(EINVAL);
    }
    if (dst->format == AV_PIX_FMT_NONE)
        dst->format = hwfc->sw_format;
    if (dst->format != hwfc->sw_format && (flags & VAAPI_MAP_DIRECT)) {
        // Requested direct mapping but the formats do not match.
        return AVERROR(EINVAL);
    }

    err = vaapi_get_image_format(hwfc->device_ctx, dst->format, &image_format);
    if (err < 0) {
        // Requested format is not a valid output format.
        return AVERROR(EINVAL);
    }

    map = av_malloc(sizeof(VAAPISurfaceMap));
    if (!map)
        return AVERROR(ENOMEM);

    map->source         = src;
    map->flags          = flags;
    map->image.image_id = VA_INVALID_ID;

    vas = vaSyncSurface(hwctx->display, surface_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
               "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    // The memory which we map using derive need not be connected to the CPU
    // in a way conducive to fast access.  On Gen7-Gen9 Intel graphics, the
    // memory is mappable but not cached, so normal memcpy()-like access is
    // very slow to read it (but writing is ok).  It is possible to read much
    // faster with a copy routine which is aware of the limitation, but we
    // assume for now that the user is not aware of that and would therefore
    // prefer not to be given direct-mapped memory if they request read access.
    if (ctx->derive_works &&
        ((flags & VAAPI_MAP_DIRECT) || !(flags & VAAPI_MAP_READ))) {
        vas = vaDeriveImage(hwctx->display, surface_id, &map->image);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(hwfc, AV_LOG_ERROR, "Failed to derive image from "
                   "surface %#x: %d (%s).\n",
                   surface_id, vas, vaErrorStr(vas));
            err = AVERROR(EIO);
            goto fail;
        }
        if (map->image.format.fourcc != image_format->fourcc) {
            av_log(hwfc, AV_LOG_ERROR, "Derive image of surface %#x "
                   "is in wrong format: expected %#08x, got %#08x.\n",
                   surface_id, image_format->fourcc, map->image.format.fourcc);
            err = AVERROR(EIO);
            goto fail;
        }
        map->flags |= VAAPI_MAP_DIRECT;
    } else {
        vas = vaCreateImage(hwctx->display, image_format,
                            hwfc->width, hwfc->height, &map->image);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(hwfc, AV_LOG_ERROR, "Failed to create image for "
                   "surface %#x: %d (%s).\n",
                   surface_id, vas, vaErrorStr(vas));
            err = AVERROR(EIO);
            goto fail;
        }
        if (flags & VAAPI_MAP_READ) {
            vas = vaGetImage(hwctx->display, surface_id, 0, 0,
                             hwfc->width, hwfc->height, map->image.image_id);
            if (vas != VA_STATUS_SUCCESS) {
                av_log(hwfc, AV_LOG_ERROR, "Failed to read image from "
                       "surface %#x: %d (%s).\n",
                       surface_id, vas, vaErrorStr(vas));
                err = AVERROR(EIO);
                goto fail;
            }
        }
    }

    vas = vaMapBuffer(hwctx->display, map->image.buf, &address);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(hwfc, AV_LOG_ERROR, "Failed to map image from surface "
               "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    dst->width  = src->width;
    dst->height = src->height;

    for (i = 0; i < map->image.num_planes; i++) {
        dst->data[i] = (uint8_t*)address + map->image.offsets[i];
        dst->linesize[i] = map->image.pitches[i];
    }
    if (
#ifdef VA_FOURCC_YV16
        map->image.format.fourcc == VA_FOURCC_YV16 ||
#endif
        map->image.format.fourcc == VA_FOURCC_YV12) {
        // Chroma planes are YVU rather than YUV, so swap them.
        FFSWAP(uint8_t*, dst->data[1], dst->data[2]);
    }
Example #19
static int Extract( vlc_va_t *va, picture_t *p_picture, uint8_t *data )
{
    vlc_va_sys_t *sys = va->sys;
    VASurfaceID surface = (VASurfaceID)(uintptr_t)data;
    VAImage image;
    int ret = VLC_EGENERIC;

#if VA_CHECK_VERSION(0,31,0)
    if (vaSyncSurface(sys->hw_ctx.display, surface))
#else
    if (vaSyncSurface(sys->hw_ctx.display, sys->hw_ctx.context_id, surface))
#endif
        return VLC_EGENERIC;

    if (!sys->do_derive || vaDeriveImage(sys->hw_ctx.display, surface, &image))
    {   /* Fallback if image derivation is not supported */
        if (vaCreateImage(sys->hw_ctx.display, &sys->format, sys->width,
                          sys->height, &image))
            return VLC_EGENERIC;
        if (vaGetImage(sys->hw_ctx.display, surface, 0, 0, sys->width,
                       sys->height, image.image_id))
            goto error;
    }

    void *p_base;
    if (vaMapBuffer(sys->hw_ctx.display, image.buf, &p_base))
        goto error;

    const unsigned i_fourcc = sys->format.fourcc;
    if( i_fourcc == VA_FOURCC_YV12 ||
        i_fourcc == VA_FOURCC_IYUV )
    {
        bool b_swap_uv = i_fourcc == VA_FOURCC_IYUV;
        uint8_t *pp_plane[3];
        size_t  pi_pitch[3];

        for( int i = 0; i < 3; i++ )
        {
            const int i_src_plane = (b_swap_uv && i != 0) ?  (3 - i) : i;
            pp_plane[i] = (uint8_t*)p_base + image.offsets[i_src_plane];
            pi_pitch[i] = image.pitches[i_src_plane];
        }
        CopyFromYv12( p_picture, pp_plane, pi_pitch, sys->width, sys->height,
                      &sys->image_cache );
    }
    else
    {
        assert( i_fourcc == VA_FOURCC_NV12 );
        uint8_t *pp_plane[2];
        size_t  pi_pitch[2];

        for( int i = 0; i < 2; i++ )
        {
            pp_plane[i] = (uint8_t*)p_base + image.offsets[i];
            pi_pitch[i] = image.pitches[i];
        }
        CopyFromNv12( p_picture, pp_plane, pi_pitch, sys->width, sys->height,
                      &sys->image_cache );
    }

    vaUnmapBuffer(sys->hw_ctx.display, image.buf);
    ret = VLC_SUCCESS;
error:
    vaDestroyImage(sys->hw_ctx.display, image.image_id);
    return ret;
}
Example #20
VideoFrame VideoDecoderVAAPI::frame()
{
    DPTR_D(VideoDecoderVAAPI);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    VASurfaceID surface_id = (VASurfaceID)(uintptr_t)d.frame->data[3];
    VAStatus status = VA_STATUS_SUCCESS;
    if (display() == GLX || (copyMode() == ZeroCopy && display() == X11)) {
        surface_ptr p;
        std::list<surface_ptr>::iterator it = d.surfaces_used.begin();
        for (; it != d.surfaces_used.end() && !p; ++it) {
            if((*it)->get() == surface_id) {
                p = *it;
                break;
            }
        }
        if (!p) {
            for (it = d.surfaces_free.begin(); it != d.surfaces_free.end() && !p; ++it) {
                if((*it)->get() == surface_id) {
                    p = *it;
                    break;
                }
            }
        }
        if (!p) {
            qWarning("VAAPI - Unable to find surface");
            return VideoFrame();
        }
        ((SurfaceInteropVAAPI*)d.surface_interop.data())->setSurface(p);

        VideoFrame f(d.width, d.height, VideoFormat::Format_RGB32); //p->width()
        f.setBytesPerLine(d.width*4); //used by gl to compute texture size
        f.setMetaData("surface_interop", QVariant::fromValue(d.surface_interop));
        f.setTimestamp(double(d.frame->pkt_pts)/1000.0);
        return f;
    }
#if VA_CHECK_VERSION(0,31,0)
    if ((status = vaSyncSurface(d.display->get(), surface_id)) != VA_STATUS_SUCCESS) {
        qWarning("vaSyncSurface(VADisplay:%p, VASurfaceID:%#x) == %#x", d.display->get(), surface_id, status);
#else
    if (vaSyncSurface(d.display->get(), d.context_id, surface_id)) {
        qWarning("vaSyncSurface(VADisplay:%#x, VAContextID:%#x, VASurfaceID:%#x) == %#x", d.display, d.context_id, surface_id, status);
#endif
        return VideoFrame();
    }

    if (!d.disable_derive && d.supports_derive) {
        /*
         * http://web.archiveorange.com/archive/v/OAywENyq88L319OcRnHI
         * vaDeriveImage is faster than vaGetImage. But VAImage is uncached memory and copying from it would be terribly slow
         * TODO: copy from USWC, see vlc and https://github.com/OpenELEC/OpenELEC.tv/pull/2937.diff
         * https://software.intel.com/en-us/articles/increasing-memory-throughput-with-intel-streaming-simd-extensions-4-intel-sse4-streaming-load
         */
        VA_ENSURE_TRUE(vaDeriveImage(d.display->get(), surface_id, &d.image), VideoFrame());
    } else {
        VA_ENSURE_TRUE(vaGetImage(d.display->get(), surface_id, 0, 0, d.width, d.height, d.image.image_id), VideoFrame());
    }

    void *p_base;
    VA_ENSURE_TRUE(vaMapBuffer(d.display->get(), d.image.buf, &p_base), VideoFrame());

    VideoFormat::PixelFormat pixfmt = VideoFormat::Format_Invalid;
    bool swap_uv = false;
    switch (d.image.format.fourcc) {
    case VA_FOURCC_YV12:
        swap_uv |= d.disable_derive || !d.supports_derive;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_IYUV:
        swap_uv = true;
        pixfmt = VideoFormat::Format_YUV420P;
        break;
    case VA_FOURCC_NV12:
        pixfmt = VideoFormat::Format_NV12;
        break;
    default:
        break;
    }
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", d.image.format.fourcc);
        return VideoFrame();
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + d.image.offsets[i];
        pitch[i] = d.image.pitches[i];
    }
    VideoFrame frame(copyToFrame(fmt, d.surface_height, src, pitch, swap_uv));
    VAWARN(vaUnmapBuffer(d.display->get(), d.image.buf));
    if (!d.disable_derive && d.supports_derive) {
        vaDestroyImage(d.display->get(), d.image.image_id);
        d.image.image_id = VA_INVALID_ID;
    }
    return frame;
}

void VideoDecoderVAAPI::setDisplayPriority(const QStringList &priority)
{
    DPTR_D(VideoDecoderVAAPI);
    d.display_priority.clear();
    int idx = staticMetaObject.indexOfEnumerator("DisplayType");
    const QMetaEnum me = staticMetaObject.enumerator(idx);
    foreach (const QString& disp, priority) {
        d.display_priority.push_back((DisplayType)me.keyToValue(disp.toUtf8().constData()));
    }