/**
 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
 * get surface information.
 *
 * @vws: Screen to register the reference on.
 * @whandle: winsys handle identifying the kernel surface object.
 * @flags: On successful return, the flags used when the surface was created.
 * @format: On successful return, the format used when the surface was created.
 * @numMipLevels: On successful return, the number of mipmap levels of the
 * surface.
 * @handle: On successful return, the kernel handle of the surface.
 * @p_region: On successful return points to a newly allocated
 * struct vmw_region holding a reference to the surface backup buffer.
 *
 * Returns 0 on success, a negative system error on failure.
 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_reference_arg s_arg;
   struct drm_vmw_surface_arg *req = &s_arg.req;
   struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   boolean needs_unref = FALSE;
   int ret;

   /* Note: flags/format are uninitialized *output* parameters here, so the
    * old trace message that printed them with %d was both meaningless and
    * a format-string type mismatch (pointers passed for %d). Only trace
    * the function name.
    */
   vmw_printf("%s\n", __FUNCTION__);

   assert(p_region != NULL);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   memset(&s_arg, 0, sizeof(s_arg));
   ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
   if (ret)
      goto out_fail_req;

   *handle = req->sid;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
                             &s_arg, sizeof(s_arg));
   if (ret)
      goto out_fail_ref;

   /* Hand the backup-buffer reference over to the caller. */
   region->handle = rep->crep.buffer_handle;
   region->map_handle = rep->crep.buffer_map_handle;
   region->drm_fd = vws->ioctl.drm_fd;
   region->size = rep->crep.backup_size;
   *p_region = region;

   *handle = rep->crep.handle;
   *flags = rep->creq.svga3d_flags;
   *format = rep->creq.format;
   *numMipLevels = rep->creq.mip_levels;

   /*
    * Drop the temporary reference obtained by vmw_ioctl_surface_req()
    * (prime import case); DRM_VMW_GB_SURFACE_REF holds its own reference
    * from here on.
    */
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;

out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   /* region is always non-NULL here; the old "if (region)" guard was
    * redundant.
    */
   FREE(region);
   return ret;
}
/**
 * vmw_svga_winsys_surface_reference - Update a surface pointer, adjusting
 * reference counts.
 *
 * Makes *pdst point at src, releasing the reference previously held through
 * *pdst. When the old surface's reference count drops to zero, the kernel
 * surface is destroyed and the winsys wrapper is freed.
 */
void
vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
                                  struct vmw_svga_winsys_surface *src)
{
   struct vmw_svga_winsys_surface *old;

   if (pdst == NULL)
      return;

   old = *pdst;
   if (old == src)
      return;

   if (pipe_reference(old ? &old->refcnt : NULL,
                      src ? &src->refcnt : NULL)) {
      /* Last reference gone: destroy the kernel object and the wrapper. */
      vmw_ioctl_surface_destroy(old->screen, old->sid);
#ifdef DEBUG
      /* Help catch dangling pointers to the freed surface. */
      assert(p_atomic_read(&old->validated) == 0);
      old->sid = SVGA3D_INVALID_ID;
#endif
      FREE(old);
   }

   *pdst = src;
}
/**
 * vmw_svga_winsys_surface_create - Create a kernel surface and its winsys
 * wrapper.
 *
 * @sws: Screen to create the surface on.
 * @flags, @format, @usage: Properties requested for the surface.
 * @size, @numLayers, @numMipLevels, @sampleCount: Dimensions of the surface.
 *
 * On guest-backed (GB) hosts a backing buffer is either allocated from the
 * winsys buffer pool (small, non-shared surfaces) or by the kernel. On
 * non-GB hosts no guest backing buffer is needed.
 *
 * Returns a refcounted svga_winsys_surface pointer, or NULL on failure.
 */
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
                               SVGA3dSurfaceFlags flags,
                               SVGA3dSurfaceFormat format,
                               unsigned usage,
                               SVGA3dSize size,
                               uint32 numLayers,
                               uint32 numMipLevels,
                               unsigned sampleCount)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_surface *surface;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider;
   uint32_t buffer_size;

   memset(&desc, 0, sizeof(desc));
   surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if(!surface)
      goto no_surface;

   pipe_reference_init(&surface->refcnt, 1);
   p_atomic_set(&surface->validated, 0);
   surface->screen = vws;
   pipe_mutex_init(surface->mutex);
   surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
   /* Shared surfaces come out of the GMR pool; others use fenced MOBs. */
   provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;

   /*
    * Used for the backing buffer GB surfaces, and to approximate
    * when to flush on non-GB hosts.
    */
   buffer_size = svga3dsurface_get_serialized_size(format, size,
                                                   numMipLevels, numLayers);
   /* Stream-output surfaces carry extra SO state after the image data. */
   if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
      buffer_size += sizeof(SVGA3dDXSOState);

   if (buffer_size > vws->ioctl.max_texture_size) {
      goto no_sid;
   }

   if (sws->have_gb_objects) {
      SVGAGuestPtr ptr = {0,0};

      /*
       * If the backing buffer size is small enough, try to allocate a
       * buffer out of the buffer cache. Otherwise, let the kernel allocate
       * a suitable buffer for us.
       */
      if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
         struct pb_buffer *pb_buf;

         surface->size = buffer_size;
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = 0;
         pb_buf = provider->create_buffer(provider, buffer_size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         /* A wrapped GMR buffer must always have a region pointer. */
         if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
            assert(0);
      }

      /* If we allocated our own backing buffer, hand its gmrId to the
       * kernel; otherwise ask the kernel to allocate one via desc.region.
       */
      surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                 size, numLayers,
                                                 numMipLevels, sampleCount,
                                                 ptr.gmrId,
                                                 surface->buf ? NULL :
                                                 &desc.region);

      if (surface->sid == SVGA3D_INVALID_ID && surface->buf) {
         /*
          * Kernel refused to allocate a surface for us.
          * Perhaps something was wrong with our buffer?
          * This is really a guard against future new size requirements
          * on the backing buffers.
          */
         vmw_svga_winsys_buffer_destroy(sws, surface->buf);
         surface->buf = NULL;
         surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                    size, numLayers,
                                                    numMipLevels, sampleCount,
                                                    0, &desc.region);
         if (surface->sid == SVGA3D_INVALID_ID)
            goto no_sid;
      }

      /*
       * If the kernel created the buffer for us, wrap it into a
       * vmw_svga_winsys_buffer.
       */
      if (surface->buf == NULL) {
         struct pb_buffer *pb_buf;

         surface->size = vmw_region_size(desc.region);
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
         pb_buf = provider->create_buffer(provider, surface->size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf == NULL) {
            vmw_ioctl_region_destroy(desc.region);
            vmw_ioctl_surface_destroy(vws, surface->sid);
            goto no_sid;
         }
      }
   } else {
      /* Non-GB host: no guest backing buffer is needed. */
      surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
                                              size, numLayers, numMipLevels,
                                              sampleCount);
      if(surface->sid == SVGA3D_INVALID_ID)
         goto no_sid;

      /* Best estimate for surface size, used for early flushing. */
      surface->size = buffer_size;
      surface->buf = NULL;
   }

   return svga_winsys_surface(surface);

no_sid:
   if (surface->buf)
      vmw_svga_winsys_buffer_destroy(sws, surface->buf);
   FREE(surface);
no_surface:
   return NULL;
}
/**
 * vmw_drm_surface_from_handle - Create a winsys surface from a shared
 * kernel surface handle (non-guest-backed path).
 *
 * @sws: Screen to register the surface with.
 * @whandle: struct winsys_handle identifying the kernel surface object.
 * @format: On successful return points to a value describing the
 * surface format.
 *
 * Returns a refcounted pointer to a struct svga_winsys_surface
 * embedded in a struct vmw_svga_winsys_surface on success or NULL
 * on failure.
 */
static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
                            struct winsys_handle *whandle,
                            SVGA3dSurfaceFormat *format)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   union drm_vmw_surface_reference_arg arg;
   struct drm_vmw_surface_arg *req = &arg.req;
   struct drm_vmw_surface_create_req *rep = &arg.rep;
   uint32_t handle = 0;
   struct drm_vmw_size size;
   SVGA3dSize base_size;
   int ret;
   int i;

   if (whandle->offset != 0) {
      fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   /* Translate the winsys handle into a kernel surface handle. */
   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
   case DRM_API_HANDLE_TYPE_KMS:
      handle = whandle->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
                               &handle);
      if (ret) {
         vmw_error("Failed to get handle from prime fd %d.\n",
                   (int) whandle->handle);
         return NULL;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return NULL;
   }

   memset(&arg, 0, sizeof(arg));
   req->sid = handle;
   /* The kernel writes the surface dimensions through this user pointer. */
   rep->size_addr = (unsigned long)&size;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
                             &arg, sizeof(arg));

   /*
    * Need to close the handle we got from prime.
    * (DRM_VMW_REF_SURFACE took its own reference above, so on success one
    * reference remains held for the returned surface.)
    */
   if (whandle->type == DRM_API_HANDLE_TYPE_FD)
      vmw_ioctl_surface_destroy(vws, handle);

   if (ret) {
      /*
       * Any attempt to share something other than a surface, like a dumb
       * kms buffer, should fail here.
       */
      vmw_error("Failed referencing shared surface. SID %d.\n"
                "Error %d (%s).\n",
                handle, ret, strerror(-ret));
      return NULL;
   }

   /* Only single-mip, single-face surfaces can be shared. */
   if (rep->mip_levels[0] != 1) {
      vmw_error("Incorrect number of mipmap levels on shared surface."
                " SID %d, levels %d\n",
                handle, rep->mip_levels[0]);
      goto out_mip;
   }

   for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
      if (rep->mip_levels[i] != 0) {
         vmw_error("Incorrect number of faces levels on shared surface."
                   " SID %d, face %d present.\n",
                   handle, i);
         goto out_mip;
      }
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->screen = vws;
   vsrf->sid = handle;
   ssrf = svga_winsys_surface(vsrf);
   *format = rep->format;

   /* Estimate usage, for early flushing. */
   base_size.width = size.width;
   base_size.height = size.height;
   base_size.depth = size.depth;
   /* NOTE(review): the last argument (FALSE) appears to be the layer/cubemap
    * argument of svga3dsurface_get_serialized_size() — confirm against its
    * prototype.
    */
   vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
                                                  rep->mip_levels[0],
                                                  FALSE);

   return ssrf;

out_mip:
   /* Drop the reference taken by DRM_VMW_REF_SURFACE. */
   vmw_ioctl_surface_destroy(vws, handle);
   return NULL;
}
/**
 * vmw_drm_gb_surface_from_handle - Create a shared surface
 *
 * @sws: Screen to register the surface with.
 * @whandle: struct winsys_handle identifying the kernel surface object
 * @format: On successful return points to a value describing the
 * surface format.
 *
 * Returns a refcounted pointer to a struct svga_winsys_surface
 * embedded in a struct vmw_svga_winsys_surface on success or NULL
 * on failure.
 */
static struct svga_winsys_surface *
vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
                               struct winsys_handle *whandle,
                               SVGA3dSurfaceFormat *format)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   SVGA3dSurfaceFlags flags;
   uint32_t mip_levels;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider = vws->pools.gmr;
   struct pb_buffer *pb_buf;
   uint32_t handle;
   int ret;

   if (whandle->offset != 0) {
      fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   /* Take a reference on the kernel surface and look up its properties.
    * On success, "handle" holds the kernel surface handle, which may differ
    * from whandle->handle (a prime fd for DRM_API_HANDLE_TYPE_FD imports).
    */
   ret = vmw_ioctl_gb_surface_ref(vws, whandle, &flags, format,
                                  &mip_levels, &handle, &desc.region);

   if (ret) {
      fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
              "Error %d (%s).\n",
              whandle->handle, ret, strerror(-ret));
      return NULL;
   }

   /* Only single-mip surfaces can be shared. */
   if (mip_levels != 1) {
      fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
              " SID %d, levels %d\n",
              whandle->handle, mip_levels);
      goto out_mip;
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->screen = vws;
   vsrf->sid = handle;
   vsrf->size = vmw_region_size(desc.region);

   /*
    * Synchronize backing buffers of shared surfaces using the
    * kernel, since we don't pass fence objects around between
    * processes.
    */
   desc.pb_desc.alignment = 4096;
   desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC;
   pb_buf = provider->create_buffer(provider, vsrf->size, &desc.pb_desc);
   vsrf->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
   if (!vsrf->buf)
      goto out_no_buf;

   ssrf = svga_winsys_surface(vsrf);

   return ssrf;

out_no_buf:
   FREE(vsrf);
out_mip:
   vmw_ioctl_region_destroy(desc.region);
   /* Drop the reference obtained above. Use the looked-up kernel handle,
    * not whandle->handle: the two differ for prime-fd imports, where
    * whandle->handle is a file descriptor, not a surface handle.
    */
   vmw_ioctl_surface_destroy(vws, handle);
   return NULL;
}
static struct pipe_buffer * vmw_drm_buffer_from_handle(struct drm_api *drm_api, struct pipe_screen *screen, const char *name, unsigned handle) { struct vmw_svga_winsys_surface *vsrf; struct svga_winsys_surface *ssrf; struct vmw_winsys_screen *vws = vmw_winsys_screen(svga_winsys_screen(screen)); struct pipe_buffer *buf; union drm_vmw_surface_reference_arg arg; struct drm_vmw_surface_arg *req = &arg.req; struct drm_vmw_surface_create_req *rep = &arg.rep; int ret; int i; /** * The vmware device specific handle is the hardware SID. * FIXME: We probably want to move this to the ioctl implementations. */ memset(&arg, 0, sizeof(arg)); req->sid = handle; ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE, &arg, sizeof(arg)); if (ret) { fprintf(stderr, "Failed referencing shared surface. SID %d.\n" "Error %d (%s).\n", handle, ret, strerror(-ret)); return NULL; } if (rep->mip_levels[0] != 1) { fprintf(stderr, "Incorrect number of mipmap levels on shared surface." " SID %d, levels %d\n", handle, rep->mip_levels[0]); goto out_mip; } for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { if (rep->mip_levels[i] != 0) { fprintf(stderr, "Incorrect number of faces levels on shared surface." " SID %d, face %d present.\n", handle, i); goto out_mip; } } vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface); if (!vsrf) goto out_mip; pipe_reference_init(&vsrf->refcnt, 1); p_atomic_set(&vsrf->validated, 0); vsrf->sid = handle; ssrf = svga_winsys_surface(vsrf); buf = svga_screen_buffer_wrap_surface(screen, rep->format, ssrf); if (!buf) vmw_svga_winsys_surface_reference(&vsrf, NULL); return buf; out_mip: vmw_ioctl_surface_destroy(vws, handle); return NULL; }