/*
 * Drop the reference on a kernel fence object.
 *
 * Best effort: a failing ioctl is logged but not propagated, since the
 * caller has no way to recover a fence-unref failure anyway.
 */
void
vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws, uint32_t handle)
{
   struct drm_vmw_fence_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;

   if (drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
                       &arg, sizeof(arg)) != 0)
      vmw_error("%s Failed\n", __FUNCTION__);
}
struct vmw_region * vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size) { struct vmw_region *region; union drm_vmw_alloc_dmabuf_arg arg; struct drm_vmw_alloc_dmabuf_req *req = &arg.req; struct drm_vmw_dmabuf_rep *rep = &arg.rep; int ret; vmw_printf("%s: size = %u\n", __FUNCTION__, size); region = CALLOC_STRUCT(vmw_region); if (!region) goto out_err1; memset(&arg, 0, sizeof(arg)); req->size = size; do { ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)); } while (ret == -ERESTART); if (ret) { vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret)); goto out_err1; } region->ptr.gmrId = rep->cur_gmr_id; region->ptr.offset = rep->cur_gmr_offset; region->data = NULL; region->handle = rep->handle; region->map_handle = rep->map_handle; region->map_count = 0; region->size = size; region->drm_fd = vws->ioctl.drm_fd; vmw_printf(" gmrId = %u, offset = %u\n", region->ptr.gmrId, region->ptr.offset); return region; out_err1: FREE(region); return NULL; }
/**
 * Map a GMR region into the process address space, reference counted.
 *
 * The first call performs the actual mmap; later calls just bump the
 * map count and return the cached pointer. Returns NULL if the mmap
 * fails.
 */
void *
vmw_ioctl_region_map(struct vmw_region *region)
{
   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);

   if (!region->data) {
      void *addr = os_mmap(NULL, region->size,
                           PROT_READ | PROT_WRITE, MAP_SHARED,
                           region->drm_fd, region->map_handle);
      if (addr == MAP_FAILED) {
         vmw_error("%s: Map failed.\n", __FUNCTION__);
         return NULL;
      }
      region->data = addr;
   }

   region->map_count++;

   return region->data;
}
/**
 * Wait for the fence identified by @handle to signal.
 *
 * @flags are winsys fence flags, translated to kernel DRM_VMW flags via
 * vmw_drm_fence_flags(). The wait is non-lazy with a 10-second timeout.
 *
 * Returns 0 on success, or the negative error code returned by the
 * kernel wait ioctl. The previous implementation logged the error but
 * unconditionally returned 0, so callers could never observe a failed
 * or timed-out wait; propagate the real result instead.
 */
int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle, uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;
   arg.timeout_us = 10*1000000;   /* 10 second timeout */
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
                             &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);

   return ret;
}
/**
 * Import a shared surface through a winsys handle and wrap it in a
 * svga_winsys_surface.
 *
 * Accepts SHARED/KMS handles directly, and FD handles via
 * drmPrimeFDToHandle(). The surface is referenced through the
 * DRM_VMW_REF_SURFACE ioctl; a prime-derived handle is dropped again
 * right after, since the ref ioctl holds its own reference.
 *
 * Only single-face, single-mip surfaces are accepted. On success,
 * *format receives the surface format reported by the kernel and a
 * new surface wrapper (refcount 1) is returned; on any failure NULL
 * is returned and the kernel reference is released.
 */
static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
                            struct winsys_handle *whandle,
                            SVGA3dSurfaceFormat *format)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   union drm_vmw_surface_reference_arg arg;
   struct drm_vmw_surface_arg *req = &arg.req;
   struct drm_vmw_surface_create_req *rep = &arg.rep;
   uint32_t handle = 0;
   struct drm_vmw_size size;
   SVGA3dSize base_size;
   int ret;
   int i;

   /* Sub-buffer imports are not supported by this winsys. */
   if (whandle->offset != 0) {
      fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   /* Resolve the winsys handle to a kernel surface handle (sid). */
   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
   case DRM_API_HANDLE_TYPE_KMS:
      handle = whandle->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
                               &handle);
      if (ret) {
         vmw_error("Failed to get handle from prime fd %d.\n",
                   (int) whandle->handle);
         return NULL;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return NULL;
   }

   memset(&arg, 0, sizeof(arg));
   req->sid = handle;
   /* Kernel writes the surface dimensions to this user-space address. */
   rep->size_addr = (unsigned long)&size;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
                             &arg, sizeof(arg));

   /*
    * Need to close the handle we got from prime.
    */
   if (whandle->type == DRM_API_HANDLE_TYPE_FD)
      vmw_ioctl_surface_destroy(vws, handle);

   if (ret) {
      /*
       * Any attempt to share something other than a surface, like a dumb
       * kms buffer, should fail here.
       */
      vmw_error("Failed referencing shared surface. SID %d.\n"
                "Error %d (%s).\n",
                handle, ret, strerror(-ret));
      return NULL;
   }

   /* Only single-mip-level shared surfaces are accepted. */
   if (rep->mip_levels[0] != 1) {
      vmw_error("Incorrect number of mipmap levels on shared surface."
                " SID %d, levels %d\n",
                handle, rep->mip_levels[0]);
      goto out_mip;
   }

   /* Faces beyond the first must be absent (no cube maps here). */
   for (i=1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
      if (rep->mip_levels[i] != 0) {
         vmw_error("Incorrect number of faces levels on shared surface."
                   " SID %d, face %d present.\n",
                   handle, i);
         goto out_mip;
      }
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->screen = vws;
   vsrf->sid = handle;
   ssrf = svga_winsys_surface(vsrf);
   *format = rep->format;

   /* Estimate usage, for early flushing. */
   base_size.width = size.width;
   base_size.height = size.height;
   base_size.depth = size.depth;
   vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
                                                  rep->mip_levels[0],
                                                  FALSE);

   return ssrf;

out_mip:
   /* Drop the kernel reference taken by DRM_VMW_REF_SURFACE. */
   vmw_ioctl_surface_destroy(vws, handle);
   return NULL;
}
/**
 * Probe the vmwgfx kernel driver for 3D support and capabilities, and
 * populate vws->ioctl / vws->base accordingly.
 *
 * Queries, in order: DRM version, 3D enable, FIFO hw version, hardware
 * caps (guest-backed object support), then either the GB-object limits
 * (caps size, max MOB memory/size) or the legacy surface-memory limits,
 * and finally the 3D capability array via DRM_VMW_GET_3D_CAP.
 *
 * Returns TRUE on success; FALSE on any failure, with num_cap_3d reset
 * to 0 and all intermediate resources released via the goto ladder.
 */
boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean have_drm_2_5;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   /* Feature gates derived from the vmwgfx DRM interface version. */
   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);

   /* Bail out early if 3D is not enabled on the device. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   /* Guest-backed object support is advertised through the HW caps. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   /* GB objects require at least DRM 2.5. */
   if (vws->base.have_gb_objects && !have_drm_2_5)
      goto out_no_3d;

   if (vws->base.have_gb_objects) {
      /* Ask the kernel how large the 3D caps blob is; fall back to the
       * fixed FIFO caps size if the query fails. */
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      /* NOTE(review): we are already inside the have_gb_objects branch,
       * so the else arm below is dead code. Kept as-is. */
      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;
   } else {
      /* Legacy (non-GB) path: fixed devcap count and a surface-memory
       * budget used for early flushing. */
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      /* Short-circuit keeps ret unread when the ioctl was skipped. */
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   /* Temporary buffer the kernel fills with the raw caps blob. */
   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
                              sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
                         &cap_arg, sizeof(cap_arg));
   if (ret) {
      debug_printf("Failed to get 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   /* Decode the raw blob into vws->ioctl.cap_3d. */
   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }
   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __FUNCTION__);
   return TRUE;

out_no_caps:
   free(vws->ioctl.cap_3d);
out_no_caparray:
   free(cap_buffer);
out_no_3d:
   drmFreeVersion(version);
out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __FUNCTION__);
   return FALSE;
}
void vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid, uint32_t throttle_us, void *commands, uint32_t size, struct pipe_fence_handle **pfence) { struct drm_vmw_execbuf_arg arg; struct drm_vmw_fence_rep rep; int ret; #ifdef DEBUG { static boolean firsttime = TRUE; static boolean debug = FALSE; static boolean skip = FALSE; if (firsttime) { debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE); skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE); } if (debug) { VMW_FUNC; svga_dump_commands(commands, size); } firsttime = FALSE; if (skip) { size = 0; } } #endif memset(&arg, 0, sizeof(arg)); memset(&rep, 0, sizeof(rep)); rep.error = -EFAULT; if (pfence) arg.fence_rep = (unsigned long)&rep; arg.commands = (unsigned long)commands; arg.command_size = size; arg.throttle_us = throttle_us; arg.version = DRM_VMW_EXECBUF_VERSION; do { ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)); } while(ret == -ERESTART); if (ret) { vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret)); } if (rep.error) { /* * Kernel has already synced, or caller requested no fence. */ if (pfence) *pfence = NULL; } else { if (pfence) { vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno, TRUE); *pfence = vmw_fence_create(vws->fence_ops, rep.handle, rep.seqno, rep.mask); if (*pfence == NULL) { /* * Fence creation failed. Need to sync. */ (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask); vmw_ioctl_fence_unref(vws, rep.handle); } } } }
/**
 * Probe the vmwgfx kernel driver and populate vws->ioctl / vws->base.
 *
 * Detects the DRM interface version (gating features 2.5/2.6/2.9/2.15),
 * verifies 3D is enabled, queries the FIFO hw version, then probes for
 * guest-backed objects, VGPU10 (DX), intra-surface copy and SM4.1
 * support, memory limits, and finally fetches and parses the 3D
 * capability array. The SVGA_FORCE_HOST_BACKED and SVGA_VGPU10
 * environment variables can force-disable GB objects / VGPU10.
 *
 * Returns TRUE on success; FALSE on failure with num_cap_3d reset to 0
 * and intermediate resources released via the goto ladder.
 */
boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean drm_gb_capable;
   boolean have_drm_2_5;
   const char *getenv_val;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   /* Feature gates derived from the vmwgfx DRM interface version. */
   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);
   vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 14);

   /* Execbuf arg layout v2 is only understood by DRM >= 2.9. */
   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   drm_gb_capable = have_drm_2_5;

   /* Bail out early if 3D is not enabled on the device. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   /* SVGA_FORCE_HOST_BACKED (non-"0") skips the HW caps query, forcing
    * have_gb_objects off. */
   getenv_val = getenv("SVGA_FORCE_HOST_BACKED");
   if (!getenv_val || strcmp(getenv_val, "0") == 0) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
   } else {
      ret = -EINVAL;
   }
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   /* GB objects require at least DRM 2.5. */
   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = FALSE;
   vws->base.have_sm4_1 = FALSE;
   vws->base.have_intra_surface_copy = FALSE;

   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      /* VGPU10 (DX) availability; can be vetoed via SVGA_VGPU10=0. */
      if (vws->ioctl.have_drm_2_9) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_DX;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = TRUE;
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = FALSE;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }

      /* Intra-surface copy and SM4.1 need DRM 2.15 and VGPU10. */
      if (vws->ioctl.have_drm_2_15 && vws->base.have_vgpu10) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_HW_CAPS2;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_intra_surface_copy = TRUE;
         }

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM4_1;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm4_1 = TRUE;
         }
      }

      /* Ask the kernel how large the 3D caps blob is; fall back to the
       * fixed FIFO caps size if the query fails. */
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
   } else {
      /* Legacy (non-GB) path: fixed devcap count and a surface-memory
       * budget used for early flushing. */
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      /* Short-circuit keeps ret unread when the ioctl was skipped. */
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   /* Temporary buffer the kernel fills with the raw caps blob. */
   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
                              sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   /*
    * This call must always be after DRM_VMW_PARAM_MAX_MOB_MEMORY and
    * DRM_VMW_PARAM_SM4_1. This is because, based on these calls, kernel
    * driver sends the supported cap.
    */
   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
                         &cap_arg, sizeof(cap_arg));
   if (ret) {
      debug_printf("Failed to get 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   /* Decode the raw blob into vws->ioctl.cap_3d. */
   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
        || version->version_major > 2) && vws->base.have_vgpu10) {
      /* support for these commands didn't make it into vmwgfx kernel
       * modules before 2.10.
       */
      vws->base.have_generate_mipmap_cmd = TRUE;
      vws->base.have_set_predication_cmd = TRUE;
   }

   /* NOTE(review): unlike the 2.10 check above, this does not accept
    * version_major > 2 — fence FD support would be missed on a
    * hypothetical DRM 3.x. Confirm against the kernel interface. */
   if (version->version_major == 2 && version->version_minor >= 14) {
      vws->base.have_fence_fd = TRUE;
   }

   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __FUNCTION__);
   return TRUE;

out_no_caps:
   free(vws->ioctl.cap_3d);
out_no_caparray:
   free(cap_buffer);
out_no_3d:
   drmFreeVersion(version);
out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __FUNCTION__);
   return FALSE;
}
/**
 * Submit a command buffer to the kernel via DRM_VMW_EXECBUF.
 *
 * @cid is used as the execbuf context handle when VGPU10 is active.
 * @imported_fence_fd, when not -1, is handed to the kernel as a fence
 * to wait for before execution (requires fence-FD support). @flags may
 * request exporting a fence FD (SVGA_HINT_FLAG_EXPORT_FENCE_FD).
 *
 * On success with @pfence non-NULL, *pfence receives a new fence for
 * the submission (NULL when the kernel already synced or fence creation
 * failed). A failed execbuf ioctl aborts the process.
 */
void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence, int32_t imported_fence_fd,
                  uint32_t flags)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#ifdef DEBUG
   {
      /* One-time lookup of the debug environment options. */
      static boolean firsttime = TRUE;
      static boolean debug = FALSE;
      static boolean skip = FALSE;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = FALSE;
      /* SVGA_SKIP_CMD submits an empty buffer instead of the commands. */
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   if (flags & SVGA_HINT_FLAG_EXPORT_FENCE_FD) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
   }

   if (imported_fence_fd != -1) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
   }

   /* If the kernel never writes the fence rep, treat that as an error. */
   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* Older DRM module requires this to be zero */
   if (vws->base.have_fence_fd)
      arg.imported_fence_fd = imported_fence_fd;

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ?
      sizeof(arg) :
      offsetof(struct drm_vmw_execbuf_arg, context_handle);

   /* Retry on signal interruption; back off briefly when busy. */
   do {
      ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg,
                            argsize);
      if (ret == -EBUSY)
         usleep(1000);
   } while(ret == -ERESTART || ret == -EBUSY);
   if (ret) {
      vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
      abort();
   }

   if (rep.error) {
      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
         *pfence = NULL;
   } else {
      if (pfence) {
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           TRUE);

         /* Older DRM module will set this to zero, but -1 is the proper FD
          * to use for no Fence FD support */
         if (!vws->base.have_fence_fd)
            rep.fd = -1;

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask, rep.fd);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}