static struct virgl_winsys * virgl_drm_winsys_create(int drmFD) { struct virgl_drm_winsys *qdws; int ret; struct drm_virtgpu_getparam getparam = {0}; qdws = CALLOC_STRUCT(virgl_drm_winsys); if (!qdws) return NULL; qdws->fd = drmFD; qdws->num_delayed = 0; qdws->usecs = 1000000; LIST_INITHEAD(&qdws->delayed); (void) mtx_init(&qdws->mutex, mtx_plain); (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain); qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare); qdws->bo_names = util_hash_table_create(handle_hash, handle_compare); qdws->base.destroy = virgl_drm_winsys_destroy; qdws->base.transfer_put = virgl_bo_transfer_put; qdws->base.transfer_get = virgl_bo_transfer_get; qdws->base.resource_create = virgl_drm_winsys_resource_cache_create; qdws->base.resource_unref = virgl_drm_winsys_resource_unref; qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle; qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle; qdws->base.resource_map = virgl_drm_resource_map; qdws->base.resource_wait = virgl_drm_resource_wait; qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create; qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy; qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd; qdws->base.emit_res = virgl_drm_emit_res; qdws->base.res_is_referenced = virgl_drm_res_is_ref; qdws->base.cs_create_fence = virgl_cs_create_fence; qdws->base.fence_wait = virgl_fence_wait; qdws->base.fence_reference = virgl_fence_reference; qdws->base.get_caps = virgl_drm_get_caps; uint32_t value; getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX; getparam.value = (uint64_t)(uintptr_t)&value; ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam); if (ret == 0) { if (value == 1) qdws->has_capset_query_fix = true; } return &qdws->base; }
/* TODO with some refactoring we might be able to re-use debug_symbol_name_cached()
 * instead.. otoh if using libunwind I think u_debug_symbol could just be excluded
 * from build?
 */
/* Resolve and cache the symbol name for the procedure that contains the
 * cursor's current frame. Entries are keyed by the procedure start address
 * and live in the global symbols_hash, protected by symbols_mutex; cached
 * strings are never freed here.
 */
static const char *
symbol_name_cached(unw_cursor_t *cursor, unw_proc_info_t *pip)
{
   void *addr = (void *)(uintptr_t)pip->start_ip;
   char *name;

   mtx_lock(&symbols_mutex);
   /* Lazily create the address -> name cache on first use. */
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, addr);
   if(!name) {
      char procname[256];
      unw_word_t off;
      int ret;

      ret = unw_get_proc_name(cursor, procname, sizeof(procname), &off);
      if (ret && ret != -UNW_ENOMEM) {
         /* Hard lookup failure: substitute a placeholder name. */
         procname[0] = '?';
         procname[1] = 0;
      }

      /* Append "..." when the buffer was too small (UNW_ENOMEM).
       * NOTE(review): on asprintf failure 'name' points at a string
       * literal while the success path heap-allocates -- confirm the
       * cache never frees its values. */
      if (asprintf(&name, "%s%s", procname, ret == -UNW_ENOMEM ? "..." : "") == -1)
         name = "??";

      util_hash_table_set(symbols_hash, addr, (void*)name);
   }
   mtx_unlock(&symbols_mutex);

   return name;
}
/* Base constructor shared by the d3d9 resource objects.
 *
 * Runs the NineUnknown base ctor, records the pipe screen, optionally
 * allocates the backing pipe_resource from This->info (when Allocate is
 * set), and creates the per-GUID private-data table.
 * Returns D3D_OK, a base-ctor error, D3DERR_OUTOFVIDEOMEMORY, or
 * E_OUTOFMEMORY.
 */
HRESULT
NineResource9_ctor( struct NineResource9 *This,
                    struct NineUnknownParams *pParams,
                    BOOL Allocate,
                    D3DRESOURCETYPE Type,
                    D3DPOOL Pool )
{
    struct pipe_screen *pscreen;
    HRESULT hr;

    hr = NineUnknown_ctor(&This->base, pParams);
    if (FAILED(hr))
        return hr;

    pscreen = This->base.device->screen;
    This->info.screen = pscreen;

    if (Allocate) {
        DBG("(%p) Creating pipe_resource.\n", This);
        This->resource = pscreen->resource_create(pscreen, &This->info);
        if (!This->resource)
            return D3DERR_OUTOFVIDEOMEMORY;
    }

    This->type = Type;
    This->pool = Pool;
    This->priority = 0;
    This->data = NULL; /* FIXME remove, rather set it to null in surface9.c */

    /* Per-GUID private-data storage. */
    This->pdata = util_hash_table_create(ht_guid_hash, ht_guid_compare);
    return This->pdata ? D3D_OK : E_OUTOFMEMORY;
}
/* Return a cached symbol name for addr, resolving it on first request.
 *
 * Names are strdup'd into the global symbols_hash (guarded by
 * symbols_mutex) and kept for reuse; no freeing happens in this function.
 */
const char* debug_symbol_name_cached(const void *addr)
{
   const char* name;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   /* Lazily create the mutex on Windows.
    * NOTE(review): this check-then-init is itself unsynchronized, so two
    * threads racing on the very first call could both run
    * pipe_mutex_init -- confirm the first call is serialized. */
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(symbols_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(symbols_mutex);
   /* Lazily create the address -> name cache. */
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, (void*)addr);
   if(!name) {
      char buf[1024];
      /* Cache miss: run the underlying lookup once and remember a copy. */
      debug_symbol_name(addr, buf, sizeof(buf));
      name = strdup(buf);
      util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
   }
   pipe_mutex_unlock(symbols_mutex);

   return name;
}
static struct virgl_winsys * virgl_drm_winsys_create(int drmFD) { struct virgl_drm_winsys *qdws; qdws = CALLOC_STRUCT(virgl_drm_winsys); if (!qdws) return NULL; qdws->fd = drmFD; qdws->num_delayed = 0; qdws->usecs = 1000000; LIST_INITHEAD(&qdws->delayed); pipe_mutex_init(qdws->mutex); pipe_mutex_init(qdws->bo_handles_mutex); qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare); qdws->bo_names = util_hash_table_create(handle_hash, handle_compare); qdws->base.destroy = virgl_drm_winsys_destroy; qdws->base.transfer_put = virgl_bo_transfer_put; qdws->base.transfer_get = virgl_bo_transfer_get; qdws->base.resource_create = virgl_drm_winsys_resource_cache_create; qdws->base.resource_unref = virgl_drm_winsys_resource_unref; qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle; qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle; qdws->base.resource_map = virgl_drm_resource_map; qdws->base.resource_wait = virgl_drm_resource_wait; qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create; qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy; qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd; qdws->base.emit_res = virgl_drm_emit_res; qdws->base.res_is_referenced = virgl_drm_res_is_ref; qdws->base.cs_create_fence = virgl_cs_create_fence; qdws->base.fence_wait = virgl_fence_wait; qdws->base.fence_reference = virgl_fence_reference; qdws->base.get_caps = virgl_drm_get_caps; return &qdws->base; }
/**
 * Create an svga winsys command-submission context for the given screen.
 *
 * Allocates the context, wires the svga_winsys_context vtable, creates the
 * kernel context (vswc->base.cid), and sets up command-buffer /
 * relocation-list sizing plus the validation state. Returns NULL on
 * allocation failure, unwinding via the labels at the bottom.
 */
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if(!vswc)
      return NULL;

   /* Context vtable. */
   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;

   /* NOTE(review): the ioctl result is not validated here -- confirm
    * vmw_ioctl_context_create cannot fail, or that an invalid cid is
    * handled downstream. */
   vswc->base.cid = vmw_ioctl_context_create(vws);

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   /* Sizing for the command buffer and the relocation lists. */
   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
   if (!vswc->hash)
      goto out_no_hash;

#ifdef DEBUG
   /* Debug builds track command-buffer flush coverage.
    * NOTE(review): result is unchecked -- confirm NULL fctx is tolerated. */
   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif

   return &vswc->base;

   /* Unwind in reverse order of construction. */
out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   FREE(vswc);
   return NULL;
}
/* Create a native display backed by DRI2 on the given X11 display.
 *
 * When dpy is NULL a private connection is opened (and owned by the
 * returned object). Returns the embedded native_display base, or NULL on
 * failure; partial state is torn down through dri2_display_destroy().
 */
struct native_display *
x11_create_dri2_display(Display *dpy,
                        const struct native_event_handler *event_handler)
{
   struct dri2_display *dri2dpy;

   dri2dpy = CALLOC_STRUCT(dri2_display);
   if (!dri2dpy)
      return NULL;

   dri2dpy->event_handler = event_handler;

   dri2dpy->dpy = dpy;
   if (!dri2dpy->dpy) {
      /* No connection supplied: open (and own) one ourselves. */
      dri2dpy->dpy = XOpenDisplay(NULL);
      if (!dri2dpy->dpy)
         goto fail;
      dri2dpy->own_dpy = TRUE;
   }

   dri2dpy->xscr_number = DefaultScreen(dri2dpy->dpy);
   dri2dpy->xscr = x11_screen_create(dri2dpy->dpy, dri2dpy->xscr_number);
   if (!dri2dpy->xscr)
      goto fail;

   dri2dpy->surfaces = util_hash_table_create(dri2_display_hash_table_hash,
                                              dri2_display_hash_table_compare);
   if (!dri2dpy->surfaces)
      goto fail;

   /* Display vtable. */
   dri2dpy->base.init_screen = dri2_display_init_screen;
   dri2dpy->base.destroy = dri2_display_destroy;
   dri2dpy->base.get_param = dri2_display_get_param;
   dri2dpy->base.get_configs = dri2_display_get_configs;
   dri2dpy->base.get_pixmap_format = dri2_display_get_pixmap_format;
   dri2dpy->base.copy_to_pixmap = native_display_copy_to_pixmap;
   dri2dpy->base.create_window_surface = dri2_display_create_window_surface;
   dri2dpy->base.create_pixmap_surface = dri2_display_create_pixmap_surface;
#ifdef HAVE_WAYLAND_BACKEND
   dri2dpy->base.wayland_bufmgr = &dri2_display_wayland_bufmgr;
#endif

   return &dri2dpy->base;

fail:
   dri2_display_destroy(&dri2dpy->base);
   return NULL;
}
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws) { struct radeon_bomgr *mgr; mgr = CALLOC_STRUCT(radeon_bomgr); if (!mgr) return NULL; mgr->base.destroy = radeon_bomgr_destroy; mgr->base.create_buffer = radeon_bomgr_create_bo; mgr->base.flush = radeon_bomgr_flush; mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy; mgr->rws = rws; mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare); pipe_mutex_init(mgr->bo_handles_mutex); return &mgr->base; }
/* NOTE(review): this definition is truncated in the visible chunk -- the
 * chipset dispatch, screen registration, and error-handling tail are not
 * shown here. Comments below cover only the visible portion.
 */
/* Create (or reuse) a pipe_screen for a nouveau DRM fd. */
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
   struct nouveau_drm *drm = NULL;
   struct nouveau_device *dev = NULL;
   struct nouveau_screen *(*init)(struct nouveau_device *);
   struct nouveau_screen *screen = NULL;
   int ret, dupfd;

   pipe_mutex_lock(nouveau_screen_mutex);
   /* Lazily create the fd -> screen reuse table. */
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab) {
         pipe_mutex_unlock(nouveau_screen_mutex);
         return NULL;
      }
   }

   /* Reuse an existing screen for this device if one is already alive. */
   screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (screen) {
      screen->refcount++;
      pipe_mutex_unlock(nouveau_screen_mutex);
      return &screen->base;
   }

   /* Since the screen re-use is based on the device node and not the fd,
    * create a copy of the fd to be owned by the device. Otherwise a
    * scenario could occur where two screens are created, and the first
    * one is shut down, along with the fd being closed. The second
    * (identical) screen would now have a reference to the closed fd. We
    * avoid this by duplicating the original fd. Note that
    * nouveau_device_wrap does not close the fd in case of a device
    * creation error.
    */
   dupfd = dup(fd);

   ret = nouveau_drm_new(dupfd, &drm);
   if (ret)
      goto err;

   ret = nouveau_device_new(&drm->client, NV_DEVICE,
                            &(struct nv_device_v0) {
                               .device = ~0ULL,
                            }, sizeof(struct nv_device_v0), &dev);
/* Allocate the video-decoding resources for the h264 OMX decoder:
 * screen, pipe context, compositor (+ its state), CSC matrix, DPB list,
 * and the decode-target hash table.
 *
 * On any failure, everything created so far is torn down and
 * OMX_ErrorInsufficientResources is returned.
 *
 * Fix: util_hash_table_create() for video_buffer_map was previously the
 * only unchecked allocation; it is now handled like the preceding steps.
 */
static OMX_ERRORTYPE h264d_prc_allocate_resources(void *ap_obj, OMX_U32 a_pid)
{
   vid_dec_PrivateType *priv = ap_obj;
   struct pipe_screen *screen;
   vl_csc_matrix csc;

   assert (priv);

   priv->screen = omx_get_screen();
   if (!priv->screen)
      return OMX_ErrorInsufficientResources;

   screen = priv->screen->pscreen;
   priv->pipe = screen->context_create(screen, priv->screen, 0);
   if (!priv->pipe)
      return OMX_ErrorInsufficientResources;

   if (!vl_compositor_init(&priv->compositor, priv->pipe)) {
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   if (!vl_compositor_init_state(&priv->cstate, priv->pipe)) {
      vl_compositor_cleanup(&priv->compositor);
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, NULL, true, &csc);
   if (!vl_compositor_set_csc_matrix(&priv->cstate, (const vl_csc_matrix *)&csc, 1.0f, 0.0f)) {
      vl_compositor_cleanup(&priv->compositor);
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   LIST_INITHEAD(&priv->codec_data.h264.dpb_list);

   priv->video_buffer_map = util_hash_table_create(handle_hash, handle_compare);
   if (!priv->video_buffer_map) {
      vl_compositor_cleanup(&priv->compositor);
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   return OMX_ErrorNone;
}
/* Return a cached symbol name for addr, resolving it on first request.
 *
 * The name string is strdup'd into the global symbols_hash (guarded by
 * symbols_mutex) and kept for reuse; no eviction or freeing happens here.
 */
const char* debug_symbol_name_cached(const void *addr)
{
   const char* name;

   pipe_mutex_lock(symbols_mutex);
   /* Lazily create the address -> name cache. */
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, (void*)addr);
   if(!name) {
      char buf[1024];
      /* Cache miss: run the underlying lookup once and remember a copy. */
      debug_symbol_name(addr, buf, sizeof(buf));
      name = strdup(buf);
      util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
   }
   pipe_mutex_unlock(symbols_mutex);

   return name;
}
/**
 * Return a small integer serial number for the given pointer.
 *
 * Serials are assigned on first sight and remembered in serials_hash
 * (guarded by serials_mutex). Returns TRUE if the pointer already had a
 * serial, FALSE if a new one was just assigned; *pserial receives the
 * serial either way.
 */
static boolean
debug_serial(void *p, unsigned *pserial)
{
   unsigned serial;
   boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   /* Lazily create the mutex on Windows.
    * NOTE(review): the first-call check is itself unsynchronized --
    * confirm the very first call cannot race. */
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(serials_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(serials_mutex);
   /* Lazily create the pointer -> serial table. */
   if (!serials_hash)
      serials_hash = util_hash_table_create(hash_ptr, compare_ptr);

   serial = (unsigned) (uintptr_t) util_hash_table_get(serials_hash, p);
   if (!serial) {
      /* time to stop logging... (you'll have a 100 GB logfile at least at
       * this point) TODO: avoid this */
      serial = ++serials_last;
      if (!serial) {
         /* serials_last wrapped to 0: more than 2^32 objects seen. */
         debug_error("More than 2^32 objects detected, aborting.\n");
         os_abort();
      }

      util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
      found = FALSE;
   }
   pipe_mutex_unlock(serials_mutex);

   *pserial = serial;

   return found;
}
struct pipe_screen * virgl_drm_screen_create(int fd) { struct pipe_screen *pscreen = NULL; mtx_lock(&virgl_screen_mutex); if (!fd_tab) { fd_tab = util_hash_table_create(hash_fd, compare_fd); if (!fd_tab) goto unlock; } pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd)); if (pscreen) { virgl_screen(pscreen)->refcnt++; } else { struct virgl_winsys *vws; int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3); vws = virgl_drm_winsys_create(dup_fd); pscreen = virgl_create_screen(vws); if (pscreen) { util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen); /* Bit of a hack, to avoid circular linkage dependency, * ie. pipe driver having to call in to winsys, we * override the pipe drivers screen->destroy(): */ virgl_screen(pscreen)->winsys_priv = pscreen->destroy; pscreen->destroy = virgl_drm_screen_destroy; } } unlock: mtx_unlock(&virgl_screen_mutex); return pscreen; }
/**
 * Create (or reuse) a radeon winsys for the given DRM fd.
 *
 * fd_tab (guarded by fd_tab_mutex) maps fds to live winsys objects so a
 * second open of the same device shares one winsys via refcounting.
 * NOTE(review): the lookup uses the caller's fd while insertion uses the
 * dup'd ws->fd, so hash_fd/compare_fd presumably key on the underlying
 * device rather than the raw fd number -- confirm.
 *
 * The screen is created while still holding fd_tab_mutex so concurrent
 * callers never observe a half-initialized winsys (see the comments at
 * the bottom of the success path).
 */
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
    struct radeon_drm_winsys *ws;

    pipe_mutex_lock(fd_tab_mutex);
    if (!fd_tab) {
        /* NOTE(review): creation failure is unchecked here; a NULL fd_tab
         * would be passed to util_hash_table_get below -- confirm. */
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    /* Reuse an existing winsys for this device if there is one. */
    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->reference);
        pipe_mutex_unlock(fd_tab_mutex);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    /* Own a private duplicate of the fd. */
    ws->fd = dup(fd);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                       MIN2(ws->info.vram_size, ws->info.gart_size));
    if (!ws->cman)
        goto fail;

    /* The surface manager is only used for R600 and newer (per this
     * generation check). */
    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(ws->fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->reference, 1);

    /* Set functions. */
    ws->base.unref = radeon_winsys_unref;
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.query_value = radeon_query_value;
    ws->base.read_registers = radeon_read_registers;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);
    radeon_surface_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    ws->ncs = 0;
    pipe_semaphore_init(&ws->cs_queued, 0);

    /* Optionally submit command streams from a separate thread. */
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    /* Create the screen at the end. The winsys must be initialized
     * completely.
     *
     * Alternatively, we could create the screen based on "ws->gen"
     * and link all drivers into one binary blob. */
    ws->base.screen = screen_create(&ws->base);
    if (!ws->base.screen) {
        radeon_winsys_destroy(&ws->base);
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
    pipe_mutex_unlock(fd_tab_mutex);

    return &ws->base;

fail:
    pipe_mutex_unlock(fd_tab_mutex);
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    if (ws->fd >= 0)
        close(ws->fd);
    FREE(ws);
    return NULL;
}
/**
 * Create (or reuse) a pipe_screen for a nouveau DRM fd.
 *
 * Screen reuse goes through fd_tab (guarded by nouveau_screen_mutex): an
 * existing screen gets its refcount bumped. Otherwise the fd is dup'd,
 * wrapped into a nouveau_device, and dispatched to the per-generation
 * screen constructor based on the chipset id. Returns NULL on failure.
 */
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
   struct nouveau_device *dev = NULL;
   struct pipe_screen *(*init)(struct nouveau_device *);
   struct nouveau_screen *screen;
   int ret, dupfd = -1;

   pipe_mutex_lock(nouveau_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab)
         goto err;
   }

   /* Reuse an existing screen for this device if one is already alive. */
   screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (screen) {
      screen->refcount++;
      pipe_mutex_unlock(nouveau_screen_mutex);
      return &screen->base;
   }

   /* Since the screen re-use is based on the device node and not the fd,
    * create a copy of the fd to be owned by the device. Otherwise a
    * scenario could occur where two screens are created, and the first
    * one is shut down, along with the fd being closed. The second
    * (identical) screen would now have a reference to the closed fd. We
    * avoid this by duplicating the original fd. Note that
    * nouveau_device_wrap does not close the fd in case of a device
    * creation error.
    */
   dupfd = dup(fd);
   ret = nouveau_device_wrap(dupfd, 1, &dev);
   if (ret)
      goto err;

   /* Pick the screen constructor for this GPU generation. */
   switch (dev->chipset & ~0xf) {
   case 0x30:
   case 0x40:
   case 0x60:
      init = nv30_screen_create;
      break;
   case 0x50:
   case 0x80:
   case 0x90:
   case 0xa0:
      init = nv50_screen_create;
      break;
   case 0xc0:
   case 0xd0:
   case 0xe0:
   case 0xf0:
   case 0x100:
      init = nvc0_screen_create;
      break;
   default:
      debug_printf("%s: unknown chipset nv%02x\n", __func__, dev->chipset);
      goto err;
   }

   screen = (struct nouveau_screen*)init(dev);
   if (!screen)
      goto err;

   util_hash_table_set(fd_tab, intptr_to_pointer(fd), screen);
   screen->refcount = 1;
   pipe_mutex_unlock(nouveau_screen_mutex);
   return &screen->base;

err:
   /* Once the device exists it owns dupfd (per the comment above);
    * otherwise close the duplicate ourselves. */
   if (dev)
      nouveau_device_del(&dev);
   else if (dupfd >= 0)
      close(dupfd);
   pipe_mutex_unlock(nouveau_screen_mutex);
   return NULL;
}
/* Constructor for a d3d9 volume (one mip level of a volume texture).
 *
 * Stand-alone volumes do not exist: pContainer (the owning object) is
 * required. Fills in the pipe_resource template (This->info) that is used
 * if the data has to be allocated, references pResource when one is
 * supplied, and computes the CPU-side strides.
 */
static HRESULT
NineVolume9_ctor( struct NineVolume9 *This,
                  struct NineUnknownParams *pParams,
                  struct NineUnknown *pContainer,
                  struct pipe_resource *pResource,
                  unsigned Level,
                  D3DVOLUME_DESC *pDesc )
{
    HRESULT hr;

    assert(pContainer); /* stand-alone volumes can't be created */

    DBG("This=%p pContainer=%p pDevice=%p pResource=%p Level=%u pDesc=%p\n",
        This, pContainer, pParams->device, pResource, Level, pDesc);

    /* Mark this as a special surface held by another internal resource. */
    pParams->container = pContainer;

    /* D3DUSAGE_DYNAMIC is rejected on managed-pool volumes. */
    user_assert(!(pDesc->Usage & D3DUSAGE_DYNAMIC) ||
                (pDesc->Pool != D3DPOOL_MANAGED), D3DERR_INVALIDCALL);

    /* Default-pool volumes must come with an existing resource. */
    assert(pResource || pDesc->Pool != D3DPOOL_DEFAULT);

    hr = NineUnknown_ctor(&This->base, pParams);
    if (FAILED(hr))
        return hr;

    /* Per-GUID private-data table. */
    This->pdata = util_hash_table_create(ht_guid_hash, ht_guid_compare);
    if (!This->pdata)
        return E_OUTOFMEMORY;

    pipe_resource_reference(&This->resource, pResource);

    This->pipe = pParams->device->pipe;
    This->transfer = NULL;
    This->lock_count = 0;

    This->level = Level;
    This->level_actual = Level;
    This->desc = *pDesc;

    /* Template for the backing pipe_resource (used when we must allocate
     * the data ourselves below). */
    This->info.screen = pParams->device->screen;
    This->info.target = PIPE_TEXTURE_3D;
    This->info.width0 = pDesc->Width;
    This->info.height0 = pDesc->Height;
    This->info.depth0 = pDesc->Depth;
    This->info.last_level = 0;
    This->info.array_size = 1;
    This->info.nr_samples = 0;
    This->info.usage = PIPE_USAGE_DEFAULT;
    This->info.bind = PIPE_BIND_SAMPLER_VIEW;
    This->info.flags = 0;
    This->info.format = d3d9_to_pipe_format_checked(This->info.screen,
                                                    pDesc->Format,
                                                    This->info.target,
                                                    This->info.nr_samples,
                                                    This->info.bind, FALSE);
    if (This->info.format == PIPE_FORMAT_NONE)
        return D3DERR_DRIVERINTERNALERROR;

    /* Row stride (4-byte aligned) and slice stride. */
    This->stride = util_format_get_stride(This->info.format, pDesc->Width);
    This->stride = align(This->stride, 4);
    This->layer_stride = util_format_get_2d_size(This->info.format,
                                                 This->stride, pDesc->Height);

    /* System-memory volumes use staging usage. */
    if (pDesc->Pool == D3DPOOL_SYSTEMMEM)
        This->info.usage = PIPE_USAGE_STAGING;

    if (!This->resource) {
        hr = NineVolume9_AllocateData(This);
        if (FAILED(hr))
            return hr;
    }
    return D3D_OK;
}
/**
 * Create (or reuse) a vmware winsys screen for the given DRM fd.
 *
 * Screens are shared per device node: dev_hash maps the device id
 * (st_rdev) to a live screen, so two fds referring to the same device get
 * the same screen with an incremented open_count.
 *
 * NOTE(review): dev_hash is checked and populated without any visible
 * locking -- confirm screen creation is serialized by the caller.
 */
struct vmw_winsys_screen *
vmw_winsys_create( int fd )
{
   struct vmw_winsys_screen *vws;
   struct stat stat_buf;

   /* Lazily create the device -> screen table. */
   if (dev_hash == NULL) {
      dev_hash = util_hash_table_create(vmw_dev_hash, vmw_dev_compare);
      if (dev_hash == NULL)
         return NULL;
   }

   if (fstat(fd, &stat_buf))
      return NULL;

   /* Reuse an existing screen for this device node. */
   vws = util_hash_table_get(dev_hash, &stat_buf.st_rdev);
   if (vws) {
      vws->open_count++;
      return vws;
   }

   vws = CALLOC_STRUCT(vmw_winsys_screen);
   if (!vws)
      goto out_no_vws;

   vws->device = stat_buf.st_rdev;
   vws->open_count = 1;
   /* Own a private duplicate of the fd so the caller can close theirs. */
   vws->ioctl.drm_fd = dup(fd);
   vws->base.have_gb_dma = TRUE;
   vws->base.need_to_rebind_resources = FALSE;

   if (!vmw_ioctl_init(vws))
      goto out_no_ioctl;

   vws->fence_ops = vmw_fence_ops_create(vws);
   if (!vws->fence_ops)
      goto out_no_fence_ops;

   if(!vmw_pools_init(vws))
      goto out_no_pools;
   if (!vmw_winsys_screen_init_svga(vws))
      goto out_no_svga;

   /* Key by the address of vws->device so the dev_t storage outlives the
    * hash entry. */
   if (util_hash_table_set(dev_hash, &vws->device, vws) != PIPE_OK)
      goto out_no_hash_insert;

   return vws;

   /* Unwind in reverse order of construction. */
out_no_hash_insert:
out_no_svga:
   vmw_pools_cleanup(vws);
out_no_pools:
   vws->fence_ops->destroy(vws->fence_ops);
out_no_fence_ops:
   vmw_ioctl_cleanup(vws);
out_no_ioctl:
   close(vws->ioctl.drm_fd);
   FREE(vws);
out_no_vws:
   return NULL;
}
struct radeon_winsys *radeon_drm_winsys_create(int fd) { struct radeon_drm_winsys *ws; if (!fd_tab) { fd_tab = util_hash_table_create(hash_fd, compare_fd); } ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd)); if (ws) { pipe_reference(NULL, &ws->base.reference); return &ws->base; } ws = CALLOC_STRUCT(radeon_drm_winsys); if (!ws) { return NULL; } ws->fd = fd; util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws); if (!do_winsys_init(ws)) goto fail; /* Create managers. */ ws->kman = radeon_bomgr_create(ws); if (!ws->kman) goto fail; ws->cman = pb_cache_manager_create(ws->kman, 1000000); if (!ws->cman) goto fail; if (ws->gen >= DRV_R600) { ws->surf_man = radeon_surface_manager_new(fd); if (!ws->surf_man) goto fail; } /* init reference */ pipe_reference_init(&ws->base.reference, 1); /* Set functions. */ ws->base.destroy = radeon_winsys_destroy; ws->base.query_info = radeon_query_info; ws->base.cs_request_feature = radeon_cs_request_feature; ws->base.surface_init = radeon_drm_winsys_surface_init; ws->base.surface_best = radeon_drm_winsys_surface_best; ws->base.query_value = radeon_query_value; radeon_bomgr_init_functions(ws); radeon_drm_cs_init_functions(ws); pipe_mutex_init(ws->hyperz_owner_mutex); pipe_mutex_init(ws->cmask_owner_mutex); pipe_mutex_init(ws->cs_stack_lock); p_atomic_set(&ws->ncs, 0); pipe_semaphore_init(&ws->cs_queued, 0); pipe_condvar_init(ws->cs_queue_empty); if (ws->num_cpus > 1 && debug_get_option_thread()) ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws); return &ws->base; fail: if (ws->cman) ws->cman->destroy(ws->cman); if (ws->kman) ws->kman->destroy(ws->kman); if (ws->surf_man) radeon_surface_manager_free(ws->surf_man); FREE(ws); return NULL; }