static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws, struct virgl_hw_res *res) { struct drm_gem_close args; if (res->flinked) { mtx_lock(&qdws->bo_handles_mutex); util_hash_table_remove(qdws->bo_names, (void *)(uintptr_t)res->flink); mtx_unlock(&qdws->bo_handles_mutex); } if (res->bo_handle) { mtx_lock(&qdws->bo_handles_mutex); util_hash_table_remove(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle); mtx_unlock(&qdws->bo_handles_mutex); } if (res->ptr) os_munmap(res->ptr, res->size); memset(&args, 0, sizeof(args)); args.handle = res->bo_handle; drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args); FREE(res); }
/**
 * Drop the serial-number entry recorded for the given pointer.
 *
 * Takes the global serials mutex so concurrent lookups/inserts into
 * serials_hash stay consistent.
 */
static void
debug_serial_delete(void *p)
{
   pipe_mutex_lock(serials_mutex);
   util_hash_table_remove(serials_hash, p);
   pipe_mutex_unlock(serials_mutex);
}
static void radeon_winsys_destroy(struct radeon_winsys *rws) { struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws; if (ws->thread) { ws->kill_thread = 1; pipe_semaphore_signal(&ws->cs_queued); pipe_thread_wait(ws->thread); } pipe_semaphore_destroy(&ws->cs_queued); pipe_condvar_destroy(ws->cs_queue_empty); if (!pipe_reference(&ws->base.reference, NULL)) { return; } pipe_mutex_destroy(ws->hyperz_owner_mutex); pipe_mutex_destroy(ws->cmask_owner_mutex); pipe_mutex_destroy(ws->cs_stack_lock); ws->cman->destroy(ws->cman); ws->kman->destroy(ws->kman); if (ws->gen >= DRV_R600) { radeon_surface_manager_free(ws->surf_man); } if (fd_tab) { util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd)); } FREE(rws); }
/* Release one open-count reference on the vmware winsys screen; the last
 * closer tears everything down and frees the screen. */
void
vmw_winsys_destroy(struct vmw_winsys_screen *vws)
{
   if (--vws->open_count != 0)
      return;

   util_hash_table_remove(dev_hash, &vws->device);
   vmw_pools_cleanup(vws);
   vws->fence_ops->destroy(vws->fence_ops);
   vmw_ioctl_cleanup(vws);
   close(vws->ioctl.drm_fd);
   FREE(vws);
}
/* Final teardown of a buffer object: unregister it from the device's
 * lookup tables, drop any lingering CPU mapping, close the KMS handle,
 * and free the bookkeeping struct. */
drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
	amdgpu_device_handle dev = bo->dev;

	/* Unhook the bo from both device hash tables under the table lock. */
	pthread_mutex_lock(&dev->bo_table_mutex);
	util_hash_table_remove(dev->bo_handles, (void*)(uintptr_t)bo->handle);
	if (bo->flink_name) {
		util_hash_table_remove(dev->bo_flink_names,
				       (void*)(uintptr_t)bo->flink_name);
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	/* Force any outstanding CPU mapping to be released exactly once. */
	if (bo->cpu_map_count > 0) {
		bo->cpu_map_count = 1;
		amdgpu_bo_cpu_unmap(bo);
	}

	amdgpu_close_kms_handle(dev, bo->handle);
	pthread_mutex_destroy(&bo->cpu_access_mutex);
	free(bo);
}
/* Drop one reference on the screen; returns true when the caller held the
 * last reference (or the screen is pinned with refcount == -1). */
bool
nouveau_drm_screen_unref(struct nouveau_screen *screen)
{
	int remaining;

	/* refcount == -1 marks a screen that is never unreferenced. */
	if (screen->refcount == -1)
		return true;

	pipe_mutex_lock(nouveau_screen_mutex);
	remaining = --screen->refcount;
	assert(remaining >= 0);
	if (remaining == 0)
		util_hash_table_remove(fd_tab,
				       intptr_to_pointer(screen->drm->fd));
	pipe_mutex_unlock(nouveau_screen_mutex);

	return remaining == 0;
}
/* Remove and free the private data previously attached to this volume
 * under the given GUID.  Returns D3DERR_NOTFOUND if no entry exists. */
HRESULT WINAPI
NineVolume9_FreePrivateData( struct NineVolume9 *This,
                             REFGUID refguid )
{
    struct pheader *hdr;

    DBG("This=%p refguid=%p\n", This, refguid);

    hdr = util_hash_table_get(This->pdata, refguid);
    if (!hdr)
        return D3DERR_NOTFOUND;

    /* Free the payload first, then drop the table entry. */
    ht_guid_delete(NULL, hdr, NULL);
    util_hash_table_remove(This->pdata, refguid);

    return D3D_OK;
}
/* Drop one winsys reference; returns true when this was the last one and
 * the caller should proceed with destruction. */
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{
    struct radeon_drm_winsys *rws = (struct radeon_drm_winsys*)ws;
    bool last_ref;

    /* The refcount drop and the fd-table removal must happen atomically
     * with respect to radeon_drm_winsys_create in another thread, which
     * would otherwise fetch a winsys whose counter just hit zero. */
    pipe_mutex_lock(fd_tab_mutex);

    last_ref = pipe_reference(&rws->reference, NULL);
    if (last_ref && fd_tab)
        util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));

    pipe_mutex_unlock(fd_tab_mutex);
    return last_ref;
}
static void virgl_drm_screen_destroy(struct pipe_screen *pscreen) { struct virgl_screen *screen = virgl_screen(pscreen); boolean destroy; mtx_lock(&virgl_screen_mutex); destroy = --screen->refcnt == 0; if (destroy) { int fd = virgl_drm_winsys(screen->vws)->fd; util_hash_table_remove(fd_tab, intptr_to_pointer(fd)); } mtx_unlock(&virgl_screen_mutex); if (destroy) { pscreen->destroy = screen->winsys_priv; pscreen->destroy(pscreen); } }
static void radeon_bo_destroy(struct pb_buffer *_buf) { struct radeon_bo *bo = radeon_bo(_buf); struct drm_gem_close args = {}; if (bo->name) { pipe_mutex_lock(bo->mgr->bo_handles_mutex); util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->name); pipe_mutex_unlock(bo->mgr->bo_handles_mutex); } if (bo->ptr) munmap(bo->ptr, bo->size); /* Close object. */ args.handle = bo->handle; drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args); pipe_mutex_destroy(bo->map_mutex); FREE(bo); }
static void dri2_surface_destroy(struct native_surface *nsurf) { struct dri2_surface *dri2surf = dri2_surface(nsurf); int i; FREE(dri2surf->last_xbufs); for (i = 0; i < NUM_NATIVE_ATTACHMENTS; i++) { struct pipe_resource *ptex = dri2surf->textures[i]; pipe_resource_reference(&ptex, NULL); } if (dri2surf->drawable) { x11_drawable_enable_dri2(dri2surf->dri2dpy->xscr, dri2surf->drawable, FALSE); util_hash_table_remove(dri2surf->dri2dpy->surfaces, (void *) dri2surf->drawable); } FREE(dri2surf); }