static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    /* Check the reference count first: the CS thread, semaphore and
     * managers below are shared by every screen holding a reference,
     * so they must only be torn down on the last destroy() call. */
    if (!pipe_reference(&ws->base.reference, NULL)) {
        return;
    }

    if (ws->thread) {
        ws->kill_thread = 1;
        pipe_semaphore_signal(&ws->cs_queued);
        pipe_thread_wait(ws->thread);
    }
    pipe_semaphore_destroy(&ws->cs_queued);
    pipe_condvar_destroy(ws->cs_queue_empty);

    pipe_mutex_destroy(ws->hyperz_owner_mutex);
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);

    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
    }

    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    FREE(rws);
}
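/* All of these functions key the shared-screen table off a file descriptor
 * via fd_tab, hash_fd, compare_fd and intptr_to_pointer, none of which appear
 * in the excerpts here. A minimal sketch of what they typically look like
 * follows: the fd is hashed through fstat() on the underlying device node, so
 * two different fds that refer to the same DRM device map to the same table
 * entry. Treat this as an illustration of the scheme, not the exact
 * definitions each driver uses. */

#include <sys/stat.h>

static struct util_hash_table *fd_tab = NULL;

#define intptr_to_pointer(v) ((void *)(intptr_t)(v))
#define pointer_to_intptr(p) ((int)(intptr_t)(p))

static unsigned hash_fd(void *key)
{
    int fd = pointer_to_intptr(key);
    struct stat st;

    fstat(fd, &st);
    return st.st_dev ^ st.st_ino ^ st.st_rdev;
}

static int compare_fd(void *key1, void *key2)
{
    int fd1 = pointer_to_intptr(key1);
    int fd2 = pointer_to_intptr(key2);
    struct stat st1, st2;

    fstat(fd1, &st1);
    fstat(fd2, &st2);

    /* Return 0 ("equal") only when both fds name the same device node. */
    return st1.st_dev != st2.st_dev ||
           st1.st_ino != st2.st_ino ||
           st1.st_rdev != st2.st_rdev;
}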
struct pipe_screen *
virgl_drm_screen_create(int fd)
{
    struct pipe_screen *pscreen = NULL;

    mtx_lock(&virgl_screen_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
        if (!fd_tab)
            goto unlock;
    }

    pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (pscreen) {
        virgl_screen(pscreen)->refcnt++;
    } else {
        struct virgl_winsys *vws;
        int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

        vws = virgl_drm_winsys_create(dup_fd);
        if (!vws) {
            /* Don't leak the duplicated fd, assuming the winsys does not
             * take ownership of it on failure. */
            close(dup_fd);
            goto unlock;
        }

        pscreen = virgl_create_screen(vws);
        if (pscreen) {
            util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);

            /* Bit of a hack, to avoid circular linkage dependency,
             * ie. pipe driver having to call in to winsys, we
             * override the pipe drivers screen->destroy(): */
            virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
            pscreen->destroy = virgl_drm_screen_destroy;
        }
    }

unlock:
    mtx_unlock(&virgl_screen_mutex);
    return pscreen;
}
bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
{
    int ret;

    /* A refcount of -1 marks a screen that was never registered in fd_tab;
     * treat it as uniquely owned and let the caller destroy it. */
    if (screen->refcount == -1)
        return true;

    pipe_mutex_lock(nouveau_screen_mutex);
    ret = --screen->refcount;
    assert(ret >= 0);
    if (ret == 0)
        util_hash_table_remove(fd_tab, intptr_to_pointer(screen->drm->fd));
    pipe_mutex_unlock(nouveau_screen_mutex);

    return ret == 0;
}
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{
    struct radeon_drm_winsys *rws = (struct radeon_drm_winsys*)ws;
    bool destroy;

    /* When the reference counter drops to zero, remove the fd from the table.
     * This must happen while the mutex is locked, so that
     * radeon_drm_winsys_create in another thread doesn't get the winsys
     * from the table when the counter drops to 0. */
    pipe_mutex_lock(fd_tab_mutex);

    destroy = pipe_reference(&rws->reference, NULL);
    if (destroy && fd_tab)
        util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));

    pipe_mutex_unlock(fd_tab_mutex);
    return destroy;
}
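/* How unref() and destroy() divide the work, sketched from the caller's side:
 * the pipe driver's screen destroy calls unref() first and only tears the
 * winsys down when it held the last reference. r300_screen_destroy and
 * radeon_screen_winsys are hypothetical names used purely for illustration;
 * real drivers keep a winsys pointer in their screen struct. */

static void r300_screen_destroy(struct pipe_screen *pscreen)
{
    struct radeon_winsys *ws = radeon_screen_winsys(pscreen);

    if (!ws->unref(ws))
        return;  /* Another screen still shares this winsys. */

    /* Last reference gone: tear down the winsys, then free driver state. */
    ws->destroy(ws);
    FREE(pscreen);
}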
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
    struct nouveau_drm *drm = NULL;
    struct nouveau_device *dev = NULL;
    struct nouveau_screen *(*init)(struct nouveau_device *);
    struct nouveau_screen *screen = NULL;
    int ret, dupfd;

    pipe_mutex_lock(nouveau_screen_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
        if (!fd_tab) {
            pipe_mutex_unlock(nouveau_screen_mutex);
            return NULL;
        }
    }

    screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (screen) {
        screen->refcount++;
        pipe_mutex_unlock(nouveau_screen_mutex);
        return &screen->base;
    }

    /* Since the screen re-use is based on the device node and not the fd,
     * create a copy of the fd to be owned by the device. Otherwise a
     * scenario could occur where two screens are created, and the first
     * one is shut down, along with the fd being closed. The second
     * (identical) screen would now have a reference to the closed fd. We
     * avoid this by duplicating the original fd. Note that
     * nouveau_device_wrap does not close the fd in case of a device
     * creation error. */
    dupfd = dup(fd);

    ret = nouveau_drm_new(dupfd, &drm);
    if (ret)
        goto err;
    ret = nouveau_device_new(&drm->client, NV_DEVICE,
                             &(struct nv_device_v0) {
                                 .device = ~0ULL,
                             }, sizeof(struct nv_device_v0), &dev);
    if (ret)
        goto err;

    /* The excerpt ends at the device creation; the remainder is assumed to
     * follow the same flow as the nouveau_device_wrap variant further down:
     * pick a screen constructor by chipset, register the screen in fd_tab,
     * and unwind on error. */
    switch (dev->chipset & ~0xf) {
    case 0x30:
    case 0x40:
    case 0x60:
        init = nv30_screen_create;
        break;
    case 0x50:
    case 0x80:
    case 0x90:
    case 0xa0:
        init = nv50_screen_create;
        break;
    case 0xc0:
    case 0xd0:
    case 0xe0:
    case 0xf0:
    case 0x100:
        init = nvc0_screen_create;
        break;
    default:
        debug_printf("%s: unknown chipset nv%02x\n", __func__, dev->chipset);
        goto err;
    }

    screen = init(dev);
    if (!screen)
        goto err;

    util_hash_table_set(fd_tab, intptr_to_pointer(fd), screen);
    screen->refcount = 1;
    pipe_mutex_unlock(nouveau_screen_mutex);
    return &screen->base;

err:
    if (dev)
        nouveau_device_del(&dev);
    else if (drm)
        nouveau_drm_del(&drm);
    else if (dupfd >= 0)
        close(dupfd);
    pipe_mutex_unlock(nouveau_screen_mutex);
    return NULL;
}
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
    struct virgl_screen *screen = virgl_screen(pscreen);
    boolean destroy;

    mtx_lock(&virgl_screen_mutex);
    destroy = --screen->refcnt == 0;
    if (destroy) {
        int fd = virgl_drm_winsys(screen->vws)->fd;
        util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
    }
    mtx_unlock(&virgl_screen_mutex);

    if (destroy) {
        /* Restore the pipe driver's original destroy(), stashed in
         * winsys_priv by virgl_drm_screen_create, and invoke it. */
        pscreen->destroy = screen->winsys_priv;
        pscreen->destroy(pscreen);
    }
}
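/* End-to-end behaviour of the two virgl functions above, as a sketch:
 * creating a screen twice for the same fd yields one shared screen with
 * refcnt == 2, and only the second destroy runs the pipe driver's original
 * teardown. The function and fd names here are illustrative only. */

void virgl_sharing_example(int drm_fd)
{
    struct pipe_screen *a = virgl_drm_screen_create(drm_fd);
    struct pipe_screen *b = virgl_drm_screen_create(drm_fd);

    /* a == b: the second call found the entry in fd_tab and bumped refcnt. */

    a->destroy(a);   /* refcnt 2 -> 1: nothing is freed yet. */
    b->destroy(b);   /* refcnt 1 -> 0: removes the fd_tab entry, then calls
                      * the original screen->destroy() saved in winsys_priv. */
}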
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
    struct radeon_drm_winsys *ws;

    pipe_mutex_lock(fd_tab_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
        if (!fd_tab) {
            pipe_mutex_unlock(fd_tab_mutex);
            return NULL;
        }
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->reference);
        pipe_mutex_unlock(fd_tab_mutex);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    ws->fd = dup(fd);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;

    ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                       MIN2(ws->info.vram_size,
                                            ws->info.gart_size));
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(ws->fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->reference, 1);

    /* Set functions. */
    ws->base.unref = radeon_winsys_unref;
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.query_value = radeon_query_value;
    ws->base.read_registers = radeon_read_registers;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);
    radeon_surface_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    ws->ncs = 0;
    pipe_semaphore_init(&ws->cs_queued, 0);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    /* Create the screen at the end. The winsys must be initialized
     * completely.
     *
     * Alternatively, we could create the screen based on "ws->gen"
     * and link all drivers into one binary blob. */
    ws->base.screen = screen_create(&ws->base);
    if (!ws->base.screen) {
        radeon_winsys_destroy(&ws->base);
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
    pipe_mutex_unlock(fd_tab_mutex);

    return &ws->base;

fail:
    pipe_mutex_unlock(fd_tab_mutex);
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    if (ws->fd >= 0)
        close(ws->fd);
    FREE(ws);
    return NULL;
}
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
    struct nouveau_device *dev = NULL;
    struct pipe_screen *(*init)(struct nouveau_device *);
    struct nouveau_screen *screen;
    int ret, dupfd = -1;

    pipe_mutex_lock(nouveau_screen_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
        if (!fd_tab)
            goto err;
    }

    screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (screen) {
        screen->refcount++;
        pipe_mutex_unlock(nouveau_screen_mutex);
        return &screen->base;
    }

    /* Since the screen re-use is based on the device node and not the fd,
     * create a copy of the fd to be owned by the device. Otherwise a
     * scenario could occur where two screens are created, and the first
     * one is shut down, along with the fd being closed. The second
     * (identical) screen would now have a reference to the closed fd. We
     * avoid this by duplicating the original fd. Note that
     * nouveau_device_wrap does not close the fd in case of a device
     * creation error. */
    dupfd = dup(fd);

    ret = nouveau_device_wrap(dupfd, 1, &dev);
    if (ret)
        goto err;

    switch (dev->chipset & ~0xf) {
    case 0x30:
    case 0x40:
    case 0x60:
        init = nv30_screen_create;
        break;
    case 0x50:
    case 0x80:
    case 0x90:
    case 0xa0:
        init = nv50_screen_create;
        break;
    case 0xc0:
    case 0xd0:
    case 0xe0:
    case 0xf0:
    case 0x100:
        init = nvc0_screen_create;
        break;
    default:
        debug_printf("%s: unknown chipset nv%02x\n", __func__, dev->chipset);
        goto err;
    }

    screen = (struct nouveau_screen*)init(dev);
    if (!screen)
        goto err;

    util_hash_table_set(fd_tab, intptr_to_pointer(fd), screen);
    screen->refcount = 1;
    pipe_mutex_unlock(nouveau_screen_mutex);
    return &screen->base;

err:
    if (dev)
        nouveau_device_del(&dev);
    else if (dupfd >= 0)
        close(dupfd);
    pipe_mutex_unlock(nouveau_screen_mutex);
    return NULL;
}
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
        if (!fd_tab)
            return NULL;
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    pipe_condvar_init(ws->cs_queue_empty);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    /* The winsys was inserted into fd_tab before initialization finished;
     * remove it again so the table doesn't keep a dangling pointer. */
    util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
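/* Shape of the consumer thread started above (radeon_drm_cs_emit_ioctl),
 * sketched to show why radeon_winsys_destroy can stop it with just
 * "kill_thread = 1" plus one semaphore signal: the thread blocks on
 * cs_queued and re-checks the flag each time it wakes. This is a simplified
 * reconstruction of the loop's control flow, not the real emit code. */

PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;

    while (1) {
        /* Sleep until a CS is queued or destroy() signals us. */
        pipe_semaphore_wait(&ws->cs_queued);
        if (ws->kill_thread)
            break;

        /* Pop the next CS from ws->cs_stack (under ws->cs_stack_lock),
         * submit it with the CS ioctl, and signal ws->cs_queue_empty
         * once the stack drains so flushers can proceed. */
    }
    return 0;
}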