static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws, enum ring_type ring_type, struct radeon_winsys_cs_handle *trace_buf) { struct radeon_drm_winsys *ws = radeon_drm_winsys(rws); struct radeon_drm_cs *cs; cs = CALLOC_STRUCT(radeon_drm_cs); if (!cs) { return NULL; } pipe_semaphore_init(&cs->flush_completed, 0); cs->ws = ws; cs->trace_buf = (struct radeon_bo*)trace_buf; if (!radeon_init_cs_context(&cs->csc1, cs->ws)) { FREE(cs); return NULL; } if (!radeon_init_cs_context(&cs->csc2, cs->ws)) { radeon_destroy_cs_context(&cs->csc1); FREE(cs); return NULL; } /* Set the first command buffer as current. */ cs->csc = &cs->csc1; cs->cst = &cs->csc2; cs->base.buf = cs->csc->buf; cs->base.ring_type = ring_type; p_atomic_inc(&ws->num_cs); return &cs->base; }
static struct pb_buffer * radeon_winsys_bo_create(struct radeon_winsys *rws, unsigned size, unsigned alignment, unsigned bind, enum radeon_bo_domain domain) { struct radeon_drm_winsys *ws = radeon_drm_winsys(rws); struct pb_desc desc; struct pb_manager *provider; struct pb_buffer *buffer; memset(&desc, 0, sizeof(desc)); desc.alignment = alignment; desc.usage = get_pb_usage_from_create_flags(domain); /* Assign a buffer manager. */ if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER | PIPE_BIND_CONSTANT_BUFFER)) provider = ws->cman; else provider = ws->kman; buffer = provider->create_buffer(provider, size, &desc); if (!buffer) return NULL; return (struct pb_buffer*)buffer; }
static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws) { struct radeon_drm_winsys *ws = radeon_drm_winsys(rws); struct radeon_drm_cs *cs; cs = CALLOC_STRUCT(radeon_drm_cs); if (!cs) { return NULL; } pipe_semaphore_init(&cs->flush_queued, 0); pipe_semaphore_init(&cs->flush_completed, 0); cs->ws = ws; if (!radeon_init_cs_context(&cs->csc1, cs->ws)) { FREE(cs); return NULL; } if (!radeon_init_cs_context(&cs->csc2, cs->ws)) { radeon_destroy_cs_context(&cs->csc1); FREE(cs); return NULL; } /* Set the first command buffer as current. */ cs->csc = &cs->csc1; cs->cst = &cs->csc2; cs->base.buf = cs->csc->buf; p_atomic_inc(&ws->num_cs); if (cs->ws->num_cpus > 1 && debug_get_option_thread()) cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs); return &cs->base; }
/* Import a buffer object shared by another process via its GEM flink name.
 *
 * whandle->handle is the flink name; on success the (optional) out
 * parameters receive the stride recorded in the handle and the buffer
 * size reported by the kernel.  Returns NULL on failure.
 */
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws, struct winsys_handle *whandle, unsigned *stride, unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        /* NOTE(review): this assumes the cached bo's refcount cannot have
         * already dropped to zero while its destructor waits on the mutex —
         * verify that bo destruction removes the entry under this same
         * mutex before the refcount can be observed here. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO: translate the flink name into a handle local to our fd. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;

    pipe_mutex_init(bo->map_mutex);

    /* Publish the pair while still holding the mutex, so a concurrent
     * import of the same name finds this bo instead of opening a second one.
     * NOTE(review): the return value of util_hash_table_set is ignored; on
     * insertion failure a later import would create a duplicate bo —
     * confirm whether that is acceptable here. */
    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}