static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws,
                                                     enum ring_type ring_type,
                                                     struct radeon_winsys_cs_handle *trace_buf)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }

    pipe_semaphore_init(&cs->flush_completed, 0);

    cs->ws = ws;
    cs->trace_buf = (struct radeon_bo*)trace_buf;

    /* csc1/csc2 are double-buffered: one CS is filled while the other is being submitted. */
    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.buf = cs->csc->buf;
    cs->base.ring_type = ring_type;

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}
static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }

    pipe_semaphore_init(&cs->flush_queued, 0);
    pipe_semaphore_init(&cs->flush_completed, 0);

    cs->ws = ws;

    /* csc1/csc2 are double-buffered: one CS is filled while the other is being submitted. */
    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.buf = cs->csc->buf;

    p_atomic_inc(&ws->num_cs);

    /* Submit command streams from a separate thread when more than one CPU is available. */
    if (cs->ws->num_cpus > 1 && debug_get_option_thread())
        cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs);

    return &cs->base;
}
static struct radeon_winsys_cs *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx,
                     struct radeon_winsys_cs_handle *trace_buf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)ctx;
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }

    pipe_semaphore_init(&cs->flush_completed, 1);

    cs->ws = ws;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;
    cs->trace_buf = (struct radeon_bo*)trace_buf;

    /* csc1/csc2 are double-buffered: one CS is filled while the other is being submitted. */
    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.buf = cs->csc->buf;
    cs->base.ring_type = ring_type;
    cs->base.max_dw = ARRAY_SIZE(cs->csc->buf);

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}
static struct radeon_cmdbuf *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx,
                     bool stop_exec_on_failure)
{
    struct radeon_drm_winsys *ws = ((struct radeon_ctx*)ctx)->ws;
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }

    util_queue_fence_init(&cs->flush_completed);

    cs->ws = ws;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;

    /* csc1/csc2 are double-buffered: one CS is filled while the other is being submitted. */
    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.current.buf = cs->csc->buf;
    cs->base.current.max_dw = ARRAY_SIZE(cs->csc->buf);
    cs->ring_type = ring_type;

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}