/*
 * Allocate and initialize a radeon command stream for the given ring.
 *
 * Two CS contexts (csc1/csc2) are created so one can be filled while the
 * other is being submitted; csc1 starts out as the "current" buffer.
 * The flush callback and its opaque context are stored for later use.
 *
 * Returns the embedded radeon_cmdbuf on success, NULL on allocation or
 * context-init failure (nothing is leaked on the error paths).
 */
static struct radeon_cmdbuf *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx,
                     bool stop_exec_on_failure)
{
   struct radeon_drm_winsys *ws = ((struct radeon_ctx *)ctx)->ws;
   struct radeon_drm_cs *cs = CALLOC_STRUCT(radeon_drm_cs);

   if (!cs)
      return NULL;

   util_queue_fence_init(&cs->flush_completed);

   cs->ws = ws;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;

   if (!radeon_init_cs_context(&cs->csc1, cs->ws))
      goto fail_first;
   if (!radeon_init_cs_context(&cs->csc2, cs->ws))
      goto fail_second;

   /* Set the first command buffer as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;
   cs->base.current.buf = cs->csc->buf;
   cs->base.current.max_dw = ARRAY_SIZE(cs->csc->buf);
   cs->ring_type = ring_type;

   p_atomic_inc(&ws->num_cs);
   return &cs->base;

fail_second:
   radeon_destroy_cs_context(&cs->csc1);
fail_first:
   FREE(cs);
   return NULL;
}
static void *si_create_compute_state( struct pipe_context *ctx, const struct pipe_compute_state *cso) { struct si_context *sctx = (struct si_context *)ctx; struct si_screen *sscreen = (struct si_screen *)ctx->screen; struct si_compute *program = CALLOC_STRUCT(si_compute); pipe_reference_init(&program->reference, 1); program->screen = (struct si_screen *)ctx->screen; program->ir_type = cso->ir_type; program->local_size = cso->req_local_mem; program->private_size = cso->req_private_mem; program->input_size = cso->req_input_mem; program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE; if (cso->ir_type != PIPE_SHADER_IR_NATIVE) { if (cso->ir_type == PIPE_SHADER_IR_TGSI) { program->ir.tgsi = tgsi_dup_tokens(cso->prog); if (!program->ir.tgsi) { FREE(program); return NULL; } } else { assert(cso->ir_type == PIPE_SHADER_IR_NIR); program->ir.nir = (struct nir_shader *) cso->prog; } program->compiler_ctx_state.debug = sctx->debug; program->compiler_ctx_state.is_debug_context = sctx->is_debug; p_atomic_inc(&sscreen->num_shaders_created); util_queue_fence_init(&program->ready); struct util_async_debug_callback async_debug; bool wait = (sctx->debug.debug_message && !sctx->debug.async) || sctx->is_debug || si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE); if (wait) { u_async_debug_init(&async_debug); program->compiler_ctx_state.debug = async_debug.base; } util_queue_add_job(&sscreen->shader_compiler_queue, program, &program->ready, si_create_compute_state_async, NULL); if (wait) { util_queue_fence_wait(&program->ready); u_async_debug_drain(&async_debug, &sctx->debug); u_async_debug_cleanup(&async_debug); } } else { const struct pipe_llvm_program_header *header; const char *code; header = cso->prog; code = cso->prog + sizeof(struct pipe_llvm_program_header); ac_elf_read(code, header->num_bytes, &program->shader.binary); if (program->use_code_object_v2) { const amd_kernel_code_t *code_object = si_compute_get_code_object(program, 0); code_object_to_config(code_object, 
&program->shader.config); } else { si_shader_binary_read_config(&program->shader.binary, &program->shader.config, 0); } si_shader_dump(sctx->screen, &program->shader, &sctx->debug, PIPE_SHADER_COMPUTE, stderr, true); if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) { fprintf(stderr, "LLVM failed to upload shader\n"); FREE(program); return NULL; } } return program; }
static void *si_create_compute_state( struct pipe_context *ctx, const struct pipe_compute_state *cso) { struct si_context *sctx = (struct si_context *)ctx; struct si_screen *sscreen = (struct si_screen *)ctx->screen; struct si_compute *program = CALLOC_STRUCT(si_compute); program->screen = (struct si_screen *)ctx->screen; program->ir_type = cso->ir_type; program->local_size = cso->req_local_mem; program->private_size = cso->req_private_mem; program->input_size = cso->req_input_mem; program->use_code_object_v2 = HAVE_LLVM >= 0x0400 && cso->ir_type == PIPE_SHADER_IR_NATIVE; if (cso->ir_type == PIPE_SHADER_IR_TGSI) { program->tokens = tgsi_dup_tokens(cso->prog); if (!program->tokens) { FREE(program); return NULL; } program->compiler_ctx_state.tm = sctx->tm; program->compiler_ctx_state.debug = sctx->b.debug; program->compiler_ctx_state.is_debug_context = sctx->is_debug; p_atomic_inc(&sscreen->b.num_shaders_created); util_queue_fence_init(&program->ready); if ((sctx->b.debug.debug_message && !sctx->b.debug.async) || sctx->is_debug || r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE)) si_create_compute_state_async(program, -1); else util_queue_add_job(&sscreen->shader_compiler_queue, program, &program->ready, si_create_compute_state_async, NULL); } else { const struct pipe_llvm_program_header *header; const char *code; header = cso->prog; code = cso->prog + sizeof(struct pipe_llvm_program_header); ac_elf_read(code, header->num_bytes, &program->shader.binary); if (program->use_code_object_v2) { const amd_kernel_code_t *code_object = si_compute_get_code_object(program, 0); code_object_to_config(code_object, &program->shader.config); } else { si_shader_binary_read_config(&program->shader.binary, &program->shader.config, 0); } si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug, PIPE_SHADER_COMPUTE, stderr, true); if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) { fprintf(stderr, "LLVM failed to upload shader\n"); FREE(program); return NULL; } 
} return program; }