/*
 * Make sure previous submissions of this CS are completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_cmdbuf *rcs)
{
   struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   if (util_queue_is_initialized(&cs->ws->cs_queue))
      util_queue_fence_wait(&cs->flush_completed);
}
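
/*
 * Illustration only, not driver code: a minimal, self-contained sketch of
 * the util_queue fence pattern used by radeon_drm_cs_sync_flush() above.
 * A job queued with util_queue_add_job() has its fence signaled when the
 * job finishes, so any thread can block on completion with
 * util_queue_fence_wait(). All "example_*" names are hypothetical.
 */
#include "util/u_queue.h"

struct example_job {
   struct util_queue_fence fence;
   int input, result;
};

/* Runs on a queue thread; stands in for the real flush ioctl. */
static void example_execute(void *job, int thread_index)
{
   struct example_job *j = job;
   j->result = j->input * 2;
}

static void example_submit_and_sync(struct util_queue *queue)
{
   struct example_job job = { .input = 21 };

   util_queue_fence_init(&job.fence);
   /* The queue unsignals the fence, runs the job, then signals it. */
   util_queue_add_job(queue, &job, &job.fence, example_execute, NULL);

   /* ... other work can overlap with the job here ... */

   /* Same wait as in radeon_drm_cs_sync_flush(). */
   util_queue_fence_wait(&job.fence);
   util_queue_fence_destroy(&job.fence);
}
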
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_compute *program = (struct si_compute *)state;

   sctx->cs_shader_state.program = program;
   if (!program)
      return;

   /* Wait because we need active slot usage masks. */
   if (program->ir_type != PIPE_SHADER_IR_NATIVE)
      util_queue_fence_wait(&program->ready);

   si_set_active_descriptors(sctx,
                             SI_DESCS_FIRST_COMPUTE +
                             SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
                             program->active_const_and_shader_buffers);
   si_set_active_descriptors(sctx,
                             SI_DESCS_FIRST_COMPUTE +
                             SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
                             program->active_samplers_and_images);
}
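
/*
 * Illustration only: the wait in si_bind_compute_state() exists because
 * the async compile job (queued in si_create_compute_state() below) fills
 * in the active slot masks before its fence is signaled. A hypothetical
 * producer looks roughly like this; the fence signal publishes the writes:
 */
static void example_compile_job(void *job, int thread_index)
{
   struct si_compute *program = job;

   /* ... compile the shader ... */
   program->active_const_and_shader_buffers = 0; /* stand-in results */
   program->active_samplers_and_images = 0;
   /* Returning from the job signals program->ready; only after
    * util_queue_fence_wait(&program->ready) may these fields be read. */
}
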
static void *si_create_compute_state(struct pipe_context *ctx,
                                     const struct pipe_compute_state *cso)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct si_compute *program = CALLOC_STRUCT(si_compute);

   /* Guard against allocation failure before dereferencing. */
   if (!program)
      return NULL;

   pipe_reference_init(&program->reference, 1);
   program->screen = (struct si_screen *)ctx->screen;
   program->ir_type = cso->ir_type;
   program->local_size = cso->req_local_mem;
   program->private_size = cso->req_private_mem;
   program->input_size = cso->req_input_mem;
   program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

   if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
      if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
         program->ir.tgsi = tgsi_dup_tokens(cso->prog);
         if (!program->ir.tgsi) {
            FREE(program);
            return NULL;
         }
      } else {
         assert(cso->ir_type == PIPE_SHADER_IR_NIR);
         program->ir.nir = (struct nir_shader *)cso->prog;
      }

      program->compiler_ctx_state.debug = sctx->debug;
      program->compiler_ctx_state.is_debug_context = sctx->is_debug;
      p_atomic_inc(&sscreen->num_shaders_created);
      util_queue_fence_init(&program->ready);

      struct util_async_debug_callback async_debug;
      bool wait =
         (sctx->debug.debug_message && !sctx->debug.async) ||
         sctx->is_debug ||
         si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);

      if (wait) {
         u_async_debug_init(&async_debug);
         program->compiler_ctx_state.debug = async_debug.base;
      }

      util_queue_add_job(&sscreen->shader_compiler_queue, program,
                         &program->ready, si_create_compute_state_async,
                         NULL);

      if (wait) {
         util_queue_fence_wait(&program->ready);
         u_async_debug_drain(&async_debug, &sctx->debug);
         u_async_debug_cleanup(&async_debug);
      }
   } else {
      const struct pipe_llvm_program_header *header;
      const char *code;

      header = cso->prog;
      code = (const char *)cso->prog +
             sizeof(struct pipe_llvm_program_header);

      ac_elf_read(code, header->num_bytes, &program->shader.binary);
      if (program->use_code_object_v2) {
         const amd_kernel_code_t *code_object =
            si_compute_get_code_object(program, 0);
         code_object_to_config(code_object, &program->shader.config);
      } else {
         si_shader_binary_read_config(&program->shader.binary,
                                      &program->shader.config, 0);
      }
      si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
                     PIPE_SHADER_COMPUTE, stderr, true);
      if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
         fprintf(stderr, "LLVM failed to upload shader\n");
         FREE(program);
         return NULL;
      }
   }

   return program;
}
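
/*
 * Illustration only: a hedged sketch of how a state tracker might drive
 * the two entry points above through the pipe_context vtable.
 * "example_create_and_bind" and the token pointer are hypothetical.
 */
static void *example_create_and_bind(struct pipe_context *ctx,
                                     const struct tgsi_token *tokens)
{
   struct pipe_compute_state cso = {
      .ir_type = PIPE_SHADER_IR_TGSI,
      .prog = tokens,
      .req_local_mem = 0,
      .req_private_mem = 0,
      .req_input_mem = 0,
   };
   /* Kicks off the async compile and returns without waiting (unless
    * shader dumping or a synchronous debug callback forces the wait). */
   void *cs = ctx->create_compute_state(ctx, &cso);

   /* Binding blocks on program->ready, so the slot masks are valid. */
   if (cs)
      ctx->bind_compute_state(ctx, cs);
   return cs;
}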