void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
	void *p;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header *header;
	const unsigned char *code;

	COMPUTE_DBG("*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; /* TODO: assert it */
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);
	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif

	/* Upload the compiled bytecode to a VRAM buffer so the CP can
	 * fetch it. */
	shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							shader->bc.ndw * 4);

	p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
							PIPE_TRANSFER_WRITE);

	memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);

	ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);

	return shader;
}
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header *header;
	const unsigned char *code;
	unsigned i;

	COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; /* TODO: assert it */
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	/* Split the monolithic bitcode into one LLVM module per kernel. */
	shader->num_kernels = radeon_llvm_get_num_kernels(code,
							header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel),
							shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
							header->num_bytes);
	}
#endif

	return shader;
}
void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header *header;
	const unsigned char *code;

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	if (!ctx->screen->screen.get_param(&ctx->screen->screen,
							PIPE_CAP_COMPUTE)) {
		fprintf(stderr, "Compute is not supported\n");
		return NULL;
	}

	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; /* TODO: assert it */
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);
	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif

	return shader;
}
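/* Caller-side sketch (not from this file): how a state tracker such as
 * clover might feed evergreen_create_compute_state() through the Gallium
 * interface. It assumes the layout the function above parses: a
 * struct pipe_llvm_program_header (whose num_bytes field gives the bitcode
 * size) immediately followed by the LLVM bitcode. The function name and
 * the llvm_bc/llvm_bc_size inputs are hypothetical.
 */
static void *example_upload_kernel(struct pipe_context *pipe,
				   const void *llvm_bc, uint32_t llvm_bc_size)
{
	struct pipe_compute_state cso;
	struct pipe_llvm_program_header header;
	char *prog;
	void *state;

	header.num_bytes = llvm_bc_size;

	/* Pack header + bitcode into one allocation, as cso.prog expects. */
	prog = malloc(sizeof(header) + llvm_bc_size);
	memcpy(prog, &header, sizeof(header));
	memcpy(prog + sizeof(header), llvm_bc, llvm_bc_size);

	memset(&cso, 0, sizeof(cso));
	cso.prog = prog;
	cso.req_local_mem = 0;		/* bytes of LDS the kernel needs */
	cso.req_private_mem = 0;	/* bytes of private memory */
	cso.req_input_mem = 0;		/* bytes of kernel arguments */

	state = pipe->create_compute_state(pipe, &cso);
	free(prog);
	return state;
}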
static void compute_emit_cs(struct r600_context *ctx,
		const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;
	struct r600_resource *onebo = NULL;
	struct r600_pipe_state *cb_state;
	struct evergreen_compute_resource *resources =
					ctx->cs_shader_state.shader->resources;

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);

	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

	/* Emit cb_state */
	cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
	r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				/* special case for textures */
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}
static void compute_emit_cs(struct r600_context *ctx,
		const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	unsigned flush_flags = 0;
	int i;
	struct r600_resource *onebo = NULL;
	struct evergreen_compute_resource *resources =
					ctx->cs_shader_state.shader->resources;

	/* make sure that the gfx ring is only one active */
	if (ctx->rings.dma.cs) {
		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
	}

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);

	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
						       (struct r600_resource*)cb->base.texture,
						       RADEON_USAGE_READWRITE);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			r600_write_value(cs, reloc);
		}

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, reloc);
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				/* special case for textures */
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
	ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
	if (ctx->keep_tiling_flags) {
		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);

	ctx->flags = 0;

	COMPUTE_DBG(ctx->screen, "shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG(ctx->screen, "...\n");
}
static void compute_emit_cs(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;

	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	struct r600_resource *onebo = NULL;

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (ctx->cs_shader->resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
				if (ctx->cs_shader->resources[i].do_reloc[j]) {
					assert(ctx->cs_shader->resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}

				cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
			}

			if (ctx->cs_shader->resources[i].bo) {
				onebo = ctx->cs_shader->resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					ctx->cs_shader->resources[i].bo,
					ctx->cs_shader->resources[i].usage);

				/* special case for textures */
				if (ctx->cs_shader->resources[i].do_reloc
					[ctx->cs_shader->resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}

				evergreen_set_buffer_sync(ctx, ctx->cs_shader->resources[i].bo,
					ctx->cs_shader->resources[i].coher_bo_size,
					ctx->cs_shader->resources[i].flags,
					ctx->cs_shader->resources[i].usage);
			}
		}
	}

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}
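/* Dispatch-side sketch (not from this file): the path that ends in
 * compute_emit_cs(). A state tracker binds the compute state created
 * above, then calls launch_grid() with a block (work-group) layout and a
 * grid layout; the driver's launch_grid hook records the kernel inputs
 * and calls compute_emit_cs(). This assumes the Gallium launch_grid
 * interface of this era, which takes a kernel selector `pc` and an
 * opaque `input` blob; the function name and argument values are
 * hypothetical.
 */
static void example_launch(struct pipe_context *pipe, void *compute_state,
			   const void *kernel_args)
{
	/* 16x16x1 threads per work group, 8x8x1 groups in the grid. */
	static const uint block_layout[3] = { 16, 16, 1 };
	static const uint grid_layout[3] = { 8, 8, 1 };

	pipe->bind_compute_state(pipe, compute_state);
	pipe->launch_grid(pipe, block_layout, grid_layout,
			  0 /* pc: first kernel */, kernel_args);
}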