static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	/* Bail out before the parts[] initializer below dereferences sscreen. */
	if (!sscreen)
		return;

	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->vs_epilogs,
		sscreen->tcs_epilogs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen->b.ws->unref(sscreen->b.ws))
		return;

	/* Free shader parts: each entry is a singly linked list of
	 * si_shader_part objects, each owning a compiled binary. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			radeon_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	pipe_mutex_destroy(sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);
	r600_destroy_common_screen(&sscreen->b);
}
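/* The destroy paths in this listing all delegate per-binary cleanup to
 * radeon_shader_binary_clean(), whose definition is not shown here.  The
 * sketch below illustrates the expected shape of that helper, assuming
 * struct radeon_shader_binary owns heap-allocated code, config, rodata,
 * symbol-offset, relocation and disassembly buffers; the exact field list
 * is an assumption for illustration, not quoted driver source. */
void radeon_shader_binary_clean(struct radeon_shader_binary *b)
{
	if (!b)
		return;

	/* Free every buffer the binary owns; the struct itself is embedded
	 * in its parent object (shader part, compute shader, ...) and is
	 * not freed here. */
	FREE(b->code);
	FREE(b->config);
	FREE(b->rodata);
	FREE(b->global_symbol_offsets);
	FREE(b->relocs);
	FREE(b->disasm_string);
}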
void evergreen_delete_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = state;

	COMPUTE_DBG(ctx->screen, "*** evergreen_delete_compute_state\n");

	if (!shader)
		return;

#ifdef HAVE_OPENCL
#if HAVE_LLVM < 0x0306
	for (unsigned i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];

		LLVMDisposeModule(kernel->llvm_module);
	}
	FREE(shader->kernels);
	LLVMContextDispose(shader->llvm_ctx);
#else
	radeon_shader_binary_clean(&shader->binary);
	r600_destroy_shader(&shader->bc);

	/* TODO: destroy shader->code_bo and shader->const_bo;
	 * we'll need something like r600_buffer_free. */
#endif
#endif

	FREE(shader);
}
static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_compute *shader = state;

	COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n");

	if (!shader)
		return;

	radeon_shader_binary_clean(&shader->binary);
	r600_destroy_shader(&shader->bc);

	/* TODO: destroy shader->code_bo and shader->const_bo;
	 * we'll need something like r600_buffer_free. */

	FREE(shader);
}
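/* One possible way to address the TODO in evergreen_delete_compute_state
 * above, sketched under the assumption that code_bo and const_bo are
 * r600_resource buffers whose first member is a pipe_resource: drop the
 * last reference through gallium's pipe_resource_reference() (declared in
 * util/u_inlines.h).  The helper name r600_compute_free_buffers is made up
 * for illustration; this is a sketch, not necessarily how the driver
 * eventually resolved the TODO. */
static void r600_compute_free_buffers(struct r600_pipe_compute *shader)
{
	/* Releasing the last reference destroys the underlying buffer. */
	pipe_resource_reference((struct pipe_resource **)&shader->code_bo, NULL);
	pipe_resource_reference((struct pipe_resource **)&shader->const_bo, NULL);
}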
static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	/* Bail out before the parts[] initializer below dereferences sscreen. */
	if (!sscreen)
		return;

	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->vs_epilogs,
		sscreen->tcs_epilogs,
		sscreen->gs_prologs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen->b.ws->unref(sscreen->b.ws))
		return;

	if (util_queue_is_initialized(&sscreen->shader_compiler_queue))
		util_queue_destroy(&sscreen->shader_compiler_queue);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm); i++)
		if (sscreen->tm[i])
			LLVMDisposeTargetMachine(sscreen->tm[i]);

	/* Free shader parts. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			radeon_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	pipe_mutex_destroy(sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);
	r600_destroy_common_screen(&sscreen->b);
}
unsigned r600_llvm_compile(
	LLVMModuleRef mod,
	enum radeon_family family,
	struct r600_bytecode *bc,
	boolean *use_kill,
	unsigned dump,
	struct pipe_debug_callback *debug)
{
	unsigned r;
	struct radeon_shader_binary binary;
	const char *gpu_family = r600_get_llvm_processor_name(family);

	radeon_shader_binary_init(&binary);

	if (dump)
		LLVMDumpModule(mod);

	r = radeon_llvm_compile(mod, &binary, gpu_family, NULL, debug);

	/* Translate to r600 bytecode only if the LLVM compile succeeded. */
	if (!r)
		r = r600_create_shader(bc, &binary, use_kill);

	radeon_shader_binary_clean(&binary);
	return r;
}