bool init(void)
{
    if (!ac_init_llvm_compiler(&llvm_info, true, family, tm_options))
        return false;

    passes = ac_create_llvm_passes(llvm_info.tm);
    if (!passes)
        return false;

    return true;
}
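The members that init() touches (llvm_info, family, tm_options, passes) are not declared in the snippet above; they belong to the per-thread wrapper object that radv_compile_to_binary() iterates over further down. A minimal sketch of that wrapper, assuming this member layout; the constructor, destructor, and access specifiers are illustrative rather than the driver's exact declaration:

/* Assumed shape of the per-thread compiler wrapper that init() belongs to.
 * The destructor releases the pass manager and target machine created by
 * init() and ac_init_llvm_compiler(). */
class radv_llvm_per_thread_info {
public:
    radv_llvm_per_thread_info(enum radeon_family arg_family,
                              enum ac_target_machine_options arg_tm_options)
        : family(arg_family), tm_options(arg_tm_options), passes(nullptr) {}

    ~radv_llvm_per_thread_info()
    {
        ac_destroy_llvm_passes(passes);
        ac_destroy_llvm_compiler(&llvm_info);
    }

    bool init(void);  /* as shown above */
    bool compile_to_memory_buffer(LLVMModuleRef module,
                                  struct ac_shader_binary *binary);

    /* Public so callers can match entries by llvm_info.tm. */
    struct ac_llvm_compiler llvm_info;

private:
    enum radeon_family family;
    enum ac_target_machine_options tm_options;
    struct ac_compiler_passes *passes;
};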
static void si_init_compiler(struct si_screen *sscreen,
                             struct ac_llvm_compiler *compiler)
{
    /* Only create the less-optimizing version of the compiler on APUs
     * predating Ryzen (Raven). */
    bool create_low_opt_compiler = !sscreen->info.has_dedicated_vram &&
                                   sscreen->info.chip_class <= VI;

    enum ac_target_machine_options tm_options =
        (sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
        (sscreen->debug_flags & DBG(GISEL) ? AC_TM_ENABLE_GLOBAL_ISEL : 0) |
        (sscreen->info.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
        (sscreen->info.chip_class < GFX9 ? AC_TM_FORCE_DISABLE_XNACK : 0) |
        (!sscreen->llvm_has_working_vgpr_indexing ?
             AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0) |
        (sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0) |
        (create_low_opt_compiler ? AC_TM_CREATE_LOW_OPT : 0);

    ac_init_llvm_once();
    ac_init_llvm_compiler(compiler, sscreen->info.family, tm_options);
    compiler->passes = ac_create_llvm_passes(compiler->tm);

    if (compiler->low_opt_tm)
        compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}
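Each target machine and pass-manager pair created here needs a matching teardown when the screen is destroyed. A minimal sketch of that cleanup, assuming the counterpart helpers ac_destroy_llvm_passes() and ac_destroy_llvm_compiler() exist alongside the init functions shown here; the si_destroy_compiler() wrapper name is illustrative:

/* Hypothetical teardown mirroring si_init_compiler(): release both pass
 * managers (the low-opt one only if it was created) before destroying the
 * target machines themselves. */
static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
    ac_destroy_llvm_passes(compiler->passes);
    if (compiler->low_opt_passes)
        ac_destroy_llvm_passes(compiler->low_opt_passes);
    ac_destroy_llvm_compiler(compiler);
}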
bool radv_compile_to_binary(struct ac_llvm_compiler *info,
                            LLVMModuleRef module,
                            struct ac_shader_binary *binary)
{
    radv_llvm_per_thread_info *thread_info = nullptr;

    /* Find the per-thread compiler instance that owns this target machine. */
    for (auto &I : radv_llvm_per_thread_list) {
        if (I.llvm_info.tm == info->tm) {
            thread_info = &I;
            break;
        }
    }

    /* No per-thread instance registered: build a one-shot pass manager,
     * compile, and tear it down again. */
    if (!thread_info) {
        struct ac_compiler_passes *passes = ac_create_llvm_passes(info->tm);
        bool ret = ac_compile_module_to_binary(passes, module, binary);
        ac_destroy_llvm_passes(passes);
        return ret;
    }

    return thread_info->compile_to_memory_buffer(module, binary);
}
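radv_compile_to_binary() assumes something has already added an entry whose llvm_info.tm matches the caller's target machine to radv_llvm_per_thread_list. A sketch of how that registration might look, assuming a thread-local std::list and reusing the wrapper class sketched earlier; the helper name and signature are assumptions, not necessarily the driver's exact API:

#include <list>

/* Assumed storage: one list of compiler instances per thread, so entries
 * never race with compiles running on other threads. */
static thread_local std::list<radv_llvm_per_thread_info> radv_llvm_per_thread_list;

bool radv_init_llvm_compiler(struct ac_llvm_compiler *info,
                             bool thread_compiler,
                             enum radeon_family family,
                             enum ac_target_machine_options tm_options)
{
    if (thread_compiler) {
        radv_llvm_per_thread_list.emplace_back(family, tm_options);
        radv_llvm_per_thread_info &tinfo = radv_llvm_per_thread_list.back();

        if (!tinfo.init()) {
            radv_llvm_per_thread_list.pop_back();
            return false;
        }

        /* Hand the caller the thread-local compiler state so that
         * radv_compile_to_binary() can find this entry again via tm. */
        *info = tinfo.llvm_info;
        return true;
    }

    /* Non-threaded fallback: plain one-off compiler, matching the call
     * shape used by init() above. */
    return ac_init_llvm_compiler(info, true, family, tm_options);
}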