void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}
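Both AMDGPU snippets in this collection hand createInternalizePass a "must preserve" predicate named mustPreserveGV. The sketch below is illustrative only, not the exact in-tree implementation: it keeps declarations and kernel entry points externally visible and internalizes the rest.

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
using namespace llvm;

// Illustrative predicate for createInternalizePass: return true for every
// global that must keep its linkage/visibility. This sketch preserves
// declarations and AMDGPU kernel entry points; the real AMDGPU predicate
// may differ in detail.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const auto *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() ||
           F->getCallingConv() == CallingConv::AMDGPU_KERNEL;
  return !GV.use_empty();
}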
void llvm::addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder) {
  Builder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
                       addCoroutineEarlyPasses);
  Builder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                       addCoroutineOpt0Passes);
  Builder.addExtension(PassManagerBuilder::EP_CGSCCOptimizerLate,
                       addCoroutineSCCPasses);
  Builder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                       addCoroutineScalarOptimizerPasses);
  Builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                       addCoroutineOptimizerLastPasses);
}
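The registrations above are inert until a builder populates concrete pass managers. A minimal, hypothetical driver for an older LLVM with the legacy pass manager (the helper name runCoroutinePipeline is made up) might look like this:

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
using namespace llvm;

// Hypothetical helper: configure a legacy pipeline with the coroutine
// extension points registered, then run it over a module.
static void runCoroutinePipeline(Module &M) {
  PassManagerBuilder Builder;
  Builder.OptLevel = 2;
  addCoroutinePassesToExtensionPoints(Builder); // registers the callbacks above

  legacy::FunctionPassManager FPM(&M);
  legacy::PassManager MPM;
  Builder.populateFunctionPassManager(FPM); // EP_EarlyAsPossible callbacks fire here
  Builder.populateModulePassManager(MPM);   // most other extension points fire here

  FPM.doInitialization();
  for (Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F);
  FPM.doFinalization();
  MPM.run(M);
}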
/**
 * Adds a set of optimization passes to the given module/function pass
 * managers based on the given optimization and size reduction levels.
 *
 * The selection mirrors Clang behavior and is based on LLVM's
 * PassManagerBuilder.
 */
static void addOptimizationPasses(PassManagerBase &mpm, FunctionPassManager &fpm,
                                  unsigned optLevel, unsigned sizeLevel) {
  fpm.add(createVerifierPass()); // Verify that input is correct

  PassManagerBuilder builder;
  builder.OptLevel = optLevel;
  builder.SizeLevel = sizeLevel;

  if (willInline()) {
    unsigned threshold = 225;
    if (sizeLevel == 1) // -Os
      threshold = 75;
    else if (sizeLevel == 2) // -Oz
      threshold = 25;
    if (optLevel > 2)
      threshold = 275;
    builder.Inliner = createFunctionInliningPass(threshold);
  } else {
    builder.Inliner = createAlwaysInlinerPass();
  }
  builder.DisableSimplifyLibCalls = disableSimplifyLibCalls;
  builder.DisableUnitAtATime = !unitAtATime;
  builder.DisableUnrollLoops = optLevel == 0;
  /* builder.Vectorize is set in ctor from command line switch */

  if (!disableLangSpecificPasses) {
    if (!disableSimplifyDruntimeCalls)
      builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
                           addSimplifyDRuntimeCallsPass);

#if USE_METADATA
    if (!disableGCToStack)
      builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
                           addGarbageCollect2StackPass);
#endif // USE_METADATA
  }

#if LDC_LLVM_VER >= 301
  // EP_OptimizerLast does not exist in LLVM 3.0, add it manually below.
  builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                       addStripExternalsPass);
#endif

  builder.populateFunctionPassManager(fpm);
  builder.populateModulePassManager(mpm);

#if LDC_LLVM_VER < 301
  addStripExternalsPass(builder, mpm);
#endif
}
void NVPTXTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      PM.add(createNVVMReflectPass());
      PM.add(createNVVMIntrRangePass(Subtarget.getSmVersion()));
    });
}
void LLVMZigOptimizeModule(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref) {
    TargetMachine* target_machine = reinterpret_cast<TargetMachine*>(targ_machine_ref);
    Module* module = unwrap(module_ref);
    TargetLibraryInfoImpl tlii(Triple(module->getTargetTriple()));

    PassManagerBuilder *PMBuilder = new PassManagerBuilder();
    PMBuilder->OptLevel = target_machine->getOptLevel();
    PMBuilder->SizeLevel = 0;
    PMBuilder->BBVectorize = true;
    PMBuilder->SLPVectorize = true;
    PMBuilder->LoopVectorize = true;
    PMBuilder->DisableUnitAtATime = false;
    PMBuilder->DisableUnrollLoops = false;
    PMBuilder->MergeFunctions = true;
    PMBuilder->PrepareForLTO = true;
    PMBuilder->RerollLoops = true;
    PMBuilder->addExtension(PassManagerBuilder::EP_EarlyAsPossible, addAddDiscriminatorsPass);
    PMBuilder->LibraryInfo = &tlii;
    PMBuilder->Inliner = createFunctionInliningPass(PMBuilder->OptLevel, PMBuilder->SizeLevel);

    // Set up the per-function pass manager.
    legacy::FunctionPassManager *FPM = new legacy::FunctionPassManager(module);
    FPM->add(createTargetTransformInfoWrapperPass(target_machine->getTargetIRAnalysis()));
#ifndef NDEBUG
    bool verify_module = true;
#else
    bool verify_module = false;
#endif
    if (verify_module) {
        FPM->add(createVerifierPass());
    }
    PMBuilder->populateFunctionPassManager(*FPM);

    // Set up the per-module pass manager.
    legacy::PassManager *MPM = new legacy::PassManager();
    MPM->add(createTargetTransformInfoWrapperPass(target_machine->getTargetIRAnalysis()));
    PMBuilder->populateModulePassManager(*MPM);

    // run per function optimization passes
    FPM->doInitialization();
    for (Function &F : *module)
        if (!F.isDeclaration())
            FPM->run(F);
    FPM->doFinalization();

    // run per module optimization passes
    MPM->run(*module);
}
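The snippet above registers a free function, addAddDiscriminatorsPass, rather than a lambda. Any callable with the extension signature works; a plausible definition is sketched below (the header that declares createAddDiscriminatorsPass varies across LLVM versions).

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h" // createAddDiscriminatorsPass; location varies by LLVM version
using namespace llvm;

// Extension callbacks take (const PassManagerBuilder &, legacy::PassManagerBase &)
// and simply add passes to the manager being populated.
static void addAddDiscriminatorsPass(const PassManagerBuilder &,
                                     legacy::PassManagerBase &PM) {
  PM.add(createAddDiscriminatorsPass());
}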
/// This routine adds optimization passes based on selected optimization level,
/// OptLevel.
///
/// OptLevel - Optimization Level
static void AddOptimizationPasses(legacy::PassManagerBase &MPM,
                                  legacy::FunctionPassManager &FPM,
                                  TargetMachine *TM, unsigned OptLevel,
                                  unsigned SizeLevel) {
  if (!NoVerify || VerifyEach)
    FPM.add(createVerifierPass()); // Verify that input is correct

  PassManagerBuilder Builder;
  Builder.OptLevel = OptLevel;
  Builder.SizeLevel = SizeLevel;

  if (DisableInline) {
    // No inlining pass
  } else if (OptLevel > 1) {
    Builder.Inliner = createFunctionInliningPass(OptLevel, SizeLevel);
  } else {
    Builder.Inliner = createAlwaysInlinerLegacyPass();
  }
  Builder.DisableUnitAtATime = !UnitAtATime;
  Builder.DisableUnrollLoops = (DisableLoopUnrolling.getNumOccurrences() > 0)
                                   ? DisableLoopUnrolling
                                   : OptLevel == 0;

  // This is final, unless there is a #pragma vectorize enable
  if (DisableLoopVectorization)
    Builder.LoopVectorize = false;
  // If option wasn't forced via cmd line (-vectorize-loops, -loop-vectorize)
  else if (!Builder.LoopVectorize)
    Builder.LoopVectorize = OptLevel > 1 && SizeLevel < 2;

  // When #pragma vectorize is on for SLP, do the same as above
  Builder.SLPVectorize =
      DisableSLPVectorization ? false : OptLevel > 1 && SizeLevel < 2;

  // Add target-specific passes that need to run as early as possible.
  if (TM)
    Builder.addExtension(
        PassManagerBuilder::EP_EarlyAsPossible,
        [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
          TM->addEarlyAsPossiblePasses(PM);
        });

  if (Coroutines)
    addCoroutinePassesToExtensionPoints(Builder);

  Builder.populateFunctionPassManager(FPM);
  Builder.populateModulePassManager(MPM);
}
// main - Entry point for the sync compiler.
//
int main(int argc, char **argv) {
  sys::PrintStackTraceOnErrorSignal();
  PrettyStackTraceProgram X(argc, argv);

  // Enable debug stream buffering.
  EnableDebugBuffering = true;

  LLVMContext &Context = getGlobalContext();
  llvm_shutdown_obj Y;  // Call llvm_shutdown() on exit.

  // Initialize target first, so that --version shows registered targets.
  LLVMInitializeVerilogBackendTarget();
  LLVMInitializeVerilogBackendTargetInfo();
  LLVMInitializeVerilogBackendTargetMC();

  cl::ParseCommandLineOptions(argc, argv, "llvm system compiler\n");

  SMDiagnostic Err;

  LuaScript *S = &scriptEngin();
  S->init();

  // Run the lua script.
  if (!S->runScriptFile(InputFilename, Err)) {
    Err.print(argv[0], errs());
    return 1;
  }
  S->updateStatus();

  // Load the module to be compiled...
  std::auto_ptr<Module> M;
  M.reset(ParseIRFile(S->getValue<std::string>("InputFile"), Err, Context));
  if (M.get() == 0) {
    Err.print(argv[0], errs());
    return 1;
  }
  Module &mod = *M.get();

  // TODO: Build the right triple.
  Triple TheTriple(mod.getTargetTriple());
  TargetOptions TO;
  std::auto_ptr<TargetMachine>
    target(TheVBackendTarget.createTargetMachine(TheTriple.getTriple(), "",
                                                 S->getDataLayout(), TO));

  // Build up all of the passes that we want to do to the module.
  PassManagerBuilder Builder;
  Builder.DisableUnrollLoops = true;
  Builder.LibraryInfo = new TargetLibraryInfo();
  Builder.LibraryInfo->disableAllFunctions();
  Builder.OptLevel = 3;
  Builder.SizeLevel = 2;
  Builder.DisableSimplifyLibCalls = true;
  Builder.Inliner = createHLSInlinerPass();
  Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
                       LoopOptimizerEndExtensionFn);

  PassManager Passes;
  Passes.add(new TargetData(*target->getTargetData()));

  // Add the immutable target-specific alias analysis ahead of all other AAs.
  Passes.add(createVAliasAnalysisPass(target->getIntrinsicInfo()));

  Passes.add(createVerifierPass());

  // This is the final bitcode, internalize it to expose more optimization
  // opportunities. Note that we should internalize it before SW/HW partition,
  // otherwise we may lose some information that helps the later internalize.
  Passes.add(createInternalizePass(true));

  // Perform Software/Hardware partition.
  Passes.add(createFunctionFilterPass(S->getOutputStream("SoftwareIROutput")));
  Passes.add(createGlobalDCEPass());

  // Optimize the hardware part.
  //Builder.populateFunctionPassManager(*FPasses);
  Builder.populateModulePassManager(Passes);
  Builder.populateLTOPassManager(Passes,
                                 /*Internalize*/ true,
                                 /*RunInliner*/ true);

  //PM.add(createPrintModulePass(&dbgs()));

  // We do not use the stream that is passed into addPassesToEmitFile.
  formatted_raw_ostream formatted_nulls(nulls());

  // Ask the target to add backend passes as necessary.
  target->addPassesToEmitFile(Passes, formatted_nulls,
                              TargetMachine::CGFT_Null,
                              false/*NoVerify*/);

  // Analyse the slack between registers.
  Passes.add(createCombPathDelayAnalysisPass());
  Passes.add(createVerilogASTWriterPass(S->getOutputStream("RTLOutput")));

  // Run some scripting passes.
  for (LuaScript::scriptpass_it I = S->passes_begin(), E = S->passes_end();
       I != E; ++I) {
    const luabind::object &o = *I;
    Pass *P = createScriptingPass(
      luabind::object_cast<std::string>(I.key()).c_str(),
      luabind::object_cast<std::string>(o["FunctionScript"]).c_str(),
      luabind::object_cast<std::string>(o["GlobalScript"]).c_str());
    Passes.add(P);
  }

  // Run the passes.
  Passes.run(mod);

  // If no error occurred, keep the files.
  S->keepAllFiles();

  return 0;
}
void swift::performLLVMOptimizations(IRGenOptions &Opts, llvm::Module *Module,
                                     llvm::TargetMachine *TargetMachine) {
  SharedTimer timer("LLVM optimization");

  // Set up a pipeline.
  PassManagerBuilder PMBuilder;

  if (Opts.Optimize && !Opts.DisableLLVMOptzns) {
    PMBuilder.OptLevel = 3;
    PMBuilder.Inliner = llvm::createFunctionInliningPass(200);
    PMBuilder.SLPVectorize = true;
    PMBuilder.LoopVectorize = true;
    PMBuilder.MergeFunctions = true;
  } else {
    PMBuilder.OptLevel = 0;
    if (!Opts.DisableLLVMOptzns)
      PMBuilder.Inliner =
        llvm::createAlwaysInlinerPass(/*insertlifetime*/false);
  }

  PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
                         addSwiftStackPromotionPass);

  // If the optimizer is enabled, we run the ARCOpt pass in the scalar optimizer
  // and the Contract pass as late as possible.
  if (!Opts.DisableLLVMARCOpts) {
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addSwiftARCOptPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                           addSwiftContractPass);
  }

  if (Opts.Sanitize == SanitizerKind::Address) {
    PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                           addAddressSanitizerPasses);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addAddressSanitizerPasses);
  }

  if (Opts.Sanitize == SanitizerKind::Thread) {
    PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                           addThreadSanitizerPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addThreadSanitizerPass);
  }

  // Configure the function passes.
  legacy::FunctionPassManager FunctionPasses(Module);
  FunctionPasses.add(createTargetTransformInfoWrapperPass(
      TargetMachine->getTargetIRAnalysis()));
  if (Opts.Verify)
    FunctionPasses.add(createVerifierPass());
  PMBuilder.populateFunctionPassManager(FunctionPasses);

  // The PMBuilder only knows about LLVM AA passes. We should explicitly add
  // the swift AA pass after the other ones.
  if (!Opts.DisableLLVMARCOpts) {
    FunctionPasses.add(createSwiftAAWrapperPass());
    FunctionPasses.add(createExternalAAWrapperPass(
        [](Pass &P, Function &, AAResults &AAR) {
      if (auto *WrapperPass = P.getAnalysisIfAvailable<SwiftAAWrapperPass>())
        AAR.addAAResult(WrapperPass->getResult());
    }));
  }

  // Run the function passes.
  FunctionPasses.doInitialization();
  for (auto I = Module->begin(), E = Module->end(); I != E; ++I)
    if (!I->isDeclaration())
      FunctionPasses.run(*I);
  FunctionPasses.doFinalization();

  // Configure the module passes.
  legacy::PassManager ModulePasses;
  ModulePasses.add(createTargetTransformInfoWrapperPass(
      TargetMachine->getTargetIRAnalysis()));
  PMBuilder.populateModulePassManager(ModulePasses);

  // The PMBuilder only knows about LLVM AA passes. We should explicitly add
  // the swift AA pass after the other ones.
  if (!Opts.DisableLLVMARCOpts) {
    ModulePasses.add(createSwiftAAWrapperPass());
    ModulePasses.add(createExternalAAWrapperPass(
        [](Pass &P, Function &, AAResults &AAR) {
      if (auto *WrapperPass = P.getAnalysisIfAvailable<SwiftAAWrapperPass>())
        AAR.addAAResult(WrapperPass->getResult());
    }));
  }

  // If we're generating a profile, add the lowering pass now.
  if (Opts.GenerateProfile)
    ModulePasses.add(createInstrProfilingPass());

  if (Opts.Verify)
    ModulePasses.add(createVerifierPass());

  if (Opts.PrintInlineTree)
    ModulePasses.add(createInlineTreePrinterPass());

  // Do it.
  ModulePasses.run(*Module);
}
void EmitAssemblyHelper::CreatePasses() {
  unsigned OptLevel = CodeGenOpts.OptimizationLevel;
  CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;

  // Handle disabling of LLVM optimization, where we want to preserve the
  // internal module before any optimization.
  if (CodeGenOpts.DisableLLVMOpts) {
    OptLevel = 0;
    Inlining = CodeGenOpts.NoInlining;
  }

  PassManagerBuilder PMBuilder;
  PMBuilder.OptLevel = OptLevel;
  PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;

  PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
  PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
  PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;

  // In ObjC ARC mode, add the main ARC optimization passes.
  if (LangOpts.ObjCAutoRefCount) {
    PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
                           addObjCARCExpandPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addObjCARCOptPass);
  }

  if (LangOpts.AddressSanitizer) {
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addAddressSanitizerPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addAddressSanitizerPass);
  }

  // Figure out TargetLibraryInfo.
  Triple TargetTriple(TheModule->getTargetTriple());
  PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
  if (!CodeGenOpts.SimplifyLibCalls)
    PMBuilder.LibraryInfo->disableAllFunctions();

  switch (Inlining) {
  case CodeGenOptions::NoInlining: break;
  case CodeGenOptions::NormalInlining: {
    // FIXME: Derive these constants in a principled fashion.
    unsigned Threshold = 225;
    if (CodeGenOpts.OptimizeSize == 1)      // -Os
      Threshold = 75;
    else if (CodeGenOpts.OptimizeSize == 2) // -Oz
      Threshold = 25;
    else if (OptLevel > 2)
      Threshold = 275;
    PMBuilder.Inliner = createFunctionInliningPass(Threshold);
    break;
  }
  case CodeGenOptions::OnlyAlwaysInlining:
    // Respect always_inline.
    PMBuilder.Inliner = createAlwaysInlinerPass();
    break;
  }

  // Set up the per-function pass manager.
  FunctionPassManager *FPM = getPerFunctionPasses();
  if (CodeGenOpts.VerifyModule)
    FPM->add(createVerifierPass());
  PMBuilder.populateFunctionPassManager(*FPM);

  // Set up the per-module pass manager.
  PassManager *MPM = getPerModulePasses();
  if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
    MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
                                    CodeGenOpts.EmitGcovArcs,
                                    TargetTriple.isMacOSX()));
    if (!CodeGenOpts.DebugInfo)
      MPM->add(createStripSymbolsPass(true));
  }

  PMBuilder.populateModulePassManager(*MPM);
}
static void addOptimizationPasses(PassManagerBase &mpm, FunctionPassManager &fpm,
#endif
                                  unsigned optLevel, unsigned sizeLevel) {
  fpm.add(createVerifierPass()); // Verify that input is correct

  PassManagerBuilder builder;
  builder.OptLevel = optLevel;
  builder.SizeLevel = sizeLevel;

  if (willInline()) {
    unsigned threshold = 225;
    if (sizeLevel == 1) // -Os
      threshold = 75;
    else if (sizeLevel == 2) // -Oz
      threshold = 25;
    if (optLevel > 2)
      threshold = 275;
    builder.Inliner = createFunctionInliningPass(threshold);
  } else {
    builder.Inliner = createAlwaysInlinerPass();
  }

#if LDC_LLVM_VER < 304
  builder.DisableSimplifyLibCalls = disableSimplifyLibCalls;
#endif
  builder.DisableUnitAtATime = !unitAtATime;
  builder.DisableUnrollLoops = optLevel == 0;

#if LDC_LLVM_VER >= 304
  builder.DisableUnrollLoops = (disableLoopUnrolling.getNumOccurrences() > 0)
                                   ? disableLoopUnrolling
                                   : optLevel == 0;

  // This is final, unless there is a #pragma vectorize enable
  if (disableLoopVectorization)
    builder.LoopVectorize = false;
  // If option wasn't forced via cmd line (-vectorize-loops, -loop-vectorize)
  else if (!builder.LoopVectorize)
    builder.LoopVectorize = optLevel > 1 && sizeLevel < 2;

  // When #pragma vectorize is on for SLP, do the same as above
  builder.SLPVectorize =
      disableSLPVectorization ? false : optLevel > 1 && sizeLevel < 2;
#else
  /* builder.Vectorize is set in ctor from command line switch */
#endif

#if LDC_LLVM_VER >= 303
  if (opts::sanitize == opts::AddressSanitizer) {
    builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                         addAddressSanitizerPasses);
    builder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                         addAddressSanitizerPasses);
  }

  if (opts::sanitize == opts::MemorySanitizer) {
    builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                         addMemorySanitizerPass);
    builder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                         addMemorySanitizerPass);
  }

  if (opts::sanitize == opts::ThreadSanitizer) {
    builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                         addThreadSanitizerPass);
    builder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                         addThreadSanitizerPass);
  }
#endif

  if (!disableLangSpecificPasses) {
    if (!disableSimplifyDruntimeCalls)
      builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
                           addSimplifyDRuntimeCallsPass);

    if (!disableGCToStack)
      builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
                           addGarbageCollect2StackPass);
  }

  // EP_OptimizerLast does not exist in LLVM 3.0, add it manually below.
  builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                       addStripExternalsPass);

  builder.populateFunctionPassManager(fpm);
  builder.populateModulePassManager(mpm);
}
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  if (Internalize) {
    // If we're generating code, we always have the whole program available. The
    // relocations expected for externally visible functions aren't supported,
    // so make sure every non-entry function is hidden.
    Builder.addExtension(
      PassManagerBuilder::EP_EnabledOnOptLevel0,
      [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        PM.add(createInternalizePass(mustPreserveGV));
      });
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());
  });
}
void EmitAssemblyHelper::CreatePasses() {
  unsigned OptLevel = CodeGenOpts.OptimizationLevel;
  CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;

  // Handle disabling of LLVM optimization, where we want to preserve the
  // internal module before any optimization.
  if (CodeGenOpts.DisableLLVMOpts) {
    OptLevel = 0;
    Inlining = CodeGenOpts.NoInlining;
  }

  PassManagerBuilder PMBuilder;
  PMBuilder.OptLevel = OptLevel;
  PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;

  PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
  PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
  PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;

  // In ObjC ARC mode, add the main ARC optimization passes.
  if (LangOpts.ObjCAutoRefCount) {
    PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
                           addObjCARCExpandPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
                           addObjCARCAPElimPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addObjCARCOptPass);
  }

  if (CodeGenOpts.BoundsChecking > 0) {
    BoundsChecking = CodeGenOpts.BoundsChecking;
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addBoundsCheckingPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addBoundsCheckingPass);
  }

  if (LangOpts.AddressSanitizer) {
    PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
                           addAddressSanitizerPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addAddressSanitizerPass);
  }

  if (LangOpts.ThreadSanitizer) {
    PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
                           addThreadSanitizerPass);
    PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
                           addThreadSanitizerPass);
  }

  // Figure out TargetLibraryInfo.
  Triple TargetTriple(TheModule->getTargetTriple());
  PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
  if (!CodeGenOpts.SimplifyLibCalls)
    PMBuilder.LibraryInfo->disableAllFunctions();

  switch (Inlining) {
  case CodeGenOptions::NoInlining: break;
  case CodeGenOptions::NormalInlining: {
    // FIXME: Derive these constants in a principled fashion.
    unsigned Threshold = 225;
    if (CodeGenOpts.OptimizeSize == 1)      // -Os
      Threshold = 75;
    else if (CodeGenOpts.OptimizeSize == 2) // -Oz
      Threshold = 25;
    else if (OptLevel > 2)
      Threshold = 275;
    PMBuilder.Inliner = createFunctionInliningPass(Threshold);
    break;
  }
  case CodeGenOptions::OnlyAlwaysInlining:
    // Respect always_inline.
    if (OptLevel == 0)
      // Do not insert lifetime intrinsics at -O0.
      PMBuilder.Inliner = createAlwaysInlinerPass(false);
    else
      PMBuilder.Inliner = createAlwaysInlinerPass();
    break;
  }

  // Set up the per-function pass manager.
  FunctionPassManager *FPM = getPerFunctionPasses();
  if (CodeGenOpts.VerifyModule)
    FPM->add(createVerifierPass());
  PMBuilder.populateFunctionPassManager(*FPM);

  // Set up the per-module pass manager.
  PassManager *MPM = getPerModulePasses();
  if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
    MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
                                    CodeGenOpts.EmitGcovArcs,
                                    TargetTriple.isMacOSX()));
    if (CodeGenOpts.DebugInfo == CodeGenOptions::NoDebugInfo)
      MPM->add(createStripSymbolsPass(true));
  }

  // Add the memory safety passes for control-flow integrity
  if (CodeGenOpts.MemSafety) {
    // Make sure everything that can be in an LLVM register is.
    MPM->add(createPromoteMemoryToRegisterPass());
    MPM->add(createUnifyFunctionExitNodesPass());
    MPM->add(new CFIChecks());
  }

  PMBuilder.populateModulePassManager(*MPM);

  if (CodeGenOpts.SoftBound) {
    // Make sure SoftBound+CETS is run after optimization with at least
    // mem2reg run.
    MPM->add(new DominatorTree());
    MPM->add(new DominanceFrontier());
    MPM->add(new LoopInfo());
    MPM->add(new InitializeSoftBound());
    MPM->add(new SoftBoundCETSPass());
  }

  // Add the memory safety passes
  if (CodeGenOpts.MemSafety) {
    MPM->add(createCommonMSCInfoPass());
    MPM->add(createSAFECodeMSCInfoPass());

    // C standard library / format string function transforms
    if (!CodeGenOpts.BaggyBounds) {
      MPM->add(new StringTransform());
      MPM->add(new FormatStringTransform());
      MPM->add(new RegisterVarargCallSites());
      MPM->add(new LoggingFunctions());
    }

    MPM->add(new InitAllocas());
    MPM->add(createRegisterGlobalsPass(/*RegUncertain=*/true,
                                       /*MakeInternal=*/false));
    MPM->add(createRemoveUnsuitableGlobalRegistrationsPass());
    MPM->add(new RegisterMainArgs());
    MPM->add(createInstrumentFreeCallsPass());
    MPM->add(new RegisterCustomizedAllocation());
    MPM->add(new LoopInfo());
    MPM->add(new DominatorTree());
    MPM->add(createRegisterStackPoolsPass(/*RegByval=*/true));
    MPM->add(createUnregisterStackPoolsPass());
    MPM->add(createSpecializeCMSCallsPass());
    MPM->add(new RegisterRuntimeInitializer(CodeGenOpts.MemSafetyLogFile.c_str()));
    MPM->add(new DebugInstrument());
    MPM->add(createInstrumentMemoryAccessesPass());
    MPM->add(createInstrumentGEPsPass());
    MPM->add(createSpecializeCMSCallsPass());
    MPM->add(new ScalarEvolution());
    MPM->add(new ArrayBoundsCheckLocal());
    MPM->add(createOptimizeGEPChecksPass());
    MPM->add(createExactCheckOptPass());
    MPM->add(new ScalarEvolution());
    MPM->add(createLocalArrayBoundsAnalysisPass());
    MPM->add(createOptimizeFastMemoryChecksPass());
    MPM->add(createOptimizeIdenticalLSChecksPass());
    MPM->add(new DominatorTree());
    MPM->add(new ScalarEvolution());
    MPM->add(createOptimizeImpliedFastLSChecksPass());
    MPM->add(new OptimizeChecks());
    MPM->add(createOptimizeMemoryRegistrationsPass(/*AllowFastChecks=*/true));
    if (CodeGenOpts.MemSafeTerminate) {
      MPM->add(llvm::createSCTerminatePass());
    }
  }

  if (CodeGenOpts.BaggyBounds) {
    MPM->add(new InsertBaggyBoundsChecks());
  }

  //
  // Re-run the LLVM optimizations.
  //
  PMBuilder.populateModulePassManager(*MPM);

  // For SAFECode, do the debug instrumentation and OOB rewriting after
  // all optimization is done.
  if (CodeGenOpts.MemSafety) {
    MPM->add(new DebugInstrument());
    MPM->add(new RewriteOOB());
  }
}