/// Pipeline: high-level SSA optimizations followed by the early round of
/// semantic/loop-based optimizations.
static void addHighLevelEarlyLoopOptPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("HighLevel+EarlyLoopOpt");
  // FIXME: update this to be a function pass.
  P.addEagerSpecializer();
  addSSAPasses(P, OptimizationLevelKind::HighLevel);
  addHighLevelLoopOptPasses(P);
}
/// Build a single-pipeline plan that runs exactly the given pass kinds,
/// in the order provided.
SILPassPipelinePlan
SILPassPipelinePlan::getPassPipelineForKinds(ArrayRef<PassKind> PassKinds) {
  SILPassPipelinePlan plan;
  plan.startPipeline("Pass List Pipeline");
  plan.addPasses(PassKinds);
  return plan;
}
/// Non-mandatory passes that should run as preparation for IRGen. static void addIRGenPreparePipeline(SILPassPipelinePlan &P) { P.startPipeline("IRGen Preparation"); // Insert SIL passes to run during IRGen. // Hoist generic alloc_stack instructions to the entry block to enable better // llvm-ir generation for dynamic alloca instructions. P.addAllocStackHoisting(); }
// Run passes that // - should only run after all general SIL transformations. // - have no reason to run before any other SIL optimizations. // - don't require IRGen information. static void addLastChanceOptPassPipeline(SILPassPipelinePlan &P) { // Optimize access markers for improved IRGen after all other optimizations. P.addAccessEnforcementOpts(); // Only has an effect if the -assume-single-thread option is specified. P.addAssumeSingleThreaded(); }
/// Pipeline: mid-level SSA optimizations plus preparation for
/// partial-apply specialization.
static void addMidLevelPassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("MidLevel");
  addSSAPasses(P, OptimizationLevelKind::MidLevel);
  // Specialize partially applied functions with dead arguments as a preparation
  // for CapturePropagation.
  P.addDeadArgSignatureOpt();
  // NOTE(review): a second addMidLevelPassPipeline definition appears later in
  // this file with an added LoopUnroll — only one can be compiled per TU.
}
/// Pipeline: module-level cleanups that should run before time is spent
/// optimizing individual functions.
static void addPerfEarlyModulePassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("EarlyModulePasses");
  // Get rid of apparently dead functions as soon as possible so that
  // we do not spend time optimizing them.
  P.addDeadFunctionElimination();
  // Start by cloning functions from stdlib.
  P.addSILLinker();
  // NOTE(review): a second addPerfEarlyModulePassPipeline definition appears
  // later in this file with an added TempRValueOpt — only one can be live.
}
/// Mandatory IRGen preparation. It is the caller's job to set the set stage to /// "lowered" after running this pipeline. SILPassPipelinePlan SILPassPipelinePlan::getLoweringPassPipeline() { SILPassPipelinePlan P; P.startPipeline("Address Lowering"); P.addSILCleanup(); P.addAddressLowering(); return P; }
/// Interleave SimplifyCFG and SILCombine so each can expose opportunities
/// for the other.
void addSimplifyCFGSILCombinePasses(SILPassPipelinePlan &P) {
  P.addSimplifyCFG();
  P.addConditionForwarding();
  // Jump threading can expose opportunity for silcombine (enum -> is_enum_tag->
  // cond_br).
  P.addSILCombine();
  // Which can expose opportunity for SimplifyCFG.
  P.addSimplifyCFG();
}
/// Mandatory IRGen preparation. It is the caller's job to set the set stage to /// "lowered" after running this pipeline. SILPassPipelinePlan SILPassPipelinePlan::getLoweringPassPipeline() { SILPassPipelinePlan P; P.startPipeline("Address Lowering"); P.addIRGenPrepare(); P.addAddressLowering(); return P; }
/// Pipeline: low-level SSA optimizations plus passes that only make sense
/// once inlining is essentially complete.
static void addLowLevelPassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("LowLevel");
  // Should be after FunctionSignatureOpts and before the last inliner.
  P.addReleaseDevirtualizer();
  addSSAPasses(P, OptimizationLevelKind::LowLevel);
  P.addDeadStoreElimination();
  // We've done a lot of optimizations on this function, attempt to FSO.
  P.addFunctionSignatureOpts();
}
/// Deserialize a pass-pipeline plan from a YAML file.
///
/// Expected document shape: a sequence of pipeline descriptions, each itself a
/// sequence whose first element is a scalar pipeline name and whose remaining
/// elements are single-element sequences naming one pass each.
///
/// NOTE(review): failures (unreadable file, malformed YAML, unknown pass name)
/// are handled with llvm_unreachable/assert, so bad user input aborts the
/// compiler instead of producing a diagnostic — consider real error reporting.
SILPassPipelinePlan
SILPassPipelinePlan::getPassPipelineFromFile(StringRef Filename) {
  namespace yaml = llvm::yaml;
  DEBUG(llvm::dbgs() << "Parsing Pass Pipeline from " << Filename << "\n");

  // Load the input file.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileBufOrErr =
      llvm::MemoryBuffer::getFileOrSTDIN(Filename);
  if (!FileBufOrErr) {
    llvm_unreachable("Failed to read yaml file");
  }

  StringRef Buffer = FileBufOrErr->get()->getBuffer();
  llvm::SourceMgr SM;
  yaml::Stream Stream(Buffer, SM);
  yaml::document_iterator DI = Stream.begin();
  assert(DI != Stream.end() && "Failed to read a document");
  yaml::Node *N = DI->getRoot();
  assert(N && "Failed to find a root");

  SILPassPipelinePlan P;

  // Top-level node must be the sequence of pipeline descriptions.
  auto *RootList = cast<yaml::SequenceNode>(N);
  llvm::SmallVector<PassKind, 32> Passes;
  for (yaml::Node &PipelineNode :
       make_range(RootList->begin(), RootList->end())) {
    // Reused across iterations; cleared before each pipeline is parsed.
    Passes.clear();
    DEBUG(llvm::dbgs() << "New Pipeline:\n");

    auto *Desc = cast<yaml::SequenceNode>(&PipelineNode);
    yaml::SequenceNode::iterator DescIter = Desc->begin();
    // The first element of each description is the pipeline's name.
    StringRef Name = cast<yaml::ScalarNode>(&*DescIter)->getRawValue();
    DEBUG(llvm::dbgs() << " Name: \"" << Name << "\"\n");
    ++DescIter;

    // Every remaining element is a one-element sequence naming a pass.
    for (auto DescEnd = Desc->end(); DescIter != DescEnd; ++DescIter) {
      auto *InnerPassList = cast<yaml::SequenceNode>(&*DescIter);
      auto *FirstNode = &*InnerPassList->begin();
      StringRef PassName = cast<yaml::ScalarNode>(FirstNode)->getRawValue();
      // Strip the surrounding quote characters from the raw scalar value.
      // NOTE(review): underflows if the raw value is shorter than 2 chars —
      // assumes every pass name is quoted; confirm against the emitter.
      unsigned Size = PassName.size() - 2;
      PassName = PassName.substr(1, Size);
      DEBUG(llvm::dbgs() << " Pass: \"" << PassName << "\"\n");
      auto Kind = PassKindFromString(PassName);
      assert(Kind != PassKind::invalidPassKind && "Found invalid pass kind?!");
      Passes.push_back(Kind);
    }

    P.startPipeline(Name);
    P.addPasses(Passes);
  }

  return P;
}
/// Pipeline: mid-level SSA optimizations, specialization preparation, and
/// loop unrolling.
///
/// NOTE(review): duplicates an earlier addMidLevelPassPipeline definition
/// (this variant adds LoopUnroll) — only one can be compiled per TU.
static void addMidLevelPassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("MidLevel");
  addSSAPasses(P, OptimizationLevelKind::MidLevel);
  // Specialize partially applied functions with dead arguments as a preparation
  // for CapturePropagation.
  P.addDeadArgSignatureOpt();
  // Run loop unrolling after inlining and constant propagation, because loop
  // trip counts may have became constant.
  P.addLoopUnroll();
}
/// Pipeline: early module-level cleanups plus SILGen temporary-copy cleanup.
///
/// NOTE(review): duplicates an earlier addPerfEarlyModulePassPipeline
/// definition (this variant adds TempRValueOpt) — only one can be live.
static void addPerfEarlyModulePassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("EarlyModulePasses");
  // Get rid of apparently dead functions as soon as possible so that
  // we do not spend time optimizing them.
  P.addDeadFunctionElimination();
  // Start by cloning functions from stdlib.
  P.addSILLinker();
  // Cleanup after SILGen: remove trivial copies to temporaries.
  P.addTempRValueOpt();
}
/// Build the non-mandatory IRGen-preparation pipeline.
///
/// LoadableByAddress is only appended when large loadable types are enabled
/// in the given options.
SILPassPipelinePlan
SILPassPipelinePlan::getIRGenPreparePassPipeline(const SILOptions &Options) {
  SILPassPipelinePlan plan;
  plan.startPipeline("IRGen Preparation");

  // Insert SIL passes to run during IRGen.
  // Hoist generic alloc_stack instructions to the entry block to enable better
  // llvm-ir generation for dynamic alloca instructions.
  plan.addAllocStackHoisting();

  if (Options.EnableLargeLoadableTypes)
    plan.addLoadableByAddress();

  return plan;
}
/// Build the pipeline run before the main optimizer pipelines.
///
/// In -sil-debug-serialization mode this degenerates to the debug
/// serialization pipeline; otherwise it strips access markers.
SILPassPipelinePlan
SILPassPipelinePlan::getSILOptPreparePassPipeline(const SILOptions &Options) {
  SILPassPipelinePlan plan;

  // Debug serialization replaces the normal preparation entirely.
  if (Options.DebugSerialization) {
    addPerfDebugSerializationPipeline(plan);
    return plan;
  }

  plan.startPipeline("SILOpt Prepare Passes");
  plan.addAccessMarkerElimination();
  return plan;
}
/// Pipeline: the final round of loop optimizations and lowering cleanups.
///
/// NOTE(review): a second addLateLoopOptPassPipeline definition appears later
/// in this file with additional passes — only one can be compiled per TU.
static void addLateLoopOptPassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("LateLoopOpt");
  // Delete dead code and drop the bodies of shared functions.
  P.addDeadFunctionElimination();

  // Perform the final lowering transformations.
  P.addCodeSinking();
  P.addLICM();

  // Optimize overflow checks.
  P.addRedundantOverflowCheckRemoval();
  P.addMergeCondFails();

  // Remove dead code.
  P.addDCE();
  P.addSILCombine();
  P.addSimplifyCFG();

  // Try to hoist all releases, including epilogue releases. This should be
  // after FSO.
  P.addLateReleaseHoisting();
}
/// Pipeline: mid-point module cleanups followed by the first (high-level)
/// stack promotion.
static void addMidModulePassesStackPromotePassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("MidModulePasses+StackPromote");
  P.addDeadFunctionElimination();
  P.addSILLinker();
  P.addDeadObjectElimination();
  P.addGlobalPropertyOpt();

  // Do the first stack promotion on high-level SIL.
  P.addStackPromotion();
}
/// Pipeline: closure specialization together with the propagation and
/// devirtualization passes that surround it.
static void addClosureSpecializePassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("ClosureSpecialize");
  P.addDeadFunctionElimination();
  P.addDeadObjectElimination();

  // Hoist globals out of loops.
  // Global-init functions should not be inlined until GlobalOpt is done.
  P.addGlobalOpt();
  P.addLetPropertiesOpt();

  // Propagate constants into closures and convert to static dispatch. This
  // should run after specialization and inlining because we don't want to
  // specialize a call that can be inlined. It should run before
  // ClosureSpecialization, because constant propagation is more effective. At
  // least one round of SSA optimization and inlining should run after this to
  // take advantage of static dispatch.
  P.addCapturePropagation();

  // Specialize closure.
  P.addClosureSpecializer();

  // Do the second stack promotion on low-level SIL.
  P.addStackPromotion();

  // Speculate virtual call targets.
  P.addSpeculativeDevirtualization();

  // There should be at least one SILCombine+SimplifyCFG between the
  // ClosureSpecializer, etc. and the last inliner. Cleaning up after these
  // passes can expose more inlining opportunities.
  addSimplifyCFGSILCombinePasses(P);

  // We do this late since it is a pass like the inline caches that we only want
  // to run once very late. Make sure to run at least one round of the ARC
  // optimizer after this.
  // NOTE(review): no pass follows the comment above — either the pass it
  // described was removed or it belongs in the caller; confirm.
}
/// Build the -Onone pass pipeline: prespecialization plus a minimal set of
/// optional passes.
///
/// NOTE(review): a second getOnonePassPipeline definition appears later in
/// this file with an added ExternalDefsToDecls — only one can be linked.
SILPassPipelinePlan SILPassPipelinePlan::getOnonePassPipeline() {
  SILPassPipelinePlan P;

  // First specialize user-code.
  P.startPipeline("Prespecialization");
  P.addUsePrespecialized();

  P.startPipeline("Rest of Onone");
  // Has only an effect if the -assume-single-thread option is specified.
  P.addAssumeSingleThreaded();

  // Has only an effect if the -gsil option is specified.
  P.addSILDebugInfoGenerator();

  return P;
}
/// Build the -Onone pass pipeline: prespecialization, external-definition
/// trimming, and a minimal set of optional passes.
///
/// NOTE(review): duplicates an earlier getOnonePassPipeline definition (this
/// variant adds ExternalDefsToDecls) — only one can be linked into a build.
SILPassPipelinePlan SILPassPipelinePlan::getOnonePassPipeline() {
  SILPassPipelinePlan P;

  // First specialize user-code.
  P.startPipeline("Prespecialization");
  P.addUsePrespecialized();

  P.startPipeline("Rest of Onone");
  // Don't keep external functions from stdlib and other modules.
  // We don't want that our unoptimized version will be linked instead
  // of the optimized version from the stdlib.
  // Here we just convert external definitions to declarations. LLVM will
  // eventually remove unused declarations.
  P.addExternalDefsToDecls();

  // Has only an effect if the -assume-single-thread option is specified.
  P.addAssumeSingleThreaded();

  // Has only an effect if the -gsil option is specified.
  P.addSILDebugInfoGenerator();

  return P;
}
/// Mandatory "guaranteed" passes: the diagnostics and transformations that
/// run in every build mode, before any performance optimization.
///
/// The order below is significant; these passes feed each other's invariants.
static void addMandatoryOptPipeline(SILPassPipelinePlan &P,
                                    const SILOptions &Options) {
  P.startPipeline("Guaranteed Passes");
  // Optional: semantic ARC cleanup, gated by a frontend option.
  if (Options.EnableMandatorySemanticARCOpts) {
    P.addSemanticARCOpts();
  }
  P.addDiagnoseStaticExclusivity();
  P.addCapturePromotion();
  P.addAllocBoxToStack();
  P.addNoReturnFolding();
  P.addOwnershipModelEliminator();
  P.addMarkUninitializedFixup();
  P.addDefiniteInitialization();
  P.addAccessEnforcementSelection();
  P.addAccessMarkerElimination();
  P.addMandatoryInlining();
  P.addPredictableMemoryOptimizations();
  P.addDiagnosticConstantPropagation();
  P.addGuaranteedARCOpts();
  P.addDiagnoseUnreachable();
  P.addEmitDFDiagnostics();
  // Canonical swift requires all non cond_br critical edges to be split.
  P.addSplitNonCondBrCriticalEdges();
}
static void addOwnershipModelEliminatorPipeline(SILPassPipelinePlan &P) { P.startPipeline("Ownership Model Eliminator"); P.addOwnershipModelEliminator(); }
static void addMandatoryDebugSerialization(SILPassPipelinePlan &P) { P.startPipeline("Mandatory Debug Serialization"); P.addOwnershipModelEliminator(); P.addMandatoryInlining(); }
static void addCFGPrinterPipeline(SILPassPipelinePlan &P, StringRef Name) { P.startPipeline(Name); P.addCFGPrinter(); }
/// Perform semantic annotation/loop base optimizations. void addHighLevelLoopOptPasses(SILPassPipelinePlan &P) { // Perform classic SSA optimizations for cleanup. P.addLowerAggregateInstrs(); P.addSILCombine(); P.addSROA(); P.addMem2Reg(); P.addDCE(); P.addSILCombine(); addSimplifyCFGSILCombinePasses(P); // Run high-level loop opts. P.addLoopRotate(); // Cleanup. P.addDCE(); // Also CSE semantic calls. P.addHighLevelCSE(); P.addSILCombine(); P.addSimplifyCFG(); P.addHighLevelLICM(); // Start of loop unrolling passes. P.addArrayCountPropagation(); // To simplify induction variable. P.addSILCombine(); P.addLoopUnroll(); P.addSimplifyCFG(); P.addPerformanceConstantPropagation(); P.addSimplifyCFG(); P.addArrayElementPropagation(); // End of unrolling passes. P.addRemovePins(); P.addABCOpt(); // Cleanup. P.addDCE(); P.addCOWArrayOpts(); // Cleanup. P.addDCE(); P.addSwiftArrayOpts(); }
/// Build a pipeline that only runs the instruction-count pass.
SILPassPipelinePlan SILPassPipelinePlan::getInstCountPassPipeline() {
  SILPassPipelinePlan plan;
  plan.startPipeline("Inst Count");
  plan.addInstCount();
  return plan;
}
// Perform classic SSA optimizations.
//
// OpLevel selects which inliner tier runs (early / perf / late) and whether
// the LowLevel-only late peepholes are appended; the remaining pass order is
// shared by all tiers and is significant.
void addSSAPasses(SILPassPipelinePlan &P, OptimizationLevelKind OpLevel) {
  // Promote box allocations to stack allocations.
  P.addAllocBoxToStack();

  // Propagate copies through stack locations. Should run after
  // box-to-stack promotion since it is limited to propagating through
  // stack locations. Should run before aggregate lowering since that
  // splits up copy_addr.
  P.addCopyForwarding();

  // Split up opaque operations (copy_addr, retain_value, etc.).
  P.addLowerAggregateInstrs();

  // Split up operations on stack-allocated aggregates (struct, tuple).
  P.addSROA();

  // Promote stack allocations to values.
  P.addMem2Reg();

  // Cleanup, which is important if the inliner has restarted the pass pipeline.
  P.addPerformanceConstantPropagation();
  P.addSimplifyCFG();
  P.addSILCombine();

  // Mainly for Array.append(contentsOf) optimization.
  P.addArrayElementPropagation();

  // Run the devirtualizer, specializer, and inliner. If any of these
  // makes a change we'll end up restarting the function passes on the
  // current function (after optimizing any new callees).
  P.addDevirtualizer();
  P.addGenericSpecializer();

  switch (OpLevel) {
  case OptimizationLevelKind::HighLevel:
    // Does not inline functions with defined semantics.
    P.addEarlyInliner();
    break;
  case OptimizationLevelKind::MidLevel:
    // Does inline semantics-functions (except "availability"), but not
    // global-init functions.
    P.addGlobalOpt();
    P.addLetPropertiesOpt();
    P.addPerfInliner();
    break;
  case OptimizationLevelKind::LowLevel:
    // Inlines everything
    P.addLateInliner();
    break;
  }

  // Promote stack allocations to values and eliminate redundant
  // loads.
  P.addMem2Reg();
  P.addPerformanceConstantPropagation();

  // Do a round of CFG simplification, followed by peepholes, then
  // more CFG simplification.
  // Jump threading can expose opportunity for SILCombine (enum -> is_enum_tag->
  // cond_br).
  P.addJumpThreadSimplifyCFG();
  P.addSILCombine();
  // SILCombine can expose further opportunities for SimplifyCFG.
  P.addSimplifyCFG();

  P.addCSE();
  P.addRedundantLoadElimination();

  // Perform retain/release code motion and run the first ARC optimizer.
  P.addCSE();
  P.addDCE();
  P.addEarlyCodeMotion();
  P.addReleaseHoisting();
  P.addARCSequenceOpts();

  P.addSimplifyCFG();
  if (OpLevel == OptimizationLevelKind::LowLevel) {
    // Remove retain/releases based on Builtin.unsafeGuaranteed
    P.addUnsafeGuaranteedPeephole();
    // Only hoist releases very late.
    P.addLateCodeMotion();
  } else
    P.addEarlyCodeMotion();
  P.addRetainSinking();
  P.addReleaseHoisting();
  P.addARCSequenceOpts();
  P.addRemovePins();
}
static void addPerfDebugSerializationPipeline(SILPassPipelinePlan &P) { P.startPipeline("Performance Debug Serialization"); P.addSILLinker(); }
static void addSILDebugInfoGeneratorPipeline(SILPassPipelinePlan &P) { P.startPipeline("SIL Debug Info Generator"); P.addSILDebugInfoGenerator(); }
/// Pipeline: the final round of loop optimizations and lowering cleanups.
///
/// NOTE(review): duplicates an earlier addLateLoopOptPassPipeline definition
/// (this variant adds ExternalFunctionDefinitionsElimination and
/// AssumeSingleThreaded, and omits a SILCombine) — only one can be compiled.
static void addLateLoopOptPassPipeline(SILPassPipelinePlan &P) {
  P.startPipeline("LateLoopOpt");
  // Delete dead code and drop the bodies of shared functions.
  P.addExternalFunctionDefinitionsElimination();
  P.addDeadFunctionElimination();

  // Perform the final lowering transformations.
  P.addCodeSinking();
  P.addLICM();

  // Optimize overflow checks.
  P.addRedundantOverflowCheckRemoval();
  P.addMergeCondFails();

  // Remove dead code.
  P.addDCE();
  P.addSimplifyCFG();

  // Try to hoist all releases, including epilogue releases. This should be
  // after FSO.
  P.addLateReleaseHoisting();

  // Has only an effect if the -assume-single-thread option is specified.
  P.addAssumeSingleThreaded();
}