/// Return true if the value \p V may escape through any of its (transitive)
/// uses.
///
/// Walks all uses of \p V with a worklist. A use that is not "captured"
/// (per the \c useCaptured helper, defined elsewhere) is ignored. Captured
/// uses are then whitelisted case by case; any capturing use that does not
/// match a whitelisted pattern falls through to the final `return true`,
/// i.e. unknown capturing uses are conservatively treated as escapes.
///
/// \param V            The value whose uses are analyzed.
/// \param examineApply If true, drill into full applies of \p V via
///                     \c partialApplyArgumentEscapes (defined elsewhere) to
///                     see whether the argument is captured in or returned
///                     from the callee.
static bool partialApplyEscapes(SILValue V, bool examineApply) {
  SILModuleConventions ModConv(*V->getModule());
  llvm::SmallVector<Operand *, 32> Worklist(V->use_begin(), V->use_end());
  while (!Worklist.empty()) {
    auto *Op = Worklist.pop_back_val();

    // These instructions do not cause the address to escape.
    if (!useCaptured(Op))
      continue;

    auto *User = Op->getUser();

    // If we have a copy_value, the copy value does not cause an escape, but its
    // uses might do so... so add the copy_value's uses to the worklist and
    // continue.
    if (auto CVI = dyn_cast<CopyValueInst>(User)) {
      copy(CVI->getUses(), std::back_inserter(Worklist));
      continue;
    }

    if (auto *Apply = dyn_cast<ApplyInst>(User)) {
      // Applying a function does not cause the function to escape.
      // (Operand 0 of an apply is the callee itself.)
      if (Op->getOperandNumber() == 0)
        continue;

      // apply instructions do not capture the pointer when it is passed
      // indirectly
      // (getArgumentConvention takes an argument index, hence the -1 to skip
      // the callee operand.)
      if (Apply->getArgumentConvention(Op->getOperandNumber() - 1)
              .isIndirectConvention())
        continue;

      // Optionally drill down into an apply to see if the operand is
      // captured in or returned from the apply.
      if (examineApply && !partialApplyArgumentEscapes(Op))
        continue;

      // NOTE: no `continue` here — a direct apply that is not whitelisted
      // above deliberately falls through to `return true` below.
    }

    // partial_apply instructions do not allow the pointer to escape
    // when it is passed indirectly, unless the partial_apply itself
    // escapes
    if (auto *PartialApply = dyn_cast<PartialApplyInst>(User)) {
      auto Args = PartialApply->getArguments();
      auto Params = PartialApply->getSubstCalleeType()->getParameters();
      // A partial_apply supplies the *trailing* parameters of the callee,
      // so line up Args with the tail of the parameter list.
      Params = Params.slice(Params.size() - Args.size(), Args.size());
      if (ModConv.isSILIndirect(Params[Op->getOperandNumber() - 1])) {
        // Indirectly-passed argument: escapes only if the resulting closure
        // itself escapes. Recurse on the partial_apply value.
        if (partialApplyEscapes(PartialApply, /*examineApply = */ true))
          return true;
        continue;
      }
    }

    // Conservative default: any other capturing use is an escape.
    return true;
  }
  return false;
}
/// Redirect every project_box reachable from \p HeapBox (looking through
/// copy_value instructions) so its uses refer to \p StackBox instead.
static void replaceProjectBoxUsers(SILValue HeapBox, SILValue StackBox) {
  llvm::SmallVector<Operand *, 8> UsesToVisit(HeapBox->use_begin(),
                                              HeapBox->use_end());
  while (!UsesToVisit.empty()) {
    Operand *CurUse = UsesToVisit.pop_back_val();
    SILInstruction *UserInst = CurUse->getUser();

    // A copy of the box is transparent for this purpose: chase the uses of
    // the copy as well.
    if (auto *BoxCopy = dyn_cast<CopyValueInst>(UserInst)) {
      copy(BoxCopy->getUses(), std::back_inserter(UsesToVisit));
      continue;
    }

    // This may result in an alloc_stack being used by begin_access [dynamic].
    if (auto *Projection = dyn_cast<ProjectBoxInst>(UserInst))
      Projection->replaceAllUsesWith(StackBox);
  }
}
/// Scan \p Caller for supported closure-forming instructions and collect the
/// call sites at which they are passed to a known, defined callee.
///
/// \param Caller            The function whose instructions are scanned.
/// \param ClosureCandidates Out-parameter: one heap-allocated ClosureInfo per
///                          closure that has at least one usable call site.
///                          Ownership of the `new`-ed ClosureInfo objects
///                          passes to the caller via this vector.
/// \param MultipleClosureAI Out-parameter: full applies that received more
///                          than one candidate closure argument; these are
///                          recorded so they can be excluded (multiple-closure
///                          applies are not handled).
void ClosureSpecializer::gatherCallSites(
    SILFunction *Caller, llvm::SmallVectorImpl<ClosureInfo *> &ClosureCandidates,
    llvm::DenseSet<FullApplySite> &MultipleClosureAI) {
  // A set of apply inst that we have associated with a closure. We use this to
  // make sure that we do not handle call sites with multiple closure arguments.
  llvm::DenseSet<FullApplySite> VisitedAI;

  // For each basic block BB in Caller...
  for (auto &BB : *Caller) {
    // For each instruction II in BB...
    for (auto &II : BB) {
      // If II is not a closure that we support specializing, skip it...
      if (!isSupportedClosure(&II))
        continue;

      // Lazily created on the first usable call site for this closure, so
      // closures with no call sites allocate nothing.
      ClosureInfo *CInfo = nullptr;

      // Go through all uses of our closure.
      for (auto *Use : II.getUses()) {
        // If this use is not an apply inst or an apply inst with
        // substitutions, there is nothing interesting for us to do, so
        // continue...
        auto AI = FullApplySite::isa(Use->getUser());
        if (!AI || AI.hasSubstitutions())
          continue;

        // Check if we have already associated this apply inst with a closure to
        // be specialized. We do not handle applies that take in multiple
        // closures at this time.
        if (!VisitedAI.insert(AI).second) {
          MultipleClosureAI.insert(AI);
          continue;
        }

        // If AI does not have a function_ref definition as its callee, we can
        // not do anything here... so continue...
        SILFunction *ApplyCallee = AI.getReferencedFunction();
        if (!ApplyCallee || ApplyCallee->isExternalDeclaration())
          continue;

        // Ok, we know that we can perform the optimization but not whether or
        // not the optimization is profitable. Find the index of the argument
        // corresponding to our partial apply.
        Optional<unsigned> ClosureIndex;
        for (unsigned i = 0, e = AI.getNumArguments(); i != e; ++i) {
          if (AI.getArgument(i) != SILValue(&II))
            continue;
          ClosureIndex = i;
          DEBUG(llvm::dbgs() << " Found callsite with closure argument at "
                             << i << ": " << *AI.getInstruction());
          break;
        }

        // If we did not find an index, there is nothing further to do,
        // continue.
        if (!ClosureIndex.hasValue())
          continue;

        // Make sure that the Closure is invoked in the Apply's callee. We only
        // want to perform closure specialization if we know that we will be
        // able to change a partial_apply into an apply.
        //
        // TODO: Maybe just call the function directly instead of moving the
        // partial apply?
        SILValue Arg = ApplyCallee->getArgument(ClosureIndex.getValue());
        if (std::none_of(Arg->use_begin(), Arg->use_end(),
                         [&Arg](Operand *Op) -> bool {
                           auto UserAI = FullApplySite::isa(Op->getUser());
                           return UserAI && UserAI.getCallee() == Arg;
                         })) {
          continue;
        }

        // Argument indices count indirect results first; subtract them to get
        // the index into the callee's parameter list.
        auto NumIndirectResults =
            AI.getSubstCalleeType()->getNumIndirectResults();
        assert(ClosureIndex.getValue() >= NumIndirectResults);
        auto ClosureParamIndex = ClosureIndex.getValue() - NumIndirectResults;

        auto ParamInfo = AI.getSubstCalleeType()->getParameters();
        SILParameterInfo ClosureParamInfo = ParamInfo[ClosureParamIndex];

        // Get all non-failure exit BBs in the Apply Callee if our partial apply
        // is guaranteed. If we do not understand one of the exit BBs, bail.
        //
        // We need this to make sure that we insert a release in the appropriate
        // locations to balance the +1 from the creation of the partial apply.
        llvm::TinyPtrVector<SILBasicBlock *> NonFailureExitBBs;
        if (ClosureParamInfo.isGuaranteed() &&
            !findAllNonFailureExitBBs(ApplyCallee, NonFailureExitBBs)) {
          continue;
        }

        // Compute the final release points of the closure. We will insert
        // release of the captured arguments here.
        if (!CInfo) {
          CInfo = new ClosureInfo(&II);
          ValueLifetimeAnalysis VLA(CInfo->Closure);
          VLA.computeFrontier(CInfo->LifetimeFrontier,
                              ValueLifetimeAnalysis::AllowToModifyCFG);
        }

        // Now we know that CSDesc is profitable to specialize. Add it to our
        // call site list.
        CInfo->CallSites.push_back(
            CallSiteDescriptor(CInfo, AI, ClosureIndex.getValue(),
                               ClosureParamInfo, std::move(NonFailureExitBBs)));
      }
      if (CInfo)
        ClosureCandidates.push_back(CInfo);
    }
  }
}
// Find the final releases of the alloc_box along any given path. // These can include paths from a release back to the alloc_box in a // loop. static bool getFinalReleases(SILValue Box, llvm::SmallVectorImpl<SILInstruction *> &Releases) { llvm::SmallPtrSet<SILBasicBlock*, 16> LiveIn; llvm::SmallPtrSet<SILBasicBlock*, 16> UseBlocks; auto *DefBB = Box->getParentBlock(); auto seenRelease = false; SILInstruction *OneRelease = nullptr; // We'll treat this like a liveness problem where the alloc_box is // the def. Each block that has a use of the owning pointer has the // value live-in unless it is the block with the alloc_box. llvm::SmallVector<Operand *, 32> Worklist(Box->use_begin(), Box->use_end()); while (!Worklist.empty()) { auto *Op = Worklist.pop_back_val(); auto *User = Op->getUser(); auto *BB = User->getParent(); if (isa<ProjectBoxInst>(User)) continue; if (BB != DefBB) LiveIn.insert(BB); // Also keep track of the blocks with uses. UseBlocks.insert(BB); // If we have a copy value or a mark_uninitialized, add its uses to the work // list and continue. if (isa<MarkUninitializedInst>(User) || isa<CopyValueInst>(User)) { copy(cast<SingleValueInstruction>(User)->getUses(), std::back_inserter(Worklist)); continue; } // Try to speed up the trivial case of single release/dealloc. if (isa<StrongReleaseInst>(User) || isa<DeallocBoxInst>(User) || isa<DestroyValueInst>(User)) { if (!seenRelease) OneRelease = User; else OneRelease = nullptr; seenRelease = true; } } // Only a single release/dealloc? We're done! if (OneRelease) { Releases.push_back(OneRelease); return true; } propagateLiveness(LiveIn, DefBB); // Now examine each block we saw a use in. If it has no successors // that are in LiveIn, then the last use in the block is the final // release/dealloc. for (auto *BB : UseBlocks) if (!successorHasLiveIn(BB, LiveIn)) if (!addLastRelease(Box, BB, Releases)) return false; return true; }