/// ----------------------------------------------------------///
/// Argument Explosion transformation.                         ///
/// ----------------------------------------------------------///
bool FunctionSignatureTransform::ArgumentExplosionAnalyzeParameters() {
  // Did we decide we should optimize any parameter?
  bool SignatureOptimize = false;
  auto Args = F->begin()->getFunctionArguments();
  ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(RCIA->get(F), F);

  // Analyze the argument information.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    ArgumentDescriptor &A = ArgumentDescList[i];
    // Do not optimize argument.
    if (!A.canOptimizeLiveArg()) {
      continue;
    }

    A.ProjTree.computeUsesAndLiveness(A.Arg);
    A.Explode = A.shouldExplode(ArgToReturnReleaseMap);

    // Modified self argument.
    if (A.Explode && Args[i]->isSelf()) {
      shouldModifySelfArgument = true;
    }

    SignatureOptimize |= A.Explode;
  }
  return SignatureOptimize;
}
void FunctionSignatureTransform::DeadArgumentTransformFunction() {
  SILBasicBlock *BB = &*F->begin();
  for (const ArgumentDescriptor &AD : ArgumentDescList) {
    if (!AD.IsEntirelyDead)
      continue;
    eraseUsesOfValue(BB->getArgument(AD.Index));
  }
}
void FunctionSignatureTransform::DeadArgumentFinalizeOptimizedFunction() {
  auto *BB = &*NewF->begin();
  // Remove any dead argument starting from the last argument to the first.
  for (const ArgumentDescriptor &AD : reverse(ArgumentDescList)) {
    if (!AD.IsEntirelyDead)
      continue;
    BB->eraseArgument(AD.Arg->getIndex());
  }
}
void FunctionSignatureTransform::ArgumentExplosionFinalizeOptimizedFunction() {
  SILBasicBlock *BB = &*NewF->begin();
  SILBuilder Builder(BB->begin());
  Builder.setCurrentDebugScope(BB->getParent()->getDebugScope());
  unsigned TotalArgIndex = 0;
  for (ArgumentDescriptor &AD : ArgumentDescList) {
    // Simply continue if this argument is not exploded.
    if (!AD.Explode) {
      AIM[TotalArgIndex] = AD.Index;
      ++TotalArgIndex;
      continue;
    }

    // OK, we need to explode this argument.
    unsigned ArgOffset = ++TotalArgIndex;
    unsigned OldArgIndex = ArgOffset - 1;
    llvm::SmallVector<SILValue, 8> LeafValues;

    // We do this in the same order as leaf types since ProjTree expects that
    // the order of leaf values matches the order of leaf types.
    llvm::SmallVector<const ProjectionTreeNode *, 8> LeafNodes;
    AD.ProjTree.getLeafNodes(LeafNodes);
    for (auto *Node : LeafNodes) {
      auto OwnershipKind = *AD.getTransformedOwnershipKind(Node->getType());
      LeafValues.push_back(BB->insertFunctionArgument(
          ArgOffset++, Node->getType(), OwnershipKind,
          BB->getArgument(OldArgIndex)->getDecl()));
      AIM[TotalArgIndex - 1] = AD.Index;
      ++TotalArgIndex;
    }

    // Then go through the projection tree constructing aggregates and
    // replacing uses.
    AD.ProjTree.replaceValueUsesWithLeafUses(
        Builder, BB->getParent()->getLocation(), LeafValues);

    // We ignored debug_value uses when we constructed the new arguments. To
    // preserve as much information as possible, we construct a new value for
    // OrigArg from the leaf values and use that in place of OrigArg.
    SILValue NewOrigArgValue = AD.ProjTree.computeExplodedArgumentValue(
        Builder, BB->getParent()->getLocation(), LeafValues);

    // Replace all uses of the original arg with the new value.
    SILArgument *OrigArg = BB->getArgument(OldArgIndex);
    OrigArg->replaceAllUsesWith(NewOrigArgValue);

    // Now erase the old argument since it does not have any uses. We also
    // decrement TotalArgIndex since we have one less argument now.
    BB->eraseArgument(OldArgIndex);
    --TotalArgIndex;
  }
}
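// The index bookkeeping above is easy to get wrong, so here is a minimal,
// self-contained sketch of the same idea (illustrative only; `Param`,
// `explodeSignature`, and the LeafCount field are hypothetical helpers, not
// part of the SIL APIs). Each old parameter either survives unchanged or is
// replaced in place by its leaf parameters; NewToOldIndex plays the role of
// AIM, mapping every index in the new signature back to the old parameter it
// came from.
#include <map>
#include <vector>

struct Param {
  unsigned OldIndex;  // position in the original signature
  unsigned LeafCount; // number of leaves if exploded
  bool Explode;
};

static std::map<unsigned, unsigned>
explodeSignature(const std::vector<Param> &Params) {
  std::map<unsigned, unsigned> NewToOldIndex;
  unsigned NewIndex = 0;
  for (const Param &P : Params) {
    unsigned Count = P.Explode ? P.LeafCount : 1;
    for (unsigned i = 0; i != Count; ++i)
      NewToOldIndex[NewIndex++] = P.OldIndex;
  }
  return NewToOldIndex;
}

// For example, exploding the middle parameter of (a, b, c) into two leaves
// yields the mapping {0->0, 1->1, 2->1, 3->2}: both leaves map back to old
// index 1, and the old third parameter now lives at new index 3.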
static bool processFunction(SILFunction &Fn) {
  bool Changed = false;
  for (auto BI = Fn.begin(), BE = Fn.end(); BI != BE; ++BI) {
    auto II = BI->begin(), IE = BI->end();
    while (II != IE) {
      SILInstruction *Inst = &*II;

      DEBUG(llvm::dbgs() << "Visiting: " << *Inst);

      if (auto *CA = dyn_cast<CopyAddrInst>(Inst))
        if (expandCopyAddr(CA)) {
          ++II;
          CA->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *DA = dyn_cast<DestroyAddrInst>(Inst))
        if (expandDestroyAddr(DA)) {
          ++II;
          DA->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *CV = dyn_cast<RetainValueInst>(Inst))
        if (expandRetainValue(CV)) {
          ++II;
          CV->eraseFromParent();
          Changed = true;
          continue;
        }

      if (auto *DV = dyn_cast<ReleaseValueInst>(Inst))
        if (expandReleaseValue(DV)) {
          ++II;
          DV->eraseFromParent();
          Changed = true;
          continue;
        }

      ++II;
    }
  }
  return Changed;
}
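// The loop above relies on advancing the iterator past the current
// instruction *before* erasing it, so the erase never invalidates the
// iterator the loop keeps using. A minimal sketch of the same pattern on a
// plain std::list (illustrative only; no SIL types involved):
#include <list>

static bool erasePositives(std::list<int> &Values) {
  bool Changed = false;
  auto It = Values.begin(), End = Values.end();
  while (It != End) {
    auto Cur = It;
    if (*Cur > 0) {
      ++It;              // step off the element first...
      Values.erase(Cur); // ...then erase it safely
      Changed = true;
      continue;
    }
    ++It;
  }
  return Changed;
}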
/// ----------------------------------------------------------///
/// Owned to Guaranteed transformation.                        ///
/// ----------------------------------------------------------///
bool FunctionSignatureTransform::OwnedToGuaranteedAnalyzeParameters() {
  auto Args = F->begin()->getFunctionArguments();
  // A map from consumed SILArguments to the release associated with an
  // argument.
  //
  // TODO: The return block and throw block should really be abstracted away.
  ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(RCIA->get(F), F);
  ConsumedArgToEpilogueReleaseMatcher ArgToThrowReleaseMap(
      RCIA->get(F), F, ConsumedArgToEpilogueReleaseMatcher::ExitKind::Throw);

  // Did we decide we should optimize any parameter?
  bool SignatureOptimize = false;

  // Analyze the argument information.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    ArgumentDescriptor &A = ArgumentDescList[i];
    if (!A.canOptimizeLiveArg()) {
      continue;
    }

    // See if we can find a ref count equivalent strong_release or
    // release_value at the end of this function if our argument is an @owned
    // parameter.
    if (A.hasConvention(SILArgumentConvention::Direct_Owned)) {
      auto Releases = ArgToReturnReleaseMap.getReleasesForArgument(A.Arg);
      if (!Releases.empty()) {
        // If the function has a throw block we must also find a matching
        // release in the throw block.
        auto ReleasesInThrow = ArgToThrowReleaseMap.getReleasesForArgument(A.Arg);
        if (!ArgToThrowReleaseMap.hasBlock() || !ReleasesInThrow.empty()) {
          A.CalleeRelease = Releases;
          A.CalleeReleaseInThrowBlock = ReleasesInThrow;
          // We can convert this parameter to @guaranteed.
          A.OwnedToGuaranteed = true;
          SignatureOptimize = true;
        }
      }
    }

    // Modified self argument.
    if (A.OwnedToGuaranteed && Args[i]->isSelf()) {
      shouldModifySelfArgument = true;
    }
  }
  return SignatureOptimize;
}
static SILFunction *
moveFunctionBodyToNewFunctionWithName(SILFunction *F,
                                      const std::string &NewFName,
                                      SignatureOptimizer &Optimizer) {
  // First we create an empty function (i.e. no BB) whose function signature
  // has had its arity modified.
  //
  // We only do this to remove dead arguments. All other function signature
  // optimization is done later by modifying the function signature elements
  // themselves.
  SILFunction *NewF = Optimizer.createEmptyFunctionWithOptimizedSig(NewFName);

  // Then we transfer the body of F to NewF. At this point, the arguments of
  // the first BB will not match.
  NewF->spliceBody(F);
  // Do the same with the call graph.

  // Then perform any updates to the arguments of NewF.
  SILBasicBlock *NewFEntryBB = &*NewF->begin();
  MutableArrayRef<ArgumentDescriptor> ArgDescs = Optimizer.getArgDescList();
  unsigned ArgOffset = 0;
  SILBuilder Builder(NewFEntryBB->begin());
  Builder.setCurrentDebugScope(NewFEntryBB->getParent()->getDebugScope());
  for (auto &ArgDesc : ArgDescs) {
    // We always need to reset the insertion point in case we delete the first
    // instruction.
    Builder.setInsertionPoint(NewFEntryBB->begin());
    DEBUG(llvm::dbgs() << "Updating arguments at ArgOffset: " << ArgOffset
                       << " for: " << *ArgDesc.Arg);
    ArgOffset = ArgDesc.updateOptimizedBBArgs(Builder, NewFEntryBB, ArgOffset);
  }

  // Now generate the thunk body.
  SILBasicBlock *ThunkBody = F->createBasicBlock();
  for (auto &ArgDesc : ArgDescs) {
    ThunkBody->createBBArg(ArgDesc.Arg->getType(), ArgDesc.Decl);
  }
  createThunkBody(ThunkBody, NewF, Optimizer);

  F->setThunk(IsThunk);
  assert(F->getDebugScope()->Parent != NewF->getDebugScope()->Parent);

  return NewF;
}
bool FunctionSignatureTransform::ArgumentExplosionAnalyzeParameters() {
  // If we are not supposed to perform argument explosion, bail.
  if (FSODisableArgExplosion)
    return false;

  SILFunction *F = TransformDescriptor.OriginalFunction;
  // Did we decide we should optimize any parameter?
  bool SignatureOptimize = false;
  auto Args = F->begin()->getFunctionArguments();
  ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(
      RCIA->get(F), F, {SILArgumentConvention::Direct_Owned});

  // Analyze the argument information.
  for (unsigned i : indices(Args)) {
    ArgumentDescriptor &A = TransformDescriptor.ArgumentDescList[i];
    // If the argument is dead, there is no point in trying to explode it. The
    // dead argument pass will get it.
    if (A.IsEntirelyDead) {
      continue;
    }

    // Do not optimize argument.
    if (!A.canOptimizeLiveArg()) {
      continue;
    }

    // Explosion of generic parameters is not supported yet.
    if (A.Arg->getType().hasArchetype())
      continue;

    A.ProjTree.computeUsesAndLiveness(A.Arg);
    A.Explode = shouldExplode(A, ArgToReturnReleaseMap);

    // Modified self argument.
    if (A.Explode && Args[i]->isSelf()) {
      TransformDescriptor.shouldModifySelfArgument = true;
    }

    SignatureOptimize |= A.Explode;
  }
  return SignatureOptimize;
}
/// ----------------------------------------------------------///
/// Dead argument transformation.                              ///
/// ----------------------------------------------------------///
bool FunctionSignatureTransform::DeadArgumentAnalyzeParameters() {
  // Did we decide we should optimize any parameter?
  bool SignatureOptimize = false;
  auto Args = F->begin()->getFunctionArguments();

  // Analyze the argument information.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    ArgumentDescriptor &A = ArgumentDescList[i];
    if (!A.canOptimizeLiveArg()) {
      continue;
    }

    // Check whether argument is dead.
    if (!hasNonTrivialNonDebugUse(Args[i])) {
      A.IsEntirelyDead = true;
      SignatureOptimize = true;
      if (Args[i]->isSelf())
        shouldModifySelfArgument = true;
    }
  }
  return SignatureOptimize;
}
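// A minimal sketch of the "entirely dead" check used above (illustrative
// only; `Use` and `isArgumentDead` are made-up stand-ins, not SIL APIs). An
// argument counts as dead when every remaining use is a debug-only use, so
// dropping the argument cannot change program behavior.
#include <vector>

struct Use {
  bool IsDebugOnly; // e.g. a debug_value-style use that carries no semantics
};

static bool isArgumentDead(const std::vector<Use> &Uses) {
  for (const Use &U : Uses)
    if (!U.IsDebugOnly)
      return false; // a real use keeps the argument alive
  return true;      // only debug uses (or no uses at all): safe to drop
}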
static bool removeUnreachableBlocks(SILFunction &F, SILModule &M,
                                    UnreachableUserCodeReportingState *State) {
  if (F.empty())
    return false;

  SILBasicBlockSet Reachable;
  SmallVector<SILBasicBlock*, 128> Worklist;
  Worklist.push_back(&F.front());
  Reachable.insert(&F.front());

  // Collect all reachable blocks by walking the successors.
  do {
    SILBasicBlock *BB = Worklist.pop_back_val();
    for (auto SI = BB->succ_begin(), SE = BB->succ_end(); SI != SE; ++SI) {
      if (Reachable.insert(*SI).second)
        Worklist.push_back(*SI);
    }
  } while (!Worklist.empty());
  assert(Reachable.size() <= F.size());

  // If everything is reachable, we are done.
  if (Reachable.size() == F.size())
    return false;

  // Diagnose user written unreachable code.
  if (State) {
    for (auto BI = State->PossiblyUnreachableBlocks.begin(),
              BE = State->PossiblyUnreachableBlocks.end(); BI != BE; ++BI) {
      const SILBasicBlock *BB = *BI;
      if (!Reachable.count(BB)) {
        llvm::SmallPtrSet<const SILBasicBlock *, 1> visited;
        diagnoseUnreachableBlock(**BI, M, Reachable, State, BB, visited);
      }
    }
  }

  // Remove references from the dead blocks.
  for (auto I = F.begin(), E = F.end(); I != E; ++I) {
    SILBasicBlock *BB = &*I;
    if (Reachable.count(BB))
      continue;

    // Drop references to other blocks.
    recursivelyDeleteTriviallyDeadInstructions(BB->getTerminator(), true);
    NumInstructionsRemoved++;
  }

  // Delete dead instructions and everything that could become dead after
  // their deletion.
  llvm::SmallVector<SILInstruction*, 32> ToBeDeleted;
  for (auto BI = F.begin(), BE = F.end(); BI != BE; ++BI)
    if (!Reachable.count(&*BI))
      for (auto I = BI->begin(), E = BI->end(); I != E; ++I)
        ToBeDeleted.push_back(&*I);
  recursivelyDeleteTriviallyDeadInstructions(ToBeDeleted, true);
  NumInstructionsRemoved += ToBeDeleted.size();

  // Delete the dead blocks.
  for (auto I = F.begin(), E = F.end(); I != E;)
    if (!Reachable.count(&*I)) {
      I = F.getBlocks().erase(I);
      NumBlocksRemoved++;
    } else
      ++I;

  return true;
}
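// The reachability computation above is a standard worklist walk over block
// successors. A self-contained sketch of the same algorithm on a plain
// adjacency-list graph (illustrative only; block indices stand in for
// SILBasicBlocks):
#include <cstddef>
#include <unordered_set>
#include <vector>

static std::unordered_set<size_t>
computeReachable(const std::vector<std::vector<size_t>> &Successors,
                 size_t Entry) {
  std::unordered_set<size_t> Reachable;
  std::vector<size_t> Worklist;
  Worklist.push_back(Entry);
  Reachable.insert(Entry);
  while (!Worklist.empty()) {
    size_t BB = Worklist.back();
    Worklist.pop_back();
    for (size_t Succ : Successors[BB])
      if (Reachable.insert(Succ).second) // first time we see this block
        Worklist.push_back(Succ);
  }
  return Reachable;
}

// Any block index not in the returned set is unreachable from Entry and can
// be deleted, which is exactly what the pass does with SIL blocks.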
/// \brief Populate the body of the cloned closure, modifying instructions as
/// necessary. This is where we create the actual specialized BB Arguments.
void ClosureSpecCloner::populateCloned() {
  SILFunction *Cloned = getCloned();
  SILFunction *ClosureUser = CallSiteDesc.getApplyCallee();

  // Create arguments for the entry block.
  SILBasicBlock *ClosureUserEntryBB = &*ClosureUser->begin();
  SILBasicBlock *ClonedEntryBB = Cloned->createBasicBlock();

  SmallVector<SILValue, 4> entryArgs;
  entryArgs.reserve(ClosureUserEntryBB->getArguments().size());

  // Remove the closure argument.
  SILArgument *ClosureArg = nullptr;
  for (size_t i = 0, e = ClosureUserEntryBB->args_size(); i != e; ++i) {
    SILArgument *Arg = ClosureUserEntryBB->getArgument(i);
    if (i == CallSiteDesc.getClosureIndex()) {
      ClosureArg = Arg;
      entryArgs.push_back(SILValue());
      continue;
    }

    // Otherwise, create a new argument which copies the original argument.
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(Arg->getType(), Arg->getDecl());
    entryArgs.push_back(MappedValue);
  }

  // Next we need to add in any arguments that are not captured as arguments
  // to the cloned function.
  //
  // We do not insert the new mapped arguments into the value map since by
  // definition there is nothing in the partial apply user function that
  // references such arguments. After this pass is done the only thing that
  // will reference the arguments is the partial apply that we will create.
  SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee();
  auto ClosedOverFunConv = ClosedOverFun->getConventions();
  unsigned NumTotalParams = ClosedOverFunConv.getNumParameters();
  unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments();
  llvm::SmallVector<SILValue, 4> NewPAIArgs;
  for (auto &PInfo : ClosedOverFunConv.getParameters().slice(NumNotCaptured)) {
    auto paramTy = ClosedOverFunConv.getSILType(PInfo);
    SILValue MappedValue = ClonedEntryBB->createFunctionArgument(paramTy);
    NewPAIArgs.push_back(MappedValue);
  }

  SILBuilder &Builder = getBuilder();
  Builder.setInsertionPoint(ClonedEntryBB);

  // Clone FRI and PAI, and replace usage of the removed closure argument
  // with the result of the cloned PAI.
  SILValue FnVal =
      Builder.createFunctionRef(CallSiteDesc.getLoc(), ClosedOverFun);
  auto *NewClosure = CallSiteDesc.createNewClosure(Builder, FnVal, NewPAIArgs);

  // Clone a chain of ConvertFunctionInsts. This can create further
  // reabstraction partial_apply instructions.
  SmallVector<PartialApplyInst*, 4> NeedsRelease;
  SILValue ConvertedCallee = cloneCalleeConversion(
      CallSiteDesc.getClosureCallerArg(), NewClosure, Builder, NeedsRelease);

  // Make sure that we actually emit the releases for reabstraction thunks. We
  // have guaranteed earlier that we only allow reabstraction thunks if the
  // closure was passed trivial.
  assert(NeedsRelease.empty() || CallSiteDesc.isTrivialNoEscapeParameter());

  entryArgs[CallSiteDesc.getClosureIndex()] = ConvertedCallee;

  // Visit original BBs in depth-first preorder, starting with the
  // entry block, cloning all instructions and terminators.
  cloneFunctionBody(ClosureUser, ClonedEntryBB, entryArgs);

  // Then insert a release in all non-failure exit BBs if our partial apply
  // was guaranteed. This is because it was passed at +0 originally and we
  // need to balance the initial increment of the newly created closure(s).
  bool ClosureHasRefSemantics = CallSiteDesc.closureHasRefSemanticContext();
  if ((CallSiteDesc.isClosureGuaranteed() ||
       CallSiteDesc.isTrivialNoEscapeParameter()) &&
      (ClosureHasRefSemantics || !NeedsRelease.empty())) {
    for (SILBasicBlock *BB : CallSiteDesc.getNonFailureExitBBs()) {
      SILBasicBlock *OpBB = getOpBasicBlock(BB);
      TermInst *TI = OpBB->getTerminator();
      auto Loc = CleanupLocation::get(NewClosure->getLoc());

      // If we have an exit, we place the release right before it so we know
      // that it will be executed at the end of the epilogue.
      if (TI->isFunctionExiting()) {
        Builder.setInsertionPoint(TI);
        if (ClosureHasRefSemantics)
          Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                     Builder.getDefaultAtomicity());
        for (auto *PAI : NeedsRelease)
          Builder.createReleaseValue(Loc, SILValue(PAI),
                                     Builder.getDefaultAtomicity());
        continue;
      }

      // We use casts where findAllNonFailureExitBBs should have made sure
      // that this is true. This will ensure that the code is updated when we
      // hit the cast failure in debug builds.
      auto *Unreachable = cast<UnreachableInst>(TI);
      auto PrevIter = std::prev(SILBasicBlock::iterator(Unreachable));
      auto NoReturnApply = FullApplySite::isa(&*PrevIter);

      // We insert the release value right before the no-return apply so that
      // if the partial apply is passed into the no-return function as an
      // @owned value, we will retain the partial apply before we release it
      // and potentially eliminate it.
      Builder.setInsertionPoint(NoReturnApply.getInstruction());
      if (ClosureHasRefSemantics)
        Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                   Builder.getDefaultAtomicity());
      for (auto *PAI : NeedsRelease)
        Builder.createReleaseValue(Loc, SILValue(PAI),
                                   Builder.getDefaultAtomicity());
    }
  }
}
bool SILCombiner::doOneIteration(SILFunction &F, unsigned Iteration) {
  MadeChange = false;

  DEBUG(llvm::dbgs() << "\n\nSILCOMBINE ITERATION #" << Iteration << " on "
                     << F.getName() << "\n");

  // Add reachable instructions to our worklist.
  addReachableCodeToWorklist(&*F.begin());

  // Process until we run out of items in our worklist.
  while (!Worklist.isEmpty()) {
    SILInstruction *I = Worklist.removeOne();

    // When we erase an instruction, we use the map in the worklist to check if
    // the instruction is in the worklist. If it is, we replace it with null
    // instead of shifting all members of the worklist towards the front. This
    // check makes sure that if we run into any such residual null pointers, we
    // skip them.
    if (I == nullptr)
      continue;

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(llvm::dbgs() << "SC: DCE: " << *I << '\n');
      eraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeChange = true;
      continue;
    }

    // Check to see if we can instsimplify the instruction.
    if (SILValue Result = simplifyInstruction(I)) {
      ++NumSimplified;

      DEBUG(llvm::dbgs() << "SC: Simplify Old = " << *I << '\n'
                         << "    New = " << *Result << '\n');

      // Everything uses the new instruction now.
      replaceInstUsesWith(*I, Result);

      // Push the new instruction and any users onto the worklist.
      Worklist.addUsersToWorklist(Result);

      eraseInstFromFunction(*I);
      MadeChange = true;
      continue;
    }

    // If we have reached this point, all attempts to do simple simplifications
    // have failed. Prepare to SILCombine.
    Builder.setInsertionPoint(I);

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(llvm::raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(llvm::dbgs() << "SC: Visiting: " << OrigI << '\n');

    if (SILInstruction *Result = visit(I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        assert(&*std::prev(SILBasicBlock::iterator(I)) == Result &&
               "Expected new instruction inserted before existing instruction!");

        DEBUG(llvm::dbgs() << "SC: Old = " << *I << '\n'
                           << "    New = " << *Result << '\n');

        // Everything uses the new instruction now.
        replaceInstUsesWith(*I, Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.add(Result);
        Worklist.addUsersToWorklist(Result);

        eraseInstFromFunction(*I);
      } else {
        DEBUG(llvm::dbgs() << "SC: Mod = " << OrigI << '\n'
                           << "    New = " << *I << '\n');

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          eraseInstFromFunction(*I);
        } else {
          Worklist.add(I);
          Worklist.addUsersToWorklist(I);
        }
      }
      MadeChange = true;
    }

    // Our tracking list has been accumulating instructions created by the
    // SILBuilder during this iteration. Go through the tracking list and add
    // its contents to the worklist and then clear said list in preparation
    // for the next iteration.
    auto &TrackingList = *Builder.getTrackingList();
    for (SILInstruction *I : TrackingList) {
      DEBUG(llvm::dbgs() << "SC: add " << *I
                         << " from tracking list to worklist\n");
      Worklist.add(I);
    }
    TrackingList.clear();
  }

  return MadeChange;
}
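// The null-check at the top of the loop above exists because erasing an
// instruction "removes" it from the worklist by overwriting its slot with
// null rather than by shifting later entries forward. A minimal sketch of
// that tombstone scheme (illustrative only; `Item` stands in for
// SILInstruction and this class is not the real SILCombine worklist):
#include <unordered_map>
#include <vector>

struct Item;

class TombstoneWorklist {
  std::vector<Item *> List;
  std::unordered_map<Item *, size_t> Positions; // item -> index in List

public:
  void add(Item *I) {
    if (Positions.count(I))
      return; // already queued
    Positions[I] = List.size();
    List.push_back(I);
  }

  // O(1) removal: null out the slot instead of erasing from the middle.
  void remove(Item *I) {
    auto It = Positions.find(I);
    if (It == Positions.end())
      return;
    List[It->second] = nullptr;
    Positions.erase(It);
  }

  bool empty() const { return List.empty(); }

  // May return nullptr for a tombstoned slot; callers simply skip those,
  // which is the same pattern as the `if (I == nullptr) continue;` above.
  Item *pop() {
    Item *I = List.back();
    List.pop_back();
    if (I)
      Positions.erase(I);
    return I;
  }
};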
/// Analyze the destructor for the class of ARI to see if any instructions in
/// it could have side effects on the program outside the destructor. If none
/// do, then we can eliminate the destructor.
static bool doesDestructorHaveSideEffects(AllocRefInst *ARI) {
  SILFunction *Fn = getDestructor(ARI);
  // If we can't find a destructor then assume it has side effects.
  if (!Fn)
    return true;

  // A destructor only has one argument, self.
  assert(Fn->begin()->getNumBBArg() == 1 &&
         "Destructor should have only one argument, self.");
  SILArgument *Self = Fn->begin()->getBBArg(0);

  DEBUG(llvm::dbgs() << " Analyzing destructor.\n");

  // For each BB in the destructor...
  for (auto &BB : *Fn)
    // For each instruction I in BB...
    for (auto &I : BB) {
      DEBUG(llvm::dbgs() << " Visiting: " << I);

      // If I has no side effects, we can ignore it.
      if (!I.mayHaveSideEffects()) {
        DEBUG(llvm::dbgs() << " SAFE! Instruction has no side effects.\n");
        continue;
      }

      // RefCounting operations on Self are ok since we are already in the
      // destructor. RefCounting operations on other values could have side
      // effects though.
      if (auto *RefInst = dyn_cast<RefCountingInst>(&I)) {
        if (stripCasts(RefInst->getOperand(0)) == Self) {
          // For now all ref counting insts have 1 operand. Put in an assert
          // just in case.
          assert(RefInst->getNumOperands() == 1 &&
                 "Make sure RefInst only has one argument.");
          DEBUG(llvm::dbgs() << " SAFE! Ref count operation on Self.\n");
          continue;
        } else {
          DEBUG(llvm::dbgs() << " UNSAFE! Ref count operation not on self.\n");
          return true;
        }
      }

      // dealloc_stack can be ignored.
      if (isa<DeallocStackInst>(I)) {
        DEBUG(llvm::dbgs() << " SAFE! dealloc_stack can be ignored.\n");
        continue;
      }

      // dealloc_ref on self can be ignored, but dealloc_ref on anything else
      // cannot be eliminated.
      if (auto *DeallocRef = dyn_cast<DeallocRefInst>(&I)) {
        if (stripCasts(DeallocRef->getOperand()) == Self) {
          DEBUG(llvm::dbgs() << " SAFE! dealloc_ref on self.\n");
          continue;
        } else {
          DEBUG(llvm::dbgs() << " UNSAFE! dealloc_ref on value besides self.\n");
          return true;
        }
      }

      // Storing into the object can be ignored.
      if (auto *SI = dyn_cast<StoreInst>(&I))
        if (stripAddressProjections(SI->getDest()) == Self) {
          DEBUG(llvm::dbgs() << " SAFE! Instruction is a store into self.\n");
          continue;
        }

      DEBUG(llvm::dbgs() << " UNSAFE! Unknown instruction.\n");
      // Otherwise, we can't remove the deallocation completely.
      return true;
    }

  // We didn't find any side effects.
  return false;
}
/// \brief Inlines the callee of a given ApplyInst (which must be the value of
/// a FunctionRefInst referencing a function with a known body), into the
/// caller containing the ApplyInst, which must be the same function as
/// provided to the constructor of SILInliner. It only performs one step of
/// inlining: it does not recursively inline functions called by the callee.
///
/// It is the responsibility of the caller of this function to delete
/// the given ApplyInst when inlining is successful.
///
/// \returns true on success or false if it is unable to inline the function
/// (for any reason).
bool SILInliner::inlineFunction(FullApplySite AI, ArrayRef<SILValue> Args) {
  SILFunction *CalleeFunction = &Original;
  this->CalleeFunction = CalleeFunction;

  // Do not attempt to inline an apply into its parent function.
  if (AI.getFunction() == CalleeFunction)
    return false;

  SILFunction &F = getBuilder().getFunction();
  assert(AI.getFunction() && AI.getFunction() == &F &&
         "Inliner called on apply instruction in wrong function?");
  assert(((CalleeFunction->getRepresentation() !=
               SILFunctionTypeRepresentation::ObjCMethod &&
           CalleeFunction->getRepresentation() !=
               SILFunctionTypeRepresentation::CFunctionPointer) ||
          IKind == InlineKind::PerformanceInline) &&
         "Cannot inline Objective-C methods or C functions in mandatory "
         "inlining");

  CalleeEntryBB = &*CalleeFunction->begin();

  // Compute the SILLocation which should be used by all the inlined
  // instructions.
  if (IKind == InlineKind::PerformanceInline) {
    Loc = InlinedLocation::getInlinedLocation(AI.getLoc());
  } else {
    assert(IKind == InlineKind::MandatoryInline && "Unknown InlineKind.");
    Loc = MandatoryInlinedLocation::getMandatoryInlinedLocation(AI.getLoc());
  }

  auto AIScope = AI.getDebugScope();
  // FIXME: Turn this into an assertion instead.
  if (!AIScope)
    AIScope = AI.getFunction()->getDebugScope();

  if (IKind == InlineKind::MandatoryInline) {
    // Mandatory inlining: every instruction inherits scope/location
    // from the call site.
    CallSiteScope = AIScope;
  } else {
    // Performance inlining. Construct a proper inline scope pointing
    // back to the call site.
    CallSiteScope = new (F.getModule()) SILDebugScope(AI.getLoc(), &F, AIScope);
    assert(CallSiteScope->getParentFunction() == &F);
  }
  assert(CallSiteScope && "call site has no scope");

  // Increment the ref count for the inlined function, so it doesn't
  // get deleted before we can emit abstract debug info for it.
  CalleeFunction->setInlined();

  // If the caller's BB is not the last BB in the calling function, then keep
  // track of the next BB so we always insert new BBs before it; otherwise,
  // we just leave the new BBs at the end as they are by default.
  auto IBI = std::next(SILFunction::iterator(AI.getParent()));
  InsertBeforeBB = IBI != F.end() ? &*IBI : nullptr;

  // Clear argument map and map ApplyInst arguments to the arguments of the
  // callee's entry block.
  ValueMap.clear();
  assert(CalleeEntryBB->bbarg_size() == Args.size() &&
         "Unexpected number of arguments to entry block of function?");
  auto BAI = CalleeEntryBB->bbarg_begin();
  for (auto AI = Args.begin(), AE = Args.end(); AI != AE; ++AI, ++BAI)
    ValueMap.insert(std::make_pair(*BAI, *AI));

  InstructionMap.clear();
  BBMap.clear();
  // Do not allow the entry block to be cloned again.
  SILBasicBlock::iterator InsertPoint =
      SILBasicBlock::iterator(AI.getInstruction());
  BBMap.insert(std::make_pair(CalleeEntryBB, AI.getParent()));
  getBuilder().setInsertionPoint(InsertPoint);
  // Recursively visit callee's BB in depth-first preorder, starting with the
  // entry block, cloning all instructions other than terminators.
  visitSILBasicBlock(CalleeEntryBB);

  // If we're inlining into a normal apply and the callee's entry
  // block ends in a return, then we can avoid a split.
  if (auto nonTryAI = dyn_cast<ApplyInst>(AI)) {
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CalleeEntryBB->getTerminator())) {
      // Replace all uses of the apply instruction with the operands of the
      // return instruction, appropriately mapped.
      nonTryAI->replaceAllUsesWith(remapValue(RI->getOperand()));
      return true;
    }
  }

  // If we're inlining into a try_apply, we already have a return-to BB.
  SILBasicBlock *ReturnToBB;
  if (auto tryAI = dyn_cast<TryApplyInst>(AI)) {
    ReturnToBB = tryAI->getNormalBB();

  // Otherwise, split the caller's basic block to create a return-to BB.
  } else {
    SILBasicBlock *CallerBB = AI.getParent();
    // Split the BB and do NOT create a branch between the old and new
    // BBs; we will create the appropriate terminator manually later.
    ReturnToBB = CallerBB->splitBasicBlock(InsertPoint);
    // Place the return-to BB after all the other mapped BBs.
    if (InsertBeforeBB)
      F.getBlocks().splice(SILFunction::iterator(InsertBeforeBB),
                           F.getBlocks(), SILFunction::iterator(ReturnToBB));
    else
      F.getBlocks().splice(F.getBlocks().end(), F.getBlocks(),
                           SILFunction::iterator(ReturnToBB));

    // Create an argument on the return-to BB representing the returned value.
    auto *RetArg = new (F.getModule())
        SILArgument(ReturnToBB, AI.getInstruction()->getType());
    // Replace all uses of the ApplyInst with the new argument.
    AI.getInstruction()->replaceAllUsesWith(RetArg);
  }

  // Now iterate over the callee BBs and fix up the terminators.
  for (auto BI = BBMap.begin(), BE = BBMap.end(); BI != BE; ++BI) {
    getBuilder().setInsertionPoint(BI->second);

    // Modify return terminators to branch to the return-to BB, rather than
    // trying to clone the ReturnInst.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BI->first->getTerminator())) {
      auto returnedValue = remapValue(RI->getOperand());
      getBuilder().createBranch(Loc.getValue(), ReturnToBB, returnedValue);
      continue;
    }

    // Modify throw terminators to branch to the error-return BB, rather than
    // trying to clone the ThrowInst.
    if (ThrowInst *TI = dyn_cast<ThrowInst>(BI->first->getTerminator())) {
      if (auto *A = dyn_cast<ApplyInst>(AI)) {
        (void)A;
        assert(A->isNonThrowing() &&
               "apply of a function with error result must be non-throwing");
        getBuilder().createUnreachable(Loc.getValue());
        continue;
      }
      auto tryAI = cast<TryApplyInst>(AI);
      auto thrownValue = remapValue(TI->getOperand());
      getBuilder().createBranch(Loc.getValue(), tryAI->getErrorBB(),
                                thrownValue);
      continue;
    }

    // Otherwise use normal visitor, which clones the existing instruction
    // but remaps basic blocks and values.
    visit(BI->first->getTerminator());
  }

  return true;
}
/// \brief Populate the body of the cloned closure, modifying instructions as
/// necessary. This is where we create the actual specialized BB Arguments.
void ClosureSpecCloner::populateCloned() {
  SILFunction *Cloned = getCloned();
  SILFunction *ClosureUser = CallSiteDesc.getApplyCallee();

  // Create arguments for the entry block.
  SILBasicBlock *ClosureUserEntryBB = &*ClosureUser->begin();
  SILBasicBlock *ClonedEntryBB = Cloned->createBasicBlock();

  // Remove the closure argument.
  SILArgument *ClosureArg = nullptr;
  for (size_t i = 0, e = ClosureUserEntryBB->args_size(); i != e; ++i) {
    SILArgument *Arg = ClosureUserEntryBB->getArgument(i);
    if (i == CallSiteDesc.getClosureIndex()) {
      ClosureArg = Arg;
      continue;
    }

    // Otherwise, create a new argument which copies the original argument.
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(Arg->getType(), Arg->getDecl());
    ValueMap.insert(std::make_pair(Arg, MappedValue));
  }

  // Next we need to add in any arguments that are not captured as arguments
  // to the cloned function.
  //
  // We do not insert the new mapped arguments into the value map since by
  // definition there is nothing in the partial apply user function that
  // references such arguments. After this pass is done the only thing that
  // will reference the arguments is the partial apply that we will create.
  SILFunction *ClosedOverFun = CallSiteDesc.getClosureCallee();
  CanSILFunctionType ClosedOverFunTy = ClosedOverFun->getLoweredFunctionType();
  unsigned NumTotalParams = ClosedOverFunTy->getParameters().size();
  unsigned NumNotCaptured = NumTotalParams - CallSiteDesc.getNumArguments();
  llvm::SmallVector<SILValue, 4> NewPAIArgs;
  for (auto &PInfo : ClosedOverFunTy->getParameters().slice(NumNotCaptured)) {
    SILValue MappedValue =
        ClonedEntryBB->createFunctionArgument(PInfo.getSILType());
    NewPAIArgs.push_back(MappedValue);
  }

  SILBuilder &Builder = getBuilder();
  Builder.setInsertionPoint(ClonedEntryBB);

  // Clone FRI and PAI, and replace usage of the removed closure argument
  // with the result of the cloned PAI.
  SILValue FnVal =
      Builder.createFunctionRef(CallSiteDesc.getLoc(), ClosedOverFun);
  auto *NewClosure = CallSiteDesc.createNewClosure(Builder, FnVal, NewPAIArgs);
  ValueMap.insert(std::make_pair(ClosureArg, SILValue(NewClosure)));

  BBMap.insert(std::make_pair(ClosureUserEntryBB, ClonedEntryBB));
  // Recursively visit original BBs in depth-first preorder, starting with the
  // entry block, cloning all instructions other than terminators.
  visitSILBasicBlock(ClosureUserEntryBB);

  // Now iterate over the BBs and fix up the terminators.
  for (auto BI = BBMap.begin(), BE = BBMap.end(); BI != BE; ++BI) {
    Builder.setInsertionPoint(BI->second);
    visit(BI->first->getTerminator());
  }

  // Then insert a release in all non-failure exit BBs if our partial apply
  // was guaranteed. This is because it was passed at +0 originally and we
  // need to balance the initial increment of the newly created closure.
  if (CallSiteDesc.isClosureGuaranteed() &&
      CallSiteDesc.closureHasRefSemanticContext()) {
    for (SILBasicBlock *BB : CallSiteDesc.getNonFailureExitBBs()) {
      SILBasicBlock *OpBB = BBMap[BB];
      TermInst *TI = OpBB->getTerminator();
      auto Loc = CleanupLocation::get(NewClosure->getLoc());

      // If we have a return, we place the release right before it so we know
      // that it will be executed at the end of the epilogue.
      if (isa<ReturnInst>(TI)) {
        Builder.setInsertionPoint(TI);
        Builder.createReleaseValue(Loc, SILValue(NewClosure),
                                   Atomicity::Atomic);
        continue;
      }

      // We use casts where findAllNonFailureExitBBs should have made sure
      // that this is true. This will ensure that the code is updated when we
      // hit the cast failure in debug builds.
      auto *Unreachable = cast<UnreachableInst>(TI);
      auto PrevIter = std::prev(SILBasicBlock::iterator(Unreachable));
      auto NoReturnApply = FullApplySite::isa(&*PrevIter);

      // We insert the release value right before the no-return apply so that
      // if the partial apply is passed into the no-return function as an
      // @owned value, we will retain the partial apply before we release it
      // and potentially eliminate it.
      Builder.setInsertionPoint(NoReturnApply.getInstruction());
      Builder.createReleaseValue(Loc, SILValue(NewClosure), Atomicity::Atomic);
    }
  }
}