// Gather every action carrying the label `name`. The labels "pre" and "post"
// are reserved pseudo-edges with no backing action, so they yield an empty
// set. Using a label that matches no action is a hard error, reported against
// the enclosing function.
TinyPtrVector<Action *> RealizeRMC::collectEdges(StringRef name) {
  TinyPtrVector<Action *> found;
  if (name == "pre" || name == "post")
    return found;

  // Multiple blocks may share the same tag, so scan every action by name.
  // A name->actions map would be asymptotically better, but the linear scan
  // is unlikely to ever matter in practice.
  for (auto &action : actions_)
    if (action.name == name)
      found.push_back(&action);

  if (found.empty()) {
    errs() << "Error: use of nonexistent label '" << name
           << "' in function '" << func_.getName() << "'\n";
    rmc_error();
  }
  return found;
}
/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI     Dependent value, or NULL to propagate to all saved
///                dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  // Take a pointer to the entry; the iterator itself is not used past here.
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that
  // one new dependent (via FirstDeps) on the first worklist iteration.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation:
  // the dependency recorded above will be revisited once SVI gets a def.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate. SmallSetVector deduplicates entries
  // that are re-inserted while still pending.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    // First iteration (VNI non-null): propagate only to the newly added
    // dependent. VNI is then cleared so every later iteration uses the full
    // saved dependent list of the popped entry.
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = 0;

    SibValueInfo &SV = SVI->second;
    // Lazily cache the block containing the spill def.
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We
    // don't propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    // Loop depth of SV.SpillMBB, computed lazily below (~0u = not yet).
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
         DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors: one non-reload def clears the flag.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes
            // the interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make
          // sure that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.
          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to its own dependents.
      WorkList.insert(&*DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}
/// Find all closures that may be propagated into the given function-type value. /// /// Searches the use-def chain from the given value upward until a partial_apply /// is reached. Populates `results` with the set of partial_apply instructions. /// /// `funcVal` may be either a function type or an Optional function type. This /// might be called on a directly applied value or on a call argument, which may /// in turn be applied within the callee. void swift::findClosuresForFunctionValue( SILValue funcVal, TinyPtrVector<PartialApplyInst *> &results) { SILType funcTy = funcVal->getType(); // Handle `Optional<@convention(block) @noescape (_)->(_)>` if (auto optionalObjTy = funcTy.getOptionalObjectType()) funcTy = optionalObjTy; assert(funcTy.is<SILFunctionType>()); SmallVector<SILValue, 4> worklist; // Avoid exponential path exploration and prevent duplicate results. llvm::SmallDenseSet<SILValue, 8> visited; auto worklistInsert = [&](SILValue V) { if (visited.insert(V).second) worklist.push_back(V); }; worklistInsert(funcVal); while (!worklist.empty()) { SILValue V = worklist.pop_back_val(); if (auto *I = V->getDefiningInstruction()) { // Look through copies, borrows, and conversions. // // Handle copy_block and copy_block_without_actually_escaping before // calling findClosureStoredIntoBlock. if (SingleValueInstruction *SVI = getSingleValueCopyOrCast(I)) { worklistInsert(SVI->getOperand(0)); continue; } } // Look through Optionals. if (V->getType().getOptionalObjectType()) { auto *EI = dyn_cast<EnumInst>(V); if (EI && EI->hasOperand()) { worklistInsert(EI->getOperand()); } // Ignore the .None case. continue; } // Look through Phis. // // This should be done before calling findClosureStoredIntoBlock. if (auto *arg = dyn_cast<SILPhiArgument>(V)) { SmallVector<std::pair<SILBasicBlock *, SILValue>, 2> blockArgs; arg->getIncomingPhiValues(blockArgs); for (auto &blockAndArg : blockArgs) worklistInsert(blockAndArg.second); continue; } // Look through ObjC closures. 
auto fnType = V->getType().getAs<SILFunctionType>(); if (fnType && fnType->getRepresentation() == SILFunctionTypeRepresentation::Block) { if (SILValue storedClosure = findClosureStoredIntoBlock(V)) worklistInsert(storedClosure); continue; } if (auto *PAI = dyn_cast<PartialApplyInst>(V)) { SILValue thunkArg = isPartialApplyOfReabstractionThunk(PAI); if (thunkArg) { // Handle reabstraction thunks recursively. This may reabstract over // @convention(block). worklistInsert(thunkArg); continue; } results.push_back(PAI); continue; } // Ignore other unrecognized values that feed this applied argument. } }
/// Check one use of a value that flows out of a @noescape partial_apply,
/// recursively following transparent uses via `followUses`. Ends the
/// traversal at recognized sinks (applies, block-storage init) and aborts
/// with llvm_unreachable on any use that escapes the @noescape contract.
static void checkNoEscapePartialApplyUse(Operand *oper, FollowUse followUses) {
  SILInstruction *user = oper->getUser();

  // Ignore uses that are totally uninteresting.
  if (isIncidentalUse(user) || onlyAffectsRefCount(user))
    return;

  // Before checking conversions in general below (getSingleValueCopyOrCast),
  // check for convert_function to [without_actually_escaping]. Assume such
  // conversions are not actually escaping without following their uses.
  if (auto *CFI = dyn_cast<ConvertFunctionInst>(user)) {
    if (CFI->withoutActuallyEscaping())
      return;
  }

  // Look through copies, borrows, and conversions.
  //
  // Note: This handles ConversionInst, which already includes everything in
  // swift::stripConvertFunctions.
  if (SingleValueInstruction *copy = getSingleValueCopyOrCast(user)) {
    // Only follow the copied operand. Other operands are incidental,
    // as in the second operand of mark_dependence.
    if (oper->getOperandNumber() == 0)
      followUses(copy);
    return;
  }

  switch (user->getKind()) {
  default:
    // Unrecognized instruction: fall through to the ApplySite / diagnostic
    // handling after the switch.
    break;

  // Look through Optionals.
  case SILInstructionKind::EnumInst:
    // @noescape block storage can be passed as an Optional (Nullable).
    followUses(cast<EnumInst>(user));
    return;

  // Look through Phis.
  case SILInstructionKind::BranchInst: {
    const SILPhiArgument *arg = cast<BranchInst>(user)->getArgForOperand(oper);
    followUses(arg);
    return;
  }
  case SILInstructionKind::CondBranchInst: {
    const SILPhiArgument *arg =
        cast<CondBranchInst>(user)->getArgForOperand(oper);
    if (arg) // If the use isn't the branch condition, follow it.
      followUses(arg);
    return;
  }

  // Look through ObjC closures.
  case SILInstructionKind::StoreInst:
    if (oper->getOperandNumber() == StoreInst::Src) {
      if (auto *PBSI = dyn_cast<ProjectBlockStorageInst>(
              cast<StoreInst>(user)->getDest())) {
        SILValue storageAddr = PBSI->getOperand();
        // The closure is stored to block storage. Recursively visit all
        // uses of any initialized block storage values derived from this
        // storage address.
        for (Operand *oper : storageAddr->getUses()) {
          if (auto *IBS =
                  dyn_cast<InitBlockStorageHeaderInst>(oper->getUser()))
            followUses(IBS);
        }
        return;
      }
    }
    // Stores to anything other than block storage fall through to the
    // diagnostic handling after the switch.
    break;

  case SILInstructionKind::IsEscapingClosureInst:
    // May be generated by withoutActuallyEscaping.
    return;

  case SILInstructionKind::PartialApplyInst: {
    // Recurse through partial_apply to handle special cases before handling
    // ApplySites in general below.
    PartialApplyInst *PAI = cast<PartialApplyInst>(user);
    // Use the same logic as checkForViolationAtApply applied to a def-use
    // traversal.
    //
    // checkForViolationAtApply recurses through partial_apply chains.
    if (oper->get() == PAI->getCallee()) {
      followUses(PAI);
      return;
    }
    // checkForViolationAtApply also uses findClosuresForAppliedArg which in
    // turn checks isPartialApplyOfReabstractionThunk.
    //
    // A closure with @inout_aliasable arguments may be applied to a
    // thunk as "escaping", but as long as the thunk is only used as a
    // '@noescape" type then it is safe.
    if (isPartialApplyOfReabstractionThunk(PAI)) {
      // Don't follow thunks that were generated by withoutActuallyEscaping.
      SILFunction *thunkDef = PAI->getReferencedFunction();
      if (!thunkDef->isWithoutActuallyEscapingThunk())
        followUses(PAI);
      return;
    }
    // Handle this use like a normal applied argument.
    break;
  }
  };

  // Handle ApplySites in general after checking PartialApply above.
  if (isa<ApplySite>(user)) {
    SILValue arg = oper->get();
    auto argumentFnType = getSILFunctionTypeForValue(arg);
    if (argumentFnType && argumentFnType->isNoEscape()) {
      // Verify that the inverse operation, finding a partial_apply from a
      // @noescape argument, is consistent.
      TinyPtrVector<PartialApplyInst *> partialApplies;
      findClosuresForFunctionValue(arg, partialApplies);
      assert(!partialApplies.empty() &&
             "cannot find partial_apply from @noescape function argument");
      return;
    }
    llvm::dbgs() << "Applied argument must be @noescape function type: "
                 << *arg;
  } else
    llvm::dbgs() << "Unexpected partial_apply use: " << *user;

  llvm_unreachable("A partial_apply with @inout_aliasable may only be "
                   "used as a @noescape function type argument.");
}