/// \brief Find an insertion point that dominates all uses.
Instruction *ConstantHoisting::
findConstantInsertionPoint(const ConstantInfo &ConstInfo) const {
  assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");
  // Collect all IDoms.
  SmallPtrSet<BasicBlock *, 8> BBs;
  for (auto const &RCI : ConstInfo.RebasedConstants)
    BBs.insert(getIDom(RCI));

  assert(!BBs.empty() && "No dominators!?");

  if (BBs.count(Entry))
    return &Entry->front();

  while (BBs.size() >= 2) {
    BasicBlock *BB, *BB1, *BB2;
    BB1 = *BBs.begin();
    BB2 = *std::next(BBs.begin());
    BB = DT->findNearestCommonDominator(BB1, BB2);
    if (BB == Entry)
      return &Entry->front();
    BBs.erase(BB1);
    BBs.erase(BB2);
    BBs.insert(BB);
  }
  assert((BBs.size() == 1) && "Expected only one element.");
  Instruction &FirstInst = (*BBs.begin())->front();
  return findMatInsertPt(&FirstInst);
}
bool UnreachableBlockElim::runOnFunction(Function &F) {
  SmallPtrSet<BasicBlock*, 8> Reachable;

  // Mark all reachable blocks.
  for (df_ext_iterator<Function*, SmallPtrSet<BasicBlock*, 8> >
       I = df_ext_begin(&F, Reachable), E = df_ext_end(&F, Reachable);
       I != E; ++I)
    /* Mark all reachable blocks */;

  // Loop over all dead blocks, remembering them and deleting all instructions
  // in them.
  std::vector<BasicBlock*> DeadBlocks;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
    if (!Reachable.count(I)) {
      BasicBlock *BB = I;
      DeadBlocks.push_back(BB);
      while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
        PN->replaceAllUsesWith(Constant::getNullValue(PN->getType()));
        BB->getInstList().pop_front();
      }
      for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
        (*SI)->removePredecessor(BB);
      BB->dropAllReferences();
    }

  // Actually remove the blocks now.
  ProfileInfo *PI = getAnalysisIfAvailable<ProfileInfo>();
  for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i) {
    if (PI) PI->removeBlock(DeadBlocks[i]);
    DeadBlocks[i]->eraseFromParent();
  }

  return DeadBlocks.size();
}
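// The df_ext_* iterators above populate `Reachable` purely as a side effect of
// the traversal; the loop body is intentionally empty. A minimal standalone
// sketch of the same visited-set DFS pattern, using plain std containers and a
// hypothetical integer adjacency list instead of LLVM's CFG types:

#include <unordered_map>
#include <unordered_set>
#include <vector>

using Block = int; // stand-in for BasicBlock*
using Graph = std::unordered_map<Block, std::vector<Block>>;

// Returns the set of blocks reachable from Entry, analogous to what
// df_ext_begin/df_ext_end record in their external set.
std::unordered_set<Block> markReachable(const Graph &G, Block Entry) {
  std::unordered_set<Block> Reachable;
  std::vector<Block> Worklist{Entry};
  while (!Worklist.empty()) {
    Block B = Worklist.back();
    Worklist.pop_back();
    if (!Reachable.insert(B).second)
      continue; // already visited
    auto It = G.find(B);
    if (It != G.end())
      for (Block Succ : It->second)
        Worklist.push_back(Succ);
  }
  return Reachable;
}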
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) {
  SmallPtrSet<const MachineBasicBlock*, 8> seen;

  for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end();
       BBI != BBE && BBI->isPHI(); ++BBI) {
    seen.clear();

    for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
      unsigned Reg = BBI->getOperand(i).getReg();
      const MachineBasicBlock *Pre = BBI->getOperand(i + 1).getMBB();
      if (!Pre->isSuccessor(MBB))
        continue;
      seen.insert(Pre);
      BBInfo &PrInfo = MBBInfoMap[Pre];
      if (PrInfo.reachable && !PrInfo.isLiveOut(Reg))
        report("PHI operand is not live-out from predecessor",
               &BBI->getOperand(i), i);
    }

    // Did we see all predecessors?
    for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(),
         PrE = MBB->pred_end(); PrI != PrE; ++PrI) {
      if (!seen.count(*PrI)) {
        report("Missing PHI operand", BBI);
        *OS << "BB#" << (*PrI)->getNumber()
            << " is a predecessor according to the CFG.\n";
      }
    }
  }
}
static void EliminateMultipleEntryLoops(MachineFunction &MF,
                                        const MachineLoopInfo &MLI) {
  SmallPtrSet<MachineBasicBlock *, 8> InSet;
  for (scc_iterator<MachineFunction *> I = scc_begin(&MF), E = scc_end(&MF);
       I != E; ++I) {
    const std::vector<MachineBasicBlock *> &CurrentSCC = *I;

    // Skip trivial SCCs.
    if (CurrentSCC.size() == 1)
      continue;

    InSet.insert(CurrentSCC.begin(), CurrentSCC.end());
    MachineBasicBlock *Header = nullptr;
    for (MachineBasicBlock *MBB : CurrentSCC) {
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (InSet.count(Pred))
          continue;
        if (!Header) {
          Header = MBB;
          break;
        }
        // TODO: Implement multiple-entry loops.
        report_fatal_error("multiple-entry loops are not supported yet");
      }
    }
    assert(MLI.isLoopHeader(Header));
    InSet.clear();
  }
}
/// isLiveInButUnusedBefore - Return true if the register is live into the MBB
/// and not used before it reaches the MI that defines the register.
static bool isLiveInButUnusedBefore(unsigned Reg, MachineInstr *MI,
                                    MachineBasicBlock *MBB,
                                    const TargetRegisterInfo *TRI,
                                    MachineRegisterInfo *MRI) {
  // First check if register is livein.
  bool isLiveIn = false;
  for (MachineBasicBlock::const_livein_iterator I = MBB->livein_begin(),
       E = MBB->livein_end(); I != E; ++I)
    if (Reg == *I || TRI->isSuperRegister(Reg, *I)) {
      isLiveIn = true;
      break;
    }
  if (!isLiveIn)
    return false;

  // Is there any use of it before the specified MI?
  SmallPtrSet<MachineInstr*, 4> UsesInMBB;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
       UE = MRI->use_end(); UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    if (UseMO.isReg() && UseMO.isUndef())
      continue;
    MachineInstr *UseMI = &*UI;
    if (UseMI->getParent() == MBB)
      UsesInMBB.insert(UseMI);
  }
  if (UsesInMBB.empty())
    return true;

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MI; I != E; ++I)
    if (UsesInMBB.count(&*I))
      return false;
  return true;
}
/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
/// reduction can be represented.
static void collectCastsToIgnore(Loop *TheLoop, Instruction *Exit,
                                 Type *RecurrenceType,
                                 SmallPtrSetImpl<Instruction *> &Casts) {
  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(Exit);

  while (!Worklist.empty()) {
    Instruction *Val = Worklist.pop_back_val();
    Visited.insert(Val);
    if (auto *Cast = dyn_cast<CastInst>(Val))
      if (Cast->getSrcTy() == RecurrenceType) {
        // If the source type of a cast instruction is equal to the recurrence
        // type, it will be eliminated, and should be ignored in the vectorizer
        // cost model.
        Casts.insert(Cast);
        continue;
      }

    // Add all operands to the work list if they are loop-varying values that
    // we haven't yet visited.
    for (Value *O : cast<User>(Val)->operands())
      if (auto *I = dyn_cast<Instruction>(O))
        if (TheLoop->contains(I) && !Visited.count(I))
          Worklist.push_back(I);
  }
}
bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
                                             unsigned Count) {
  // Scroll past alloca instructions.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP)) ++IP;

  // Search for initializers in the initial BB.
  SmallPtrSet<AllocaInst*,16> InitedRoots;
  for (; !CouldBecomeSafePoint(IP); ++IP)
    if (StoreInst *SI = dyn_cast<StoreInst>(IP))
      if (AllocaInst *AI =
          dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
        InitedRoots.insert(AI);

  // Add root initializers.
  bool MadeChange = false;

  for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
    if (!InitedRoots.count(*I)) {
      StoreInst* SI = new StoreInst(ConstantPointerNull::get(cast<PointerType>(
                        cast<PointerType>((*I)->getType())->getElementType())),
                        *I);
      SI->insertAfter(*I);
      MadeChange = true;
    }

  return MadeChange;
}
bool LowerIntrinsics::InsertRootInitializers(Function &F, Instruction **Roots,
                                             unsigned Count) {
  // Scroll past alloca instructions.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP)) ++IP;

  // Search for initializers in the initial BB.
  SmallPtrSet<AllocaInst*,16> InitedRoots;
  for (; !CouldBecomeSafePoint(IP); ++IP)
    if (StoreInst *SI = dyn_cast<StoreInst>(IP))
      if (AllocaInst *AI =
          dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
        InitedRoots.insert(AI);

  // Add root initializers.
  bool MadeChange = false;

  for (Instruction **II = Roots, **IE = Roots + Count; II != IE; ++II) {
    // Trace back through GEPs to find the actual alloca.
    Instruction *I = *II;
    while (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
      I = cast<Instruction>(GEP->getPointerOperand());
    AllocaInst *AI = cast<AllocaInst>(I);

    if (!InitedRoots.count(AI)) {
      Type *ElemTy = cast<PointerType>((*II)->getType())->getElementType();
      PointerType *PElemTy = cast<PointerType>(ElemTy);
      StoreInst* SI = new StoreInst(ConstantPointerNull::get(PElemTy), *II);
      SI->insertAfter(*II);
      MadeChange = true;
    }
  }

  return MadeChange;
}
/// \brief Checks if the padding bytes of an argument could be accessed.
bool ArgPromotion::canPaddingBeAccessed(Argument *arg) {
  assert(arg->hasByValAttr());

  // Track all the pointers to the argument to make sure they are not captured.
  SmallPtrSet<Value *, 16> PtrValues;
  PtrValues.insert(arg);

  // Track all of the stores.
  SmallVector<StoreInst *, 16> Stores;

  // Scan through the uses recursively to make sure the pointer is always used
  // sanely.
  SmallVector<Value *, 16> WorkList;
  WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
  while (!WorkList.empty()) {
    Value *V = WorkList.back();
    WorkList.pop_back();
    if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
      if (PtrValues.insert(V).second)
        WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
    } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
      Stores.push_back(Store);
    } else if (!isa<LoadInst>(V)) {
      return true;
    }
  }

  // Check to make sure the pointers aren't captured.
  for (StoreInst *Store : Stores)
    if (PtrValues.count(Store->getValueOperand()))
      return true;

  return false;
}
bool CfgNaive::runOnFunction(Function &F) {
  errs() << F.getName() << "\n";

  SmallPtrSet<BasicBlock*, 8> visitedBlocks;

  // Mark all reachable blocks.
  for (df_ext_iterator<Function*, SmallPtrSet<BasicBlock*, 8>>
       currentBlock = df_ext_begin(&F, visitedBlocks),
       endBlock = df_ext_end(&F, visitedBlocks);
       currentBlock != endBlock; currentBlock++) {
    // Do nothing; the iterator marks visited nodes automatically.
  }

  // Build the set of unreachable blocks.
  std::vector<BasicBlock*> unreachableBlocks;
  for (Function::iterator currentBlock = F.begin(), endBlock = F.end();
       currentBlock != endBlock; currentBlock++) {
    if (visitedBlocks.count(currentBlock) == 0) {
      unreachableBlocks.push_back(currentBlock);
    }
  }

  // Remove the unreachable blocks.
  for (int i = 0, e = unreachableBlocks.size(); i != e; i++) {
    errs() << unreachableBlocks[i]->getName() << " is unreachable\n";
    unreachableBlocks[i]->eraseFromParent();
  }

  bool hasModifiedBlocks = (unreachableBlocks.size() > 0);
  return hasModifiedBlocks;
}
static bool bothUsedInPHI(const MachineBasicBlock &A,
                          const SmallPtrSet<MachineBasicBlock *, 8> &SuccsB) {
  for (MachineBasicBlock *BB : A.successors())
    if (SuccsB.count(BB) && !BB->empty() && BB->begin()->isPHI())
      return true;

  return false;
}
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MRI = &mf.getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();

  const unsigned NumRegs = TRI->getNumRegs();
  PhysRegDef.assign(NumRegs, nullptr);
  PhysRegUse.assign(NumRegs, nullptr);
  PHIVarInfo.resize(MF->getNumBlockIDs());
  PHIJoins.clear();

  // FIXME: LiveIntervals will be updated to remove its dependence on
  // LiveVariables to improve compilation time and eliminate bizarre pass
  // dependencies. Until then, we can't change much in -O0.
  if (!MRI->isSSA())
    report_fatal_error("regalloc=... not currently supported with -O0");

  analyzePHINodes(mf);

  // Calculate live variable information in depth first order on the CFG of the
  // function. This guarantees that we will see the definition of a virtual
  // register before its uses due to dominance properties of SSA (except for
  // PHI nodes, which are treated as a special case).
  MachineBasicBlock *Entry = &MF->front();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  for (MachineBasicBlock *MBB : depth_first_ext(Entry, Visited)) {
    runOnBlock(MBB, NumRegs);

    PhysRegDef.assign(NumRegs, nullptr);
    PhysRegUse.assign(NumRegs, nullptr);
  }

  // Convert and transfer the dead / killed information we have gathered into
  // VirtRegInfo onto MI's.
  for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
    const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
      if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
        VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
      else
        VirtRegInfo[Reg].Kills[j]->addRegisterKilled(Reg, TRI);
  }

  // Check to make sure there are no unreachable blocks in the MC CFG for the
  // function. If there are, it is due to a bug in the instruction selector or
  // some other part of the code generator.
#ifndef NDEBUG
  for (MachineFunction::iterator i = MF->begin(), e = MF->end(); i != e; ++i)
    assert(Visited.count(&*i) != 0 && "unreachable basic block found");
#endif

  PhysRegDef.clear();
  PhysRegUse.clear();
  PHIVarInfo.clear();

  return false;
}
/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.
bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
                                   MachineInstr *CSMI, MachineInstr *MI) {
  // FIXME: Heuristics that work around the lack of live range splitting.

  // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
  // an immediate predecessor. We don't want to increase register pressure and
  // end up causing other computation to be spilled.
  if (MI->getDesc().isAsCheapAsAMove()) {
    MachineBasicBlock *CSBB = CSMI->getParent();
    MachineBasicBlock *BB = MI->getParent();
    if (CSBB != BB && !CSBB->isSuccessor(BB))
      return false;
  }

  // Heuristics #2: If the expression doesn't use a vr and the only uses of
  // the redundant computation are copies, do not cse.
  bool HasVRegUse = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isUse() &&
        TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      HasVRegUse = true;
      break;
    }
  }
  if (!HasVRegUse) {
    bool HasNonCopyUse = false;
    for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
      MachineInstr *Use = &*I;
      // Ignore copies.
      if (!Use->isCopyLike()) {
        HasNonCopyUse = true;
        break;
      }
    }
    if (!HasNonCopyUse)
      return false;
  }

  // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
  // it unless the defined value is already used in the BB of the new use.
  bool HasPHI = false;
  SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
       E = MRI->use_nodbg_end(); I != E; ++I) {
    MachineInstr *Use = &*I;
    HasPHI |= Use->isPHI();
    CSBBs.insert(Use->getParent());
  }

  if (!HasPHI)
    return true;
  return CSBBs.count(MI->getParent());
}
/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDF's, and the standard SSA construction algorithm. Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock*, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in. These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Compute the locations where PhiNodes need to be inserted. Look at the
  // dominance frontier of EACH basic-block we have a write in.
  unsigned CurrentVersion = 0;
  SmallPtrSet<PHINode*, 16> InsertedPHINodes;
  std::vector<std::pair<unsigned, BasicBlock*> > DFBlocks;
  while (!Info.DefiningBlocks.empty()) {
    BasicBlock *BB = Info.DefiningBlocks.back();
    Info.DefiningBlocks.pop_back();

    // Look up the DF for this write, add it to defining blocks.
    DominanceFrontier::const_iterator it = DF.find(BB);
    if (it == DF.end()) continue;

    const DominanceFrontier::DomSetType &S = it->second;

    // In theory we don't need the indirection through the DFBlocks vector.
    // In practice, the order of calling QueuePhiNode would depend on the
    // (unspecified) ordering of basic blocks in the dominance frontier,
    // which would give PHI nodes non-deterministic subscripts. Fix this by
    // processing blocks in order of their occurrence in the function.
    for (DominanceFrontier::DomSetType::const_iterator P = S.begin(),
         PE = S.end(); P != PE; ++P) {
      // If the frontier block is not in the live-in set for the alloca, don't
      // bother processing it.
      if (!LiveInBlocks.count(*P))
        continue;

      DFBlocks.push_back(std::make_pair(BBNumbers[*P], *P));
    }

    // Sort by block ordering in the function.
    if (DFBlocks.size() > 1)
      std::sort(DFBlocks.begin(), DFBlocks.end());

    for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i) {
      BasicBlock *BB = DFBlocks[i].second;
      if (QueuePhiNode(BB, AllocaNum, CurrentVersion, InsertedPHINodes))
        Info.DefiningBlocks.push_back(BB);
    }
    DFBlocks.clear();
  }
}
/// Recursively traverse the conformance lists to determine the sole conforming
/// class, struct or enum type.
NominalTypeDecl *
ProtocolConformanceAnalysis::findSoleConformingType(ProtocolDecl *Protocol) {
  /// First check in the SoleConformingTypeCache.
  auto SoleConformingTypeIt = SoleConformingTypeCache.find(Protocol);
  if (SoleConformingTypeIt != SoleConformingTypeCache.end())
    return SoleConformingTypeIt->second;

  SmallVector<ProtocolDecl *, 8> PDWorkList;
  SmallPtrSet<ProtocolDecl *, 8> VisitedPDs;
  NominalTypeDecl *SoleConformingNTD = nullptr;
  PDWorkList.push_back(Protocol);
  while (!PDWorkList.empty()) {
    auto *PD = PDWorkList.pop_back_val();
    // Protocols must have internal or lower access.
    if (PD->getEffectiveAccess() > AccessLevel::Internal) {
      return nullptr;
    }
    VisitedPDs.insert(PD);
    auto NTDList = getConformances(PD);
    for (auto *ConformingNTD : NTDList) {
      // Recurse on protocol types.
      if (auto *Proto = dyn_cast<ProtocolDecl>(ConformingNTD)) {
        // Ignore visited protocol decls.
        if (!VisitedPDs.count(Proto))
          PDWorkList.push_back(Proto);
      } else {
        // Classes, structs and enums are added here.
        // Bail if more than one conforming type was found.
        if (SoleConformingNTD && ConformingNTD != SoleConformingNTD) {
          return nullptr;
        } else {
          SoleConformingNTD = ConformingNTD;
        }
      }
    }
  }

  // Bail if we did not find a sole conforming type.
  if (!SoleConformingNTD)
    return nullptr;

  // Generic declarations are ignored.
  if (SoleConformingNTD->isGenericContext()) {
    return nullptr;
  }

  // Populate the SoleConformingTypeCache.
  SoleConformingTypeCache.insert(std::pair<ProtocolDecl *, NominalTypeDecl *>(
      Protocol, SoleConformingNTD));

  // Return SoleConformingNTD.
  return SoleConformingNTD;
}
static bool bothUsedInPHI(const MachineBasicBlock &A,
                          SmallPtrSet<MachineBasicBlock*, 8> SuccsB) {
  for (MachineBasicBlock::const_succ_iterator SI = A.succ_begin(),
       SE = A.succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *BB = *SI;
    if (SuccsB.count(BB) && !BB->empty() && BB->begin()->isPHI())
      return true;
  }

  return false;
}
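// Note: this variant takes SuccsB by value, which copies the whole set on
// every call; the range-based version of bothUsedInPHI earlier in this
// collection passes it as const SmallPtrSet<MachineBasicBlock *, 8> & and
// avoids the copy. Where the inline size should not leak into the signature,
// LLVM code conventionally accepts the size-erased base class instead, e.g.:

static bool bothUsedInPHI(const MachineBasicBlock &A,
                          const SmallPtrSetImpl<MachineBasicBlock *> &SuccsB);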
/// InsertCopies - Insert copies into MBB and all of its successors.
void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
                                 SmallPtrSet<MachineBasicBlock*, 16>& visited) {
  MachineBasicBlock* MBB = MDTN->getBlock();
  visited.insert(MBB);

  std::set<unsigned> pushed;

  LiveIntervals& LI = getAnalysis<LiveIntervals>();
  // Rewrite register uses from Stacks.
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
       I != E; ++I) {
    if (I->isPHI())
      continue;

    for (unsigned i = 0; i < I->getNumOperands(); ++i)
      if (I->getOperand(i).isReg() &&
          Stacks[I->getOperand(i).getReg()].size()) {
        // Remove the live range for the old vreg.
        LiveInterval& OldInt = LI.getInterval(I->getOperand(i).getReg());
        LiveInterval::iterator OldLR = OldInt.FindLiveRangeContaining(
            LI.getInstructionIndex(I).getUseIndex());
        if (OldLR != OldInt.end())
          OldInt.removeRange(*OldLR, true);

        // Change the register.
        I->getOperand(i).setReg(Stacks[I->getOperand(i).getReg()].back());

        // Add a live range for the new vreg.
        LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
        VNInfo* FirstVN = *Int.vni_begin();
        FirstVN->setHasPHIKill(false);
        LiveRange LR(LI.getMBBStartIdx(I->getParent()),
                     LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
                     FirstVN);

        Int.addRange(LR);
      }
  }

  // Schedule the copies for this block.
  ScheduleCopies(MBB, pushed);

  // Recur down the dominator tree.
  for (MachineDomTreeNode::iterator I = MDTN->begin(), E = MDTN->end();
       I != E; ++I)
    if (!visited.count((*I)->getBlock()))
      InsertCopies(*I, visited);

  // As we exit this block, pop the names we pushed while processing it.
  for (std::set<unsigned>::iterator I = pushed.begin(), E = pushed.end();
       I != E; ++I)
    Stacks[*I].pop_back();
}
/// computeDFS - Computes the DFS-in and DFS-out numbers of the dominator tree
/// of the given MachineFunction. These numbers are then used in other parts
/// of the PHI elimination process.
void StrongPHIElimination::computeDFS(MachineFunction& MF) {
  SmallPtrSet<MachineDomTreeNode*, 8> frontier;
  SmallPtrSet<MachineDomTreeNode*, 8> visited;

  unsigned time = 0;

  MachineDominatorTree& DT = getAnalysis<MachineDominatorTree>();

  MachineDomTreeNode* node = DT.getRootNode();

  std::vector<MachineDomTreeNode*> worklist;
  worklist.push_back(node);

  while (!worklist.empty()) {
    MachineDomTreeNode* currNode = worklist.back();

    if (!frontier.count(currNode)) {
      frontier.insert(currNode);
      ++time;
      preorder.insert(std::make_pair(currNode->getBlock(), time));
    }

    bool inserted = false;
    for (MachineDomTreeNode::iterator I = currNode->begin(),
         E = currNode->end(); I != E; ++I)
      if (!frontier.count(*I) && !visited.count(*I)) {
        worklist.push_back(*I);
        inserted = true;
        break;
      }

    if (!inserted) {
      frontier.erase(currNode);
      visited.insert(currNode);
      maxpreorder.insert(std::make_pair(currNode->getBlock(), time));

      worklist.pop_back();
    }
  }
}
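// The frontier/visited pair above implements a non-recursive DFS that assigns
// each node a preorder ("in") number on first touch and records the largest
// preorder number seen in its subtree ("out") when the node is finished. A
// minimal standalone sketch of the same numbering scheme over a hypothetical
// tree type (plain std containers instead of MachineDomTreeNode):

#include <unordered_map>
#include <utility>
#include <vector>

struct TreeNode {
  std::vector<TreeNode *> Children;
};

// Assigns DFSIn/DFSOut so that B lies in A's subtree iff
// DFSIn[A] <= DFSIn[B] && DFSOut[B] <= DFSOut[A].
void computeDFSNumbers(TreeNode *Root,
                       std::unordered_map<TreeNode *, unsigned> &DFSIn,
                       std::unordered_map<TreeNode *, unsigned> &DFSOut) {
  unsigned Time = 0;
  // Each stack frame holds a node and the index of its next unvisited child.
  std::vector<std::pair<TreeNode *, size_t>> Stack{{Root, 0}};
  DFSIn[Root] = ++Time;
  while (!Stack.empty()) {
    auto &[Node, NextChild] = Stack.back();
    if (NextChild < Node->Children.size()) {
      TreeNode *Child = Node->Children[NextChild++];
      DFSIn[Child] = ++Time;  // first touch: preorder number
      Stack.push_back({Child, 0});
    } else {
      DFSOut[Node] = Time;    // finished: max preorder number in subtree
      Stack.pop_back();
    }
  }
}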
bool ReduceCrashingInstructions::TestInsts(
    std::vector<const Instruction*> &Insts) {
  // Clone the program to try hacking it apart...
  ValueToValueMapTy VMap;
  Module *M = CloneModule(BD.getProgram(), VMap);

  // Convert list to set for fast lookup...
  SmallPtrSet<Instruction*, 64> Instructions;
  for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
    assert(!isa<TerminatorInst>(Insts[i]));
    Instructions.insert(cast<Instruction>(VMap[Insts[i]]));
  }

  outs() << "Checking for crash with only " << Instructions.size();
  if (Instructions.size() == 1)
    outs() << " instruction: ";
  else
    outs() << " instructions: ";

  for (Module::iterator MI = M->begin(), ME = M->end(); MI != ME; ++MI)
    for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE; ++FI)
      for (BasicBlock::iterator I = FI->begin(), E = FI->end(); I != E;) {
        Instruction *Inst = I++;
        if (!Instructions.count(Inst) && !isa<TerminatorInst>(Inst) &&
            !isa<LandingPadInst>(Inst)) {
          if (!Inst->getType()->isVoidTy())
            Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
          Inst->eraseFromParent();
        }
      }

  // Verify that this is still valid.
  PassManager Passes;
  Passes.add(createVerifierPass());
  Passes.add(createDebugInfoVerifierPass());
  Passes.run(*M);

  // Try running on the hacked up program...
  if (TestFn(BD, M)) {
    BD.setNewProgram(M); // It crashed, keep the trimmed version...

    // Make sure to use instruction pointers that point into the now-current
    // module, and that they don't include any deleted blocks.
    Insts.clear();
    for (SmallPtrSet<Instruction*, 64>::const_iterator
         I = Instructions.begin(), E = Instructions.end(); I != E; ++I)
      Insts.push_back(*I);
    return true;
  }
  delete M; // It didn't crash, try something else.
  return false;
}
void LowerEmAsyncify::FindContextVariables(AsyncCallEntry & Entry) {
  BasicBlock *AfterCallBlock = Entry.AfterCallBlock;

  Function & F = *AfterCallBlock->getParent();

  // Create a new entry block as if in the callback function,
  // then check for variables that no longer properly dominate their uses.
  BasicBlock *EntryBlock = BasicBlock::Create(TheModule->getContext(), "", &F,
                                              &F.getEntryBlock());
  BranchInst::Create(AfterCallBlock, EntryBlock);

  DominatorTreeWrapperPass DTW;
  DTW.runOnFunction(F);
  DominatorTree& DT = DTW.getDomTree();

  // These blocks may be using some values defined at or before AsyncCallBlock.
  BasicBlockSet Ramifications = FindReachableBlocksFrom(AfterCallBlock);

  SmallPtrSet<Value*, 256> ContextVariables;
  Values Pending;

  // Examine the instructions, find all variables that we need to store in the
  // context.
  for (BasicBlockSet::iterator RI = Ramifications.begin(),
       RE = Ramifications.end(); RI != RE; ++RI) {
    for (BasicBlock::iterator I = (*RI)->begin(), E = (*RI)->end();
         I != E; ++I) {
      for (unsigned i = 0, NumOperands = I->getNumOperands();
           i < NumOperands; ++i) {
        Value *O = I->getOperand(i);
        if (Instruction *Inst = dyn_cast<Instruction>(O)) {
          if (Inst == Entry.AsyncCallInst)
            continue; // For the original async call, we will load directly
                      // from the async return value.
          if (ContextVariables.count(Inst) != 0)
            continue; // Already examined.

          if (!DT.dominates(Inst, I->getOperandUse(i))) {
            // `I` is using `Inst`, yet `Inst` does not dominate `I` if we
            // arrive directly at AfterCallBlock, so we need to save `Inst`
            // in the context.
            ContextVariables.insert(Inst);
            Pending.push_back(Inst);
          }
        } else if (Argument *Arg = dyn_cast<Argument>(O)) {
          // count() should be as fast/slow as insert, so just insert here.
          ContextVariables.insert(Arg);
        }
      }
    }
  }

  // Restore F.
  EntryBlock->eraseFromParent();

  Entry.ContextVariables.clear();
  Entry.ContextVariables.reserve(ContextVariables.size());
  for (SmallPtrSet<Value*, 256>::iterator I = ContextVariables.begin(),
       E = ContextVariables.end(); I != E; ++I) {
    Entry.ContextVariables.push_back(*I);
  }
}
/// Record ICmp conditions relevant to any argument in CS following Pred's
/// single predecessors. If there are conflicting conditions along a path, like
/// x == 1 and x == 0, the first condition will be used.
static void recordConditions(CallSite CS, BasicBlock *Pred,
                             ConditionsTy &Conditions) {
  recordCondition(CS, Pred, CS.getInstruction()->getParent(), Conditions);
  BasicBlock *From = Pred;
  BasicBlock *To = Pred;
  SmallPtrSet<BasicBlock *, 4> Visited;
  while (!Visited.count(From->getSinglePredecessor()) &&
         (From = From->getSinglePredecessor())) {
    recordCondition(CS, From, To, Conditions);
    Visited.insert(From);
    To = From;
  }
}
bool LoopSafetyInfo::allLoopPathsLeadToBlock(const Loop *CurLoop,
                                             const BasicBlock *BB,
                                             const DominatorTree *DT) const {
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");

  // Fast path: header is always reached once the loop is entered.
  if (BB == CurLoop->getHeader())
    return true;

  // Collect all transitive predecessors of BB in the same loop. This set will
  // be a subset of the blocks within the loop.
  SmallPtrSet<const BasicBlock *, 4> Predecessors;
  collectTransitivePredecessors(CurLoop, BB, Predecessors);

  // Make sure that all successors of all predecessors of BB are either:
  // 1) BB,
  // 2) Also predecessors of BB,
  // 3) Exit blocks which are not taken on 1st iteration.
  // Memoize blocks we've already checked.
  SmallPtrSet<const BasicBlock *, 4> CheckedSuccessors;
  for (auto *Pred : Predecessors) {
    // Predecessor block may throw, so it has a side exit.
    if (blockMayThrow(Pred))
      return false;
    for (auto *Succ : successors(Pred))
      if (CheckedSuccessors.insert(Succ).second &&
          Succ != BB && !Predecessors.count(Succ))
        // By discharging conditions that are not executed on the 1st
        // iteration, we guarantee that *at least* on the first iteration all
        // paths from header that *may* execute will lead us to the block of
        // interest. So that if we had virtually peeled one iteration away, in
        // this peeled iteration the set of predecessors would contain only
        // paths from header to BB without any exiting edges that may execute.
        //
        // TODO: We only do it for exiting edges currently. We could use the
        // same function to skip some of the edges within the loop if we know
        // that they will not be taken on the 1st iteration.
        //
        // TODO: If we somehow know the number of iterations in loop, the same
        // check may be done for any arbitrary N-th iteration as long as N is
        // not greater than minimum number of iterations in this loop.
        if (CurLoop->contains(Succ) ||
            !CanProveNotTakenFirstIteration(Succ, DT, CurLoop))
          return false;
  }

  // All predecessors can only lead us to BB.
  return true;
}
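// In graph terms, the loop above verifies that the Predecessors set is closed
// under successor edges, up to BB itself and exits that are provably not taken
// on the first iteration. A standalone sketch of just the closure test, using
// integer nodes and omitting the exit-block special case:

#include <unordered_set>
#include <vector>

// Returns true if every successor of every node in Preds is either Target or
// itself in Preds, i.e. execution entering Preds cannot avoid Target.
bool allPathsLeadTo(const std::vector<std::vector<int>> &Succs,
                    const std::unordered_set<int> &Preds, int Target) {
  std::unordered_set<int> Checked; // memoize successors we've already tested
  for (int Pred : Preds)
    for (int Succ : Succs[Pred])
      if (Checked.insert(Succ).second && Succ != Target && !Preds.count(Succ))
        return false;
  return true;
}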
bool ReduceCrashingNamedMDOps::TestNamedMDOps(
    std::vector<const MDNode *> &NamedMDOps) {
  // Convert list to set for fast lookup...
  SmallPtrSet<const MDNode *, 32> OldMDNodeOps;
  for (unsigned i = 0, e = NamedMDOps.size(); i != e; ++i) {
    OldMDNodeOps.insert(NamedMDOps[i]);
  }

  outs() << "Checking for crash with only " << OldMDNodeOps.size();
  if (OldMDNodeOps.size() == 1)
    outs() << " named metadata operand: ";
  else
    outs() << " named metadata operands: ";

  ValueToValueMapTy VMap;
  Module *M = CloneModule(BD.getProgram(), VMap).release();

  // This is a little wasteful. In the future it might be good if we could have
  // these dropped during cloning.
  for (auto &NamedMD : BD.getProgram()->named_metadata()) {
    // Drop the old one and create a new one.
    M->eraseNamedMetadata(M->getNamedMetadata(NamedMD.getName()));
    NamedMDNode *NewNamedMDNode =
        M->getOrInsertNamedMetadata(NamedMD.getName());
    for (MDNode *op : NamedMD.operands())
      if (OldMDNodeOps.count(op))
        NewNamedMDNode->addOperand(cast<MDNode>(MapMetadata(op, VMap)));
  }

  // Verify that this is still valid.
  legacy::PassManager Passes;
  Passes.add(createVerifierPass(/*FatalErrors=*/false));
  Passes.run(*M);

  // Try running on the hacked up program...
  if (TestFn(BD, M)) {
    // Make sure to use instruction pointers that point into the now-current
    // module, and that they don't include any deleted blocks.
    NamedMDOps.clear();
    for (const MDNode *Node : OldMDNodeOps)
      NamedMDOps.push_back(cast<MDNode>(*VMap.getMappedMD(Node)));

    BD.setNewProgram(M); // It crashed, keep the trimmed version...
    return true;
  }
  delete M; // It didn't crash, try something else.
  return false;
}
static SmallPtrSet<BasicBlock *, 4> getCoroBeginPredBlocks(CoroBeginInst *CB) {
  // Collect all blocks that we need to look for instructions to relocate.
  SmallPtrSet<BasicBlock *, 4> RelocBlocks;
  SmallVector<BasicBlock *, 4> Work;
  Work.push_back(CB->getParent());

  do {
    BasicBlock *Current = Work.pop_back_val();
    for (BasicBlock *BB : predecessors(Current))
      if (RelocBlocks.count(BB) == 0) {
        RelocBlocks.insert(BB);
        Work.push_back(BB);
      }
  } while (!Work.empty());
  return RelocBlocks;
}
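// The count-then-insert sequence above queries the set twice. SmallPtrSet's
// insert() returns a pair whose bool member is true only when the element was
// newly added, so the membership check and the insertion can be fused into a
// single lookup; a behavior-equivalent sketch of the same function under that
// idiom:

static SmallPtrSet<BasicBlock *, 4> getCoroBeginPredBlocks(CoroBeginInst *CB) {
  SmallPtrSet<BasicBlock *, 4> RelocBlocks;
  SmallVector<BasicBlock *, 4> Work;
  Work.push_back(CB->getParent());

  do {
    BasicBlock *Current = Work.pop_back_val();
    for (BasicBlock *BB : predecessors(Current))
      if (RelocBlocks.insert(BB).second) // true only on first insertion
        Work.push_back(BB);
  } while (!Work.empty());
  return RelocBlocks;
}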
void AssumptionCacheTracker::verifyAnalysis() const {
#ifndef NDEBUG
  SmallPtrSet<const CallInst *, 4> AssumptionSet;
  for (const auto &I : AssumptionCaches) {
    for (auto &VH : I.second->assumptions())
      if (VH)
        AssumptionSet.insert(cast<CallInst>(VH));

    for (const BasicBlock &B : cast<Function>(*I.first))
      for (const Instruction &II : B)
        if (match(&II, m_Intrinsic<Intrinsic::assume>()))
          assert(AssumptionSet.count(cast<CallInst>(&II)) &&
                 "Assumption in scanned function not in cache");
  }
#endif
}
bool ADCE::runOnFunction(Function& F) {
  if (skipOptnoneFunction(F))
    return false;

  SmallPtrSet<Instruction*, 128> alive;
  SmallVector<Instruction*, 128> worklist;

  // Collect the set of "root" instructions that are known live.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isa<TerminatorInst>(I.getInstructionIterator()) ||
        isa<DbgInfoIntrinsic>(I.getInstructionIterator()) ||
        isa<LandingPadInst>(I.getInstructionIterator()) ||
        I->mayHaveSideEffects()) {
      alive.insert(I.getInstructionIterator());
      worklist.push_back(I.getInstructionIterator());
    }

  // Propagate liveness backwards to operands.
  while (!worklist.empty()) {
    Instruction* curr = worklist.pop_back_val();
    for (Instruction::op_iterator OI = curr->op_begin(), OE = curr->op_end();
         OI != OE; ++OI)
      if (Instruction* Inst = dyn_cast<Instruction>(OI))
        if (alive.insert(Inst))
          worklist.push_back(Inst);
  }

  // The inverse of the live set is the dead set. These are those instructions
  // which have no side effects and do not influence the control flow or return
  // value of the function, and may therefore be deleted safely.
  // NOTE: We reuse the worklist vector here for memory efficiency.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (!alive.count(I.getInstructionIterator())) {
      worklist.push_back(I.getInstructionIterator());
      I->dropAllReferences();
    }

  for (SmallVectorImpl<Instruction *>::iterator I = worklist.begin(),
       E = worklist.end(); I != E; ++I) {
    ++NumRemoved;
    (*I)->eraseFromParent();
  }

  return !worklist.empty();
}
/// Find an insertion point that dominates all uses.
SmallPtrSet<Instruction *, 8> ConstantHoistingPass::findConstantInsertionPoint(
    const ConstantInfo &ConstInfo) const {
  assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry.");
  // Collect all basic blocks.
  SmallPtrSet<BasicBlock *, 8> BBs;
  SmallPtrSet<Instruction *, 8> InsertPts;
  for (auto const &RCI : ConstInfo.RebasedConstants)
    for (auto const &U : RCI.Uses)
      BBs.insert(findMatInsertPt(U.Inst, U.OpndIdx)->getParent());

  if (BBs.count(Entry)) {
    InsertPts.insert(&Entry->front());
    return InsertPts;
  }

  if (BFI) {
    findBestInsertionSet(*DT, *BFI, Entry, BBs);
    for (auto BB : BBs) {
      BasicBlock::iterator InsertPt = BB->begin();
      for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
        ;
      InsertPts.insert(&*InsertPt);
    }
    return InsertPts;
  }

  while (BBs.size() >= 2) {
    BasicBlock *BB, *BB1, *BB2;
    BB1 = *BBs.begin();
    BB2 = *std::next(BBs.begin());
    BB = DT->findNearestCommonDominator(BB1, BB2);
    if (BB == Entry) {
      InsertPts.insert(&Entry->front());
      return InsertPts;
    }
    BBs.erase(BB1);
    BBs.erase(BB2);
    BBs.insert(BB);
  }
  assert((BBs.size() == 1) && "Expected only one element.");
  Instruction &FirstInst = (*BBs.begin())->front();
  InsertPts.insert(findMatInsertPt(&FirstInst));
  return InsertPts;
}
bool ADCE::runOnFunction(Function& F) {
  if (skipOptnoneFunction(F))
    return false;

  SmallPtrSet<Instruction*, 128> Alive;
  SmallVector<Instruction*, 128> Worklist;

  // Collect the set of "root" instructions that are known live.
  for (Instruction &I : instructions(F)) {
    if (isa<TerminatorInst>(I) || isa<DbgInfoIntrinsic>(I) || I.isEHPad() ||
        I.mayHaveSideEffects()) {
      Alive.insert(&I);
      Worklist.push_back(&I);
    }
  }

  // Propagate liveness backwards to operands.
  while (!Worklist.empty()) {
    Instruction *Curr = Worklist.pop_back_val();
    for (Use &OI : Curr->operands()) {
      if (Instruction *Inst = dyn_cast<Instruction>(OI))
        if (Alive.insert(Inst).second)
          Worklist.push_back(Inst);
    }
  }

  // The inverse of the live set is the dead set. These are those instructions
  // which have no side effects and do not influence the control flow or return
  // value of the function, and may therefore be deleted safely.
  // NOTE: We reuse the Worklist vector here for memory efficiency.
  for (Instruction &I : instructions(F)) {
    if (!Alive.count(&I)) {
      Worklist.push_back(&I);
      I.dropAllReferences();
    }
  }

  for (Instruction *&I : Worklist) {
    ++NumRemoved;
    I->eraseFromParent();
  }

  return !Worklist.empty();
}
/// FindFunctionBackedges - Analyze the specified function to find all of the
/// loop backedges in the function and return them. This is a relatively cheap
/// (compared to computing dominators and loop info) analysis.
///
/// The output is added to Result, as pairs of <from,to> edge info.
void llvm::FindFunctionBackedges(const Function &F,
     SmallVectorImpl<std::pair<const BasicBlock*,const BasicBlock*> > &Result) {
  const BasicBlock *BB = &F.getEntryBlock();
  if (succ_begin(BB) == succ_end(BB))
    return;

  SmallPtrSet<const BasicBlock*, 8> Visited;
  SmallVector<std::pair<const BasicBlock*, succ_const_iterator>, 8> VisitStack;
  SmallPtrSet<const BasicBlock*, 8> InStack;

  Visited.insert(BB);
  VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
  InStack.insert(BB);
  do {
    std::pair<const BasicBlock*, succ_const_iterator> &Top = VisitStack.back();
    const BasicBlock *ParentBB = Top.first;
    succ_const_iterator &I = Top.second;

    bool FoundNew = false;
    while (I != succ_end(ParentBB)) {
      BB = *I++;
      if (Visited.insert(BB)) {
        FoundNew = true;
        break;
      }
      // Successor is in VisitStack, it's a back edge.
      if (InStack.count(BB))
        Result.push_back(std::make_pair(ParentBB, BB));
    }

    if (FoundNew) {
      // Go down one level if there is an unvisited successor.
      InStack.insert(BB);
      VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
    } else {
      // Go up one level.
      InStack.erase(VisitStack.pop_back_val().first);
    }
  } while (!VisitStack.empty());
}
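// The Visited/InStack pair above is the classic coloring scheme for iterative
// back-edge detection: Visited marks "not white" nodes, InStack marks "grey"
// nodes on the current DFS path. A minimal standalone sketch of the same
// scheme over a hypothetical adjacency-list graph with integer nodes:

#include <unordered_set>
#include <utility>
#include <vector>

using Edge = std::pair<int, int>;

// Returns all back edges reachable from Entry in the graph Succs, where
// Succs[N] lists the successors of node N.
std::vector<Edge> findBackedges(const std::vector<std::vector<int>> &Succs,
                                int Entry) {
  std::vector<Edge> Result;
  std::unordered_set<int> Visited, InStack;
  // Each stack frame holds a node and the index of its next unexplored successor.
  std::vector<std::pair<int, size_t>> Stack{{Entry, 0}};
  Visited.insert(Entry);
  InStack.insert(Entry);
  while (!Stack.empty()) {
    auto &[Node, Next] = Stack.back();
    if (Next < Succs[Node].size()) {
      int Succ = Succs[Node][Next++];
      if (Visited.insert(Succ).second) {
        InStack.insert(Succ);           // descend into an unvisited successor
        Stack.push_back({Succ, 0});
      } else if (InStack.count(Succ)) {
        Result.push_back({Node, Succ}); // successor on the DFS path: back edge
      }
    } else {
      InStack.erase(Node);              // node finished; leave the DFS path
      Stack.pop_back();
    }
  }
  return Result;
}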
// FindCopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg
// when following the CFG edge to SuccMBB. This needs to be after any def of
// SrcReg, but before any subsequent point where control flow might jump out of
// the basic block.
MachineBasicBlock::iterator
llvm::PHIElimination::FindCopyInsertPoint(MachineBasicBlock &MBB,
                                          MachineBasicBlock &SuccMBB,
                                          unsigned SrcReg) {
  // Handle the trivial case trivially.
  if (MBB.empty())
    return MBB.begin();

  // Usually, we just want to insert the copy before the first terminator
  // instruction. However, for the edge going to a landing pad, we must insert
  // the copy before the call/invoke instruction.
  if (!SuccMBB.isLandingPad())
    return MBB.getFirstTerminator();

  // Discover any defs/uses in this basic block.
  SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
       RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *DefUseMI = &*RI;
    if (DefUseMI->getParent() == &MBB)
      DefUsesInMBB.insert(DefUseMI);
  }

  MachineBasicBlock::iterator InsertPoint;
  if (DefUsesInMBB.empty()) {
    // No defs. Insert the copy at the start of the basic block.
    InsertPoint = MBB.begin();
  } else if (DefUsesInMBB.size() == 1) {
    // Insert the copy immediately after the def/use.
    InsertPoint = *DefUsesInMBB.begin();
    ++InsertPoint;
  } else {
    // Insert the copy immediately after the last def/use.
    InsertPoint = MBB.end();
    while (!DefUsesInMBB.count(&*--InsertPoint)) {}
    ++InsertPoint;
  }

  // Make sure the copy goes after any phi nodes however.
  return SkipPHIsAndLabels(MBB, InsertPoint);
}