// Given fun, a positive rhs set, and a negative val set, computes the
// (convex, negative) set of lhs obs for which some fun(lhs, rhs) with
// rhs in rhs_set lands in val_set. Returns the stored set's id.
SetId Approximator::function_rhs_val(const Function& fun, SetId rhs, SetId val,
                                     Parity parity) const {
    POMAGMA_ASSERT1(rhs, "rhs is undefined");
    POMAGMA_ASSERT1(val, "val is undefined");
    POMAGMA_ASSERT1(parity == NABOVE or parity == NBELOW, "invalid parity");
    const bool upward = (parity == NBELOW);
    const DenseSet& support = m_structure.carrier().support();
    const DenseSet rhs_set = m_sets.load(rhs);  // positive
    const DenseSet val_set = m_sets.load(val);  // negative
    DenseSet lhs_set(m_item_dim);               // negative
    // lhs_set grows while we iterate over support \ lhs_set; the contains()
    // check below compensates for the iterator observing stale membership.
    for (auto iter = support.iter_diff(lhs_set); iter.ok(); iter.next()) {
        Ob lhs = *iter;
        if (unlikely(lhs_set.contains(lhs))) continue;  // iterator latency
        for (auto iter = fun.get_Lx_set(lhs).iter_insn(rhs_set); iter.ok();
             iter.next()) {
            Ob rhs = *iter;
            Ob val = fun.find(lhs, rhs);
            if (val_set.contains(val)) {
                // One witness suffices; insert lhs convexly and move on.
                convex_insert(lhs_set, lhs, upward);
                break;
            }
        }
    }
    return m_sets.store(std::move(lhs_set));
}
void LoopInfo::verifyAnalysis() const {
  // LoopInfo is a FunctionPass, but verifying every loop in the function
  // each time verifyAnalysis is called is very expensive. The
  // -verify-loop-info option can enable this. In order to perform some
  // checking by default, LoopPass has been taught to call verifyLoop
  // manually during loop pass sequences.
  if (!VerifyLoopInfo) return;

  // Recursively verify each top-level loop nest, collecting every loop
  // visited into Loops.
  DenseSet<const Loop*> Loops;
  for (iterator I = begin(), E = end(); I != E; ++I) {
    assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
    (*I)->verifyLoopNest(&Loops);
  }

  // Verify that blocks are mapped to valid loops.
  //
  // FIXME: With an up-to-date DFS (see LoopIterator.h) and DominatorTree, we
  // could also verify that the blocks are still in the correct loops.
  for (DenseMap<BasicBlock*, Loop*>::const_iterator I = LI.BBMap.begin(),
         E = LI.BBMap.end(); I != E; ++I) {
    assert(Loops.count(I->second) && "orphaned loop");
    assert(I->second->contains(I->first) && "orphaned block");
  }
}
// Collects into MissingAliases each dynamically observed alias pair that the
// checked AA reports as NoAlias while the baseline AA does not, after
// filtering out pairs that are unlikely to be interesting reports.
void AliasAnalysisChecker::collectMissingAliases(
    const DenseSet<ValuePair> &DynamicAliases,
    vector<ValuePair> &MissingAliases) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis &BaselineAA = getAnalysis<BaselineAliasAnalysis>();

  MissingAliases.clear();
  for (DenseSet<ValuePair>::const_iterator I = DynamicAliases.begin();
       I != DynamicAliases.end(); ++I) {
    Value *V1 = I->first, *V2 = I->second;
    // Optionally restrict reports to intra-procedural queries.
    if (IntraProc && !DynAAUtils::IsIntraProcQuery(V1, V2)) {
      continue;
    }
    // Ignore BitCasts and PhiNodes. The reports on them are typically
    // redundant.
    if (isa<BitCastInst>(V1) || isa<BitCastInst>(V2)) continue;
    if (isa<PHINode>(V1) || isa<PHINode>(V2)) continue;
    // Unless asked to check all pointers, only report pairs where both
    // pointers were actually dereferenced.
    if (!CheckAllPointers) {
      if (!DynAAUtils::PointerIsDereferenced(V1) ||
          !DynAAUtils::PointerIsDereferenced(V2)) {
        continue;
      }
    }
    // A missing alias: the baseline does not refute the observed alias, but
    // the checked AA claims NoAlias.
    if (BaselineAA.alias(V1, V2) != AliasAnalysis::NoAlias &&
        AA.alias(V1, V2) == AliasAnalysis::NoAlias) {
      MissingAliases.push_back(make_pair(V1, V2));
    }
  }
}
// Returns all flat address expressions in function F. The elements are // If V is an unvisited flat address expression, appends V to PostorderStack // and marks it as visited. void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack( Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack, DenseSet<Value *> &Visited) const { assert(V->getType()->isPointerTy()); // Generic addressing expressions may be hidden in nested constant // expressions. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { // TODO: Look in non-address parts, like icmp operands. if (isAddressExpression(*CE) && Visited.insert(CE).second) PostorderStack.push_back(std::make_pair(CE, false)); return; } if (isAddressExpression(*V) && V->getType()->getPointerAddressSpace() == FlatAddrSpace) { if (Visited.insert(V).second) { PostorderStack.push_back(std::make_pair(V, false)); Operator *Op = cast<Operator>(V); for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) { if (isAddressExpression(*CE) && Visited.insert(CE).second) PostorderStack.emplace_back(CE, false); } } } } }
bool searchPredecessors(const MachineBasicBlock *MBB, const MachineBasicBlock *CutOff, UnaryPredicate Predicate) { if (MBB == CutOff) return false; DenseSet<const MachineBasicBlock*> Visited; SmallVector<MachineBasicBlock*, 4> Worklist(MBB->pred_begin(), MBB->pred_end()); while (!Worklist.empty()) { MachineBasicBlock *MBB = Worklist.pop_back_val(); if (!Visited.insert(MBB).second) continue; if (MBB == CutOff) continue; if (Predicate(MBB)) return true; Worklist.append(MBB->pred_begin(), MBB->pred_end()); } return false; }
// Marks the DSNode of the converted pointer as having escaped through
// ptrtoint, unless the integer result is only used in "boring" ways
// (a comparison, or a single-use chain ending in a branch).
void GraphBuilder::visitPtrToIntInst(PtrToIntInst& I) {
  DSNode* N = getValueDest(I.getOperand(0)).getNode();

  if (I.hasOneUse()) {
    // An integer used only by a compare does not escape as a pointer.
    if (isa<ICmpInst>(*(I.use_begin()))) {
      NumBoringIntToPtr++;
      return;
    }
  }

  if (I.hasOneUse()) {
    // Follow the single-use chain from the conversion; Seen guards against
    // cycles in the chain.
    Value *V = dyn_cast<Value>(*(I.use_begin()));
    DenseSet<Value *> Seen;
    while (V && V->hasOneUse() && Seen.insert(V).second) {
      if (isa<LoadInst>(V)) break;
      if (isa<StoreInst>(V)) break;
      if (isa<CallInst>(V)) break;
      V = dyn_cast<Value>(*(V->use_begin()));
    }
    // BUG FIX: V can be null here (the loop condition and the dyn_cast both
    // admit a null result), and isa<> on a null pointer is undefined
    // behavior. Guard before classifying the chain terminator.
    if (V && isa<BranchInst>(V)) {
      NumBoringIntToPtr++;
      return;
    }
  }

  if (N)
    N->setPtrToIntMarker();
}
// Check PHI instructions at the beginning of MBB. It is assumed that // calcRegsPassed has been run so BBInfo::isLiveOut is valid. void MachineVerifier::checkPHIOps(const MachineBasicBlock *MBB) { for (MachineBasicBlock::const_iterator BBI = MBB->begin(), BBE = MBB->end(); BBI != BBE && BBI->isPHI(); ++BBI) { DenseSet<const MachineBasicBlock*> seen; for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) { unsigned Reg = BBI->getOperand(i).getReg(); const MachineBasicBlock *Pre = BBI->getOperand(i + 1).getMBB(); if (!Pre->isSuccessor(MBB)) continue; seen.insert(Pre); BBInfo &PrInfo = MBBInfoMap[Pre]; if (PrInfo.reachable && !PrInfo.isLiveOut(Reg)) report("PHI operand is not live-out from predecessor", &BBI->getOperand(i), i); } // Did we see all predecessors? for (MachineBasicBlock::const_pred_iterator PrI = MBB->pred_begin(), PrE = MBB->pred_end(); PrI != PrE; ++PrI) { if (!seen.count(*PrI)) { report("Missing PHI operand", BBI); *OS << "BB#" << (*PrI)->getNumber() << " is a predecessor according to the CFG.\n"; } } } }
// Collects missing aliases to <MissingAliases>. void AliasAnalysisChecker::collectMissingAliases( const DenseSet<ValuePair> &DynamicAliases) { AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); AliasAnalysis &BaselineAA = getAnalysis<BaselineAliasAnalysis>(); MissingAliases.clear(); for (DenseSet<ValuePair>::const_iterator I = DynamicAliases.begin(); I != DynamicAliases.end(); ++I) { Value *V1 = I->first, *V2 = I->second; if (IntraProc && !DynAAUtils::IsIntraProcQuery(V1, V2)) { continue; } if (!CheckAllPointers) { if (!DynAAUtils::PointerIsDereferenced(V1) || !DynAAUtils::PointerIsDereferenced(V2)) { continue; } } if (BaselineAA.alias(V1, V2) != AliasAnalysis::NoAlias && AA.alias(V1, V2) == AliasAnalysis::NoAlias) { MissingAliases.push_back(make_pair(V1, V2)); } } }
// Post-order traversal of the metadata graph rooted at N, remapping each
// node via remap() once all of its operands have been processed. Nodes
// already in Replacements are skipped.
void DebugTypeInfoRemoval::traverse(MDNode *N) {
  if (!N || Replacements.count(N))
    return;

  // To avoid cycles, as well as for efficiency sake, we will sometimes prune
  // parts of the graph.
  auto prune = [](MDNode *Parent, MDNode *Child) {
    if (auto *MDS = dyn_cast<DISubprogram>(Parent))
      return Child == MDS->getVariables().get();
    return false;
  };

  SmallVector<MDNode *, 16> ToVisit;
  DenseSet<MDNode *> Opened;

  // Visit each node starting at N in post order, and map them.
  // A node already in Opened has had its children pushed; seeing it again on
  // the stack means all children are done, so it can be closed (remapped).
  ToVisit.push_back(N);
  while (!ToVisit.empty()) {
    auto *N = ToVisit.back();
    if (!Opened.insert(N).second) {
      // Close it.
      remap(N);
      ToVisit.pop_back();
      continue;
    }
    for (auto &I : N->operands())
      if (auto *MDN = dyn_cast_or_null<MDNode>(I))
        if (!Opened.count(MDN) && !Replacements.count(MDN) && !prune(N, MDN) &&
            !isa<DICompileUnit>(MDN))
          ToVisit.push_back(MDN);
  }
}
/// setSubgraphColorHelper - Implement setSubgraphColor. Return /// whether we truncated the search. /// bool SelectionDAG::setSubgraphColorHelper(SDNode *N, const char *Color, DenseSet<SDNode *> &visited, int level, bool &printed) { bool hit_limit = false; #ifndef NDEBUG if (level >= 20) { if (!printed) { printed = true; DEBUG(errs() << "setSubgraphColor hit max level\n"); } return true; } unsigned oldSize = visited.size(); visited.insert(N); if (visited.size() != oldSize) { setGraphColor(N, Color); for(SDNodeIterator i = SDNodeIterator::begin(N), iend = SDNodeIterator::end(N); i != iend; ++i) { hit_limit = setSubgraphColorHelper(*i, Color, visited, level+1, printed) || hit_limit; } } #else errs() << "SelectionDAG::setSubgraphColor is only available in debug builds" << " on systems with Graphviz or gv!\n"; #endif return hit_limit; }
// Computes the set of DSNodes whose pools must be passed into F as
// arguments: nodes reachable from F's arguments or return value, minus any
// node reachable from a global (those get global pools instead).
static void MarkNodesWhichMustBePassedIn(DenseSet<const DSNode*> &MarkedNodes,
                                         Function &F, DSGraph* G,
                                         EntryPointAnalysis* EPA) {
  // All DSNodes reachable from arguments must be passed in...
  // Unless this is an entry point to the program
  if (!EPA->isEntryPoint(&F)) {
    for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
         I != E; ++I) {
      DSGraph::ScalarMapTy::iterator AI = G->getScalarMap().find(I);
      if (AI != G->getScalarMap().end())
        if (DSNode * N = AI->second.getNode())
          N->markReachableNodes(MarkedNodes);
    }
  }

  // Marked the returned node as needing to be passed in.
  if (DSNode * RetNode = G->getReturnNodeFor(F).getNode())
    RetNode->markReachableNodes(MarkedNodes);

  // Calculate which DSNodes are reachable from globals. If a node is reachable
  // from a global, we will create a global pool for it, so no argument passage
  // is required.
  DenseSet<const DSNode*> NodesFromGlobals;
  GetNodesReachableFromGlobals(G, NodesFromGlobals);

  // Remove any nodes reachable from a global. These nodes will be put into
  // global pools, which do not require arguments to be passed in.
  for (DenseSet<const DSNode*>::iterator I = NodesFromGlobals.begin(),
         E = NodesFromGlobals.end(); I != E; ++I)
    MarkedNodes.erase(*I);
}
// // Method: findGlobalPoolNodes() // // Description: // This method finds DSNodes that are reachable from globals and that need a // pool. The Automatic Pool Allocation transform will use the returned // information to build global pools for the DSNodes in question. // // Note that this method does not assign DSNodes to pools; it merely decides // which DSNodes are reachable from globals and will need a pool of global // scope. // // Outputs: // Nodes - The DSNodes that are both reachable from globals and which should // have global pools will be *added* to this container. // void Heuristic::findGlobalPoolNodes (DSNodeSet_t & Nodes) { // Get the globals graph for the program. DSGraph* GG = Graphs->getGlobalsGraph(); // Get all of the nodes reachable from globals. DenseSet<const DSNode*> GlobalHeapNodes; GetNodesReachableFromGlobals (GG, GlobalHeapNodes); // // Now find all DSNodes belonging to function-local DSGraphs which are // mirrored in the globals graph. These DSNodes require a global pool, too. // for (Module::iterator F = M->begin(); F != M->end(); ++F) { if (Graphs->hasDSGraph(*F)) { DSGraph* G = Graphs->getDSGraph(*F); GetNodesReachableFromGlobals (G, GlobalHeapNodes); } } // // Copy the values into the output container. Note that DenseSet has no // iterator traits (or whatever allows us to treat DenseSet has a generic // container), so we have to use a loop to copy values from the DenseSet into // the output container. // for (DenseSet<const DSNode*>::iterator I = GlobalHeapNodes.begin(), E = GlobalHeapNodes.end(); I != E; ++I) { Nodes.insert (*I); } return; }
/// Returns true if V, or any value transitively reachable through its
/// operands, is a PHI node with an undef incoming value.
static bool IsUndefinedPhiRecursive(Value *V) {
  SmallVector<Value *, 4> Pending;
  DenseSet<const Value *> Seen;

  Pending.push_back(V);
  Seen.insert(V);

  while (!Pending.empty()) {
    Value *Cur = Pending.pop_back_val();
    if (!CanCheckValue(Cur))
      continue;

    // Any undef flowing into a PHI makes the whole chain undefined.
    if (auto *Phi = dyn_cast<PHINode>(Cur))
      for (Value *Incoming : Phi->incoming_values())
        if (isa<UndefValue>(Incoming))
          return true;

    // Queue unvisited operands for inspection.
    if (User *U = dyn_cast<User>(Cur))
      for (Value *Op : U->operands())
        if (Seen.insert(Op).second)
          Pending.push_back(Op);
  }

  return false;
}
// Loads a survey structure from disk and aggregates it into m_structure,
// growing the carrier first if the combined item count would overflow the
// current dimension. Validation is performed only at high debug levels.
void Server::aggregate (const std::string & survey_in) {
    Structure survey;
    survey.load(survey_in);
    if (POMAGMA_DEBUG_LEVEL > 1) {
        survey.validate();
    }
    compact(m_structure);
    if (POMAGMA_DEBUG_LEVEL > 1) {
        m_structure.validate();
    }
    // Items defined in the survey but restricted to m_structure's signature.
    DenseSet defined = restricted(survey.signature(), m_structure.signature());
    size_t total_dim = m_structure.carrier().item_count() +
                       defined.count_items();
    if (m_structure.carrier().item_dim() < total_dim) {
        m_structure.resize(total_dim);
        if (POMAGMA_DEBUG_LEVEL > 1) {
            m_structure.validate();
        }
    }
    pomagma::aggregate(m_structure, survey, defined);
    if (POMAGMA_DEBUG_LEVEL > 1) {
        m_structure.validate();
    }
}
void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                    BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we could not
  // determine a value for before (i.e. were marked overdefined) may be possible
  // to solve now. We do NOT try to proactively update these values. Instead,
  // we clear their entries from the cache, and allow lazy updating to recompute
  // them when needed.

  // The updating process is fairly simple: we need to dropped cached info
  // for all values that were marked overdefined in OldSucc, and for those same
  // values in any successor of OldSucc (except NewSucc) in which they were
  // also marked overdefined.
  std::vector<BasicBlock*> worklist;
  worklist.push_back(OldSucc);

  // Gather the values that were overdefined in OldSucc; only these can need
  // clearing downstream.
  DenseSet<Value*> ClearSet;
  for (DenseSet<OverDefinedPairTy>::iterator I = OverDefinedCache.begin(),
       E = OverDefinedCache.end(); I != E; ++I) {
    if (I->first == OldSucc)
      ClearSet.insert(I->second);
  }

  // Use a worklist to perform a depth-first search of OldSucc's successors.
  // NOTE: We do not need a visited list since any blocks we have already
  // visited will have had their overdefined markers cleared already, and we
  // thus won't loop to their successors.
  while (!worklist.empty()) {
    BasicBlock *ToUpdate = worklist.back();
    worklist.pop_back();

    // Skip blocks only accessible through NewSucc.
    if (ToUpdate == NewSucc) continue;

    bool changed = false;
    for (DenseSet<Value*>::iterator I = ClearSet.begin(), E = ClearSet.end();
         I != E; ++I) {
      // If a value was marked overdefined in OldSucc, and is here too...
      DenseSet<OverDefinedPairTy>::iterator OI =
        OverDefinedCache.find(std::make_pair(ToUpdate, *I));
      if (OI == OverDefinedCache.end()) continue;

      // Remove it from the caches.
      ValueCacheEntryTy &Entry = ValueCache[LVIValueHandle(*I, this)];
      ValueCacheEntryTy::iterator CI = Entry.find(ToUpdate);
      assert(CI != Entry.end() && "Couldn't find entry to update?");
      Entry.erase(CI);
      OverDefinedCache.erase(OI);

      // If we removed anything, then we potentially need to update
      // blocks successors too.
      changed = true;
    }

    // Nothing was cleared in this block, so its successors are unaffected.
    if (!changed) continue;

    worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
  }
}
/// Model the effect of an instruction on the set of available values. static void TransferInstruction(const Instruction &I, bool &Cleared, DenseSet<const Value *> &Available) { if (isStatepoint(I)) { Cleared = true; Available.clear(); } else if (containsGCPtrType(I.getType())) Available.insert(&I); }
// Accumulates into val_set every fun(lhs, rhs) with lhs in lhs_set and
// rhs in rhs_set. temp_set is caller-provided scratch space; all four sets
// must share this approximator's item dimension.
inline void Approximator::map(const BinaryFunction& fun,
                              const DenseSet& lhs_set,
                              const DenseSet& rhs_set,
                              DenseSet& val_set,
                              DenseSet& temp_set) {
    POMAGMA_ASSERT_EQ(lhs_set.item_dim(), m_item_dim);
    POMAGMA_ASSERT_EQ(rhs_set.item_dim(), m_item_dim);
    POMAGMA_ASSERT_EQ(val_set.item_dim(), m_item_dim);
    POMAGMA_ASSERT_EQ(temp_set.item_dim(), m_item_dim);
    for (auto iter = lhs_set.iter(); iter.ok(); iter.next()) {
        Ob lhs = *iter;
        // optimize for special cases of APP and COMP
        // If fun(lhs, TOP) == fun(lhs, BOT), lhs behaves as a constant
        // function, so a single insert covers all rhs values.
        if (Ob lhs_top = fun.find(lhs, m_top)) {
            if (Ob lhs_bot = fun.find(lhs, m_bot)) {
                bool lhs_is_constant = (lhs_top == lhs_bot);
                if (lhs_is_constant) {
                    val_set.raw_insert(lhs_top);
                    continue;
                }
            }
        }
        // Restrict rhs_set to values actually defined for this lhs.
        temp_set.set_insn(rhs_set, fun.get_Lx_set(lhs));
        for (auto iter = temp_set.iter(); iter.ok(); iter.next()) {
            Ob rhs = *iter;
            Ob val = fun.find(lhs, rhs);
            val_set.raw_insert(val);
        }
    }
}
/// Return the baseType for Val which states whether Val is exclusively
/// derived from constant/null, or not exclusively derived from constant.
/// Val is exclusively derived off a constant base when all operands of phi and
/// selects are derived off a constant base.
static enum BaseType getBaseType(const Value *Val) {
  SmallVector<const Value *, 32> Worklist;
  DenseSet<const Value *> Visited;
  bool isExclusivelyDerivedFromNull = true;
  Worklist.push_back(Val);
  // Strip through all the bitcasts and geps to get base pointer. Also check for
  // the exclusive value when there can be multiple base pointers (through phis
  // or selects).
  while(!Worklist.empty()) {
    const Value *V = Worklist.pop_back_val();
    // Skip values already examined (phis can form cycles).
    if (!Visited.insert(V).second)
      continue;

    if (const auto *CI = dyn_cast<CastInst>(V)) {
      Worklist.push_back(CI->stripPointerCasts());
      continue;
    }
    if (const auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
      Worklist.push_back(GEP->getPointerOperand());
      continue;
    }
    // Push all the incoming values of phi node into the worklist for
    // processing.
    if (const auto *PN = dyn_cast<PHINode>(V)) {
      for (Value *InV: PN->incoming_values())
        Worklist.push_back(InV);
      continue;
    }
    if (const auto *SI = dyn_cast<SelectInst>(V)) {
      // Push in the true and false values
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }
    if (isa<Constant>(V)) {
      // We found at least one base pointer which is non-null, so this derived
      // pointer is not exclusively derived from null.
      if (V != Constant::getNullValue(V->getType()))
        isExclusivelyDerivedFromNull = false;
      // Continue processing the remaining values to make sure it's exclusively
      // constant.
      continue;
    }
    // At this point, we know that the base pointer is not exclusively
    // constant.
    return BaseType::NonConstant;
  }
  // Now, we know that the base pointer is exclusively constant, but we need to
  // differentiate between exclusive null constant and non-null constant.
  return isExclusivelyDerivedFromNull ?
      BaseType::ExclusivelyNull :
      BaseType::ExclusivelySomeConstant;
}
/// Records MI's register operands: definitions go into RegDefs, and read
/// physical registers go into PhysRegUses.
static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (!Op.isReg())
      continue;
    if (Op.isDef()) {
      RegDefs.insert(Op.getReg());
    } else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg())) {
      PhysRegUses.insert(Op.getReg());
    }
  }
}
// Pass entry point: gathers dynamically observed aliases, finds those the
// checked AA misses, then sorts and reports them. Never modifies the module.
bool AliasAnalysisChecker::runOnModule(Module &M) {
  DenseSet<ValuePair> DynamicAliases;
  collectDynamicAliases(DynamicAliases);
  NumDynamicAliases = DynamicAliases.size();

  vector<ValuePair> MissingAliases;
  collectMissingAliases(DynamicAliases, MissingAliases);

  sortMissingAliases(MissingAliases);

  reportMissingAliases(MissingAliases);

  // Analysis only; the module is unchanged.
  return false;
}
// Builds the Uses map: for each instruction in the loop header, a bitvector
// marking which root's iteration (or the loop-increment slot) uses it.
// Returns false if any root's use set differs in size from the base IV's,
// since then the iterations cannot be matched up.
bool LoopReroll::DAGRootTracker::collectUsedInstructions(SmallInstructionSet &PossibleRedSet) {
  // Populate the MapVector with all instructions in the block, in order first,
  // so we can iterate over the contents later in perfect order.
  for (auto &I : *L->getHeader()) {
    Uses[&I].resize(IL_End);
  }

  SmallInstructionSet Exclude;
  Exclude.insert(Roots.begin(), Roots.end());
  Exclude.insert(LoopIncs.begin(), LoopIncs.end());

  // Mark users of the base induction variable (iteration 0).
  DenseSet<Instruction*> VBase;
  collectInLoopUserSet(IV, Exclude, PossibleRedSet, VBase);
  for (auto *I : VBase) {
    Uses[I].set(0);
  }

  unsigned Idx = 1;
  for (auto *Root : Roots) {
    DenseSet<Instruction*> V;
    collectInLoopUserSet(Root, Exclude, PossibleRedSet, V);

    // While we're here, check the use sets are the same size.
    if (V.size() != VBase.size()) {
      DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n");
      return false;
    }

    for (auto *I : V) {
      Uses[I].set(Idx);
    }
    ++Idx;
  }

  // Make sure the loop increments are also accounted for.
  Exclude.clear();
  Exclude.insert(Roots.begin(), Roots.end());

  DenseSet<Instruction*> V;
  collectInLoopUserSet(LoopIncs, Exclude, PossibleRedSet, V);
  for (auto *I : V) {
    Uses[I].set(IL_LoopIncIdx);
  }

  if (IV != RealIV)
    Uses[RealIV].set(IL_LoopIncIdx);

  return true;
}
// Collect the set of all users of the provided root instruction. This set of // users contains not only the direct users of the root instruction, but also // all users of those users, and so on. There are two exceptions: // // 1. Instructions in the set of excluded instructions are never added to the // use set (even if they are users). This is used, for example, to exclude // including root increments in the use set of the primary IV. // // 2. Instructions in the set of final instructions are added to the use set // if they are users, but their users are not added. This is used, for // example, to prevent a reduction update from forcing all later reduction // updates into the use set. void LoopReroll::DAGRootTracker::collectInLoopUserSet( Instruction *Root, const SmallInstructionSet &Exclude, const SmallInstructionSet &Final, DenseSet<Instruction *> &Users) { SmallInstructionVector Queue(1, Root); while (!Queue.empty()) { Instruction *I = Queue.pop_back_val(); if (!Users.insert(I).second) continue; if (!Final.count(I)) for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (PHINode *PN = dyn_cast<PHINode>(User)) { // Ignore "wrap-around" uses to PHIs of this loop's header. if (PN->getIncomingBlock(U) == L->getHeader()) continue; } if (L->contains(User) && !Exclude.count(User)) { Queue.push_back(User); } } // We also want to collect single-user "feeder" values. for (User::op_iterator OI = I->op_begin(), OIE = I->op_end(); OI != OIE; ++OI) { if (Instruction *Op = dyn_cast<Instruction>(*OI)) if (Op->hasOneUse() && L->contains(Op) && !Exclude.count(Op) && !Final.count(Op)) Queue.push_back(Op); } } }
// Randomly reorders V's use list (and, recursively, the use lists of
// non-global constant operands) using Gen; Seen prevents revisiting.
// NOTE(review): this definition appears truncated here (no closing brace for
// the function); preserved as-is.
static void shuffleValueUseLists(Value *V, std::minstd_rand0 &Gen,
                                 DenseSet<Value *> &Seen) {
  if (!Seen.insert(V).second)
    return;

  // Constants (other than globals) are not in any function's use graph, so
  // recurse into their operands explicitly.
  if (auto *C = dyn_cast<Constant>(V))
    if (!isa<GlobalValue>(C))
      for (Value *Op : C->operands())
        shuffleValueUseLists(Op, Gen, Seen);

  if (V->use_empty() || std::next(V->use_begin()) == V->use_end())
    // Nothing to shuffle for 0 or 1 users.
    return;

  // Generate random numbers between 10 and 99, which will line up nicely in
  // debug output. We're not worried about collisons here.
  DEBUG(dbgs() << "V = "; V->dump());
  std::uniform_int_distribution<short> Dist(10, 99);
  SmallDenseMap<const Use *, short, 16> Order;
  for (const Use &U : V->uses()) {
    auto I = Dist(Gen);
    Order[&U] = I;
    DEBUG(dbgs() << " - order: " << I << ", U = "; U.getUser()->dump());
  }

  DEBUG(dbgs() << " => shuffle\n");
  V->sortUseList(
      [&Order](const Use &L, const Use &R) { return Order[&L] < Order[&R]; });

  DEBUG({
    for (const Use &U : V->uses())
      DEBUG(dbgs() << " - order: " << Order.lookup(&U) << ", U = ";
            U.getUser()->dump());
  });
/// ProcessPHI - Process PHI node in TailBB by turning it into a copy in PredBB. /// Remember the source register that's contributed by PredBB and update SSA /// update map. void TailDuplicatePass::ProcessPHI(MachineInstr *MI, MachineBasicBlock *TailBB, MachineBasicBlock *PredBB, DenseMap<unsigned, unsigned> &LocalVRMap, SmallVector<std::pair<unsigned,unsigned>, 4> &Copies, const DenseSet<unsigned> &RegsUsedByPhi, bool Remove) { unsigned DefReg = MI->getOperand(0).getReg(); unsigned SrcOpIdx = getPHISrcRegOpIdx(MI, PredBB); assert(SrcOpIdx && "Unable to find matching PHI source?"); unsigned SrcReg = MI->getOperand(SrcOpIdx).getReg(); const TargetRegisterClass *RC = MRI->getRegClass(DefReg); LocalVRMap.insert(std::make_pair(DefReg, SrcReg)); // Insert a copy from source to the end of the block. The def register is the // available value liveout of the block. unsigned NewDef = MRI->createVirtualRegister(RC); Copies.push_back(std::make_pair(NewDef, SrcReg)); if (isDefLiveOut(DefReg, TailBB, MRI) || RegsUsedByPhi.count(DefReg)) AddSSAUpdateEntry(DefReg, NewDef, PredBB); if (!Remove) return; // Remove PredBB from the PHI node. MI->RemoveOperand(SrcOpIdx+1); MI->RemoveOperand(SrcOpIdx); if (MI->getNumOperands() == 1) MI->eraseFromParent(); }
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update /// the source operands due to earlier PHI translation. void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB, MachineBasicBlock *PredBB, MachineFunction &MF, DenseMap<unsigned, unsigned> &LocalVRMap, const DenseSet<unsigned> &UsedByPhi) { MachineInstr *NewMI = TII->duplicate(MI, MF); for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { MachineOperand &MO = NewMI->getOperand(i); if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; if (MO.isDef()) { const TargetRegisterClass *RC = MRI->getRegClass(Reg); unsigned NewReg = MRI->createVirtualRegister(RC); MO.setReg(NewReg); LocalVRMap.insert(std::make_pair(Reg, NewReg)); if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg)) AddSSAUpdateEntry(Reg, NewReg, PredBB); } else { DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg); if (VI != LocalVRMap.end()) MO.setReg(VI->second); } } PredBB->insert(PredBB->end(), NewMI); }
// Strips GV's definition when its comdat has been replaced by one from the
// destination module: functions lose their body, variables their
// initializer, and aliases are replaced with a fresh external declaration
// (aliases cannot exist without an aliasee).
void ModuleLinker::dropReplacedComdat(
    GlobalValue &GV, const DenseSet<const Comdat *> &ReplacedDstComdats) {
  Comdat *C = GV.getComdat();
  if (!C)
    return;
  if (!ReplacedDstComdats.count(C))
    return;
  // Unused values can simply be removed outright.
  if (GV.use_empty()) {
    GV.eraseFromParent();
    return;
  }
  if (auto *F = dyn_cast<Function>(&GV)) {
    F->deleteBody();
  } else if (auto *Var = dyn_cast<GlobalVariable>(&GV)) {
    Var->setInitializer(nullptr);
  } else {
    auto &Alias = cast<GlobalAlias>(GV);
    Module &M = *Alias.getParent();
    PointerType &Ty = *cast<PointerType>(Alias.getType());
    GlobalValue *Declaration;
    if (auto *FTy = dyn_cast<FunctionType>(Alias.getValueType())) {
      Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage, "", &M);
    } else {
      Declaration =
          new GlobalVariable(M, Ty.getElementType(), /*isConstant*/ false,
                             GlobalValue::ExternalLinkage,
                             /*Initializer*/ nullptr);
    }
    // The declaration inherits the alias's name and all of its uses.
    Declaration->takeName(&Alias);
    Alias.replaceAllUsesWith(Declaration);
    Alias.eraseFromParent();
  }
}
// Parses the bitcode buffer and creates symbol bodies for its defined
// symbols. Comdat groups already claimed (present in ComdatGroups) are
// discarded; only symbols from newly kept comdats survive.
void BitcodeFile::parse(DenseSet<StringRef> &ComdatGroups) {
  LLVMContext Context;
  std::unique_ptr<IRObjectFile> Obj = check(IRObjectFile::create(MB, Context));
  const Module &M = Obj->getModule();

  DenseSet<const Comdat *> KeptComdats;
  for (const auto &P : M.getComdatSymbolTable()) {
    // Names are saved because the module (and its strings) dies with the
    // local Context at the end of this function.
    StringRef N = Saver.save(P.first());
    if (ComdatGroups.insert(N).second)
      KeptComdats.insert(&P.second);
  }

  for (const BasicSymbolRef &Sym : Obj->symbols())
    if (!shouldSkip(Sym))
      SymbolBodies.push_back(createSymbolBody(KeptComdats, *Obj, Sym));
}
// Scans instructions from Start up to End (or the end of Start's block),
// collecting calls into Calls. If the block's terminator is reached before
// End, unvisited successor blocks are queued onto Worklist.
static void scanOneBB(Instruction *Start, Instruction *End,
                      std::vector<CallInst *> &Calls,
                      DenseSet<BasicBlock *> &Seen,
                      std::vector<BasicBlock *> &Worklist) {
  for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
                            BBE1 = BasicBlock::iterator(End);
       BBI != BBE0 && BBI != BBE1; BBI++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
      Calls.push_back(CI);

    // FIXME: This code does not handle invokes
    assert(!isa<InvokeInst>(&*BBI) &&
           "support for invokes in poll code needed");

    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (BBI->isTerminator()) {
      BasicBlock *BB = BBI->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (Seen.insert(Succ).second) {
          Worklist.push_back(Succ);
        }
      }
    }
  }
}
MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (Constants[i].isMachineConstantPoolEntry()) {
      Deleted.insert(Constants[i].Val.MachineCPVal);
      delete Constants[i].Val.MachineCPVal;
    }
  // Delete the values only shared through MachineCPVsSharingEntries, skipping
  // anything already freed above.
  for (DenseSet<MachineConstantPoolValue*>::iterator I =
       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
       I != E; ++I) {
    if (Deleted.count(*I) == 0)
      delete *I;
  }
}
/// DuplicateInstruction - Duplicate a TailBB instruction to PredBB and update /// the source operands due to earlier PHI translation. void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB, MachineBasicBlock *PredBB, MachineFunction &MF, DenseMap<unsigned, unsigned> &LocalVRMap, const DenseSet<unsigned> &UsedByPhi) { MachineInstr *NewMI = TII->duplicate(MI, MF); for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { MachineOperand &MO = NewMI->getOperand(i); if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; if (MO.isDef()) { const TargetRegisterClass *RC = MRI->getRegClass(Reg); unsigned NewReg = MRI->createVirtualRegister(RC); MO.setReg(NewReg); LocalVRMap.insert(std::make_pair(Reg, NewReg)); if (isDefLiveOut(Reg, TailBB, MRI) || UsedByPhi.count(Reg)) AddSSAUpdateEntry(Reg, NewReg, PredBB); } else { DenseMap<unsigned, unsigned>::iterator VI = LocalVRMap.find(Reg); if (VI != LocalVRMap.end()) { MO.setReg(VI->second); // Clear any kill flags from this operand. The new register could have // uses after this one, so kills are not valid here. MO.setIsKill(false); MRI->constrainRegClass(VI->second, MRI->getRegClass(Reg)); } } } PredBB->insert(PredBB->instr_end(), NewMI); }