/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVIIter SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
                                          VNInfo *VNI) {
  SibValueMap::value_type *SVI = &*SVIIter;

  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate.
  SmallSetVector<SibValueMap::value_type *, 8> WorkList;
  WorkList.insert(SVI);

  do {
    SVI = WorkList.pop_back_val();
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
    VNI = 0;

    SibValueInfo &SV = SVI->second;
    if (!SV.SpillMBB)
      SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << "  prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We
    // don't propagate values of registers that are about to spill.
    bool PropSpill = !DisableHoisting && !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
         DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      if (!DepSV.SpillMBB)
        DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all predecessors.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (DepSV.KillsSource && SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes
            // the interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //
            // This hoist only helps when the DepSV copy kills its source.
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make
          // sure that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.
          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      WorkList.insert(&*DepSVI);

      DEBUG(dbgs() << "  update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}
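// The following is a minimal, self-contained sketch (not part of the pass) of
// the worklist fixed-point pattern that propagateSiblingValue uses above:
// SmallSetVector deduplicates re-inserted entries while still giving LIFO
// processing, so a node is never queued twice at the same time. The toy Node
// type and the propagation rule are illustrative assumptions, not LLVM APIs.

#include "llvm/ADT/SmallSetVector.h"
#include <vector>

namespace {
struct Node {
  int Value = -1;           // -1 means "not yet determined".
  std::vector<Node *> Deps; // Nodes whose value depends on this one.
};

// Propagate Root's value to its transitive dependents until nothing changes.
void propagateValues(Node *Root) {
  llvm::SmallSetVector<Node *, 8> WorkList;
  WorkList.insert(Root);
  do {
    Node *N = WorkList.pop_back_val();
    for (Node *Dep : N->Deps) {
      if (Dep->Value == N->Value)
        continue;            // Nothing changed; don't revisit this dependent.
      Dep->Value = N->Value; // Something changed in Dep...
      WorkList.insert(Dep);  // ...so its own dependents must be revisited.
    }
  } while (!WorkList.empty());
}
} // end anonymous namespace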
/// Sort the blocks, taking special care to make sure that loops are not
/// interrupted by blocks not dominated by their header.
/// TODO: There are many opportunities for improving the heuristics here.
/// Explore them.
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
                       const MachineDominatorTree &MDT) {
  // Prepare for a topological sort: Record the number of predecessors each
  // block has, ignoring loop backedges.
  MF.RenumberBlocks();
  SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
  for (MachineBasicBlock &MBB : MF) {
    unsigned N = MBB.pred_size();
    if (MachineLoop *L = MLI.getLoopFor(&MBB))
      if (L->getHeader() == &MBB)
        for (const MachineBasicBlock *Pred : MBB.predecessors())
          if (L->contains(Pred))
            --N;
    NumPredsLeft[MBB.getNumber()] = N;
  }

  // Topological sort the CFG, with additional constraints:
  //  - Between a loop header and the last block in the loop, there can be no
  //    blocks not dominated by the loop header.
  //  - It's desirable to preserve the original block order when possible.
  // We use two ready lists: Preferred and Ready. Preferred holds recently
  // processed successors, to help preserve block sequences from the original
  // order. Ready has the remaining ready blocks.
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbers>
      Preferred;
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbersBackwards>
      Ready;
  SmallVector<Entry, 4> Loops;
  for (MachineBasicBlock *MBB = &MF.front();;) {
    const MachineLoop *L = MLI.getLoopFor(MBB);
    if (L) {
      // If MBB is a loop header, add it to the active loop list. We can't
      // place any blocks that it doesn't dominate until we see the end of
      // the loop.
      if (L->getHeader() == MBB)
        Loops.push_back(Entry(L));
      // For each active loop the block is in, decrement the count. If MBB is
      // the last block in an active loop, take it off the list and pick up
      // any blocks deferred because the header didn't dominate them.
      for (Entry &E : Loops)
        if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
          for (auto DeferredBlock : E.Deferred)
            Ready.push(DeferredBlock);
      while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
        Loops.pop_back();
    }
    // The main topological sort logic.
    for (MachineBasicBlock *Succ : MBB->successors()) {
      // Ignore backedges.
      if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
        if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
          continue;
      // Decrement the predecessor count. If it's now zero, it's ready.
      if (--NumPredsLeft[Succ->getNumber()] == 0)
        Preferred.push(Succ);
    }
    // Determine the block to follow MBB. First try to find a preferred block,
    // to preserve the original block order when possible.
    MachineBasicBlock *Next = nullptr;
    while (!Preferred.empty()) {
      Next = Preferred.top();
      Preferred.pop();
      // If Next isn't dominated by the top active loop header, defer it until
      // that loop is done.
      if (!Loops.empty() &&
          !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
        Loops.back().Deferred.push_back(Next);
        Next = nullptr;
        continue;
      }
      // If Next was originally ordered before MBB, and it isn't because it
      // was loop-rotated above the header, it's not preferred.
      if (Next->getNumber() < MBB->getNumber() &&
          (!L || !L->contains(Next) ||
           L->getHeader()->getNumber() < Next->getNumber())) {
        Ready.push(Next);
        Next = nullptr;
        continue;
      }
      break;
    }
    // If we didn't find a suitable block in the Preferred list, check the
    // general Ready list.
    if (!Next) {
      // If there are no more blocks to process, we're done.
      if (Ready.empty()) {
        MaybeUpdateTerminator(MBB);
        break;
      }
      for (;;) {
        Next = Ready.top();
        Ready.pop();
        // If Next isn't dominated by the top active loop header, defer it
        // until that loop is done.
        if (!Loops.empty() &&
            !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
          Loops.back().Deferred.push_back(Next);
          continue;
        }
        break;
      }
    }
    // Move the next block into place and iterate.
    Next->moveAfter(MBB);
    MaybeUpdateTerminator(MBB);
    MBB = Next;
  }
  assert(Loops.empty() && "Active loop list not finished");
  MF.RenumberBlocks();

#ifndef NDEBUG
  SmallSetVector<MachineLoop *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");

    MachineLoop *Loop = MLI.getLoopFor(&MBB);
    if (Loop && &MBB == Loop->getHeader()) {
      // Loop header. The loop predecessor should be sorted above, and the
      // other predecessors should be backedges below.
      for (auto Pred : MBB.predecessors())
        assert(
            (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
            "Loop header predecessors must be loop predecessors or backedges");
      assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
    } else {
      // Not a loop header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
             "Blocks must be nested in their loops");
    }
    while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a loop header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}
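// An illustrative, self-contained sketch (toy adjacency lists, not
// MachineFunction APIs) of the core of the topological sort above: Kahn's
// algorithm driven by a priority queue keyed on the original block numbers,
// which is the role CompareBlockNumbers plays for the Preferred queue. The
// loop-deferral logic and the second Ready queue are intentionally omitted.

#include <functional>
#include <queue>
#include <vector>

// Graph[i] lists the successors of node i; node 0 is assumed to be the entry.
std::vector<int>
topoSortPreferOriginal(const std::vector<std::vector<int>> &Graph) {
  std::vector<int> NumPredsLeft(Graph.size(), 0);
  for (const auto &Succs : Graph)
    for (int S : Succs)
      ++NumPredsLeft[S];

  // Min-heap on the original node number, to preserve the original order
  // whenever the dependence structure allows it.
  std::priority_queue<int, std::vector<int>, std::greater<int>> Ready;
  for (int N = 0, E = (int)Graph.size(); N != E; ++N)
    if (NumPredsLeft[N] == 0)
      Ready.push(N);

  std::vector<int> Order;
  while (!Ready.empty()) {
    int N = Ready.top();
    Ready.pop();
    Order.push_back(N);
    // Releasing N may make some of its successors ready.
    for (int S : Graph[N])
      if (--NumPredsLeft[S] == 0)
        Ready.push(S);
  }
  return Order;
}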
/// \brief Figure out if the loop is worth full unrolling.
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
/// estimates this optimization. It computes the cost of the unrolled loop
/// (UnrolledCost) and the dynamic cost of the original loop
/// (RolledDynamicCost). By dynamic cost we mean that we won't count costs of
/// blocks that are known not to be executed (i.e. if we have a branch in the
/// loop and we know that at the given iteration its condition would be
/// resolved to true, we won't add up the cost of the 'false'-block).
/// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
/// the analysis failed (no benefits expected from the unrolling, or the loop
/// is too big to analyze), the returned value is None.
static Optional<EstimatedUnrollCost>
analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, DominatorTree &DT,
                      ScalarEvolution &SE, const TargetTransformInfo &TTI,
                      int MaxUnrolledLoopSize) {
  // We want to be able to scale offsets by the trip count and add more
  // offsets to them without checking for overflows, and we already don't want
  // to analyze *massive* trip counts, so we force the max to be reasonably
  // small.
  assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
         "The unroll iterations max is too large!");

  // Only analyze inner loops. We can't properly estimate cost of nested loops
  // and we won't visit inner loops again anyway.
  if (!L->empty())
    return None;

  // Don't simulate loops with a big or unknown tripcount.
  if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
      TripCount > UnrollMaxIterationsCountToAnalyze)
    return None;

  SmallSetVector<BasicBlock *, 16> BBWorklist;
  SmallSetVector<std::pair<BasicBlock *, BasicBlock *>, 4> ExitWorklist;
  DenseMap<Value *, Constant *> SimplifiedValues;
  SmallVector<std::pair<Value *, Constant *>, 4> SimplifiedInputValues;

  // The estimated cost of the unrolled form of the loop. We try to estimate
  // this by simplifying as much as we can while computing the estimate.
  int UnrolledCost = 0;

  // We also track the estimated dynamic (that is, actually executed) cost in
  // the rolled form. This helps identify cases when the savings from
  // unrolling aren't just exposing dead control flows, but actual reduced
  // dynamic instructions due to the simplifications which we expect to occur
  // after unrolling.
  int RolledDynamicCost = 0;

  // We track the simplification of each instruction in each iteration. We use
  // this to recursively merge costs into the unrolled cost on-demand so that
  // we don't count the cost of any dead code. This is essentially a map from
  // <instruction, int> to <bool, bool>, but stored as a densely packed struct.
  DenseSet<UnrolledInstState, UnrolledInstStateKeyInfo> InstCostMap;

  // A small worklist used to accumulate cost of instructions from each
  // observable and reached root in the loop.
  SmallVector<Instruction *, 16> CostWorklist;

  // PHI-used worklist used between iterations while accumulating cost.
  SmallVector<Instruction *, 4> PHIUsedList;

  // Helper function to accumulate cost for instructions in the loop.
  auto AddCostRecursively = [&](Instruction &RootI, int Iteration) {
    assert(Iteration >= 0 && "Cannot have a negative iteration!");
    assert(CostWorklist.empty() && "Must start with an empty cost list");
    assert(PHIUsedList.empty() && "Must start with an empty phi used list");
    CostWorklist.push_back(&RootI);
    for (;; --Iteration) {
      do {
        Instruction *I = CostWorklist.pop_back_val();

        // InstCostMap only uses I and Iteration as a key, the other two
        // values don't matter here.
        auto CostIter = InstCostMap.find({I, Iteration, 0, 0});
        if (CostIter == InstCostMap.end())
          // If an input to a PHI node comes from a dead path through the
          // loop we may have no cost data for it here. What that actually
          // means is that it is free.
          continue;
        auto &Cost = *CostIter;
        if (Cost.IsCounted)
          // Already counted this instruction.
          continue;

        // Mark that we are counting the cost of this instruction now.
        Cost.IsCounted = true;

        // If this is a PHI node in the loop header, just add it to the PHI
        // set.
        if (auto *PhiI = dyn_cast<PHINode>(I))
          if (PhiI->getParent() == L->getHeader()) {
            assert(Cost.IsFree && "Loop PHIs shouldn't be evaluated as they "
                                  "inherently simplify during unrolling.");
            if (Iteration == 0)
              continue;

            // Push the incoming value from the backedge into the PHI used
            // list if it is an in-loop instruction. We'll use this to
            // populate the cost worklist for the next iteration (as we count
            // backwards).
            if (auto *OpI = dyn_cast<Instruction>(
                    PhiI->getIncomingValueForBlock(L->getLoopLatch())))
              if (L->contains(OpI))
                PHIUsedList.push_back(OpI);
            continue;
          }

        // First accumulate the cost of this instruction.
        if (!Cost.IsFree) {
          UnrolledCost += TTI.getUserCost(I);
          DEBUG(dbgs() << "Adding cost of instruction (iteration " << Iteration
                       << "): ");
          DEBUG(I->dump());
        }

        // We must count the cost of every operand which is not free,
        // recursively. If we reach a loop PHI node, simply add it to the set
        // to be considered on the next iteration (backwards!).
        for (Value *Op : I->operands()) {
          // Check whether this operand is free due to being a constant or
          // outside the loop.
          auto *OpI = dyn_cast<Instruction>(Op);
          if (!OpI || !L->contains(OpI))
            continue;

          // Otherwise accumulate its cost.
          CostWorklist.push_back(OpI);
        }
      } while (!CostWorklist.empty());

      if (PHIUsedList.empty())
        // We've exhausted the search.
        break;

      assert(Iteration > 0 &&
             "Cannot track PHI-used values past the first iteration!");
      CostWorklist.append(PHIUsedList.begin(), PHIUsedList.end());
      PHIUsedList.clear();
    }
  };

  // Ensure that we don't violate the loop structure invariants relied on by
  // this analysis.
  assert(L->isLoopSimplifyForm() && "Must put loop into normal form first.");
  assert(L->isLCSSAForm(DT) &&
         "Must have loops in LCSSA form to track live-out values.");

  DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");

  // Simulate execution of each iteration of the loop counting instructions,
  // which would be simplified.
  // Since the same load will take different values on different iterations,
  // we literally have to go through all of the loop's iterations.
  for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
    DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n");

    // Prepare for the iteration by collecting any simplified entry or
    // backedge inputs.
    for (Instruction &I : *L->getHeader()) {
      auto *PHI = dyn_cast<PHINode>(&I);
      if (!PHI)
        break;

      // The loop header PHI nodes must have exactly two inputs: one from the
      // loop preheader and one from the loop latch.
      assert(PHI->getNumIncomingValues() == 2 &&
             "Must have an incoming value only for the preheader and the "
             "latch.");

      Value *V = PHI->getIncomingValueForBlock(
          Iteration == 0 ? L->getLoopPreheader() : L->getLoopLatch());
      Constant *C = dyn_cast<Constant>(V);
      if (Iteration != 0 && !C)
        C = SimplifiedValues.lookup(V);
      if (C)
        SimplifiedInputValues.push_back({PHI, C});
    }

    // Now clear and re-populate the map for the next iteration.
    SimplifiedValues.clear();
    while (!SimplifiedInputValues.empty())
      SimplifiedValues.insert(SimplifiedInputValues.pop_back_val());

    UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, SE, L);

    BBWorklist.clear();
    BBWorklist.insert(L->getHeader());
    // Note that we *must not* cache the size, this loop grows the worklist.
    for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
      BasicBlock *BB = BBWorklist[Idx];

      // Visit all instructions in the given basic block and try to simplify
      // it. We don't change the actual IR, just count optimization
      // opportunities.
      for (Instruction &I : *BB) {
        // Track this instruction's expected baseline cost when executing the
        // rolled loop form.
        RolledDynamicCost += TTI.getUserCost(&I);

        // Visit the instruction to analyze its loop cost after unrolling,
        // and if the visitor returns true, mark the instruction as free after
        // unrolling and continue.
        bool IsFree = Analyzer.visit(I);
        bool Inserted = InstCostMap.insert({&I, (int)Iteration,
                                            (unsigned)IsFree,
                                            /*IsCounted*/ false}).second;
        (void)Inserted;
        assert(Inserted && "Cannot have a state for an unvisited instruction!");

        if (IsFree)
          continue;

        // If the instruction might have a side-effect recursively account for
        // the cost of it and all the instructions leading up to it.
        if (I.mayHaveSideEffects())
          AddCostRecursively(I, Iteration);

        // Can't properly model a cost of a call.
        // FIXME: With a proper cost model we should be able to do it.
        if (isa<CallInst>(&I))
          return None;

        // If unrolled body turns out to be too big, bail out.
        if (UnrolledCost > MaxUnrolledLoopSize) {
          DEBUG(dbgs() << "  Exceeded threshold.. exiting.\n"
                       << "  UnrolledCost: " << UnrolledCost
                       << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize
                       << "\n");
          return None;
        }
      }

      TerminatorInst *TI = BB->getTerminator();

      // Add in the live successors by first checking whether we have a
      // terminator that may be simplified based on the values simplified by
      // this call.
      BasicBlock *KnownSucc = nullptr;
      if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
        if (BI->isConditional()) {
          if (Constant *SimpleCond =
                  SimplifiedValues.lookup(BI->getCondition())) {
            // Just take the first successor if condition is undef.
            if (isa<UndefValue>(SimpleCond))
              KnownSucc = BI->getSuccessor(0);
            else if (ConstantInt *SimpleCondVal =
                         dyn_cast<ConstantInt>(SimpleCond))
              KnownSucc = BI->getSuccessor(SimpleCondVal->isZero() ? 1 : 0);
          }
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
        if (Constant *SimpleCond =
                SimplifiedValues.lookup(SI->getCondition())) {
          // Just take the first successor if condition is undef.
          if (isa<UndefValue>(SimpleCond))
            KnownSucc = SI->getSuccessor(0);
          else if (ConstantInt *SimpleCondVal =
                       dyn_cast<ConstantInt>(SimpleCond))
            KnownSucc = SI->findCaseValue(SimpleCondVal).getCaseSuccessor();
        }
      }
      if (KnownSucc) {
        if (L->contains(KnownSucc))
          BBWorklist.insert(KnownSucc);
        else
          ExitWorklist.insert({BB, KnownSucc});
        continue;
      }

      // Add BB's successors to the worklist.
      for (BasicBlock *Succ : successors(BB))
        if (L->contains(Succ))
          BBWorklist.insert(Succ);
        else
          ExitWorklist.insert({BB, Succ});
      AddCostRecursively(*TI, Iteration);
    }

    // If we found no optimization opportunities on the first iteration, we
    // won't find them on later ones either.
    if (UnrolledCost == RolledDynamicCost) {
      DEBUG(dbgs() << "  No opportunities found.. exiting.\n"
                   << "  UnrolledCost: " << UnrolledCost << "\n");
      return None;
    }
  }

  while (!ExitWorklist.empty()) {
    BasicBlock *ExitingBB, *ExitBB;
    std::tie(ExitingBB, ExitBB) = ExitWorklist.pop_back_val();

    for (Instruction &I : *ExitBB) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *Op = PN->getIncomingValueForBlock(ExitingBB);
      if (auto *OpI = dyn_cast<Instruction>(Op))
        if (L->contains(OpI))
          AddCostRecursively(*OpI, TripCount - 1);
    }
  }

  DEBUG(dbgs() << "Analysis finished:\n"
               << "UnrolledCost: " << UnrolledCost << ", "
               << "RolledDynamicCost: " << RolledDynamicCost << "\n");
  return {{UnrolledCost, RolledDynamicCost}};
}
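// A toy, self-contained illustration of the UnrolledCost vs. RolledDynamicCost
// comparison computed above. The unit costs are assumptions standing in for
// TTI.getUserCost(); the point is only that when loads become constants after
// full unrolling, the unrolled estimate can fall far below
// TripCount * (per-iteration cost), which is what makes unrolling profitable.

#include <cstdio>

int main() {
  const int TripCount = 4;
  const int CostLoad = 1, CostAdd = 1, CostCmpBr = 1; // assumed unit costs

  // The rolled form executes every instruction on every iteration.
  int RolledDynamicCost = TripCount * (CostLoad + CostAdd + CostCmpBr);

  // After full unrolling of, say, a sum over a constant global array, the
  // loads fold to constants, the adds constant-fold, and the compare/branch
  // disappears, so nothing in the body contributes to the unrolled cost.
  int UnrolledCost = 0;

  std::printf("rolled=%d unrolled=%d\n", RolledDynamicCost, UnrolledCost);
  return 0;
}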
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block. Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at
  // the end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        AliasAnalysis::ModRefResult A =
            AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
      });

      // If all of the allocas were clobbered by the call then we're not
      // going to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory. Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
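// A small, self-contained sketch (toy values, not DSE itself) of the
// DeadStackObjects bookkeeping above: SmallSetVector supports count, remove,
// and remove_if, which is what makes it a good fit for this shrinking
// candidate set. mayReadPtr is a hypothetical stand-in for the alias query.

#include "llvm/ADT/SmallSetVector.h"

// Hypothetical stand-in for "might this call read pointer P?".
static bool mayReadPtr(int Call, int P) { return (Call & 1) && (P % 3 == 0); }

static int countSurvivingCandidates() {
  llvm::SmallSetVector<int, 16> DeadStackObjects;
  for (int P = 0; P != 8; ++P)
    DeadStackObjects.insert(P); // every object starts as a dead candidate

  const int Calls[] = {1, 2, 5};
  for (int C : Calls)
    // Any object the call might read is no longer a removal candidate, just
    // as the remove_if lambda above drops objects the call site touches.
    DeadStackObjects.remove_if([&](int P) { return mayReadPtr(C, P); });

  return (int)DeadStackObjects.size();
}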
/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same; stores to them are dead at
  // the end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                   << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
        return A == MRI_ModRef || A == MRI_Ref;
      });

      // If all of the allocas were clobbered by the call then we're not
      // going to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its
    // ordering (release/acquire/seq_cst). Fences only constrain the ordering
    // of already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory. Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
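// An illustrative IR example of the fence case handled above: the store to %A
// is still dead at the end of the function even though a fence separates it
// from the return, because a fence only orders stores that are otherwise
// visible; it cannot make a store to a non-escaping alloca visible.
//
//   %A = alloca i32
//   store i32 1, i32* %A   ; removable: %A never escapes
//   fence release
//   ret void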
/// Sort the blocks in RPO, taking special care to make sure that loops are
/// contiguous even in the case of split backedges.
///
/// TODO: Determine whether RPO is actually worthwhile, or whether we should
/// move to just a stable-topological-sort-based approach that would preserve
/// more of the original order.
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI) {
  // Note that we do our own RPO rather than using
  // "llvm/ADT/PostOrderIterator.h" because we want control over the order
  // that successors are visited in (see above). Also, we can sort the blocks
  // in the MachineFunction as we go.
  SmallPtrSet<MachineBasicBlock *, 16> Visited;
  SmallVector<POStackEntry, 16> Stack;

  MachineBasicBlock *EntryBlock = &*MF.begin();
  Visited.insert(EntryBlock);
  Stack.push_back(POStackEntry(EntryBlock, MF, MLI));

  for (;;) {
    POStackEntry &Entry = Stack.back();
    SmallVectorImpl<MachineBasicBlock *> &Succs = Entry.Succs;
    if (!Succs.empty()) {
      MachineBasicBlock *Succ = Succs.pop_back_val();
      if (Visited.insert(Succ).second)
        Stack.push_back(POStackEntry(Succ, MF, MLI));
      continue;
    }

    // Put the block in its position in the MachineFunction.
    MachineBasicBlock &MBB = *Entry.MBB;
    MBB.moveBefore(&*MF.begin());

    // Branch instructions may utilize a fallthrough, so update them if a
    // fallthrough has been added or removed.
    if (!MBB.empty() && MBB.back().isTerminator() && !MBB.back().isBranch() &&
        !MBB.back().isBarrier())
      report_fatal_error(
          "Non-branch terminator with fallthrough cannot yet be rewritten");
    if (MBB.empty() || !MBB.back().isTerminator() || MBB.back().isBranch())
      MBB.updateTerminator();

    Stack.pop_back();
    if (Stack.empty())
      break;
  }

  // Now that we've sorted the blocks in RPO, renumber them.
  MF.RenumberBlocks();

#ifndef NDEBUG
  SmallSetVector<MachineLoop *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");

    MachineLoop *Loop = MLI.getLoopFor(&MBB);
    if (Loop && &MBB == Loop->getHeader()) {
      // Loop header. The loop predecessor should be sorted above, and the
      // other predecessors should be backedges below.
      for (auto Pred : MBB.predecessors())
        assert(
            (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
            "Loop header predecessors must be loop predecessors or backedges");
      assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
    } else {
      // Not a loop header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
             "Blocks must be nested in their loops");
    }
    while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a loop header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}
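// An illustrative, self-contained sketch (toy adjacency lists, not the
// POStackEntry machinery) of the explicit-stack post-order walk performed
// above. Reversing the computed post-order yields the RPO that the pass
// materializes by physically moving the blocks.

#include <algorithm>
#include <utility>
#include <vector>

// Succs[i] lists the successors of node i; node 0 is assumed to be the entry.
std::vector<int> reversePostOrder(const std::vector<std::vector<int>> &Succs) {
  std::vector<bool> Visited(Succs.size(), false);
  // Each stack entry pairs a node with the successors not yet visited from it.
  std::vector<std::pair<int, std::vector<int>>> Stack;
  Visited[0] = true;
  Stack.push_back({0, Succs[0]});

  std::vector<int> PostOrder;
  while (!Stack.empty()) {
    auto &[N, Pending] = Stack.back();
    if (!Pending.empty()) {
      int S = Pending.back();
      Pending.pop_back();
      if (!Visited[S]) {
        Visited[S] = true;
        Stack.push_back({S, Succs[S]}); // descend into S first
      }
      continue;
    }
    PostOrder.push_back(N); // all successors done; emit N
    Stack.pop_back();
  }
  std::reverse(PostOrder.begin(), PostOrder.end());
  return PostOrder;
}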