// Visit all the blocks dominated by dominatorRoot.
bool
ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot)
{
    JitSpew(JitSpew_GVN, " Visiting dominator tree (with %llu blocks) rooted at block%u%s",
            uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
            dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
            dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
            dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
            " (merge point from normal entry and OSR entry)");
    MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
               "root is not a dominator tree root");

    // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
    // property that we'll always visit a block before any block it dominates,
    // so we can make a single pass through the list and see every full
    // redundance.
    size_t numVisited = 0;
    size_t numDiscarded = 0;
    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter++;

        // We're only visiting blocks in dominatorRoot's tree right now.
        if (!dominatorRoot->dominates(block))
            continue;

        // If this is a loop backedge, remember the header, as we may not be able
        // to find it after we simplify the block.
        MBasicBlock* header =
            block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;

        if (block->isMarked()) {
            // This block has become unreachable; handle it specially.
            if (!visitUnreachableBlock(block))
                return false;
            ++numDiscarded;
        } else {
            // Visit the block!
            if (!visitBlock(block, dominatorRoot))
                return false;
            ++numVisited;
        }

        // If the block is/was a loop backedge, check to see if the block that
        // is/was its header has optimizable phis, which would want a re-run.
        if (!rerun_ && header && loopHasOptimizablePhi(header)) {
            JitSpew(JitSpew_GVN,
                    " Loop phi in block%u can now be optimized; will re-run GVN!",
                    header->id());
            rerun_ = true;
            remainingBlocks_.clear();
        }

        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
                   "Visited blocks too many times");
        if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
            break;
    }

    totalNumVisited_ += numVisited;
    values_.clear();
    return true;
}
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // Check that every block is visited after all its predecessors (except backedges).
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JS_ASSERT(!block->isMarked());

        for (size_t i = 0; i < block->numPredecessors(); i++) {
            MBasicBlock *pred = block->getPredecessor(i);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }

        block->mark();
    }

    graph.unmarkBlocks();
}
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
{
    // Removes blocks that are not marked from the graph. For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value. Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.

    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        iter++;

        // Unconditionally clear the dominators. It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            if (block->numPredecessors() > 1) {
                // If this block had phis, then any reachable
                // predecessors need to have the successorWithPhis
                // flag cleared.
                for (size_t i = 0; i < block->numPredecessors(); i++)
                    block->getPredecessor(i)->setSuccessorWithPhis(nullptr, 0);
            }

            if (block->isLoopBackedge()) {
                // NB. We have to update the loop header if we
                // eliminate the backedge. At first I thought this
                // check would be insufficient, because it would be
                // possible to have code like this:
                //
                //    while (true) {
                //        ...;
                //        if (1 == 1) break;
                //    }
                //
                // in which the backedge is removed as part of
                // rewriting the condition, but no actual blocks are
                // removed. However, in all such cases, the backedge
                // would be a critical edge and hence the critical
                // edge block is being removed.
                block->loopHeaderOfBackedge()->clearLoopHeader();
            }

            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            // When we remove a call, we can't leave the corresponding MPassArg
            // in the graph, since lowering will fail. Replace it with the
            // argument for the exceptional case when it is kept alive in a
            // ResumePoint. DCE will remove the unused MPassArg instruction.
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                if (iter->isCall()) {
                    MCall *call = iter->toCall();
                    for (size_t i = 0; i < call->numStackArgs(); i++) {
                        JS_ASSERT(call->getArg(i)->isPassArg());
                        JS_ASSERT(call->getArg(i)->hasOneDefUse());
                        MPassArg *arg = call->getArg(i)->toPassArg();
                        arg->replaceAllUsesWith(arg->getArgument());
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    JS_ASSERT(id == 0);
    return true;
}