bool LICM::analyze() { IonSpew(IonSpew_LICM, "Beginning LICM pass."); // Iterate in RPO to visit outer loops before inner loops. for (ReversePostorderIterator i(graph.rpoBegin()); i != graph.rpoEnd(); i++) { MBasicBlock *header = *i; // Skip non-headers and self-loops. if (!header->isLoopHeader() || header->numPredecessors() < 2) continue; // Attempt to optimize loop. Loop loop(mir, header); Loop::LoopReturn lr = loop.init(); if (lr == Loop::LoopReturn_Error) return false; if (lr == Loop::LoopReturn_Skip) { graph.unmarkBlocks(); continue; } if (!loop.optimize()) return false; graph.unmarkBlocks(); } return true; }
// A critical edge is an edge which is neither its successor's only predecessor // nor its predecessor's only successor. Critical edges must be split to // prevent copy-insertion and code motion from affecting other edges. bool ion::SplitCriticalEdges(MIRGraph &graph) { for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) { if (block->numSuccessors() < 2) continue; for (size_t i = 0; i < block->numSuccessors(); i++) { MBasicBlock *target = block->getSuccessor(i); if (target->numPredecessors() < 2) continue; // Create a new block inheriting from the predecessor. MBasicBlock *split = MBasicBlock::NewSplitEdge(graph, block->info(), *block); split->setLoopDepth(block->loopDepth()); graph.insertBlockAfter(*block, split); split->end(MGoto::New(target)); block->replaceSuccessor(i, split); target->replacePredecessor(*block, split); } } return true; }
// Identify the set of blocks belonging to the loop headed by header_, mark
// them, and collect hoistable instructions into the worklist. Returns
// LoopReturn_Skip (a soft error) when the loop's control flow is too
// complex to handle safely, LoopReturn_Error on OOM.
Loop::LoopReturn
Loop::init()
{
    IonSpew(IonSpew_LICM, "Loop identified, headed by block %d", header_->id());
    IonSpew(IonSpew_LICM, "footer is block %d", header_->backedge()->id());

    // The first predecessor of the loop header must dominate the header.
    JS_ASSERT(header_->id() > header_->getPredecessor(0)->id());

    // Loops from backedge to header and marks all visited blocks
    // as part of the loop. At the same time add all hoistable instructions
    // (in RPO order) to the instruction worklist.
    Vector<MBasicBlock *, 1, IonAllocPolicy> inlooplist;
    if (!inlooplist.append(header_->backedge()))
        return LoopReturn_Error;
    header_->backedge()->mark();

    while (!inlooplist.empty()) {
        // Peek (don't pop) so that newly discovered predecessors are
        // processed before this block's instructions are collected.
        MBasicBlock *block = inlooplist.back();

        // Hoisting requires more finesse if the loop contains a block that
        // self-dominates: there exists control flow that may enter the loop
        // without passing through the loop preheader.
        //
        // Rather than perform a complicated analysis of the dominance graph,
        // just return a soft error to ignore this loop.
        if (block->immediateDominator() == block) {
            // Drain any instructions already queued before bailing out so
            // no stale entries remain on the worklist.
            while (!worklist_.empty())
                popFromWorklist();
            return LoopReturn_Skip;
        }

        // Add not yet visited predecessors to the inlooplist.
        if (block != header_) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->isMarked())
                    continue;
                if (!inlooplist.append(pred))
                    return LoopReturn_Error;
                pred->mark();
            }
        }

        // If any block was added, process them first.
        if (block != inlooplist.back())
            continue;

        // Add all instructions in this block (but the control instruction) to the worklist
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            MInstruction *ins = *i;

            if (isHoistable(ins)) {
                if (!insertInWorklist(ins))
                    return LoopReturn_Error;
            }
        }

        // All successors of this block are visited.
        inlooplist.popBack();
    }

    return LoopReturn_Success;
}
// Builds per-block live-in sets (liveIn) and the initial live intervals for
// every virtual register, walking blocks in reverse order so successors are
// processed before their predecessors. The forLSRA flag selects between the
// LSRA and backtracking allocators' interval conventions. Returns false on
// OOM or cancellation.
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    // Work list used to patch up loops whose blocks are not contiguous in
    // the block ordering; loopDone tracks which loop blocks were visited.
    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    // Iterate blocks last-to-first: liveness flows backwards.
    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                // The phi operand flowing out of this block is live here.
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block, a define shortens
        // the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        // Don't add a clobber range for a register that is
                        // already claimed by a preset output of this call.
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter))
                            {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    // Shorten the interval: it now starts at the definition.
                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;
                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        // Calls have already added fixed ranges for every
                        // register above, so a preset temp needs nothing more.
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        // Fixed uses on calls are handled above using the
                        // fixed intervals.
                        to = (use->usedAtStart() || ins->isCall()) ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If end is reached without finding a non-OSR block, then no more work items were found.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
// Sink pass: move instructions (or recover them on bailout) closer to the
// point where their results are actually used, cloning into resume points
// for non-dominated uses. Also recovers instructions with no live uses on
// the bailout path. Returns false on OOM or cancellation.
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        // Iterate instructions in reverse; advance the iterator before any
        // mutation so removing/moving |ins| doesn't invalidate it.
        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the instruction is a Phi, then we should dominate the
                // predecessor from which the value is coming from.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                // If the dominator is already this block, no sinking is
                // possible; stop scanning uses early.
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // We have no uses, so sink this instruction in all the bailout
            // paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, " No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarly moved here as the above code deals with
            // Dead Code elimination, which got moved into this Sink phase, as
            // the Dead Code elimination used to move instructions with no-live
            // uses to the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that the
            // side-effect is not observed. In the mean time, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is it-self inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevent moving the
            // computation of the arguments into an inline function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // correspond to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instruction which can be recovered on bailout and which are
            // sinkable can be moved into blocks which are below while filling
            // the resume points with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors. In such case, we cannot safely move
            // bailing instruction to these blocks as we have no way to bailout.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, " Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, " into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block excepts
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                // Advance before replaceProducer mutates the use list.
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block in which the
                // instruction would be moved into.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction in a different block, we should
            // verify that we do not carry over a resume point which would refer
            // to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now, that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
// OSR fixups serve the purpose of representing the non-OSR entry into a loop
// when the only real entry is an OSR entry into the middle. However, if the
// entry into the middle is subsequently folded away, the loop may actually
// have become unreachable. Mark-and-sweep all blocks to remove all such code.
bool
ValueNumberer::cleanupOSRFixups()
{
    // Mark. Seed the worklist with the two known roots: the normal entry
    // block and the OSR entry block (hence numMarked starts at 2).
    Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
    unsigned numMarked = 2;
    graph_.entryBlock()->mark();
    graph_.osrBlock()->mark();
    if (!worklist.append(graph_.entryBlock()) || !worklist.append(graph_.osrBlock()))
        return false;
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();
        for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
            MBasicBlock* succ = block->getSuccessor(i);
            if (!succ->isMarked()) {
                ++numMarked;
                succ->mark();
                if (!worklist.append(succ))
                    return false;
            } else if (succ->isLoopHeader() &&
                       succ->loopPredecessor() == block &&
                       succ->numPredecessors() == 3)
            {
                // Unmark fixup blocks if the loop predecessor is marked after
                // the loop header.
                succ->getPredecessor(1)->unmarkUnchecked();
            }
        }

        // OSR fixup blocks are needed if and only if the loop header is
        // reachable from its backedge (via the OSR block) and not from its
        // original loop predecessor.
        //
        // Thus OSR fixup blocks are removed if the loop header is not
        // reachable, or if the loop header is reachable from both its backedge
        // and its original loop predecessor.
        if (block->isLoopHeader()) {
            MBasicBlock* maybeFixupBlock = nullptr;
            if (block->numPredecessors() == 2) {
                maybeFixupBlock = block->getPredecessor(0);
            } else {
                MOZ_ASSERT(block->numPredecessors() == 3);
                if (!block->loopPredecessor()->isMarked())
                    maybeFixupBlock = block->getPredecessor(1);
            }

            // Keep the fixup block alive only if it is still needed: it must
            // not already be marked and must have no predecessors of its own.
            if (maybeFixupBlock &&
                !maybeFixupBlock->isMarked() &&
                maybeFixupBlock->numPredecessors() == 0)
            {
                MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
                           "OSR fixup block should have exactly one successor");
                MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
                           "OSR fixup block shouldn't be the entry block");
                MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
                           "OSR fixup block shouldn't be the OSR entry block");
                maybeFixupBlock->mark();
            }
        }
    }

    // And sweep.
    return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
}
// Removes every block transitively reachable through |start|'s successors.
// Blocks that are also reachable from unmarked (live) predecessors are kept;
// for those, only the dead predecessor edges are severed. Returns false on
// OOM.
bool
MIRGraph::removeSuccessorBlocks(MBasicBlock* start)
{
    if (!start->hasLastIns())
        return true;

    start->mark();

    // Mark all successors.
    Vector<MBasicBlock*, 4, SystemAllocPolicy> blocks;
    for (size_t i = 0; i < start->numSuccessors(); i++) {
        if (start->getSuccessor(i)->isMarked())
            continue;
        if (!blocks.append(start->getSuccessor(i)))
            return false;
        start->getSuccessor(i)->mark();
    }
    // Worklist-style transitive closure: |blocks| grows while we scan it.
    for (size_t i = 0; i < blocks.length(); i++) {
        MBasicBlock* block = blocks[i];
        if (!block->hasLastIns())
            continue;

        for (size_t j = 0; j < block->numSuccessors(); j++) {
            if (block->getSuccessor(j)->isMarked())
                continue;
            if (!blocks.append(block->getSuccessor(j)))
                return false;
            block->getSuccessor(j)->mark();
        }
    }

    if (osrBlock()) {
        if (osrBlock()->getSuccessor(0)->isMarked())
            osrBlock()->mark();
    }

    // Remove blocks whose predecessors are all marked (i.e. also dead).
    // Blocks with a live predecessor stay; only their dead edges go.
    for (size_t i = 0; i < blocks.length(); i++) {
        MBasicBlock* block = blocks[i];
        bool allMarked = true;
        // Renamed from a shadowed 'i' in the original: scan the block's
        // predecessors without colliding with the outer loop index.
        for (size_t p = 0; p < block->numPredecessors(); p++) {
            if (block->getPredecessor(p)->isMarked())
                continue;
            allMarked = false;
            break;
        }
        if (allMarked) {
            removeBlock(block);
        } else {
            MOZ_ASSERT(block != osrBlock());
            for (size_t j = 0; j < block->numPredecessors(); ) {
                if (!block->getPredecessor(j)->isMarked()) {
                    j++;
                    continue;
                }
                // removePredecessor shifts the remaining entries down,
                // so don't advance j after a removal.
                block->removePredecessor(block->getPredecessor(j));
            }

            // This shouldn't have any instructions yet.
            MOZ_ASSERT(block->begin() == block->end());
        }
    }

    if (osrBlock()) {
        if (osrBlock()->getSuccessor(0)->isDead())
            removeBlock(osrBlock());
    }

    for (size_t i = 0; i < blocks.length(); i++)
        blocks[i]->unmark();
    start->unmark();

    return true;
}
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
{
    // Removes blocks that are not marked from the graph. For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value. Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.

    // Ids are reassigned downward from marked_; iterating in postorder
    // assigns descending ids so the final numbering is consistent.
    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        // Advance before any removal so the iterator stays valid.
        iter++;

        // Unconditionally clear the dominators. It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            if (block->numPredecessors() > 1) {
                // If this block had phis, then any reachable
                // predecessors need to have the successorWithPhis
                // flag cleared.
                for (size_t i = 0; i < block->numPredecessors(); i++)
                    block->getPredecessor(i)->setSuccessorWithPhis(nullptr, 0);
            }

            if (block->isLoopBackedge()) {
                // NB. We have to update the loop header if we
                // eliminate the backedge. At first I thought this
                // check would be insufficient, because it would be
                // possible to have code like this:
                //
                //    while (true) {
                //       ...;
                //       if (1 == 1) break;
                //    }
                //
                // in which the backedge is removed as part of
                // rewriting the condition, but no actual blocks are
                // removed. However, in all such cases, the backedge
                // would be a critical edge and hence the critical
                // edge block is being removed.
                block->loopHeaderOfBackedge()->clearLoopHeader();
            }

            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    // Removing a predecessor may leave phis in succ with
                    // identical operands; remember that for a later pass.
                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            // When we remove a call, we can't leave the corresponding MPassArg
            // in the graph. Since lowering will fail. Replace it with the
            // argument for the exceptional case when it is kept alive in a
            // ResumePoint. DCE will remove the unused MPassArg instruction.
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                if (iter->isCall()) {
                    MCall *call = iter->toCall();
                    for (size_t i = 0; i < call->numStackArgs(); i++) {
                        JS_ASSERT(call->getArg(i)->isPassArg());
                        JS_ASSERT(call->getArg(i)->hasOneDefUse());
                        MPassArg *arg = call->getArg(i)->toPassArg();
                        arg->replaceAllUsesWith(arg->getArgument());
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    // Every marked block must have received a new id.
    JS_ASSERT(id == 0);
    return true;
}