// Record, on each predecessor of a phi-bearing block, which successor has
// phis and which phi-operand index corresponds to that predecessor, so the
// phi input for a given edge can be found in O(1) later.
// Returns true always; the bool return matches the pass-pipeline convention.
bool ion::BuildPhiReverseMapping(MIRGraph &graph) {
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    //   * Blocks with less than two predecessors cannot have phis.
    //   * Breaks. A break always has exactly one successor, and the break
    //     catch block has exactly one predecessor for each break, as
    //     well as a final predecessor for the actual loop exit.
    //   * Continues. A continue always has exactly one successor, and the
    //     continue catch block has exactly one predecessor for each
    //     continue, as well as a final predecessor for the actual
    //     loop continuation. The continue itself has exactly one
    //     successor.
    //   * An if. Each branch as exactly one predecessor.
    //   * A switch. Each branch has exactly one predecessor.
    //   * Loop tail. A new block is always created for the exit, and if a
    //     break statement is present, the exit block will forward
    //     directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        // A block with fewer than two predecessors has no value merging to
        // do, so it must not carry phis; nothing to record for it.
        if (block->numPredecessors() < 2) {
            JS_ASSERT(block->phisEmpty());
            continue;
        }

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock *pred = block->getPredecessor(j);

#ifdef DEBUG
            // Debug-only verification of the structural property described
            // above: each predecessor has at most one successor with phis,
            // which makes the (successor, operand index) pair recorded below
            // unambiguous.
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock *successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            JS_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            // j is this predecessor's index in *block's predecessor list,
            // which by convention is also its phi-operand index.
            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}
// Debug-only dump of the LIR graph to stderr: for each block, its successors,
// phis, and instructions with their code positions, defs, temps, and uses.
// Compiles to a no-op in non-DEBUG builds.
void RegisterAllocator::dumpInstructions() {
#ifdef DEBUG
    fprintf(stderr, "Instructions:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        // Phis are printed with their input/output code positions and all
        // operands (one per predecessor edge).
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi* phi = block->getPhi(i);

            fprintf(stderr, "[%u,%u Phi] [def %s]", inputOf(phi).bits(), outputOf(phi).bits(), phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", phi->getOperand(j)->toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            fprintf(stderr, "[");
            // id 0 means the instruction has no assigned code position yet;
            // skip printing positions for it.
            if (ins->id() != 0)
                fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
            fprintf(stderr, "%s]", ins->opName());

            // Move groups are printed specially (their moves, in reverse
            // order) and carry no defs/temps/uses of their own.
            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp %s]", temp->toString());
            }

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!alloc->isBogus())
                    fprintf(stderr, " [use %s]", alloc->toString());
            }

            fprintf(stderr, "\n");
        }
    }
    fprintf(stderr, "\n");
#endif // DEBUG
}
// Debug-only dump of the integrity-checker's recorded (pre-allocation) view
// of the graph to stderr, followed by the intermediate allocations discovered
// at block ends during checking. Compiles to a no-op in non-DEBUG builds.
// Unlike RegisterAllocator::dumpInstructions, phi/instruction uses and temps
// are printed from the saved InstructionInfo snapshots, not the (possibly
// rewritten) LIR itself.
void AllocationIntegrityState::dump() {
#ifdef DEBUG
    fprintf(stderr, "Register Allocation Integrity State:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            const InstructionInfo& info = blocks[blockIndex].phis[i];
            LPhi* phi = block->getPhi(i);
            // All phis in a block occupy one contiguous position range: input
            // at the first phi's id, output at the last phi's id.
            CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
            CodePosition output(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[%u,%u Phi] [def %s] ", input.bits(), output.bits(), phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", info.inputs[j].toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            CodePosition input(ins->id(), CodePosition::INPUT);
            CodePosition output(ins->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[");
            // CodePosition::MIN marks an instruction without a real position
            // (id 0); omit the position prefix for those.
            if (input != CodePosition::MIN)
                fprintf(stderr, "%u,%u ", input.bits(), output.bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(), temp->toString());
            }

            // Print each use as the recorded original allocation plus (for
            // non-constants) the allocation assigned by the allocator.
            size_t index = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                fprintf(stderr, " [use %s", info.inputs[index++].toString());
                if (!alloc->isConstant())
                    fprintf(stderr, " %s", alloc->toString());
                fprintf(stderr, "]");
            }

            fprintf(stderr, "\n");
        }
    }

    // Print discovered allocations at the ends of blocks, in the order they
    // were discovered.
    Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    // NOTE(review): appendN's fallible-allocation result is ignored; if it
    // failed, the indexed stores below would be out of range. Presumably
    // acceptable for a debug-only dump — confirm.
    seenOrdered.appendN(IntegrityItem(), seen.count());
    // Items carry the index at which they were discovered, so scatter them
    // into discovery order.
    for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
        IntegrityItem item = iter.front();
        seenOrdered[item.index] = item;
    }
    if (!seenOrdered.empty()) {
        fprintf(stderr, "Intermediate Allocations:\n");
        for (size_t i = 0; i < seenOrdered.length(); i++) {
            IntegrityItem item = seenOrdered[i];
            fprintf(stderr, " block %u reg v%u alloc %s\n", item.block->mir()->id(), item.vreg, item.alloc.toString());
        }
    }
    fprintf(stderr, "\n");
#endif
}
// Sink optimization pass: move (or clone-and-recover) instructions closer to
// their uses, so they are not executed on paths that do not need them.
// Instructions whose only consumers are resume points / recovered-on-bailout
// definitions are marked recovered-on-bailout instead of being moved.
// Returns false on allocation failure or if compilation is cancelled.
bool Sink(MIRGenerator* mir, MIRGraph& graph) {
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        // Reverse iteration, advancing before use, so the current instruction
        // may be moved out of the block without invalidating the iterator.
        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() || ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout()) {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                // Resume-point and recovered-on-bailout consumers are not
                // "live" uses; they do not constrain where the instruction
                // can be sunk to.
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the instruction is a Phi, then we should dominate the
                // predecessor from which the value is coming from.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                // Once the dominator reaches the defining block itself there
                // is nothing to sink; stop scanning uses early.
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // We have no live uses, so sink this instruction in all the
            // bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, " No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarly moved here as the above code deals with
            // Dead Code elimination, which got moved into this Sink phase, as
            // the Dead Code elimination used to move instructions with no-live
            // uses to the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that the
            // side-effect is not observed. In the mean time, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is it-self inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevent moving the
            // computation of the arguments into an inline function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // correspond to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instruction which can be recovered on bailout and which are
            // sinkable can be moved into blocks which are below while filling
            // the resume points with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors. In such case, we cannot safely move
            // bailing instruction to these blocks as we have no way to bailout.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, " Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, " into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            // The clone stays in the original block, marked recovered-on-
            // bailout, to serve the non-dominated consumers.
            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block excepts
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                // Advance before mutating: replaceProducer unlinks this use
                // from ins's use list.
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block in which the
                // instruction would be moved into.
                if (usesDominator->dominates(consumerBlock) && (!consumer->isResumePoint() || consumer->toResumePoint() != entry)) {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction in a different block, we should
            // verify that we do not carry over a resume point which would refer
            // to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now, that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
// OSR fixups serve the purpose of representing the non-OSR entry into a loop
// when the only real entry is an OSR entry into the middle. However, if the
// entry into the middle is subsequently folded away, the loop may actually
// have become unreachable. Mark-and-sweep all blocks to remove all such code.
// Returns false only on allocation failure (worklist append).
bool ValueNumberer::cleanupOSRFixups() {
    // Mark.
    Vector<MBasicBlock*, 0, JitAllocPolicy> worklist(graph_.alloc());
    // The entry and OSR blocks are marked up front, so start the count at 2.
    unsigned numMarked = 2;
    graph_.entryBlock()->mark();
    graph_.osrBlock()->mark();
    if (!worklist.append(graph_.entryBlock()) || !worklist.append(graph_.osrBlock()))
        return false;
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();
        for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
            MBasicBlock* succ = block->getSuccessor(i);
            if (!succ->isMarked()) {
                ++numMarked;
                succ->mark();
                if (!worklist.append(succ))
                    return false;
            } else if (succ->isLoopHeader() && succ->loopPredecessor() == block && succ->numPredecessors() == 3) {
                // Unmark fixup blocks if the loop predecessor is marked after
                // the loop header.
                // NOTE: with 3 predecessors, predecessor index 1 is assumed to
                // be the fixup block (see the header-processing code below,
                // which makes the same assumption).
                succ->getPredecessor(1)->unmarkUnchecked();
            }
        }

        // OSR fixup blocks are needed if and only if the loop header is
        // reachable from its backedge (via the OSR block) and not from its
        // original loop predecessor.
        //
        // Thus OSR fixup blocks are removed if the loop header is not
        // reachable, or if the loop header is reachable from both its backedge
        // and its original loop predecessor.
        if (block->isLoopHeader()) {
            MBasicBlock* maybeFixupBlock = nullptr;
            if (block->numPredecessors() == 2) {
                // Two predecessors: loop predecessor + backedge; the former
                // may itself be a fixup block.
                maybeFixupBlock = block->getPredecessor(0);
            } else {
                // Three predecessors: loop predecessor, fixup block, backedge.
                MOZ_ASSERT(block->numPredecessors() == 3);
                if (!block->loopPredecessor()->isMarked())
                    maybeFixupBlock = block->getPredecessor(1);
            }

            // Re-mark the fixup block only when it is still needed: unmarked,
            // and with no predecessors of its own (i.e. a synthetic entry).
            if (maybeFixupBlock && !maybeFixupBlock->isMarked() && maybeFixupBlock->numPredecessors() == 0) {
                MOZ_ASSERT(maybeFixupBlock->numSuccessors() == 1,
                           "OSR fixup block should have exactly one successor");
                MOZ_ASSERT(maybeFixupBlock != graph_.entryBlock(),
                           "OSR fixup block shouldn't be the entry block");
                MOZ_ASSERT(maybeFixupBlock != graph_.osrBlock(),
                           "OSR fixup block shouldn't be the OSR entry block");
                maybeFixupBlock->mark();
                // NOTE(review): numMarked is not incremented here, while the
                // unmarkUnchecked above does not decrement it either —
                // presumably RemoveUnmarkedBlocks tolerates an approximate
                // count, or the two cancel out; confirm against its contract.
            }
        }
    }

    // And sweep.
    return RemoveUnmarkedBlocks(mir_, graph_, numMarked);
}
// Remove all blocks transitively reachable (only) through `start`'s
// successors. Blocks that are also reachable from outside the removed region
// are kept; only their edges from removed predecessors are dropped.
// Returns false on allocation failure while building the worklist.
bool MIRGraph::removeSuccessorBlocks(MBasicBlock* start) {
    // A block without a terminator has no successor edges yet.
    if (!start->hasLastIns())
        return true;

    // Mark `start` so the traversal below treats edges back into it as
    // staying inside the removed region.
    start->mark();

    // Mark all successors.
    Vector<MBasicBlock*, 4, SystemAllocPolicy> blocks;
    for (size_t i = 0; i < start->numSuccessors(); i++) {
        if (start->getSuccessor(i)->isMarked())
            continue;
        if (!blocks.append(start->getSuccessor(i)))
            return false;
        start->getSuccessor(i)->mark();
    }
    // `blocks` doubles as the worklist: appending while iterating gives a
    // breadth-first closure over successors.
    for (size_t i = 0; i < blocks.length(); i++) {
        MBasicBlock* block = blocks[i];
        if (!block->hasLastIns())
            continue;

        for (size_t j = 0; j < block->numSuccessors(); j++) {
            if (block->getSuccessor(j)->isMarked())
                continue;
            if (!blocks.append(block->getSuccessor(j)))
                return false;
            block->getSuccessor(j)->mark();
        }
    }

    // If the OSR entry feeds into the removed region, treat it as part of
    // the region too.
    if (osrBlock()) {
        if (osrBlock()->getSuccessor(0)->isMarked())
            osrBlock()->mark();
    }

    // Remove blocks.
    // If they don't have any predecessor
    for (size_t i = 0; i < blocks.length(); i++) {
        MBasicBlock* block = blocks[i];
        // A block is fully removable only if every predecessor is also in
        // the removed (marked) region.
        bool allMarked = true;
        for (size_t i = 0; i < block->numPredecessors(); i++) {
            if (block->getPredecessor(i)->isMarked())
                continue;
            allMarked = false;
            break;
        }
        if (allMarked) {
            removeBlock(block);
        } else {
            // Keep the block (it is reachable from outside), but detach it
            // from every removed predecessor.
            MOZ_ASSERT(block != osrBlock());
            for (size_t j = 0; j < block->numPredecessors(); ) {
                if (!block->getPredecessor(j)->isMarked()) {
                    j++;
                    continue;
                }
                // No j++ here: removePredecessor shifts later entries down,
                // so index j now names the next predecessor.
                block->removePredecessor(block->getPredecessor(j));
            }
            // This shouldn't have any instructions yet.
            MOZ_ASSERT(block->begin() == block->end());
        }
    }

    // The OSR block is special in that it doesn't have a predecessor,
    // but its lone successor may just have been removed above.
    if (osrBlock()) {
        if (osrBlock()->getSuccessor(0)->isDead())
            removeBlock(osrBlock());
    }

    // Clear the marks set during traversal so later passes see a clean graph.
    for (size_t i = 0; i < blocks.length(); i++)
        blocks[i]->unmark();
    start->unmark();

    return true;
}
// Mark every block reachable from the entry (and, if present, the OSR entry),
// following only the taken side of tests on constant operands, then rewrite
// those constant tests into plain gotos. Sets marked_ to the number of marked
// blocks. Returns false on allocation failure or cancellation.
bool UnreachableCodeElimination::prunePointlessBranchesAndMarkReachableBlocks() {
    BlockList worklist, optimizableBlocks;

    // Process everything reachable from the start block, ignoring any
    // OSR block.
    if (!enqueue(graph_.entryBlock(), worklist))
        return false;
    while (!worklist.empty()) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = worklist.popCopy();

        // If this block is a test on a constant operand, only enqueue
        // the relevant successor. Also, remember the block for later.
        if (MBasicBlock *succ = optimizableSuccessor(block)) {
            if (!optimizableBlocks.append(block))
                return false;
            if (!enqueue(succ, worklist))
                return false;
        } else {
            // Otherwise just visit all successors.
            for (size_t i = 0; i < block->numSuccessors(); i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (!enqueue(succ, worklist))
                    return false;
            }
        }
    }

    // Now, if there is an OSR block, check that all of its successors
    // were reachable (bug 880377). If not, we are in danger of
    // creating a CFG with two disjoint parts, so simply mark all
    // blocks as reachable. This generally occurs when the TI info for
    // stack types is incorrect or incomplete, due to operations that
    // have not yet executed in baseline.
    if (graph_.osrBlock()) {
        MBasicBlock *osrBlock = graph_.osrBlock();
        JS_ASSERT(!osrBlock->isMarked());
        if (!enqueue(osrBlock, worklist))
            return false;
        for (size_t i = 0; i < osrBlock->numSuccessors(); i++) {
            if (!osrBlock->getSuccessor(i)->isMarked()) {
                // OSR block has an otherwise unreachable successor, abort.
                // Mark everything so no block is removed, and skip the
                // branch-pruning rewrite below entirely.
                for (MBasicBlockIterator iter(graph_.begin()); iter != graph_.end(); iter++)
                    iter->mark();
                marked_ = graph_.numBlocks();
                return true;
            }
        }
    }

    // Now that we know we will not abort due to OSR, go back and
    // transform any tests on constant operands into gotos.
    for (uint32_t i = 0; i < optimizableBlocks.length(); i++) {
        MBasicBlock *block = optimizableBlocks[i];
        MBasicBlock *succ = optimizableSuccessor(block);
        JS_ASSERT(succ);

        // Replace the conditional terminator with an unconditional jump to
        // the always-taken successor.
        MGoto *gotoIns = MGoto::New(graph_.alloc(), succ);
        block->discardLastIns();
        block->end(gotoIns);
        // If the dropped successor was the one carrying phi bookkeeping,
        // clear the stale successor-with-phis record.
        MBasicBlock *successorWithPhis = block->successorWithPhis();
        if (successorWithPhis && successorWithPhis != succ)
            block->setSuccessorWithPhis(nullptr, 0);
    }

    return true;
}
// Sweep phase of unreachable-code elimination: delete unmarked blocks, clear
// marks and renumber the surviving blocks, and invalidate dominator info
// throughout (it is recomputed later). Assumes marked_ holds the number of
// marked blocks. Returns false only on cancellation.
bool UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators() {
    // Removes blocks that are not marked from the graph. For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value. Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.
    // Iterating in postorder while counting `id` down yields ids in
    // reverse-postorder, ending at 0 (asserted below).
    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        // Advance before possibly removing `block` so the iterator stays valid.
        iter++;

        // Unconditionally clear the dominators. It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            if (block->numPredecessors() > 1) {
                // If this block had phis, then any reachable
                // predecessors need to have the successorWithPhis
                // flag cleared.
                for (size_t i = 0; i < block->numPredecessors(); i++)
                    block->getPredecessor(i)->setSuccessorWithPhis(nullptr, 0);
            }

            if (block->isLoopBackedge()) {
                // NB. We have to update the loop header if we
                // eliminate the backedge. At first I thought this
                // check would be insufficient, because it would be
                // possible to have code like this:
                //
                //    while (true) {
                //       ...;
                //       if (1 == 1) break;
                //    }
                //
                // in which the backedge is removed as part of
                // rewriting the condition, but no actual blocks are
                // removed. However, in all such cases, the backedge
                // would be a critical edge and hence the critical
                // edge block is being removed.
                block->loopHeaderOfBackedge()->clearLoopHeader();
            }

            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    // Dropping a predecessor can make phis in succ redundant;
                    // record that so a later pass knows to clean them up.
                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            // When we remove a call, we can't leave the corresponding MPassArg
            // in the graph. Since lowering will fail. Replace it with the
            // argument for the exceptional case when it is kept alive in a
            // ResumePoint. DCE will remove the unused MPassArg instruction.
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                if (iter->isCall()) {
                    MCall *call = iter->toCall();
                    for (size_t i = 0; i < call->numStackArgs(); i++) {
                        JS_ASSERT(call->getArg(i)->isPassArg());
                        JS_ASSERT(call->getArg(i)->hasOneDefUse());
                        MPassArg *arg = call->getArg(i)->toPassArg();
                        arg->replaceAllUsesWith(arg->getArgument());
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    // Every marked block was assigned exactly one id counting down from
    // marked_; a nonzero remainder means marked_ was wrong.
    JS_ASSERT(id == 0);

    return true;
}