// Walk up the dominator tree from |block| to the root and test for any defs // which look potentially interesting to GVN. static bool ScanDominatorsForDefs(MBasicBlock *block) { for (MBasicBlock *i = block;;) { if (BlockHasInterestingDefs(block)) return true; MBasicBlock *immediateDominator = i->immediateDominator(); if (immediateDominator == i) break; i = immediateDominator; } return false; }
// Visit the control instruction at the end of |block|. bool ValueNumberer::visitControlInstruction(MBasicBlock *block, const MBasicBlock *dominatorRoot) { // Look for a simplified form of the control instruction. MControlInstruction *control = block->lastIns(); MDefinition *rep = simplified(control); if (rep == control) return true; if (rep == nullptr) return false; MControlInstruction *newControl = rep->toControlInstruction(); MOZ_ASSERT(!newControl->block(), "Control instruction replacement shouldn't already be in a block"); #ifdef DEBUG JitSpew(JitSpew_GVN, " Folded control instruction %s%u to %s%u", control->opName(), control->id(), newControl->opName(), graph_.getNumInstructionIds()); #endif // If the simplification removes any CFG edges, update the CFG and remove // any blocks that become dead. size_t oldNumSuccs = control->numSuccessors(); size_t newNumSuccs = newControl->numSuccessors(); if (newNumSuccs != oldNumSuccs) { MOZ_ASSERT(newNumSuccs < oldNumSuccs, "New control instruction has too many successors"); for (size_t i = 0; i != oldNumSuccs; ++i) { MBasicBlock *succ = control->getSuccessor(i); if (HasSuccessor(newControl, succ)) continue; if (succ->isMarked()) continue; if (!removePredecessorAndCleanUp(succ, block)) return false; if (succ->isMarked()) continue; if (!rerun_) { if (!remainingBlocks_.append(succ)) return false; } } } if (!releaseOperands(control)) return false; block->discardIgnoreOperands(control); block->end(newControl); return processDeadDefs(); }
MBasicBlock *
MBasicBlock::New(MIRGraph &graph, BytecodeAnalysis *analysis, CompileInfo &info,
                 MBasicBlock *pred, const BytecodeSite &site, Kind kind)
{
    JS_ASSERT(site.pc() != nullptr);

    // Arena-allocate the block, then initialize it and inherit state from
    // |pred|; either step failing means OOM.
    MBasicBlock *result = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
    if (!result->init() || !result->inherit(graph.alloc(), analysis, pred, 0))
        return nullptr;

    return result;
}
MBasicBlock *
MBasicBlock::New(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, jsbytecode *entryPc, Kind kind)
{
    JS_ASSERT(entryPc != NULL);

    // Allocate, initialize, and inherit from |pred|; NULL on any failure.
    MBasicBlock *result = new MBasicBlock(graph, info, entryPc, kind);
    if (!result->init() || !result->inherit(pred, 0))
        return NULL;

    return result;
}
MBasicBlock *
MBasicBlock::NewParBailout(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, jsbytecode *entryPc)
{
    // Build an ordinary block, then terminate it with a parallel bailout.
    MBasicBlock *result = MBasicBlock::New(graph, info, pred, entryPc, NORMAL);
    if (!result)
        return NULL;

    MParBailout *bailoutIns = new MParBailout();
    if (!bailoutIns)
        return NULL;

    result->end(bailoutIns);
    return result;
}
MBasicBlock*
MBasicBlock::New(MIRGraph& graph, size_t stackDepth, const CompileInfo& info,
                 MBasicBlock* maybePred, BytecodeSite* site, Kind kind)
{
    MOZ_ASSERT(site->pc() != nullptr);

    // Arena-allocate the block, then initialize it and inherit state (stack
    // depth included) from |maybePred|, which may be null.
    MBasicBlock* result = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
    if (!result->init() || !result->inherit(graph.alloc(), stackDepth, maybePred, 0))
        return nullptr;

    return result;
}
void
MIRGraph::removeBlocksAfter(MBasicBlock *start)
{
    // Remove every block whose id is ordered after |start|'s.
    MBasicBlockIterator it(begin());
    ++it; // The first (entry) block is never removed.
    while (it != end()) {
        MBasicBlock *candidate = *it;
        ++it; // Advance before removal invalidates the current position.
        if (candidate->id() > start->id())
            removeBlock(candidate);
    }
}
MBasicBlock *
MBasicBlock::New(MIRGraph &graph, BytecodeAnalysis *analysis, CompileInfo &info,
                 MBasicBlock *pred, jsbytecode *entryPc, Kind kind)
{
    JS_ASSERT(entryPc != nullptr);

    // Allocate, initialize, and inherit analysis/predecessor state; nullptr
    // on any failure.
    MBasicBlock *result = new MBasicBlock(graph, info, entryPc, kind);
    if (!result->init() || !result->inherit(analysis, pred, 0))
        return nullptr;

    return result;
}
MBasicBlock *
MBasicBlock::NewPendingLoopHeader(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred,
                                  const BytecodeSite &site, unsigned stackPhiCount)
{
    JS_ASSERT(site.pc() != nullptr);

    // Pending loop headers are created with no bytecode analysis and reserve
    // room for |stackPhiCount| stack phis during inheritance.
    MBasicBlock *header = new(graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
    if (!header->init() || !header->inherit(graph.alloc(), nullptr, pred, 0, stackPhiCount))
        return nullptr;

    return header;
}
// |block| is unreachable. Mine it for opportunities to delete more dead // code, and then discard it. bool ValueNumberer::visitUnreachableBlock(MBasicBlock* block) { JitSpew(JitSpew_GVN, " Visiting unreachable block%u%s%s%s", block->id(), block->isLoopHeader() ? " (loop header)" : "", block->isSplitEdge() ? " (split edge)" : "", block->immediateDominator() == block ? " (dominator root)" : ""); MOZ_ASSERT(block->isMarked(), "Visiting unmarked (and therefore reachable?) block"); MOZ_ASSERT(block->numPredecessors() == 0, "Block marked unreachable still has predecessors"); MOZ_ASSERT(block != graph_.entryBlock(), "Removing normal entry block"); MOZ_ASSERT(block != graph_.osrBlock(), "Removing OSR entry block"); MOZ_ASSERT(deadDefs_.empty(), "deadDefs_ not cleared"); // Disconnect all outgoing CFG edges. for (size_t i = 0, e = block->numSuccessors(); i < e; ++i) { MBasicBlock* succ = block->getSuccessor(i); if (succ->isDead() || succ->isMarked()) continue; if (!removePredecessorAndCleanUp(succ, block)) return false; if (succ->isMarked()) continue; // |succ| is still reachable. Make a note of it so that we can scan // it for interesting dominator tree changes later. if (!rerun_) { if (!remainingBlocks_.append(succ)) return false; } } // Discard any instructions with no uses. The remaining instructions will be // discarded when their last use is discarded. MOZ_ASSERT(nextDef_ == nullptr); for (MDefinitionIterator iter(block); iter; ) { MDefinition* def = *iter++; if (def->hasUses()) continue; nextDef_ = *iter; if (!discardDefsRecursively(def)) return false; } nextDef_ = nullptr; MControlInstruction* control = block->lastIns(); return discardDefsRecursively(control); }
// Flush allocator state at the end of |block|: sync every dirty register to
// its stack slot, then materialize the moves feeding any phis in the
// phi-carrying successor (at most one per block).
void StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins) {
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.
    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    // Move group for the phi copies; created lazily below, only if some phi
    // actually needs a move.
    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        // Which operand of each phi corresponds to this predecessor.
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);
            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            // Same vreg on both sides: no move needed.
            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    // The existing input group is empty; reuse it directly.
                    group = input;
                } else {
                    // Otherwise chain a fresh group right after it.
                    group = new LMoveGroup(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest);
        }
    }
}
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // In a valid RPO, every block appears after all of its predecessors,
    // loop backedges excepted. Mark blocks as we pass them and check that
    // each non-backedge predecessor was already marked.
    for (ReversePostorderIterator it(graph.rpoBegin()); it != graph.rpoEnd(); ++it) {
        JS_ASSERT(!it->isMarked());
        for (size_t p = 0; p < it->numPredecessors(); p++) {
            MBasicBlock *pred = it->getPredecessor(p);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }
        it->mark();
    }

    graph.unmarkBlocks();
}
void
MIRGraph::removeBlocksAfter(MBasicBlock *start)
{
    // Remove every block whose id is ordered after |start|'s.
    MBasicBlockIterator it(begin());
    ++it; // The first (entry) block is never removed.
    while (it != end()) {
        MBasicBlock *candidate = *it;
        ++it; // Advance before removal invalidates the current position.
        if (candidate->id() > start->id()) {
            // removeBlock will not remove the resumepoints, since
            // they can be shared with outer blocks. So remove them now.
            candidate->discardAllResumePoints();
            removeBlock(candidate);
        }
    }
}
MBasicBlock *
MBasicBlock::NewWithResumePoint(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred,
                                jsbytecode *entryPc, MResumePoint *resumePoint)
{
    MBasicBlock *result = new MBasicBlock(graph, info, entryPc, NORMAL);

    // Adopt |resumePoint| as the entry resume point before initialization.
    resumePoint->block_ = result;
    result->entryResumePoint_ = resumePoint;

    if (!result->init() || !result->inheritResumePoint(pred))
        return NULL;

    return result;
}
MBasicBlock *
MBasicBlock::NewWithResumePoint(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred,
                                const BytecodeSite &site, MResumePoint *resumePoint)
{
    MBasicBlock *result = new(graph.alloc()) MBasicBlock(graph, info, site, NORMAL);

    // Adopt |resumePoint| as the entry resume point before initialization.
    resumePoint->block_ = result;
    result->entryResumePoint_ = resumePoint;

    if (!result->init() || !result->inheritResumePoint(pred))
        return nullptr;

    return result;
}
MBasicBlock *
MBasicBlock::NewAbortPar(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred,
                         jsbytecode *entryPc, MResumePoint *resumePoint)
{
    MBasicBlock *result = new MBasicBlock(graph, info, entryPc, NORMAL);

    // Wire up |resumePoint| as the entry resume point first.
    resumePoint->block_ = result;
    result->entryResumePoint_ = resumePoint;

    if (!result->init())
        return NULL;
    if (!result->addPredecessorWithoutPhis(pred))
        return NULL;

    // The block does nothing but abort the parallel execution.
    result->end(new MAbortPar());
    return result;
}
// Populate |phiMoves| with the moves that carry this block's outgoing values
// into the phis of its phi-carrying successor (at most one per block).
// Returns false on allocation failure.
bool GreedyAllocator::buildPhiMoves(LBlock *block) {
    IonSpew(IonSpew_RegAlloc, " Merging phi state.");

    // Start from a clean move set for this block.
    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    if (!mblock->successorWithPhis())
        return true; // No phis to feed; nothing to do.

    // Insert moves from our state into our successor's phi.
    uint32 pos = mblock->positionInPhiSuccessor();
    LBlock *successor = mblock->successorWithPhis()->lir();
    for (size_t i = 0; i < successor->numPhis(); i++) {
        LPhi *phi = successor->getPhi(i);
        JS_ASSERT(phi->numDefs() == 1);

        // Both the phi and its input need stack storage reserved.
        VirtualRegister *phiReg = getVirtualRegister(phi->getDef(0));
        allocateStack(phiReg);

        LAllocation *in = phi->getOperand(pos);
        VirtualRegister *inReg = getVirtualRegister(in->toUse());
        allocateStack(inReg);

        // Try to get a register for the input.
        if (!inReg->hasRegister() && !allocatableRegs().empty(inReg->isDouble())) {
            if (!allocateReg(inReg))
                return false;
        }

        // Add a move from the input to the phi. The source is the input's
        // register if it got one, otherwise its backing stack slot; the
        // destination is always the phi's backing stack slot.
        if (inReg->hasRegister()) {
            if (!phiMoves.move(inReg->reg(), phiReg->backingStack()))
                return false;
        } else {
            if (!phiMoves.move(inReg->backingStack(), phiReg->backingStack()))
                return false;
        }
    }

    return true;
}
MBasicBlock *
MBasicBlock::NewAbortPar(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred,
                         const BytecodeSite &site, MResumePoint *resumePoint)
{
    MBasicBlock *result = new(graph.alloc()) MBasicBlock(graph, info, site, NORMAL);

    // Wire up |resumePoint| as the entry resume point first.
    resumePoint->block_ = result;
    result->entryResumePoint_ = resumePoint;

    if (!result->init())
        return nullptr;
    if (!result->addPredecessorWithoutPhis(pred))
        return nullptr;

    // The block does nothing but abort the parallel execution.
    result->end(MAbortPar::New(graph.alloc()));
    return result;
}
// Create a basic block for asm.js compilation. Unlike the bytecode-driven
// constructors, pending loop headers here get their phis built eagerly from
// |pred|'s slots via a bulk placement-new allocation.
MBasicBlock * MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kind kind) {
    MBasicBlock *block = new(graph.alloc()) MBasicBlock(graph, info, BytecodeSite(), kind);
    if (!block->init())
        return nullptr;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            // One phi per stack slot. Allocate raw storage for all of them at
            // once, then placement-new each phi into it below.
            size_t nphis = block->stackPosition_;

            TempAllocator &alloc = graph.alloc();
            MPhi *phis = (MPhi*)alloc.allocateArray<sizeof(MPhi)>(nphis);
            if (!phis)
                return nullptr;

            // Note: Phis are inserted in the same order as the slots.
            for (size_t i = 0; i < nphis; i++) {
                MDefinition *predSlot = pred->getSlot(i);

                // asm.js values are monomorphic; no MIRType_Value slots.
                JS_ASSERT(predSlot->type() != MIRType_Value);
                MPhi *phi = new(phis + i) MPhi(alloc, predSlot->type());

                // Reserve two inputs: the predecessor's value now, and the
                // backedge's value later.
                JS_ALWAYS_TRUE(phi->reserveLength(2));
                phi->addInput(predSlot);

                // Append the phi to the block and make the slot refer to it.
                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return nullptr;
    }

    return block;
}
void
AdjustTruncatedInputs(MInstruction *truncated)
{
    MBasicBlock *owner = truncated->block();

    // Insert an explicit int32 truncation in front of every operand that the
    // instruction consumes truncated but that isn't already an int32.
    for (size_t op = 0; op < truncated->numOperands(); op++) {
        if (!truncated->isOperandTruncated(op))
            continue;

        MDefinition *input = truncated->getOperand(op);
        if (input->type() == MIRType_Int32)
            continue;

        MTruncateToInt32 *truncate = MTruncateToInt32::New(input);
        owner->insertBefore(truncated, truncate);
        truncated->replaceOperand(op, truncate);
    }

    // If the truncated instruction is itself a ToDouble, it is now redundant:
    // forward its input to all uses and drop it from the block.
    if (truncated->isToDouble()) {
        truncated->replaceAllUsesWith(truncated->getOperand(0));
        owner->discard(truncated);
    }
}
MBasicBlock*
MBasicBlock::NewWithResumePoint(MIRGraph& graph, CompileInfo& info, MBasicBlock* pred,
                                BytecodeSite* site, MResumePoint* resumePoint)
{
    MBasicBlock* result = new(graph.alloc()) MBasicBlock(graph, info, site, NORMAL);

    MOZ_ASSERT(!resumePoint->instruction());

    // Detach |resumePoint| from its current block and re-home it here as the
    // entry resume point.
    resumePoint->block()->discardResumePoint(resumePoint, RefType_None);
    resumePoint->block_ = result;
    result->addResumePoint(resumePoint);
    result->entryResumePoint_ = resumePoint;

    if (!result->init() || !result->inheritResumePoint(pred))
        return nullptr;

    return result;
}
// Visit all the blocks dominated by dominatorRoot.
bool ValueNumberer::visitDominatorTree(MBasicBlock* dominatorRoot) {
    JitSpew(JitSpew_GVN, " Visiting dominator tree (with %llu blocks) rooted at block%u%s",
            uint64_t(dominatorRoot->numDominated()), dominatorRoot->id(),
            dominatorRoot == graph_.entryBlock() ? " (normal entry block)" :
            dominatorRoot == graph_.osrBlock() ? " (OSR entry block)" :
            dominatorRoot->numPredecessors() == 0 ? " (odd unreachable block)" :
            " (merge point from normal entry and OSR entry)");
    MOZ_ASSERT(dominatorRoot->immediateDominator() == dominatorRoot,
               "root is not a dominator tree root");

    // Visit all blocks dominated by dominatorRoot, in RPO. This has the nice
    // property that we'll always visit a block before any block it dominates,
    // so we can make a single pass through the list and see every full
    // redundance.
    size_t numVisited = 0;   // Blocks visited normally.
    size_t numDiscarded = 0; // Blocks found unreachable and discarded.
    for (ReversePostorderIterator iter(graph_.rpoBegin(dominatorRoot)); ; ) {
        // The loop terminates via the count check at the bottom, so running
        // off the end of the graph means the dominator info was wrong.
        MOZ_ASSERT(iter != graph_.rpoEnd(), "Inconsistent dominator information");
        MBasicBlock* block = *iter++;

        // We're only visiting blocks in dominatorRoot's tree right now.
        if (!dominatorRoot->dominates(block))
            continue;

        // If this is a loop backedge, remember the header, as we may not be able
        // to find it after we simplify the block.
        MBasicBlock* header = block->isLoopBackedge() ? block->loopHeaderOfBackedge() : nullptr;

        if (block->isMarked()) {
            // This block has become unreachable; handle it specially.
            if (!visitUnreachableBlock(block))
                return false;
            ++numDiscarded;
        } else {
            // Visit the block!
            if (!visitBlock(block, dominatorRoot))
                return false;
            ++numVisited;
        }

        // If the block is/was a loop backedge, check to see if the block that
        // is/was its header has optimizable phis, which would want a re-run.
        if (!rerun_ && header && loopHasOptimizablePhi(header)) {
            JitSpew(JitSpew_GVN, " Loop phi in block%u can now be optimized; will re-run GVN!",
                    header->id());
            rerun_ = true;
            // A full re-run subsumes the per-block follow-ups.
            remainingBlocks_.clear();
        }

        // Stop once every block in the subtree has been either visited or
        // discarded; numDominated counts both.
        MOZ_ASSERT(numVisited <= dominatorRoot->numDominated() - numDiscarded,
                   "Visited blocks too many times");
        if (numVisited >= dominatorRoot->numDominated() - numDiscarded)
            break;
    }

    totalNumVisited_ += numVisited;

    // Value numbers don't hold across dominator subtrees.
    values_.clear();
    return true;
}
// Whether there might be a path from src to dest, excluding loop backedges. This is // approximate and really ought to depend on precomputed reachability information. static inline bool BlockMightReach(MBasicBlock* src, MBasicBlock* dest) { while (src->id() <= dest->id()) { if (src == dest) return true; switch (src->numSuccessors()) { case 0: return false; case 1: { MBasicBlock* successor = src->getSuccessor(0); if (successor->id() <= src->id()) return true; // Don't iloop. src = successor; break; } default: return true; } } return false; }
// Test whether any instruction in the loop possiblyCalls(). static bool LoopContainsPossibleCall(MIRGraph &graph, MBasicBlock *header, MBasicBlock *backedge) { for (auto i(graph.rpoBegin(header)); ; ++i) { MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop"); MBasicBlock *block = *i; if (!block->isMarked()) continue; for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ++insIter) { MInstruction *ins = *insIter; if (ins->possiblyCalls()) { JitSpew(JitSpew_LICM, " Possile call found at %s%u", ins->opName(), ins->id()); return true; } } if (block == backedge) break; } return false; }
// Given a block which has had predecessors removed but is still reachable, test // whether the block's new dominator will be closer than its old one and whether // it will expose potential optimization opportunities. static MBasicBlock* ComputeNewDominator(MBasicBlock* block, MBasicBlock* old) { MBasicBlock* now = block->getPredecessor(0); for (size_t i = 1, e = block->numPredecessors(); i < e; ++i) { MBasicBlock* pred = block->getPredecessor(i); // Note that dominators haven't been recomputed yet, so we have to check // whether now dominates pred, not block. while (!now->dominates(pred)) { MBasicBlock* next = now->immediateDominator(); if (next == old) return old; if (next == now) { MOZ_ASSERT(block == old, "Non-self-dominating block became self-dominating"); return block; } now = next; } } MOZ_ASSERT(old != block || old != now, "Missed self-dominating block staying self-dominating"); return now; }
bool RangeAnalysis::analyze() { IonSpew(IonSpew_Range, "Doing range propagation"); for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { MBasicBlock *block = *iter; for (MDefinitionIterator iter(block); iter; iter++) { MDefinition *def = *iter; def->computeRange(); IonSpew(IonSpew_Range, "computing range on %d", def->id()); SpewRange(def); } if (block->isLoopHeader()) analyzeLoop(block); } return true; }
// A loop is about to be made reachable only through an OSR entry into one of // its nested loops. Fix everything up. bool ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block, MBasicBlock* backedge) { // Create an empty and unreachable(!) block which jumps to |block|. This // allows |block| to remain marked as a loop header, so we don't have to // worry about moving a different block into place as the new loop header, // which is hard, especially if the OSR is into a nested loop. Doing all // that would produce slightly more optimal code, but this is so // extraordinarily rare that it isn't worth the complexity. MBasicBlock* fake = MBasicBlock::New(graph_, block->info(), nullptr, MBasicBlock::NORMAL); if (fake == nullptr) return false; graph_.insertBlockBefore(block, fake); fake->setImmediateDominator(fake); fake->addNumDominated(1); fake->setDomIndex(fake->id()); fake->setUnreachable(); // Create zero-input phis to use as inputs for any phis in |block|. // Again, this is a little odd, but it's the least-odd thing we can do // without significant complexity. for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) { MPhi* phi = *iter; MPhi* fakePhi = MPhi::New(graph_.alloc(), phi->type()); fake->addPhi(fakePhi); if (!phi->addInputSlow(fakePhi)) return false; } fake->end(MGoto::New(graph_.alloc(), block)); if (!block->addPredecessorWithoutPhis(fake)) return false; // Restore |backedge| as |block|'s loop backedge. block->clearLoopHeader(); block->setLoopHeader(backedge); JitSpew(JitSpew_GVN, " Created fake block%u", fake->id()); hasOSRFixups_ = true; return true; }
// Scan all instructions inside the loop. If any instruction has a use of a // definition that is defined outside its containing loop, then stack space // for that definition must be reserved ahead of time. Otherwise, we could // re-use storage that has been temporarily allocated - see bug 694481. bool GreedyAllocator::findLoopCarriedUses(LBlock *backedge) { Vector<LBlock *, 4, SystemAllocPolicy> worklist; MBasicBlock *mheader = backedge->mir()->loopHeaderOfBackedge(); uint32 upperBound = backedge->lastId(); uint32 lowerBound = mheader->lir()->firstId(); IonSpew(IonSpew_RegAlloc, " Finding loop-carried uses."); for (size_t i = 0; i < mheader->numContainedInLoop(); i++) { LBlock *block = mheader->getContainedInLoop(i)->lir(); for (LInstructionIterator i = block->begin(); i != block->end(); i++) findLoopCarriedUses(*i, lowerBound, upperBound); for (size_t i = 0; i < block->numPhis(); i++) findLoopCarriedUses(block->getPhi(i), lowerBound, upperBound); } IonSpew(IonSpew_RegAlloc, " Done finding loop-carried uses."); return true; }
bool ValueNumberer::insertOSRFixups() { ReversePostorderIterator end(graph_.end()); for (ReversePostorderIterator iter(graph_.begin()); iter != end; ) { MBasicBlock* block = *iter++; // Only add fixup block above for loops which can be reached from OSR. if (!block->isLoopHeader()) continue; // If the loop header is not self-dominated, then this loop does not // have to deal with a second entry point, so there is no need to add a // second entry point with a fixup block. if (block->immediateDominator() != block) continue; if (!fixupOSROnlyLoop(block, block->backedge())) return false; } return true; }
// Compute the immediate dominator of every block by iterating the standard
// dataflow equations to a fixpoint (cf. Cooper/Harvey/Kennedy's simple
// dominance algorithm). Roots self-dominate; blocks with no common
// dominator among their predecessors are left self-dominating.
static void ComputeImmediateDominators(MIRGraph &graph) {
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;
    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            // Seed the intersection with the first predecessor's idom.
            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                // Predecessors not yet processed (idom still NULL) are
                // skipped; they contribute on a later iteration.
                if (pred->immediateDominator() != NULL)
                    newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == NULL) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != NULL);
    }
#endif
}