// Propagate the header's loop phis into this (exit) block's slot vector.
// Any slot still holding a phi's entry definition is redirected to the phi
// itself; setBackedge() could not do this because exits are not captured in
// resume points.
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
    MResumePoint *headerRp = header->entryResumePoint();
    size_t stackDepth = headerRp->numOperands();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        MDefinition *exitDef = getSlot(slot);
        MDefinition *loopDef = headerRp->getOperand(slot);
        if (!loopDef->isPhi()) {
            // A non-phi operand must be defined before the loop header (lower
            // block id) and flow through the loop unchanged.
            MOZ_ASSERT(loopDef->block()->id() < header->id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi *phi = loopDef->toPhi();
        MOZ_ASSERT(phi->numOperands() == 2);

        // The entry definition is always the leftmost input to the phi.
        MDefinition *entryDef = phi->getOperand(0);
        if (entryDef != exitDef)
            continue;

        // If the entryDef is the same as exitDef, then we must propagate the
        // phi down to this successor. This chance was missed as part of
        // setBackedge() because exits are not captured in resume points.
        setSlot(slot, phi);
    }
}
// Create a new asm.js basic block. Pending loop headers get one typed phi
// per slot, pre-sized for the entry input (added here) and the backedge
// input (added when the loop closes); other blocks copy the predecessor's
// slots directly.
MBasicBlock *
MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kind kind)
{
    MBasicBlock *block = new MBasicBlock(graph, info, /* entryPC = */ NULL, kind);
    if (!block->init())
        return NULL;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            for (size_t i = 0; i < block->stackPosition_; i++) {
                MDefinition *predSlot = pred->getSlot(i);

                // asm.js slots are always typed; boxed values never appear.
                JS_ASSERT(predSlot->type() != MIRType_Value);
                MPhi *phi = MPhi::New(i, predSlot->type());

                // Reserve room for both inputs up front so the later
                // addInput calls cannot fail.
                JS_ALWAYS_TRUE(phi->reserveLength(2));
                phi->addInput(predSlot);

                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return NULL;
    }

    return block;
}
// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. if (!block->phisEmpty()) { uint32_t index = pred->positionInPhiSuccessor(); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) { MPhi *phi = *iter; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition *op = phi->getOperand(index); if (op == phi) continue; // Set the operand to the phi itself rather than just releasing it // because removePredecessor expects to have something to release. phi->replaceOperand(index, phi); if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; } } block->removePredecessor(pred); return true; }
// Initialize this block's stack state and entry resume point from |pred|
// (or from the bytecode analysis when there is no predecessor), after
// popping |popped| values.
bool
MBasicBlock::inherit(MBasicBlock *pred, uint32_t popped)
{
    if (pred) {
        stackPosition_ = pred->stackPosition_;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
        // Pending loop headers get phis instead of copied slots; see below.
        if (kind_ != PENDING_LOOP_HEADER)
            copySlots(pred);
    } else if (pc()) {
        // No predecessor: derive the stack depth from the script analysis.
        uint32_t stackDepth = info().script()->analysis()->getCode(pc()).stackDepth;
        stackPosition_ = info().firstStackSlot() + stackDepth;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
    } else {
        stackPosition_ = info().firstStackSlot();
    }

    JS_ASSERT(info_.nslots() >= stackPosition_);
    JS_ASSERT(!entryResumePoint_);

    if (pc()) {
        // Propagate the caller resume point from the inherited block.
        MResumePoint *callerResumePoint = pred ? pred->callerResumePoint() : NULL;

        // Create a resume point using our initial stack state.
        entryResumePoint_ = new MResumePoint(this, pc(), callerResumePoint,
                                             MResumePoint::ResumeAt);
        if (!entryResumePoint_->init())
            return false;
    }

    if (pred) {
        if (!predecessors_.append(pred))
            return false;

        if (kind_ == PENDING_LOOP_HEADER) {
            // Create a phi per slot so the backedge input can be added
            // later; mirror each phi into the entry resume point.
            for (size_t i = 0; i < stackDepth(); i++) {
                MPhi *phi = MPhi::New(i);
                if (!phi->addInputSlow(pred->getSlot(i)))
                    return false;
                addPhi(phi);
                setSlot(i, phi);
                if (entryResumePoint())
                    entryResumePoint()->setOperand(i, phi);
            }
        } else if (entryResumePoint()) {
            for (size_t i = 0; i < stackDepth(); i++)
                entryResumePoint()->setOperand(i, getSlot(i));
        }
    } else if (entryResumePoint()) {
        /*
         * Don't leave the operands uninitialized for the caller, as it may not
         * initialize them later on.
         */
        for (size_t i = 0; i < stackDepth(); i++)
            entryResumePoint()->clearOperand(i);
    }

    return true;
}
// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock* block, MBasicBlock* pred, size_t predIndex) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. MOZ_ASSERT(nextDef_ == nullptr); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) { MPhi* phi = *iter++; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition* op = phi->getOperand(predIndex); phi->removeOperand(predIndex); nextDef_ = iter != end ? *iter : nullptr; if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; // If |nextDef_| became dead while we had it pinned, advance the iterator // and discard it now. while (nextDef_ && !nextDef_->hasUses()) { phi = nextDef_->toPhi(); iter++; nextDef_ = iter != end ? *iter : nullptr; discardDefsRecursively(phi); } } nextDef_ = nullptr; block->removePredecessorWithoutPhiOperands(pred, predIndex); return true; }
// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred, size_t predIndex) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. MOZ_ASSERT(nextDef_ == nullptr); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) { MPhi *phi = *iter++; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition *op = phi->getOperand(predIndex); phi->removeOperand(predIndex); nextDef_ = *iter; if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; } nextDef_ = nullptr; block->removePredecessorWithoutPhiOperands(pred, predIndex); return true; }
// Determine whether the possible value of start (a phi node within the loop) // can become smaller than an initial value at loop entry. bool Loop::nonDecreasing(MDefinition *initial, MDefinition *start) { MDefinitionVector worklist; MDefinitionVector seen; if (!worklist.append(start)) return false; while (!worklist.empty()) { MDefinition *def = worklist.popCopy(); bool duplicate = false; for (size_t i = 0; i < seen.length() && !duplicate; i++) { if (seen[i] == def) duplicate = true; } if (duplicate) continue; if (!seen.append(def)) return false; if (def->type() != MIRType_Int32) return false; if (!isInLoop(def)) { if (def != initial) return false; continue; } if (def->isPhi()) { MPhi *phi = def->toPhi(); for (size_t i = 0; i < phi->numOperands(); i++) { if (!worklist.append(phi->getOperand(i))) return false; } continue; } if (def->isAdd()) { if (def->toAdd()->specialization() != MIRType_Int32) return false; MDefinition *lhs = def->toAdd()->getOperand(0); MDefinition *rhs = def->toAdd()->getOperand(1); if (!rhs->isConstant()) return false; Value v = rhs->toConstant()->value(); if (!v.isInt32() || v.toInt32() < 0) return false; if (!worklist.append(lhs)) return false; continue; } return false; } return true; }
void MBasicBlock::specializePhis() { for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) { MPhi *phi = *iter; phi->specializeType(); } }
bool MBasicBlock::specializePhis() { for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) { MPhi *phi = *iter; if (!phi->specializeType()) return false; } return true; }
// Create a new basic block. For a pending loop header, eagerly set up one
// typed phi per stack slot — recycling phis from the graph's free list when
// available and bulk-allocating the remainder; other blocks copy the
// predecessor's slots directly.
MBasicBlock*
MBasicBlock::New(MIRGraph& graph, const CompileInfo& info, MBasicBlock* pred, Kind kind)
{
    BytecodeSite* site = new(graph.alloc()) BytecodeSite();
    MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
    if (!block->init())
        return nullptr;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            size_t nphis = block->stackPosition_;

            size_t nfree = graph.phiFreeListLength();

            TempAllocator& alloc = graph.alloc();
            MPhi* phis = nullptr;
            if (nphis > nfree) {
                // Not enough recycled phis; bulk-allocate the remainder.
                phis = alloc.allocateArray<MPhi>(nphis - nfree);
                if (!phis)
                    return nullptr;
            }

            // Note: Phis are inserted in the same order as the slots.
            for (size_t i = 0; i < nphis; i++) {
                MDefinition* predSlot = pred->getSlot(i);

                MOZ_ASSERT(predSlot->type() != MIRType::Value);

                MPhi* phi;
                if (i < nfree)
                    phi = graph.takePhiFromFreeList();
                else
                    phi = phis + (i - nfree);
                // Placement-construct in either case: free-list phis are
                // recycled storage, array phis are raw allocations.
                new(phi) MPhi(alloc, predSlot->type());

                phi->addInlineInput(predSlot);

                // Add append Phis in the block.
                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return nullptr;
    }

    return block;
}
// Add |pred| as a predecessor of this already-populated block, where |pred|
// has |popped| extra values on its stack. Slots that differ between the two
// paths get a phi: either a new operand on a phi this block already owns, or
// a freshly created phi primed with |mine| for every existing predecessor.
bool
MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, uint32_t popped)
{
    MOZ_ASSERT(pred);
    MOZ_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

    for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
        MDefinition* mine = getSlot(i);
        MDefinition* other = pred->getSlot(i);

        if (mine != other) {
            // If the current instruction is a phi, and it was created in this
            // basic block, then we have already placed this phi and should
            // instead append to its operands.
            if (mine->isPhi() && mine->block() == this) {
                MOZ_ASSERT(predecessors_.length());
                if (!mine->toPhi()->addInputSlow(other))
                    return false;
            } else {
                // Otherwise, create a new phi node.
                MPhi* phi;
                // Use a typed phi only when both inputs agree; otherwise
                // create an untyped phi.
                if (mine->type() == other->type())
                    phi = MPhi::New(alloc.fallible(), mine->type());
                else
                    phi = MPhi::New(alloc.fallible());
                if (!phi)
                    return false;
                addPhi(phi);

                // Prime the phi for each predecessor, so input(x) comes from
                // predecessor(x).
                if (!phi->reserveLength(predecessors_.length() + 1))
                    return false;

                for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds; ++j) {
                    MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
                    phi->addInput(mine);
                }
                phi->addInput(other);

                setSlot(i, phi);
                if (entryResumePoint())
                    entryResumePoint()->replaceOperand(i, phi);
            }
        }
    }

    return predecessors_.append(pred);
}
void MBasicBlock::discardAllPhiOperands() { for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) { MPhi *phi = *iter; for (size_t i = 0, e = phi->numOperands(); i < e; i++) phi->discardOperand(i); } for (MBasicBlock **pred = predecessors_.begin(); pred != predecessors_.end(); pred++) (*pred)->setSuccessorWithPhis(NULL, 0); }
// Finish a pending loop header by adding the backedge's slot values as the
// second input of each header phi. |*hadTypeChange| is set if any phi's type
// changed as a result of its new input.
bool
MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge, bool* hadTypeChange)
{
    // We must be a pending loop header
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

    size_t stackDepth = entryResumePoint()->stackDepth();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        // Get the value stack-slot of the back edge.
        MDefinition* exitDef = backedge->getSlot(slot);

        // Get the value of the loop header.
        MDefinition* loopDef = entryResumePoint()->getOperand(slot);
        if (loopDef->block() != this) {
            // If we are finishing a pending loop header, then we need to ensure
            // that all operands are phis. This is usually the case, except for
            // objects/arrays built with generators, in which case we share the
            // same allocations across all blocks.
            MOZ_ASSERT(loopDef->block()->id() < id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi* entryDef = loopDef->toPhi();
        MOZ_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        bool typeChange = false;

        if (!entryDef->addInputSlow(exitDef))
            return false;
        if (!entryDef->checkForTypeChange(exitDef, &typeChange))
            return false;
        *hadTypeChange |= typeChange;
        setSlot(slot, entryDef);
    }

    return true;
}
// Infer types for this block's phis from their operands. Runs at most once
// per block; subsequent calls are no-ops that report success.
bool
MBasicBlock::specializePhis(TempAllocator& alloc)
{
    if (specialized_)
        return true;

    specialized_ = true;
    for (MPhiIterator it = phisBegin(); it != phisEnd(); it++) {
        MPhi* current = *it;
        if (!current->specializeType(alloc))
            return false;
    }
    return true;
}
// Close a wasm loop by attaching |pred| as the backedge: add each slot's exit
// definition as the second input of the corresponding header phi, then
// promote this block from PENDING_LOOP_HEADER to LOOP_HEADER.
bool
MBasicBlock::setBackedgeWasm(MBasicBlock* pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    MOZ_ASSERT(hasLastIns());
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(stackDepth() == pred->stackDepth());

    // We must be a pending loop header
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    // Note: Phis are inserted in the same order as the slots. (see
    // MBasicBlock::New)
    size_t slot = 0;
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
        MPhi* entryDef = *phi;
        MDefinition* exitDef = pred->getSlot(slot);

        // Assert that we already placed phis for each slot.
        MOZ_ASSERT(entryDef->block() == this);

        // Assert that the phi already has the correct type.
        MOZ_ASSERT(entryDef->type() == exitDef->type());
        MOZ_ASSERT(entryDef->type() != MIRType::Value);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        // Phis always have room for 2 operands, so this can't fail.
        MOZ_ASSERT(phi->numOperands() == 1);
        entryDef->addInlineInput(exitDef);

        MOZ_ASSERT(slot < pred->stackDepth());
        setSlot(slot, entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Add |pred| as a predecessor of this already-populated block, assuming
// |pred| has |popped| more values on its stack than this block. Slots that
// disagree between the two paths get a phi: either a new operand on a phi
// this block already created, or a fresh phi primed with |mine| for every
// existing predecessor.
bool
MBasicBlock::addPredecessorPopN(MBasicBlock *pred, uint32_t popped)
{
    JS_ASSERT(pred);
    JS_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

    for (uint32_t i = 0; i < stackPosition_; i++) {
        MDefinition *mine = getSlot(i);
        MDefinition *other = pred->getSlot(i);

        if (mine != other) {
            // If the current instruction is a phi, and it was created in this
            // basic block, then we have already placed this phi and should
            // instead append to its operands.
            if (mine->isPhi() && mine->block() == this) {
                JS_ASSERT(predecessors_.length());
                if (!mine->toPhi()->addInputSlow(other))
                    return false;
            } else {
                // Otherwise, create a new phi node.
                MPhi *phi = MPhi::New(i);
                addPhi(phi);

                // Prime the phi for each predecessor, so input(x) comes from
                // predecessor(x).
                if (!phi->initLength(predecessors_.length() + 1))
                    return false;

                for (size_t j = 0; j < predecessors_.length(); j++) {
                    JS_ASSERT(predecessors_[j]->getSlot(i) == mine);
                    phi->setOperand(j, mine);
                }
                phi->setOperand(predecessors_.length(), other);

                setSlot(i, phi);
                if (entryResumePoint())
                    entryResumePoint()->replaceOperand(i, phi);
            }
        }
    }

    return predecessors_.append(pred);
}
// Allocate and initialize this block's LIR phis: one LPhi per register piece
// of each MIR phi (BOX_PIECES for Values, INT64_PIECES for Int64, one
// otherwise). Operand arrays are sized for the predecessor count and filled
// in later, per incoming edge.
bool
LBlock::init(TempAllocator& alloc)
{
    // Number of LIR phis needed to represent one MIR phi of a given type.
    auto piecesForPhi = [](MPhi* phi) -> size_t {
        switch (phi->type()) {
          case MIRType::Value:
            return BOX_PIECES;
          case MIRType::Int64:
            return INT64_PIECES;
          default:
            return 1;
        }
    };

    // Count the number of LPhis we'll need.
    size_t numLPhis = 0;
    for (MPhiIterator it(block_->phisBegin()), stop(block_->phisEnd()); it != stop; ++it)
        numLPhis += piecesForPhi(*it);

    // Allocate space for the LPhis.
    if (!phis_.init(alloc, numLPhis))
        return false;

    // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
    // operands on each incoming edge, and set their definitions at the start
    // of their defining block.
    size_t phiIndex = 0;
    size_t numPreds = block_->numPredecessors();
    for (MPhiIterator it(block_->phisBegin()), stop(block_->phisEnd()); it != stop; ++it) {
        MPhi* phi = *it;
        MOZ_ASSERT(phi->numOperands() == numPreds);

        size_t numPhis = piecesForPhi(phi);
        for (size_t piece = 0; piece < numPhis; piece++) {
            LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
            if (!inputs)
                return false;

            void* addr = &phis_[phiIndex++];
            LPhi* lphi = new (addr) LPhi(phi, inputs);
            lphi->setBlock(this);
        }
    }
    return true;
}
// Test whether there are any phis in |header| which are newly optimizable, as a // result of optimizations done inside the loop. This is not a sparse approach, // but restarting is rare enough in practice. Termination is ensured by // discarding the phi triggering the iteration. bool ValueNumberer::loopHasOptimizablePhi(MBasicBlock* header) const { // If the header is unreachable, don't bother re-optimizing it. if (header->isMarked()) return false; // Rescan the phis for any that can be simplified, since they may be reading // values from backedges. for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd()); iter != end; ++iter) { MPhi* phi = *iter; MOZ_ASSERT(phi->hasUses(), "Missed an unused phi"); if (phi->operandIfRedundant() || hasLeader(phi, header)) return true; // Phi can be simplified. } return false; }
// A loop is about to be made reachable only through an OSR entry into one of // its nested loops. Fix everything up. bool ValueNumberer::fixupOSROnlyLoop(MBasicBlock* block, MBasicBlock* backedge) { // Create an empty and unreachable(!) block which jumps to |block|. This // allows |block| to remain marked as a loop header, so we don't have to // worry about moving a different block into place as the new loop header, // which is hard, especially if the OSR is into a nested loop. Doing all // that would produce slightly more optimal code, but this is so // extraordinarily rare that it isn't worth the complexity. MBasicBlock* fake = MBasicBlock::New(graph_, block->info(), nullptr, MBasicBlock::NORMAL); if (fake == nullptr) return false; graph_.insertBlockBefore(block, fake); fake->setImmediateDominator(fake); fake->addNumDominated(1); fake->setDomIndex(fake->id()); fake->setUnreachable(); // Create zero-input phis to use as inputs for any phis in |block|. // Again, this is a little odd, but it's the least-odd thing we can do // without significant complexity. for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) { MPhi* phi = *iter; MPhi* fakePhi = MPhi::New(graph_.alloc(), phi->type()); fake->addPhi(fakePhi); if (!phi->addInputSlow(fakePhi)) return false; } fake->end(MGoto::New(graph_.alloc(), block)); if (!block->addPredecessorWithoutPhis(fake)) return false; // Restore |backedge| as |block|'s loop backedge. block->clearLoopHeader(); block->setLoopHeader(backedge); JitSpew(JitSpew_GVN, " Created fake block%u", fake->id()); hasOSRFixups_ = true; return true; }
// Create a new asm.js basic block. For a pending loop header, bulk-allocate
// one typed phi per slot and placement-construct them in slot order; other
// blocks copy the predecessor's slots directly.
MBasicBlock *
MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kind kind)
{
    MBasicBlock *block = new(graph.alloc()) MBasicBlock(graph, info, BytecodeSite(), kind);
    if (!block->init())
        return nullptr;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            size_t nphis = block->stackPosition_;

            TempAllocator &alloc = graph.alloc();
            MPhi *phis = (MPhi*)alloc.allocateArray<sizeof(MPhi)>(nphis);
            if (!phis)
                return nullptr;

            // Note: Phis are inserted in the same order as the slots.
            for (size_t i = 0; i < nphis; i++) {
                MDefinition *predSlot = pred->getSlot(i);

                // asm.js slots are always typed; boxed values never appear.
                JS_ASSERT(predSlot->type() != MIRType_Value);
                MPhi *phi = new(phis + i) MPhi(alloc, predSlot->type());

                // Reserve room for the entry input (added now) and the later
                // backedge input so neither addition can fail.
                JS_ALWAYS_TRUE(phi->reserveLength(2));
                phi->addInput(predSlot);

                // Add append Phis in the block.
                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return nullptr;
    }

    return block;
}
// Close a loop by attaching |pred| as the backedge, adding each slot's exit
// definition to the corresponding header phi. If any phi's type changes as a
// result, the new operands are rolled back and AbortReason_Disable is
// returned to the caller.
AbortReason
MBasicBlock::setBackedge(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    bool hadTypeChange = false;

    // Add exit definitions to each corresponding phi at the entry.
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
        MPhi *entryDef = *phi;
        MDefinition *exitDef = pred->slots_[entryDef->slot()];

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        bool typeChange = false;

        if (!entryDef->addInputSlow(exitDef, &typeChange))
            return AbortReason_Alloc;

        hadTypeChange |= typeChange;

        JS_ASSERT(entryDef->slot() < pred->stackDepth());
        setSlot(entryDef->slot(), entryDef);
    }

    if (hadTypeChange) {
        // Undo the backedge operands added above before bailing out.
        for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++)
            phi->removeOperand(phi->numOperands() - 1);
        return AbortReason_Disable;
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    if (!predecessors_.append(pred))
        return AbortReason_Alloc;

    return AbortReason_NoAbort;
}
// Propagate loop-header phis into this block's slots. A slot whose value
// still equals a phi's entry definition must be redirected to the phi
// itself; setBackedge() could not do this because exits are not captured
// in resume points.
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
    MPhiIterator it = header->phisBegin();
    MPhiIterator stop = header->phisEnd();
    while (it != stop) {
        MPhi *phi = *it;
        JS_ASSERT(phi->numOperands() == 2);

        // The entry definition is always the leftmost input to the phi.
        MDefinition *entryDef = phi->getOperand(0);
        MDefinition *exitDef = getSlot(phi->slot());

        // When the slot still holds the entry definition, redirect it to
        // the phi itself.
        if (entryDef == exitDef)
            setSlot(phi->slot(), phi);

        it++;
    }
}
// Close a loop by attaching |pred| as the backedge, adding each slot's exit
// definition as the second input of the corresponding entry phi, then
// promote this block from PENDING_LOOP_HEADER to LOOP_HEADER.
bool
MBasicBlock::setBackedge(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    for (uint32_t i = 0; i < pred->stackDepth(); i++) {
        MPhi *entryDef = entryResumePoint()->getOperand(i)->toPhi();
        MDefinition *exitDef = pred->slots_[i];

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        if (!entryDef->addInput(exitDef))
            return false;

        setSlot(i, entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Allocate and initialize this block's LIR phis: one LPhi per register piece
// of each MIR phi (BOX_PIECES for boxed Values, one otherwise), with
// per-predecessor operand arrays filled in later on each incoming edge.
bool
LBlock::init(TempAllocator& alloc)
{
    // Count the number of LPhis we'll need.
    size_t numLPhis = 0;
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        numLPhis += (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
    }

    // Allocate space for the LPhis.
    if (!phis_.init(alloc, numLPhis))
        return false;

    // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
    // operands on each incoming edge, and set their definitions at the start of
    // their defining block.
    size_t phiIndex = 0;
    size_t numPreds = block_->numPredecessors();
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        MOZ_ASSERT(phi->numOperands() == numPreds);

        int numPhis = (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
        // NOTE(review): the inner loop variable shadows the outer phi
        // iterator |i|; consider renaming for clarity.
        for (int i = 0; i < numPhis; i++) {
            void* array = alloc.allocateArray<sizeof(LAllocation)>(numPreds);
            LAllocation* inputs = static_cast<LAllocation*>(array);
            if (!inputs)
                return false;

            // MSVC 2015 cannot handle "new (&phis_[phiIndex++])"
            void* addr = &phis_[phiIndex++];
            LPhi* lphi = new (addr) LPhi(phi, inputs);
            lphi->setBlock(this);
        }
    }
    return true;
}
// Retroactively turn this block into a loop header whose backedge is
// |newBackedge|. The backedge is swapped into the last predecessor position
// and phi operands are reordered to match.
void
MBasicBlock::setLoopHeader(MBasicBlock* newBackedge)
{
    MOZ_ASSERT(!isLoopHeader());
    kind_ = LOOP_HEADER;

    size_t numPreds = numPredecessors();
    MOZ_ASSERT(numPreds != 0);

    size_t lastIndex = numPreds - 1;
    size_t oldIndex = 0;
    // Locate |newBackedge| among the predecessors; it must be present.
    for (; ; ++oldIndex) {
        MOZ_ASSERT(oldIndex < numPreds);
        MBasicBlock* pred = getPredecessor(oldIndex);
        if (pred == newBackedge)
            break;
    }

    // Set the loop backedge to be the last element in predecessors_.
    Swap(predecessors_[oldIndex], predecessors_[lastIndex]);

    // If we have phis, reorder their operands accordingly.
    if (!phisEmpty()) {
        getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
        getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
        for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
            MPhi* phi = *iter;
            MDefinition* last = phi->getOperand(oldIndex);
            MDefinition* old = phi->getOperand(lastIndex);
            phi->replaceOperand(oldIndex, old);
            phi->replaceOperand(lastIndex, last);
        }
    }

    MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
    MOZ_ASSERT(backedge() == newBackedge);
}
// Close an asm.js loop by attaching |pred| as the backedge. Header phis were
// pre-sized by MBasicBlock::NewAsmJS, so adding the exit input is
// infallible here.
bool
MBasicBlock::setBackedgeAsmJS(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(stackDepth() == pred->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
        MPhi *entryDef = *phi;
        MDefinition *exitDef = pred->getSlot(entryDef->slot());

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        // Assert that the phi already has the correct type.
        JS_ASSERT(entryDef->type() == exitDef->type());
        JS_ASSERT(entryDef->type() != MIRType_Value);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        // MBasicBlock::NewAsmJS calls reserveLength(2) for loop header phis.
        entryDef->addInput(exitDef);

        JS_ASSERT(entryDef->slot() < pred->stackDepth());
        setSlot(entryDef->slot(), entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Propagate |phi|'s newly-determined type to phis that use it: users that
// could not be typed adopt it, int32/double mixtures widen to double, and
// any other mismatch falls back to Value.
bool
TypeAnalyzer::propagateSpecialization(MPhi *phi)
{
    JS_ASSERT(phi->type() != MIRType_None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi *use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType_None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 and double operands as double.
            if (IsNumberType(use->type()) && IsNumberType(phi->type())) {
                if (!respecialize(use, MIRType_Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType_Value))
                return false;
        }
    }

    return true;
}
// Initialize this block's stack state and entry resume point from |pred|
// (or from the bytecode analysis when there is no predecessor), after
// popping |popped| values. For pending loop headers, creates phis for all
// local slots and for the top |stackPhiCount| stack values; intermediate
// stack values that are not part of the loop are passed through unchanged.
bool
MBasicBlock::inherit(TempAllocator &alloc, BytecodeAnalysis *analysis, MBasicBlock *pred,
                     uint32_t popped, unsigned stackPhiCount)
{
    if (pred) {
        stackPosition_ = pred->stackPosition_;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
        // Pending loop headers get phis instead of copied slots; see below.
        if (kind_ != PENDING_LOOP_HEADER)
            copySlots(pred);
    } else {
        // No predecessor: derive the stack depth from the bytecode analysis.
        uint32_t stackDepth = analysis->info(pc()).stackDepth;
        stackPosition_ = info().firstStackSlot() + stackDepth;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
    }

    JS_ASSERT(info_.nslots() >= stackPosition_);
    JS_ASSERT(!entryResumePoint_);

    // Propagate the caller resume point from the inherited block.
    MResumePoint *callerResumePoint = pred ? pred->callerResumePoint() : nullptr;

    // Create a resume point using our initial stack state.
    entryResumePoint_ = new(alloc) MResumePoint(this, pc(), callerResumePoint,
                                                MResumePoint::ResumeAt);
    if (!entryResumePoint_->init(alloc))
        return false;

    if (pred) {
        if (!predecessors_.append(pred))
            return false;

        if (kind_ == PENDING_LOOP_HEADER) {
            size_t i = 0;
            // Local (non-stack) slots always get phis.
            for (i = 0; i < info().firstStackSlot(); i++) {
                MPhi *phi = MPhi::New(alloc, i);
                if (!phi->addInputSlow(pred->getSlot(i)))
                    return false;
                addPhi(phi);
                setSlot(i, phi);
                entryResumePoint()->setOperand(i, phi);
            }

            JS_ASSERT(stackPhiCount <= stackDepth());
            JS_ASSERT(info().firstStackSlot() <= stackDepth() - stackPhiCount);

            // Avoid creating new phis for stack values that aren't part of the
            // loop. Note that for loop headers that can OSR, all values on the
            // stack are part of the loop.
            for (; i < stackDepth() - stackPhiCount; i++) {
                MDefinition *val = pred->getSlot(i);
                setSlot(i, val);
                entryResumePoint()->setOperand(i, val);
            }

            // The topmost |stackPhiCount| stack values are part of the loop
            // and get phis like the locals above.
            for (; i < stackDepth(); i++) {
                MPhi *phi = MPhi::New(alloc, i);
                if (!phi->addInputSlow(pred->getSlot(i)))
                    return false;
                addPhi(phi);
                setSlot(i, phi);
                entryResumePoint()->setOperand(i, phi);
            }
        } else {
            for (size_t i = 0; i < stackDepth(); i++)
                entryResumePoint()->setOperand(i, getSlot(i));
        }
    } else {
        /*
         * Don't leave the operands uninitialized for the caller, as it may not
         * initialize them later on.
         */
        for (size_t i = 0; i < stackDepth(); i++)
            entryResumePoint()->clearOperand(i);
    }

    return true;
}
// Unroll the loop described by |bound| UnrollCount times, when it is a simple
// two-block loop (header ending in the bound's test, body ending in the
// backedge) whose instructions are all clonable. The transformed graph is:
// oldPreheader -> unrolledHeader <-> unrolledBackedge (the unrolled copies),
// then newPreheader -> original header for the remaining iterations.
// Bails out (returns early) whenever any legality condition fails.
void LoopUnroller::go(LoopIterationBound *bound)
{
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    JS_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest *test = bound->test;
    if (header->lastIns() != test)
        return;
    // The non-backedge branch must leave the loop (have a higher RPO id).
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    JS_ASSERT(backedge->phisEmpty());

    MBasicBlock *bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock *block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction *ins = *iter;
            if (ins->canClone())
                continue;
            // Control instructions are regenerated rather than cloned.
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef DEBUG
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.

    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    CompileInfo &info = oldPreheader->info();
    if (header->trackedSite().pc()) {
        unrolledHeader = MBasicBlock::New(graph, nullptr, info, oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::New(graph, nullptr, info, unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::New(graph, nullptr, info, unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        // No tracked pc: this is asm.js-style code, so use the asm.js
        // block constructor (no bytecode sites or resume points).
        unrolledHeader = MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }

    // Replacement resume points are constructed explicitly below.
    unrolledHeader->discardAllResumePoints();
    unrolledBackedge->discardAllResumePoints();
    newPreheader->discardAllResumePoints();

    // Insert new blocks at their RPO position, and update block ids.
    graph.insertBlockAfter(oldPreheader, unrolledHeader);
    graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
    graph.insertBlockAfter(unrolledBackedge, newPreheader);
    graph.renumberBlocksAfter(oldPreheader);

    if (!unrolledDefinitions.init())
        CrashAtUnhandlableOOM("LoopUnroller::go");

    // Add phis to the unrolled loop header which correspond to the phis in the
    // original loop header.
    JS_ASSERT(header->getPredecessor(0) == oldPreheader);
    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
        MPhi *old = *iter;
        JS_ASSERT(old->numOperands() == 2);
        MPhi *phi = MPhi::New(alloc);
        // Carry the old phi's inferred type/range info over to the clone.
        phi->setResultType(old->type());
        phi->setResultTypeSet(old->resultTypeSet());
        phi->setRange(old->range());

        unrolledHeader->addPhi(phi);

        if (!phi->reserveLength(2))
            CrashAtUnhandlableOOM("LoopUnroller::go");

        // Set the first input for the phi for now. We'll set the second after
        // finishing the unroll.
        phi->addInput(old->getOperand(0));

        // The old phi will now take the value produced by the unrolled loop.
        old->replaceOperand(0, phi);

        if (!unrolledDefinitions.putNew(old, phi))
            CrashAtUnhandlableOOM("LoopUnroller::go");
    }

    // The loop condition can bail out on e.g. integer overflow, so make a
    // resume point based on the initial resume point of the original header.
    MResumePoint *headerResumePoint = header->entryResumePoint();
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
        unrolledHeader->setEntryResumePoint(rp);

        // Perform an interrupt check at the start of the unrolled loop.
        unrolledHeader->add(MInterruptCheck::New(alloc));
    }

    // Generate code for the test in the unrolled loop.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        MDefinition *replacement = getReplacementDefinition(def);
        remainingIterationsInequality.replaceTerm(i, replacement);
    }
    MCompare *compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
    MTest *unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
    unrolledHeader->end(unrolledTest);

    // Make an entry resume point for the unrolled body. The unrolled header
    // does not have side effects on stack values, even if the original loop
    // header does, so use the same resume point as for the unrolled header.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
        unrolledBackedge->setEntryResumePoint(rp);
    }

    // Make an entry resume point for the new preheader. There are no
    // instructions which use this but some other stuff wants one to be here.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
        newPreheader->setEntryResumePoint(rp);
    }

    // Generate the unrolled code.
    JS_ASSERT(UnrollCount > 1);
    size_t unrollIndex = 0;
    while (true) {
        // Clone the contents of the original loop into the unrolled loop body.
        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
            MBasicBlock *block = bodyBlocks[i];
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                MInstruction *ins = *iter;
                if (ins->canClone()) {
                    makeReplacementInstruction(*iter);
                } else {
                    // Control instructions are handled separately.
                    JS_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                }
            }
        }

        // Compute the value of each loop header phi after the execution of
        // this unrolled iteration.
        MDefinitionVector phiValues(alloc);
        JS_ASSERT(header->getPredecessor(1) == backedge);
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            MDefinition *oldInput = old->getOperand(1);
            if (!phiValues.append(getReplacementDefinition(oldInput)))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }

        // Reset the replacement map; the next iteration starts fresh from
        // the phi values just computed.
        unrolledDefinitions.clear();

        if (unrollIndex == UnrollCount - 1) {
            // We're at the end of the last unrolled iteration, set the
            // backedge input for the unrolled loop phis.
            size_t phiIndex = 0;
            for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
                MPhi *phi = *iter;
                phi->addInput(phiValues[phiIndex++]);
            }
            JS_ASSERT(phiIndex == phiValues.length());
            break;
        }

        // Update the map for the phis in the next iteration.
        size_t phiIndex = 0;
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }
        JS_ASSERT(phiIndex == phiValues.length());

        unrollIndex++;
    }

    // Close the unrolled loop with its backedge jump.
    MGoto *backedgeJump = MGoto::New(alloc, unrolledHeader);
    unrolledBackedge->end(backedgeJump);

    // Place the old preheader before the unrolled loop.
    JS_ASSERT(oldPreheader->lastIns()->isGoto());
    oldPreheader->discardLastIns();
    oldPreheader->end(MGoto::New(alloc, unrolledHeader));

    // Place the new preheader before the original loop.
    newPreheader->end(MGoto::New(alloc, header));

    // Cleanup the MIR graph.
    if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
        CrashAtUnhandlableOOM("LoopUnroller::go");
    header->replacePredecessor(oldPreheader, newPreheader);
    oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
    newPreheader->setSuccessorWithPhis(header, 0);
    unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
}
// Remove and return the next phi from the worklist, clearing its
// in-worklist flag before handing it back.
MPhi *popPhi() {
    MPhi *next = phiWorklist_.popCopy();
    next->setNotInWorklist();
    return next;
}