bool
MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, uint32_t popped)
{
    MOZ_ASSERT(pred);
    MOZ_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

    for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
        MDefinition* mine = getSlot(i);
        MDefinition* other = pred->getSlot(i);

        if (mine != other) {
            // If the current instruction is a phi, and it was created in this
            // basic block, then we have already placed this phi and should
            // instead append to its operands.
            if (mine->isPhi() && mine->block() == this) {
                MOZ_ASSERT(predecessors_.length());
                if (!mine->toPhi()->addInputSlow(other))
                    return false;
            } else {
                // Otherwise, create a new phi node.
                MPhi* phi;
                if (mine->type() == other->type())
                    phi = MPhi::New(alloc.fallible(), mine->type());
                else
                    phi = MPhi::New(alloc.fallible());
                if (!phi)
                    return false;
                addPhi(phi);

                // Prime the phi for each predecessor, so input(x) comes from
                // predecessor(x).
                if (!phi->reserveLength(predecessors_.length() + 1))
                    return false;

                for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds; ++j) {
                    MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
                    phi->addInput(mine);
                }
                phi->addInput(other);

                setSlot(i, phi);
                if (entryResumePoint())
                    entryResumePoint()->replaceOperand(i, phi);
            }
        }
    }

    return predecessors_.append(pred);
}
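// Hedged usage sketch (hypothetical driver code, not part of the function above):
// when a join block already holds the value flowing in from its first arm,
// adding the second arm as a predecessor materializes one phi per differing
// slot, primed so that input(x) comes from predecessor(x).
//
//     // joinBlock currently has thenBlock as its only predecessor, and its
//     // slot 0 holds thenDef; elseBlock's slot 0 holds elseDef.
//     if (!joinBlock->addPredecessorPopN(alloc, elseBlock, /* popped = */ 0))
//         return false;
//     // joinBlock->getSlot(0) is now a phi with inputs (thenDef, elseDef).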
MBasicBlock *
MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kind kind)
{
    MBasicBlock *block = new MBasicBlock(graph, info, /* entryPC = */ NULL, kind);
    if (!block->init())
        return NULL;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            for (size_t i = 0; i < block->stackPosition_; i++) {
                MDefinition *predSlot = pred->getSlot(i);

                JS_ASSERT(predSlot->type() != MIRType_Value);
                MPhi *phi = MPhi::New(i, predSlot->type());

                JS_ALWAYS_TRUE(phi->reserveLength(2));
                phi->addInput(predSlot);

                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return NULL;
    }

    return block;
}
bool
MBasicBlock::addPredecessorPopN(MBasicBlock *pred, uint32_t popped)
{
    JS_ASSERT(pred);
    JS_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

    for (uint32_t i = 0; i < stackPosition_; i++) {
        MDefinition *mine = getSlot(i);
        MDefinition *other = pred->getSlot(i);

        if (mine != other) {
            MPhi *phi;

            // If the current instruction is a phi, and it was created in this
            // basic block, then we have already placed this phi and should
            // instead append to its operands.
            if (mine->isPhi() && mine->block() == this) {
                JS_ASSERT(predecessors_.length());
                phi = mine->toPhi();
            } else {
                // Otherwise, create a new phi node.
                phi = MPhi::New(i);
                addPhi(phi);

                // Prime the phi for each predecessor, so input(x) comes from
                // predecessor(x).
                for (size_t j = 0; j < predecessors_.length(); j++) {
                    JS_ASSERT(predecessors_[j]->getSlot(i) == mine);
                    if (!phi->addInput(mine))
                        return false;
                }

                setSlot(i, phi);
                entryResumePoint()->replaceOperand(i, phi);
            }

            if (!phi->addInput(other))
                return false;
        }
    }

    return predecessors_.append(pred);
}
MBasicBlock*
MBasicBlock::NewAsmJS(MIRGraph& graph, const CompileInfo& info, MBasicBlock* pred, Kind kind)
{
    BytecodeSite* site = new(graph.alloc()) BytecodeSite();
    MBasicBlock* block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
    if (!block->init())
        return nullptr;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            size_t nphis = block->stackPosition_;
            size_t nfree = graph.phiFreeListLength();

            TempAllocator& alloc = graph.alloc();
            MPhi* phis = nullptr;
            if (nphis > nfree) {
                phis = alloc.allocateArray<MPhi>(nphis - nfree);
                if (!phis)
                    return nullptr;
            }

            // Note: Phis are inserted in the same order as the slots.
            for (size_t i = 0; i < nphis; i++) {
                MDefinition* predSlot = pred->getSlot(i);

                MOZ_ASSERT(predSlot->type() != MIRType_Value);

                MPhi* phi;
                if (i < nfree)
                    phi = graph.takePhiFromFreeList();
                else
                    phi = phis + (i - nfree);
                new(phi) MPhi(alloc, predSlot->type());

                phi->addInput(predSlot);

                // Append the phi to the block.
                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return nullptr;
    }

    return block;
}
bool
MBasicBlock::setBackedgeAsmJS(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(stackDepth() == pred->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    // Note: Phis are inserted in the same order as the slots. (see
    // MBasicBlock::NewAsmJS)
    size_t slot = 0;
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
        MPhi *entryDef = *phi;
        MDefinition *exitDef = pred->getSlot(slot);

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        // Assert that the phi already has the correct type.
        JS_ASSERT(entryDef->type() == exitDef->type());
        JS_ASSERT(entryDef->type() != MIRType_Value);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        // MBasicBlock::NewAsmJS calls reserveLength(2) for loop header phis.
        entryDef->addInput(exitDef);

        MOZ_ASSERT(slot < pred->stackDepth());
        setSlot(slot, entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
bool
MBasicBlock::inherit(MBasicBlock *pred, uint32_t popped)
{
    if (pred) {
        stackPosition_ = pred->stackPosition_;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
        if (kind_ != PENDING_LOOP_HEADER)
            copySlots(pred);
    } else {
        uint32_t stackDepth = info().script()->analysis()->getCode(pc()).stackDepth;
        stackPosition_ = info().firstStackSlot() + stackDepth;
        JS_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
    }

    JS_ASSERT(info_.nslots() >= stackPosition_);
    JS_ASSERT(!entryResumePoint_);

    // Propagate the caller resume point from the inherited block.
    MResumePoint *callerResumePoint = pred ? pred->callerResumePoint() : NULL;

    // Create a resume point using our initial stack state.
    entryResumePoint_ = new MResumePoint(this, pc(), callerResumePoint, MResumePoint::ResumeAt);
    if (!entryResumePoint_->init(this))
        return false;

    if (pred) {
        if (!predecessors_.append(pred))
            return false;

        if (kind_ == PENDING_LOOP_HEADER) {
            for (size_t i = 0; i < stackDepth(); i++) {
                MPhi *phi = MPhi::New(i);
                if (!phi->addInput(pred->getSlot(i)))
                    return false;
                addPhi(phi);
                setSlot(i, phi);
                entryResumePoint()->initOperand(i, phi);
            }
        } else {
            for (size_t i = 0; i < stackDepth(); i++)
                entryResumePoint()->initOperand(i, getSlot(i));
        }
    }

    return true;
}
MBasicBlock *
MBasicBlock::NewAsmJS(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred, Kind kind)
{
    MBasicBlock *block = new(graph.alloc()) MBasicBlock(graph, info, BytecodeSite(), kind);
    if (!block->init())
        return nullptr;

    if (pred) {
        block->stackPosition_ = pred->stackPosition_;

        if (block->kind_ == PENDING_LOOP_HEADER) {
            size_t nphis = block->stackPosition_;

            TempAllocator &alloc = graph.alloc();
            MPhi *phis = (MPhi*)alloc.allocateArray<sizeof(MPhi)>(nphis);
            if (!phis)
                return nullptr;

            // Note: Phis are inserted in the same order as the slots.
            for (size_t i = 0; i < nphis; i++) {
                MDefinition *predSlot = pred->getSlot(i);

                JS_ASSERT(predSlot->type() != MIRType_Value);
                MPhi *phi = new(phis + i) MPhi(alloc, predSlot->type());

                JS_ALWAYS_TRUE(phi->reserveLength(2));
                phi->addInput(predSlot);

                // Append the phi to the block.
                block->addPhi(phi);
                block->setSlot(i, phi);
            }
        } else {
            block->copySlots(pred);
        }

        if (!block->predecessors_.append(pred))
            return nullptr;
    }

    return block;
}
bool
MBasicBlock::setBackedge(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    for (uint32_t i = 0; i < pred->stackDepth(); i++) {
        MPhi *entryDef = entryResumePoint()->getOperand(i)->toPhi();
        MDefinition *exitDef = pred->slots_[i];

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        if (!entryDef->addInput(exitDef))
            return false;

        setSlot(i, entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
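// Hedged sketch of the loop-construction protocol implemented above (the
// surrounding builder code is hypothetical): a loop header is first created as
// PENDING_LOOP_HEADER with one-input phis seeded from the preheader, and
// setBackedge()/setBackedgeAsmJS() later appends the definition flowing around
// the backedge as each phi's second input.
//
//     // header was created with kind PENDING_LOOP_HEADER from `preheader`.
//     // ... emit the loop body, ending in `backedgeBlock`, which jumps to header ...
//     if (!header->setBackedge(backedgeBlock))
//         return false;
//     // header->kind_ is now LOOP_HEADER and each header phi has two inputs:
//     // operand 0 from the preheader, operand 1 from the backedge.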
void
LoopUnroller::go(LoopIterationBound *bound)
{
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    JS_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest *test = bound->test;
    if (header->lastIns() != test)
        return;
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    JS_ASSERT(backedge->phisEmpty());

    MBasicBlock *bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock *block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction *ins = *iter;
            if (ins->canClone())
                continue;
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef DEBUG
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.

    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    CompileInfo &info = oldPreheader->info();
    if (header->trackedSite().pc()) {
        unrolledHeader =
            MBasicBlock::New(graph, nullptr, info, oldPreheader,
                             header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge =
            MBasicBlock::New(graph, nullptr, info, unrolledHeader,
                             backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader =
            MBasicBlock::New(graph, nullptr, info, unrolledHeader,
                             oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        unrolledHeader =
            MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge =
            MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader =
            MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }

    unrolledHeader->discardAllResumePoints();
    unrolledBackedge->discardAllResumePoints();
    newPreheader->discardAllResumePoints();

    // Insert new blocks at their RPO position, and update block ids.
    graph.insertBlockAfter(oldPreheader, unrolledHeader);
    graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
    graph.insertBlockAfter(unrolledBackedge, newPreheader);
    graph.renumberBlocksAfter(oldPreheader);

    if (!unrolledDefinitions.init())
        CrashAtUnhandlableOOM("LoopUnroller::go");

    // Add phis to the unrolled loop header which correspond to the phis in the
    // original loop header.
    JS_ASSERT(header->getPredecessor(0) == oldPreheader);
    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
        MPhi *old = *iter;
        JS_ASSERT(old->numOperands() == 2);
        MPhi *phi = MPhi::New(alloc);
        phi->setResultType(old->type());
        phi->setResultTypeSet(old->resultTypeSet());
        phi->setRange(old->range());

        unrolledHeader->addPhi(phi);

        if (!phi->reserveLength(2))
            CrashAtUnhandlableOOM("LoopUnroller::go");

        // Set the first input for the phi for now. We'll set the second after
        // finishing the unroll.
        phi->addInput(old->getOperand(0));

        // The old phi will now take the value produced by the unrolled loop.
        old->replaceOperand(0, phi);

        if (!unrolledDefinitions.putNew(old, phi))
            CrashAtUnhandlableOOM("LoopUnroller::go");
    }

    // The loop condition can bail out on e.g. integer overflow, so make a
    // resume point based on the initial resume point of the original header.
    MResumePoint *headerResumePoint = header->entryResumePoint();
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
        unrolledHeader->setEntryResumePoint(rp);

        // Perform an interrupt check at the start of the unrolled loop.
        unrolledHeader->add(MInterruptCheck::New(alloc));
    }

    // Generate code for the test in the unrolled loop.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        MDefinition *replacement = getReplacementDefinition(def);
        remainingIterationsInequality.replaceTerm(i, replacement);
    }
    MCompare *compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
    MTest *unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
    unrolledHeader->end(unrolledTest);

    // Make an entry resume point for the unrolled body. The unrolled header
    // does not have side effects on stack values, even if the original loop
    // header does, so use the same resume point as for the unrolled header.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
        unrolledBackedge->setEntryResumePoint(rp);
    }

    // Make an entry resume point for the new preheader. There are no
    // instructions which use this but some other stuff wants one to be here.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
        newPreheader->setEntryResumePoint(rp);
    }

    // Generate the unrolled code.
    JS_ASSERT(UnrollCount > 1);
    size_t unrollIndex = 0;
    while (true) {
        // Clone the contents of the original loop into the unrolled loop body.
        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
            MBasicBlock *block = bodyBlocks[i];
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                MInstruction *ins = *iter;
                if (ins->canClone()) {
                    makeReplacementInstruction(*iter);
                } else {
                    // Control instructions are handled separately.
                    JS_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                }
            }
        }

        // Compute the value of each loop header phi after the execution of
        // this unrolled iteration.
        MDefinitionVector phiValues(alloc);
        JS_ASSERT(header->getPredecessor(1) == backedge);
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            MDefinition *oldInput = old->getOperand(1);
            if (!phiValues.append(getReplacementDefinition(oldInput)))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }

        unrolledDefinitions.clear();

        if (unrollIndex == UnrollCount - 1) {
            // We're at the end of the last unrolled iteration, set the
            // backedge input for the unrolled loop phis.
            size_t phiIndex = 0;
            for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
                MPhi *phi = *iter;
                phi->addInput(phiValues[phiIndex++]);
            }
            JS_ASSERT(phiIndex == phiValues.length());
            break;
        }

        // Update the map for the phis in the next iteration.
        size_t phiIndex = 0;
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }
        JS_ASSERT(phiIndex == phiValues.length());

        unrollIndex++;
    }

    MGoto *backedgeJump = MGoto::New(alloc, unrolledHeader);
    unrolledBackedge->end(backedgeJump);

    // Place the old preheader before the unrolled loop.
    JS_ASSERT(oldPreheader->lastIns()->isGoto());
    oldPreheader->discardLastIns();
    oldPreheader->end(MGoto::New(alloc, unrolledHeader));

    // Place the new preheader before the original loop.
    newPreheader->end(MGoto::New(alloc, header));

    // Cleanup the MIR graph.
    if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
        CrashAtUnhandlableOOM("LoopUnroller::go");
    header->replacePredecessor(oldPreheader, newPreheader);
    oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
    newPreheader->setSuccessorWithPhis(header, 0);
    unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
}
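// Worked example of the exit condition used above (illustrative numbers only):
// with UnrollCount == 10, the unrolled loop is taken while
// iterationBound - iterationCount - 10 >= 0, i.e. while at least 10 more
// iterations of the original loop remain. Once fewer than UnrollCount
// iterations are left, the unrolled header's test falls through to
// newPreheader, which jumps into the original (un-unrolled) loop to finish the
// remaining 0..9 iterations.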
bool
MBasicBlock::inherit(TempAllocator& alloc, BytecodeAnalysis* analysis, MBasicBlock* pred,
                     uint32_t popped, unsigned stackPhiCount)
{
    if (pred) {
        stackPosition_ = pred->stackPosition_;
        MOZ_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
        if (kind_ != PENDING_LOOP_HEADER)
            copySlots(pred);
    } else {
        uint32_t stackDepth = analysis->info(pc()).stackDepth;
        stackPosition_ = info().firstStackSlot() + stackDepth;
        MOZ_ASSERT(stackPosition_ >= popped);
        stackPosition_ -= popped;
    }

    MOZ_ASSERT(info_.nslots() >= stackPosition_);
    MOZ_ASSERT(!entryResumePoint_);

    // Propagate the caller resume point from the inherited block.
    callerResumePoint_ = pred ? pred->callerResumePoint() : nullptr;

    // Create a resume point using our initial stack state.
    entryResumePoint_ = new(alloc) MResumePoint(this, pc(), MResumePoint::ResumeAt);
    if (!entryResumePoint_->init(alloc))
        return false;

    if (pred) {
        if (!predecessors_.append(pred))
            return false;

        if (kind_ == PENDING_LOOP_HEADER) {
            size_t i = 0;
            for (i = 0; i < info().firstStackSlot(); i++) {
                MPhi* phi = MPhi::New(alloc);
                phi->addInput(pred->getSlot(i));
                addPhi(phi);
                setSlot(i, phi);
                entryResumePoint()->initOperand(i, phi);
            }

            MOZ_ASSERT(stackPhiCount <= stackDepth());
            MOZ_ASSERT(info().firstStackSlot() <= stackDepth() - stackPhiCount);

            // Avoid creating new phis for stack values that aren't part of the
            // loop. Note that for loop headers that can OSR, all values on the
            // stack are part of the loop.
            for (; i < stackDepth() - stackPhiCount; i++) {
                MDefinition* val = pred->getSlot(i);
                setSlot(i, val);
                entryResumePoint()->initOperand(i, val);
            }

            for (; i < stackDepth(); i++) {
                MPhi* phi = MPhi::New(alloc);
                phi->addInput(pred->getSlot(i));
                addPhi(phi);
                setSlot(i, phi);
                entryResumePoint()->initOperand(i, phi);
            }
        } else {
            for (size_t i = 0; i < stackDepth(); i++)
                entryResumePoint()->initOperand(i, getSlot(i));
        }
    } else {
        /*
         * Don't leave the operands uninitialized for the caller, as it may not
         * initialize them later on.
         */
        for (size_t i = 0; i < stackDepth(); i++)
            entryResumePoint()->clearOperand(i);
    }

    return true;
}
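// Illustrative example for the stackPhiCount handling above (hypothetical
// numbers): entering a pending loop header with firstStackSlot() == 4 and
// stackDepth() == 9, where only the top two stack values are loop-carried
// (stackPhiCount == 2), slots 0..3 (arguments/locals) get phis, slots 4..6
// simply reuse the predecessor's definitions, and slots 7..8 get phis.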