// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. if (!block->phisEmpty()) { uint32_t index = pred->positionInPhiSuccessor(); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) { MPhi *phi = *iter; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition *op = phi->getOperand(index); if (op == phi) continue; // Set the operand to the phi itself rather than just releasing it // because removePredecessor expects to have something to release. phi->replaceOperand(index, phi); if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; } } block->removePredecessor(pred); return true; }
// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock* block, MBasicBlock* pred, size_t predIndex) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. MOZ_ASSERT(nextDef_ == nullptr); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) { MPhi* phi = *iter++; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition* op = phi->getOperand(predIndex); phi->removeOperand(predIndex); nextDef_ = iter != end ? *iter : nullptr; if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; // If |nextDef_| became dead while we had it pinned, advance the iterator // and discard it now. while (nextDef_ && !nextDef_->hasUses()) { phi = nextDef_->toPhi(); iter++; nextDef_ = iter != end ? *iter : nullptr; discardDefsRecursively(phi); } } nextDef_ = nullptr; block->removePredecessorWithoutPhiOperands(pred, predIndex); return true; }
// Propagate the phis of a loop |header| into this exit block: any slot that
// still holds a phi's entry definition missed the rewrite performed when the
// backedge was attached (exits are not captured in resume points), so point
// the slot at the phi itself.
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
    MResumePoint *entryRp = header->entryResumePoint();
    size_t depth = entryRp->numOperands();

    for (size_t i = 0; i < depth; i++) {
        MDefinition *headerDef = entryRp->getOperand(i);
        MDefinition *exitDef = getSlot(i);

        if (headerDef->isPhi()) {
            // Phis are allocated by NewPendingLoopHeader; operand 0 is always
            // the entry (pre-loop) definition.
            MPhi *phi = headerDef->toPhi();
            MOZ_ASSERT(phi->numOperands() == 2);
            if (phi->getOperand(0) == exitDef)
                setSlot(i, phi);
        } else {
            // Non-phi header values must dominate the loop and be unchanged
            // at the exit.
            MOZ_ASSERT(headerDef->block()->id() < header->id());
            MOZ_ASSERT(headerDef == exitDef);
        }
    }
}
// Remove the CFG edge between |pred| and |block|, after releasing the phi // operands on that edge and discarding any definitions consequently made dead. bool ValueNumberer::removePredecessorAndDoDCE(MBasicBlock *block, MBasicBlock *pred, size_t predIndex) { MOZ_ASSERT(!block->isMarked(), "Block marked unreachable should have predecessors removed already"); // Before removing the predecessor edge, scan the phi operands for that edge // for dead code before they get removed. MOZ_ASSERT(nextDef_ == nullptr); for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) { MPhi *phi = *iter++; MOZ_ASSERT(!values_.has(phi), "Visited phi in block having predecessor removed"); MDefinition *op = phi->getOperand(predIndex); phi->removeOperand(predIndex); nextDef_ = *iter; if (!handleUseReleased(op, DontSetUseRemoved) || !processDeadDefs()) return false; } nextDef_ = nullptr; block->removePredecessorWithoutPhiOperands(pred, predIndex); return true; }
// Determine whether the possible value of start (a phi node within the loop) // can become smaller than an initial value at loop entry. bool Loop::nonDecreasing(MDefinition *initial, MDefinition *start) { MDefinitionVector worklist; MDefinitionVector seen; if (!worklist.append(start)) return false; while (!worklist.empty()) { MDefinition *def = worklist.popCopy(); bool duplicate = false; for (size_t i = 0; i < seen.length() && !duplicate; i++) { if (seen[i] == def) duplicate = true; } if (duplicate) continue; if (!seen.append(def)) return false; if (def->type() != MIRType_Int32) return false; if (!isInLoop(def)) { if (def != initial) return false; continue; } if (def->isPhi()) { MPhi *phi = def->toPhi(); for (size_t i = 0; i < phi->numOperands(); i++) { if (!worklist.append(phi->getOperand(i))) return false; } continue; } if (def->isAdd()) { if (def->toAdd()->specialization() != MIRType_Int32) return false; MDefinition *lhs = def->toAdd()->getOperand(0); MDefinition *rhs = def->toAdd()->getOperand(1); if (!rhs->isConstant()) return false; Value v = rhs->toConstant()->value(); if (!v.isInt32() || v.toInt32() < 0) return false; if (!worklist.append(lhs)) return false; continue; } return false; } return true; }
// Finish a pending loop header by attaching the backedge |pred|: append the
// exit definition from |pred| as the second operand of each header phi. On
// success the block becomes a proper LOOP_HEADER. Returns AbortReason_Disable
// (after rolling back the appended operands) if any phi's type changed, and
// AbortReason_Alloc on OOM.
AbortReason
MBasicBlock::setBackedge(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    bool hadTypeChange = false;

    // Add exit definitions to each corresponding phi at the entry.
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
        MPhi *entryDef = *phi;
        MDefinition *exitDef = pred->slots_[entryDef->slot()];

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        bool typeChange = false;

        if (!entryDef->addInputSlow(exitDef, &typeChange))
            return AbortReason_Alloc;

        hadTypeChange |= typeChange;

        JS_ASSERT(entryDef->slot() < pred->stackDepth());
        setSlot(entryDef->slot(), entryDef);
    }

    if (hadTypeChange) {
        // Roll back the operands appended above so the graph is unchanged;
        // the caller can restart compilation with fresh type information.
        for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++)
            phi->removeOperand(phi->numOperands() - 1);
        return AbortReason_Disable;
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    if (!predecessors_.append(pred))
        return AbortReason_Alloc;

    return AbortReason_NoAbort;
}
// Finish a pending loop header by wiring the |backedge| block's values into
// the header phis, one stack slot at a time. Sets |*hadTypeChange| if adding
// the backedge input changed any phi's type. Returns false on OOM.
bool
MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge, bool* hadTypeChange)
{
    // We must be a pending loop header
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

    size_t stackDepth = entryResumePoint()->stackDepth();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        // Get the value stack-slot of the back edge.
        MDefinition* exitDef = backedge->getSlot(slot);

        // Get the value of the loop header.
        MDefinition* loopDef = entryResumePoint()->getOperand(slot);
        if (loopDef->block() != this) {
            // If we are finishing a pending loop header, then we need to ensure
            // that all operands are phis. This is usually the case, except for
            // object/arrays build with generators, in which case we share the
            // same allocations across all blocks.
            MOZ_ASSERT(loopDef->block()->id() < id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi* entryDef = loopDef->toPhi();
        MOZ_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        bool typeChange = false;

        // Append the backedge input, then check whether the phi's type must
        // widen to accommodate it.
        if (!entryDef->addInputSlow(exitDef))
            return false;
        if (!entryDef->checkForTypeChange(exitDef, &typeChange))
            return false;
        *hadTypeChange |= typeChange;
        setSlot(slot, entryDef);
    }

    return true;
}
// Finish a pending wasm loop header by attaching the backedge |pred|: append
// each slot's exit definition as the second operand of the corresponding
// header phi. Wasm phis are fully typed up front, so no type changes can
// occur. Returns false on OOM (from the predecessor append).
bool
MBasicBlock::setBackedgeWasm(MBasicBlock* pred)
{
    // Predecessors must be finished and agree on stack depth.
    MOZ_ASSERT(hasLastIns());
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(stackDepth() == pred->stackDepth());

    // Only a pending loop header may acquire a backedge.
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Phis were inserted in the same order as the slots (see
    // MBasicBlock::New), so walk them in lockstep with the slot index.
    size_t slotIndex = 0;
    for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++, slotIndex++) {
        MPhi* headerPhi = *iter;
        MDefinition* backedgeDef = pred->getSlot(slotIndex);

        // Phis must already have been placed for every slot.
        MOZ_ASSERT(headerPhi->block() == this);

        // The phi already carries the correct, concrete type.
        MOZ_ASSERT(headerPhi->type() == backedgeDef->type());
        MOZ_ASSERT(headerPhi->type() != MIRType::Value);

        if (backedgeDef == headerPhi) {
            // A self-referential backedge value makes the phi redundant; use
            // the entry (first) input instead. Elimination happens later to
            // avoid weirdness around pending continue edges which might still
            // hold onto phis.
            backedgeDef = headerPhi->getOperand(0);
        }

        // Phis always have room for 2 operands, so this can't fail.
        MOZ_ASSERT(headerPhi->numOperands() == 1);
        headerPhi->addInlineInput(backedgeDef);

        MOZ_ASSERT(slotIndex < pred->stackDepth());
        setSlot(slotIndex, headerPhi);
    }

    // This block is now a proper loop header.
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Convert this block into a loop header whose backedge is |newBackedge|,
// moving the backedge into the last predecessor slot and swapping each phi's
// operands to match the new predecessor ordering.
void
MBasicBlock::setLoopHeader(MBasicBlock* newBackedge)
{
    MOZ_ASSERT(!isLoopHeader());
    kind_ = LOOP_HEADER;

    size_t numPreds = numPredecessors();
    MOZ_ASSERT(numPreds != 0);

    size_t lastIndex = numPreds - 1;

    // Find the backedge among our predecessors; it must be present.
    size_t backedgeIndex = 0;
    while (getPredecessor(backedgeIndex) != newBackedge) {
        ++backedgeIndex;
        MOZ_ASSERT(backedgeIndex < numPreds);
    }

    // Set the loop backedge to be the last element in predecessors_.
    Swap(predecessors_[backedgeIndex], predecessors_[lastIndex]);

    // If we have phis, reorder their operands accordingly.
    if (!phisEmpty()) {
        getPredecessor(backedgeIndex)->setSuccessorWithPhis(this, backedgeIndex);
        getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);

        for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
            MPhi* phi = *iter;
            MDefinition* fromBackedgeSlot = phi->getOperand(backedgeIndex);
            MDefinition* fromLastSlot = phi->getOperand(lastIndex);
            phi->replaceOperand(backedgeIndex, fromLastSlot);
            phi->replaceOperand(lastIndex, fromBackedgeSlot);
        }
    }

    MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
    MOZ_ASSERT(backedge() == newBackedge);
}
// Finish a pending asm.js loop header by attaching the backedge |pred| and
// appending each slot's exit definition to the corresponding header phi.
// asm.js phis are fully typed up front, so no type changes can occur here.
// Returns false on OOM (from the predecessor append).
bool
MBasicBlock::setBackedgeAsmJS(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(stackDepth() == pred->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++) {
        MPhi *entryDef = *phi;
        MDefinition *exitDef = pred->getSlot(entryDef->slot());

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        // Assert that the phi already has the correct type.
        JS_ASSERT(entryDef->type() == exitDef->type());
        JS_ASSERT(entryDef->type() != MIRType_Value);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        // MBasicBlock::NewAsmJS calls reserveLength(2) for loop header phis.
        entryDef->addInput(exitDef);

        JS_ASSERT(entryDef->slot() < pred->stackDepth());
        setSlot(entryDef->slot(), entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Propagate loop-header phis into this exit block. Any slot still holding a
// phi's entry definition missed the rewrite done by setBackedge() — exits are
// not captured in resume points — so redirect the slot to the phi itself.
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
    for (MPhiIterator iter = header->phisBegin(); iter != header->phisEnd(); iter++) {
        MPhi *headerPhi = *iter;
        JS_ASSERT(headerPhi->numOperands() == 2);

        // Operand 0 of a loop-header phi is always the entry definition.
        uint32_t slot = headerPhi->slot();
        if (getSlot(slot) == headerPhi->getOperand(0))
            setSlot(slot, headerPhi);
    }
}
// Finish a pending loop header by attaching the backedge |pred|: for every
// stack slot, append the exit definition from |pred| as the second operand of
// the slot's entry phi. Returns false on OOM.
bool
MBasicBlock::setBackedge(MBasicBlock *pred)
{
    // Predecessors must be finished, and at the correct stack depth.
    JS_ASSERT(lastIns_);
    JS_ASSERT(pred->lastIns_);
    JS_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

    // We must be a pending loop header
    JS_ASSERT(kind_ == PENDING_LOOP_HEADER);

    // Add exit definitions to each corresponding phi at the entry.
    for (uint32_t i = 0; i < pred->stackDepth(); i++) {
        MPhi *entryDef = entryResumePoint()->getOperand(i)->toPhi();
        MDefinition *exitDef = pred->slots_[i];

        // Assert that we already placed phis for each slot.
        JS_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        if (!entryDef->addInput(exitDef))
            return false;

        setSlot(i, entryDef);
    }

    // We are now a loop header proper
    kind_ = LOOP_HEADER;

    return predecessors_.append(pred);
}
// Unroll the loop described by |bound| a fixed number of times. The loop must
// consist of exactly two blocks (header + backedge body) whose instructions
// are all clonable; otherwise the pass bails out without modifying the graph.
// On success the graph contains an unrolled copy of the loop guarded by the
// inequality "remaining iterations >= UnrollCount", followed by the original
// loop which handles the leftover iterations.
void
LoopUnroller::go(LoopIterationBound *bound)
{
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    JS_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest *test = bound->test;
    if (header->lastIns() != test)
        return;
    // The non-backedge successor of the test must be the loop exit, i.e. it
    // must come after the backedge in RPO order.
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    JS_ASSERT(backedge->phisEmpty());

    MBasicBlock *bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock *block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction *ins = *iter;
            if (ins->canClone())
                continue;
            // Control instructions (and interrupt checks) are regenerated
            // rather than cloned, so they are allowed.
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef DEBUG
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.

    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    CompileInfo &info = oldPreheader->info();
    if (header->trackedSite().pc()) {
        unrolledHeader =
            MBasicBlock::New(graph, nullptr, info, oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge =
            MBasicBlock::New(graph, nullptr, info, unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader =
            MBasicBlock::New(graph, nullptr, info, unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        // NOTE(review): no tracked bytecode site — presumably the asm.js path;
        // confirm against MBasicBlock::NewAsmJS callers.
        unrolledHeader = MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }

    unrolledHeader->discardAllResumePoints();
    unrolledBackedge->discardAllResumePoints();
    newPreheader->discardAllResumePoints();

    // Insert new blocks at their RPO position, and update block ids.
    graph.insertBlockAfter(oldPreheader, unrolledHeader);
    graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
    graph.insertBlockAfter(unrolledBackedge, newPreheader);
    graph.renumberBlocksAfter(oldPreheader);

    if (!unrolledDefinitions.init())
        CrashAtUnhandlableOOM("LoopUnroller::go");

    // Add phis to the unrolled loop header which correspond to the phis in the
    // original loop header.
    JS_ASSERT(header->getPredecessor(0) == oldPreheader);
    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
        MPhi *old = *iter;
        JS_ASSERT(old->numOperands() == 2);
        MPhi *phi = MPhi::New(alloc);
        phi->setResultType(old->type());
        phi->setResultTypeSet(old->resultTypeSet());
        phi->setRange(old->range());
        unrolledHeader->addPhi(phi);

        if (!phi->reserveLength(2))
            CrashAtUnhandlableOOM("LoopUnroller::go");

        // Set the first input for the phi for now. We'll set the second after
        // finishing the unroll.
        phi->addInput(old->getOperand(0));

        // The old phi will now take the value produced by the unrolled loop.
        old->replaceOperand(0, phi);

        if (!unrolledDefinitions.putNew(old, phi))
            CrashAtUnhandlableOOM("LoopUnroller::go");
    }

    // The loop condition can bail out on e.g. integer overflow, so make a
    // resume point based on the initial resume point of the original header.
    MResumePoint *headerResumePoint = header->entryResumePoint();
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
        unrolledHeader->setEntryResumePoint(rp);

        // Perform an interrupt check at the start of the unrolled loop.
        unrolledHeader->add(MInterruptCheck::New(alloc));
    }

    // Generate code for the test in the unrolled loop.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        MDefinition *replacement = getReplacementDefinition(def);
        remainingIterationsInequality.replaceTerm(i, replacement);
    }
    MCompare *compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
    MTest *unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
    unrolledHeader->end(unrolledTest);

    // Make an entry resume point for the unrolled body. The unrolled header
    // does not have side effects on stack values, even if the original loop
    // header does, so use the same resume point as for the unrolled header.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
        unrolledBackedge->setEntryResumePoint(rp);
    }

    // Make an entry resume point for the new preheader. There are no
    // instructions which use this but some other stuff wants one to be here.
    if (headerResumePoint) {
        MResumePoint *rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
        newPreheader->setEntryResumePoint(rp);
    }

    // Generate the unrolled code.
    JS_ASSERT(UnrollCount > 1);
    size_t unrollIndex = 0;
    while (true) {
        // Clone the contents of the original loop into the unrolled loop body.
        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
            MBasicBlock *block = bodyBlocks[i];
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                MInstruction *ins = *iter;
                if (ins->canClone()) {
                    makeReplacementInstruction(*iter);
                } else {
                    // Control instructions are handled separately.
                    JS_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                }
            }
        }

        // Compute the value of each loop header phi after the execution of
        // this unrolled iteration.
        MDefinitionVector phiValues(alloc);
        JS_ASSERT(header->getPredecessor(1) == backedge);
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            MDefinition *oldInput = old->getOperand(1);
            if (!phiValues.append(getReplacementDefinition(oldInput)))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }

        unrolledDefinitions.clear();

        if (unrollIndex == UnrollCount - 1) {
            // We're at the end of the last unrolled iteration, set the
            // backedge input for the unrolled loop phis.
            size_t phiIndex = 0;
            for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
                MPhi *phi = *iter;
                phi->addInput(phiValues[phiIndex++]);
            }
            JS_ASSERT(phiIndex == phiValues.length());
            break;
        }

        // Update the map for the phis in the next iteration.
        size_t phiIndex = 0;
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi *old = *iter;
            if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
                CrashAtUnhandlableOOM("LoopUnroller::go");
        }
        JS_ASSERT(phiIndex == phiValues.length());

        unrollIndex++;
    }

    MGoto *backedgeJump = MGoto::New(alloc, unrolledHeader);
    unrolledBackedge->end(backedgeJump);

    // Place the old preheader before the unrolled loop.
    JS_ASSERT(oldPreheader->lastIns()->isGoto());
    oldPreheader->discardLastIns();
    oldPreheader->end(MGoto::New(alloc, unrolledHeader));

    // Place the new preheader before the original loop.
    newPreheader->end(MGoto::New(alloc, header));

    // Cleanup the MIR graph.
    if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
        CrashAtUnhandlableOOM("LoopUnroller::go");
    header->replacePredecessor(oldPreheader, newPreheader);
    oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
    newPreheader->setSuccessorWithPhis(header, 0);
    unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
}
// Eliminate redundant and unobservable phis from |graph|. A redundant phi is
// one like b = phi(a, a) or b = phi(a, b), replaceable by a; an unobservable
// phi is one whose value is never used. Uses a worklist fixpoint: the
// "unused" flag means "not yet proven live", and the "in worklist" bit marks
// phis queued for liveness propagation. Returns false on cancellation or OOM.
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph)
{
    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        // Re-flag as unused and requeue so the phi's liveness
                        // is re-derived after the replacement below.
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one that whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        // Re-flag as unused and requeue so liveness is
                        // re-derived after the replacement below.
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}