Example No. 1
bool
EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
{
    const JitCompartment* jitCompartment = GetJitContext()->compartment->jitCompartment();
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eager Simd Unbox"))
            return false;

        for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            if (!ins->isSimdUnbox())
                continue;

            MSimdUnbox* unbox = ins->toSimdUnbox();
            if (!unbox->input()->isPhi())
                continue;

            MPhi* phi = unbox->input()->toPhi();
            if (!CanUnboxSimdPhi(jitCompartment, phi, unbox->simdType()))
                continue;

            UnboxSimdPhi(jitCompartment, graph, phi, unbox->simdType());
        }
    }

    return true;
}
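
A rough sketch of the shape of the check that gates this pass: CanUnboxSimdPhi is not shown above, but one plausible precondition for eagerly unboxing through a phi is that every incoming value boxes the payload type the unbox expects, so the box/unbox pair can be cancelled on each edge. The toy Node, PhiNode and CanUnboxPhi names below are hypothetical illustrations, not SpiderMonkey MIR types.

#include <cstdio>
#include <vector>

// Hypothetical toy nodes, not SpiderMonkey MIR: a value is either a Box of
// some payload type or something opaque, and a phi merges several values.
struct Node {
    enum Kind { Box, Other } kind;
    int payloadType;  // meaningful only when kind == Box
};

struct PhiNode {
    std::vector<Node*> inputs;
};

// Assumed flavor of the check: unboxing through the phi is only worthwhile
// when every incoming value boxes the type the unbox expects.
static bool
CanUnboxPhi(const PhiNode& phi, int expectedType)
{
    for (const Node* in : phi.inputs) {
        if (in->kind != Node::Box || in->payloadType != expectedType)
            return false;
    }
    return true;
}

int main()
{
    Node a{Node::Box, /* payloadType = */ 1};
    Node b{Node::Box, /* payloadType = */ 1};
    PhiNode phi{{&a, &b}};
    std::printf("can unbox eagerly: %s\n", CanUnboxPhi(phi, 1) ? "yes" : "no");
}

Example No. 2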
bool
FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph)
{
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)"))
            return false;

        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph.alloc().ensureBallast())
                return false;

            if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)"))
                return false;

            if (i->isAdd())
                AnalyzeAdd(graph.alloc(), i->toAdd());
        }
    }
    return true;
}
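
AnalyzeAdd itself is not shown. The general idea behind folding linear arithmetic constants is re-association: an expression such as (x + c1) + c2 can be rewritten as x + (c1 + c2) so that only a single constant survives. The stand-alone sketch below applies that rewrite to a toy expression tree; the Node type and Make* helpers are hypothetical and unrelated to MIR.

#include <cassert>
#include <cstdio>
#include <memory>
#include <utility>

// Hypothetical toy AST (not MIR): Var(x), Const(c) and Add(lhs, rhs).
struct Node {
    enum Kind { Var, Const, Add } kind;
    int value;                       // constant value (Const) or variable id (Var)
    std::unique_ptr<Node> lhs, rhs;  // operands (Add)
};

static std::unique_ptr<Node>
MakeConst(int v)
{
    auto n = std::make_unique<Node>();
    n->kind = Node::Const;
    n->value = v;
    return n;
}

static std::unique_ptr<Node>
MakeVar(int id)
{
    auto n = std::make_unique<Node>();
    n->kind = Node::Var;
    n->value = id;
    return n;
}

static std::unique_ptr<Node>
MakeAdd(std::unique_ptr<Node> a, std::unique_ptr<Node> b)
{
    auto n = std::make_unique<Node>();
    n->kind = Node::Add;
    n->lhs = std::move(a);
    n->rhs = std::move(b);
    return n;
}

// Re-associate ((x + c1) + c2) into (x + (c1 + c2)), bottom-up.
static std::unique_ptr<Node>
FoldLinearAdds(std::unique_ptr<Node> n)
{
    if (n->kind != Node::Add)
        return n;
    n->lhs = FoldLinearAdds(std::move(n->lhs));
    n->rhs = FoldLinearAdds(std::move(n->rhs));

    // Pattern: Add(Add(x, Const c1), Const c2)  =>  Add(x, Const(c1 + c2)).
    if (n->rhs->kind == Node::Const && n->lhs->kind == Node::Add &&
        n->lhs->rhs->kind == Node::Const)
    {
        int folded = n->lhs->rhs->value + n->rhs->value;
        return MakeAdd(std::move(n->lhs->lhs), MakeConst(folded));
    }
    return n;
}

int main()
{
    // ((v0 + 3) + 4)  ==>  (v0 + 7)
    auto e = FoldLinearAdds(MakeAdd(MakeAdd(MakeVar(0), MakeConst(3)), MakeConst(4)));
    assert(e->kind == Node::Add && e->rhs->kind == Node::Const);
    std::printf("folded constant: %d\n", e->rhs->value);  // prints 7
}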
Example No. 3
// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool
ion::EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph)
{
    // Traverse in postorder so that we hit uses before definitions.
    // Traverse instruction list backwards for the same reason.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
            return false;

        // Remove unused instructions.
        for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
            if (!inst->isEffectful() && !inst->hasUses() && !inst->isGuard() &&
                !inst->isControlInstruction()) {
                inst = block->discardAt(inst);
            } else {
                inst++;
            }
        }
    }

    return true;
}
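
The pass reduces to a simple pattern: walk each block's instruction list backwards and discard any instruction that is neither effectful nor used. The self-contained sketch below replays that pattern on a toy std::list of instructions; the Inst struct and its fields are assumptions made for illustration, not Ion types (real MIR instructions expose isEffectful(), hasUses() and isGuard() instead of plain fields).

#include <cstdio>
#include <iterator>
#include <list>
#include <string>

// Hypothetical toy instruction record.
struct Inst {
    std::string name;
    int uses;        // number of consumers of this instruction's value
    bool effectful;  // stores, calls, ...: must never be discarded
};

// Walk the block backwards (uses before definitions, as in the pass above)
// and discard every instruction that is not effectful and has no uses.
static void
EliminateDead(std::list<Inst>& block)
{
    for (auto it = block.rbegin(); it != block.rend(); ) {
        if (!it->effectful && it->uses == 0) {
            // std::next(it).base() is the forward iterator naming *it;
            // erase() returns the element after it, which converts back
            // into the next reverse position to visit.
            it = decltype(it)(block.erase(std::next(it).base()));
        } else {
            ++it;
        }
    }
}

int main()
{
    std::list<Inst> block = {
        {"add1",  /* uses = */ 0, /* effectful = */ false},  // dead: discarded
        {"load",  /* uses = */ 1, /* effectful = */ false},  // live: kept
        {"store", /* uses = */ 0, /* effectful = */ true},   // effectful: kept
    };
    EliminateDead(block);
    for (const Inst& ins : block)
        std::printf("kept: %s\n", ins.name.c_str());
}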
Example No. 4
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the consumer is a Phi, then we should dominate the
                // predecessor from which the value comes.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // The instruction has uses, but none of them are live, so recover
            // it on all the bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, "  No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This check is temporarily placed here because the code above
            // performs a form of dead code elimination (moving instructions
            // with no live uses to the bailout paths), which was folded into
            // this Sink phase.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that
            // the side-effect is not observed. In the meantime, we just
            // inhibit this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if
            // the loop is itself inside an if, then we still want to move the
            // computation under that if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the
            // dominator of the uses and the original instruction. This
            // prevents moving the computation of the arguments into an inline
            // function when there is no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common
            // dominator for all the uses of this instruction, or if the
            // common dominator corresponds to the block of the current
            // instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instructions which can be recovered on bailout and which
            // are sinkable can be moved into blocks below, while the resume
            // points are filled with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for
            // folding test conditions, then the block has no resume point and
            // has multiple predecessors. In that case, we cannot safely move
            // a bailing instruction into such a block, as we have no way to
            // bail out.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, "  Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, "  into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block
            // except for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block into which the
                // instruction is being moved.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction to a different block, we should
            // verify that we do not carry over a resume point which would
            // refer to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the
            // instruction into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
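
The pivotal helper here is CommonDominator, which folds the blocks of all live uses into a single dominating block by walking up the immediate-dominator chain. The sketch below shows that walk on a hypothetical Block struct; the domIndex/numDominated interval trick is one common way to answer dominance queries and is only an assumed layout, not MBasicBlock's actual fields.

#include <cstdio>
#include <initializer_list>

// Hypothetical block layout: just enough dominator-tree bookkeeping to answer
// dominance queries.
struct Block {
    int id;
    Block* idom;       // immediate dominator; the entry block points to itself
    int domIndex;      // pre-order index in the dominator tree
    int numDominated;  // number of blocks strictly dominated by this one
};

// With pre-order dominator-tree indices, A dominates B exactly when B's index
// falls inside A's subtree interval.
static bool
Dominates(const Block* a, const Block* b)
{
    return a->domIndex <= b->domIndex &&
           b->domIndex <= a->domIndex + a->numDominated;
}

// Same walk as the CommonDominator helper used by Sink: keep climbing the
// immediate-dominator chain until the running candidate dominates the new
// use block. The climb eventually reaches the entry block, which dominates
// everything, so the loop terminates.
static Block*
CommonDominator(Block* existing, Block* use)
{
    if (!existing)
        return use;
    while (!Dominates(existing, use))
        existing = existing->idom;
    return existing;
}

int main()
{
    // Diamond CFG: entry -> (thenB | elseB) -> join. All three successors are
    // immediately dominated by entry.
    Block entry{0, nullptr, 0, 3};
    entry.idom = &entry;
    Block thenB{1, &entry, 1, 0};
    Block elseB{2, &entry, 2, 0};
    Block join{3, &entry, 3, 0};

    Block* dom = nullptr;
    for (Block* useBlock : {&thenB, &elseB, &join})
        dom = CommonDominator(dom, useBlock);
    std::printf("common dominator: block %d\n", dom->id);  // block 0 (entry)
}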
Example No. 5
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }
        index++;
    }

    return true;
}
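
The pre-order numbering at the end of BuildDominatorTree is what makes dominance queries cheap: every dominator-tree subtree ends up owning a contiguous range of indices. The sketch below repeats that worklist-driven numbering on a hypothetical DomNode type.

#include <cstdio>
#include <vector>

// Hypothetical dominator-tree node; only the fields this sketch needs.
struct DomNode {
    int id;
    std::vector<DomNode*> children;  // immediately dominated blocks
    size_t domIndex;                 // filled in by AssignDomIndices
};

// Explicit-worklist pre-order numbering, as in BuildDominatorTree: pop a
// node, give it the next index, then push its children.
static void
AssignDomIndices(DomNode* root)
{
    std::vector<DomNode*> worklist{root};
    size_t index = 0;
    while (!worklist.empty()) {
        DomNode* node = worklist.back();
        worklist.pop_back();
        node->domIndex = index++;
        // Push children in reverse so the leftmost child is numbered first.
        for (auto it = node->children.rbegin(); it != node->children.rend(); ++it)
            worklist.push_back(*it);
    }
}

int main()
{
    //      a
    //     / \
    //    b   c
    //    |
    //    d
    DomNode a{0, {}, 0}, b{1, {}, 0}, c{2, {}, 0}, d{3, {}, 0};
    a.children = {&b, &c};
    b.children = {&d};
    AssignDomIndices(&a);
    // Pre-order: a=0, b=1, d=2, c=3; each subtree owns a contiguous range.
    std::printf("a=%zu b=%zu d=%zu c=%zu\n", a.domIndex, b.domIndex, d.domIndex, c.domIndex);
}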
Example No. 6
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph)
{
    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all phis as unused; only the observable phis will be
            // marked as used when processed by the worklist below.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag the phi as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
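
IsPhiRedundant is used above but not shown. The usual meaning is that a phi such as b = phi(a, a) or b = phi(a, b) carries no information and can be replaced by a. The stand-alone approximation below works on a hypothetical Def type rather than MPhi/MDefinition.

#include <cstdio>
#include <vector>

// Hypothetical SSA definition record.
struct Def {
    int id;
    bool isPhi;
    std::vector<Def*> operands;  // meaningful only for phis
};

// If every operand is either the phi itself or one single other definition,
// the phi folds to that definition. Return it, or nullptr if the phi is
// actually needed.
static Def*
IsPhiRedundant(Def* phi)
{
    Def* candidate = nullptr;
    for (Def* op : phi->operands) {
        if (op == phi)
            continue;                // self-reference through a backedge
        if (!candidate)
            candidate = op;          // first distinct input
        else if (op != candidate)
            return nullptr;          // two distinct inputs: keep the phi
    }
    return candidate;
}

int main()
{
    Def a{0, false, {}};
    Def b{1, true, {}};
    b.operands = {&a, &b};           // b = phi(a, b), e.g. a loop-carried copy
    if (Def* r = IsPhiRedundant(&b))
        std::printf("phi %d folds to def %d\n", b.id, r->id);  // folds to def 0
}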
Example No. 7
// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
ion::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isFolded())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPassArg() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->node()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->node()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // Store an undefined value in place of all dead resume point
                // operands. Making any such substitution can in general alter
                // the interpreter's behavior, even though the code is dead, as
                // the interpreter will still execute opcodes whose effects
                // cannot be observed. If the undefined value were to flow to,
                // say, a dead property access the interpreter could throw an
                // exception; we avoid this problem by removing dead operands
                // before removing dead code.
                MConstant *constant = MConstant::New(UndefinedValue());
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}
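
The heart of the analysis is the pair of scans over an instruction's uses: first find the id of the last use by a real definition, then treat any resume-point capture after that id as dead. The sketch below replays those two scans on a toy Use record (an assumption made for illustration; it ignores the cross-block, box/unbox and parameter special cases handled above).

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical use record for a single instruction's value: either a real
// definition consuming it, or a resume point capturing it, at some
// instruction id within the same block.
struct Use {
    bool isResumePoint;
    uint32_t atId;
};

// First scan: the id of the last consuming definition.
static uint32_t
LastDefinitionUse(const std::vector<Use>& uses)
{
    uint32_t maxDefinition = 0;
    for (const Use& use : uses) {
        if (!use.isResumePoint)
            maxDefinition = std::max(maxDefinition, use.atId);
    }
    return maxDefinition;
}

int main()
{
    std::vector<Use> uses = {
        {false, 7},   // real use at instruction 7
        {true,  5},   // resume point before the last real use: keep it
        {true, 12},   // resume point after it: operand can become undefined
    };

    // Second scan: resume-point captures after the last real use only extend
    // the value's lifetime artificially, so they are the candidates for the
    // "replace with undefined" rewrite performed by the pass.
    uint32_t last = LastDefinitionUse(uses);
    for (const Use& use : uses) {
        if (use.isResumePoint && use.atId > last)
            std::printf("resume-point operand at id %u is dead\n", use.atId);
    }
}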
Example No. 8
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph.  A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a.  An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later.  When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another.  In that case, we must be more conservative.  The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability.  The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated.  Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable.  Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all phis as unused; only the observable phis will be
            // marked as used when processed by the worklist below.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag the phi as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
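
Stripped of the redundancy handling, the worklist phase of EliminatePhis is a mark phase: observable phis are seeded as live and liveness propagates backwards through phi operands, after which unmarked phis are swept. The sketch below shows just that propagation on a hypothetical Phi type.

#include <cstdio>
#include <vector>

// Hypothetical phi node for the mark phase only; the redundancy handling and
// the "in worklist" flag of the real pass are omitted.
struct Phi {
    int id;
    bool live;
    std::vector<Phi*> operands;  // operands that are themselves phis
};

// Seed the worklist with the observable phis, then propagate liveness
// backwards through phi operands; anything still unmarked afterwards would be
// swept, much like the final loop of EliminatePhis.
static void
MarkLivePhis(const std::vector<Phi*>& observable)
{
    std::vector<Phi*> worklist;
    for (Phi* phi : observable) {
        phi->live = true;
        worklist.push_back(phi);
    }
    while (!worklist.empty()) {
        Phi* phi = worklist.back();
        worklist.pop_back();
        for (Phi* in : phi->operands) {
            if (!in->live) {
                in->live = true;       // a live phi keeps its inputs alive
                worklist.push_back(in);
            }
        }
    }
}

int main()
{
    Phi a{0, false, {}}, b{1, false, {}}, c{2, false, {}};
    b.operands = {&a};                 // the observable phi b reads phi a
    // c feeds nothing observable, so it stays dead.
    MarkLivePhis({&b});
    std::printf("a:%d b:%d c:%d\n", a.live, b.live, c.live);  // prints a:1 b:1 c:0
}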