Example #1
bool
EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
{
    const JitCompartment* jitCompartment = GetJitContext()->compartment->jitCompartment();
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eager Simd Unbox"))
            return false;

        for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            if (!ins->isSimdUnbox())
                continue;

            MSimdUnbox* unbox = ins->toSimdUnbox();
            if (!unbox->input()->isPhi())
                continue;

            MPhi* phi = unbox->input()->toPhi();
            if (!CanUnboxSimdPhi(jitCompartment, phi, unbox->simdType()))
                continue;

            UnboxSimdPhi(jitCompartment, graph, phi, unbox->simdType());
        }
    }

    return true;
}
Example #2
bool
jit::LICM(MIRGenerator *mir, MIRGraph &graph)
{
    JitSpew(JitSpew_LICM, "Beginning LICM pass");

    // Iterate in RPO to visit outer loops before inner loops. We'd hoist the
    // same things either way, but outer first means we do a little less work.
    for (auto i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
        MBasicBlock *header = *i;
        if (!header->isLoopHeader())
            continue;

        bool canOsr;
        MarkLoopBlocks(graph, header, &canOsr);

        // Hoisting out of a loop that has an entry from the OSR block in
        // addition to its normal entry is tricky. In theory we could clone
        // the instruction and insert phis.
        if (!canOsr)
            VisitLoop(graph, header);
        else
            JitSpew(JitSpew_LICM, "  Skipping loop with header block%u due to OSR", header->id());

        UnmarkLoopBlocks(graph, header);

        if (mir->shouldCancel("LICM (main loop)"))
            return false;
    }

    return true;
}
Example #3
void
ion::AssertGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (uint32_t i = 0; i < ins->numOperands(); i++)
                JS_ASSERT(CheckMarkedAsUse(*ins, ins->getOperand(i)));
        }
    }

    JS_ASSERT(graph.numBlocks() == count);

    AssertReversePostOrder(graph);
#endif
}
Example #4
MBasicBlock::MBasicBlock(MIRGraph &graph, CompileInfo &info, const BytecodeSite &site, Kind kind)
  : unreachable_(false),
    graph_(graph),
    info_(info),
    predecessors_(graph.alloc()),
    stackPosition_(info_.firstStackSlot()),
    lastIns_(nullptr),
    pc_(site.pc()),
    lir_(nullptr),
    start_(nullptr),
    entryResumePoint_(nullptr),
    successorWithPhis_(nullptr),
    positionInPhiSuccessor_(0),
    kind_(kind),
    loopDepth_(0),
    mark_(false),
    immediatelyDominated_(graph.alloc()),
    immediateDominator_(nullptr),
    numDominated_(0),
    trackedSite_(site)
#if defined (JS_ION_PERF)
    , lineno_(0u),
    columnIndex_(0u)
#endif
{
}
Example #5
static void
VisitLoop(MIRGraph &graph, MBasicBlock *header)
{
    MInstruction *hoistPoint = header->loopPredecessor()->lastIns();

    JitSpew(JitSpew_LICM, "  Visiting loop with header block%u, hoisting to %s%u",
            header->id(), hoistPoint->opName(), hoistPoint->id());

    MBasicBlock *backedge = header->backedge();

    // This indicates whether the loop contains calls or other things which
    // clobber most or all floating-point registers. In such loops,
    // floating-point constants should not be hoisted unless it enables further
    // hoisting.
    bool hasCalls = LoopContainsPossibleCall(graph, header, backedge);

    for (auto i(graph.rpoBegin(header)); ; ++i) {
        MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
        MBasicBlock *block = *i;
        if (!block->isMarked())
            continue;

        VisitLoopBlock(block, header, hoistPoint, hasCalls);

        if (block == backedge)
            break;
    }
}
Example #6
bool
ion::RenumberBlocks(MIRGraph &graph)
{
    size_t id = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
        block->setId(id++);

    return true;
}
Example #7
static void
AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
{
    // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
    // since the users of the BitAnd include heap accesses. This will expose
    // the redundancy for GVN when expressions like this:
    //   a&m
    //   (a+1)&m,
    //   (a+2)&m,
    // are transformed into this:
    //   a&m
    //   (a&m)+1
    //   (a&m)+2
    // and it will allow the constants to be folded by the
    // EffectiveAddressAnalysis pass.
    //
    // Putting the add on the outside might seem like it exposes other users of
    // the expression to the possibility of i32 overflow, if we aren't in wasm
    // and they aren't naturally truncating. However, since we use MAdd::New
    // with MIRType::Int32, we make sure that the value is truncated, just as it
    // would be by the MBitAnd.

    MOZ_ASSERT(IsCompilingWasm());

    if (!ptr->isBitAnd())
        return;

    MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
    MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
    if (lhs->isConstant())
        mozilla::Swap(lhs, rhs);
    if (!lhs->isAdd() || !rhs->isConstant())
        return;

    MDefinition* op0 = lhs->toAdd()->getOperand(0);
    MDefinition* op1 = lhs->toAdd()->getOperand(1);
    if (op0->isConstant())
        mozilla::Swap(op0, op1);
    if (!op1->isConstant())
        return;

    uint32_t i = op1->toConstant()->toInt32();
    uint32_t m = rhs->toConstant()->toInt32();
    if (!IsAlignmentMask(m) || (i & m) != i)
        return;

    // The pattern was matched! Produce the replacement expression.
    MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), and_);
    MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), add);
    ptr->replaceAllUsesWith(add);
    ptr->block()->discard(ptr->toBitAnd());
}
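The rewrite above only fires when the BitAnd mask is an alignment mask and the added constant fits entirely inside it. Below is a minimal, self-contained sketch of those two guards on plain uint32_t values; the helper names are local stand-ins for illustration, not the SpiderMonkey/mfbt helpers.

#include <cstdint>
#include <cstdio>

// Stand-in for the alignment-mask test: m must be a contiguous run of low
// bits, i.e. m + 1 is a power of two (0x0, 0x1, 0x3, ..., 0x0fff, ...).
static bool LooksLikeAlignmentMask(uint32_t m)
{
    return ((m + 1) & m) == 0;
}

// Stand-in for the (i & m) == i guard: the constant offset must not touch
// any bit that the mask would clear.
static bool OffsetFitsInMask(uint32_t i, uint32_t m)
{
    return (i & m) == i;
}

int main()
{
    printf("%d\n", LooksLikeAlignmentMask(0x0fff));   // 1
    printf("%d\n", LooksLikeAlignmentMask(0x0ffe));   // 0: bit 0 is missing
    printf("%d\n", OffsetFitsInMask(8, 0x0fff));      // 1: rewrite may apply
    printf("%d\n", OffsetFitsInMask(0x1008, 0x0fff)); // 0: offset escapes the mask
    return 0;
}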
Example #8
ValueNumberer::ValueNumberer(MIRGenerator* mir, MIRGraph& graph)
  : mir_(mir), graph_(graph),
    values_(graph.alloc()),
    deadDefs_(graph.alloc()),
    remainingBlocks_(graph.alloc()),
    nextDef_(nullptr),
    totalNumVisited_(0),
    rerun_(false),
    blocksRemoved_(false),
    updateAliasAnalysis_(false),
    dependenciesBroken_(false)
{}
Example #9
void
ion::AssertExtendedGraphCoherency(MIRGraph &graph)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split)

#ifdef DEBUG
    AssertGraphCoherency(graph);

    uint32_t idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->id() == idx++);

        // No critical edges:
        if (block->numSuccessors() > 1)
            for (size_t i = 0; i < block->numSuccessors(); i++)
                JS_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);

        if (block->isLoopHeader()) {
            JS_ASSERT(block->numPredecessors() == 2);
            MBasicBlock *backedge = block->getPredecessor(1);
            JS_ASSERT(backedge->id() >= block->id());
            JS_ASSERT(backedge->numSuccessors() == 1);
            JS_ASSERT(backedge->getSuccessor(0) == *block);
        }

        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                JS_ASSERT(pred->successorWithPhis() == *block);
                JS_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }

        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;

        JS_ASSERT(successorWithPhis <= 1);
        JS_ASSERT_IF(successorWithPhis, block->successorWithPhis() != NULL);

        // I'd like to assert this, but it's not necessarily true.  Sometimes we set this
        // flag to non-NULL just because a successor has multiple preds, even if it
        // does not actually have any phis.
        //
        // JS_ASSERT_IF(!successorWithPhis, block->successorWithPhis() == NULL);
    }
#endif
}
Example #10
bool
ion::BuildPhiReverseMapping(MIRGraph &graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    //   * Blocks with fewer than two predecessors cannot have phis.
    //   * Breaks. A break always has exactly one successor, and the break
    //             catch block has exactly one predecessor for each break, as
    //             well as a final predecessor for the actual loop exit.
    //   * Continues. A continue always has exactly one successor, and the
    //             continue catch block has exactly one predecessor for each
    //             continue, as well as a final predecessor for the actual
    //             loop continuation. The continue itself has exactly one
    //             successor.
    //   * An if. Each branch has exactly one predecessor.
    //   * A switch. Each branch has exactly one predecessor.
    //   * Loop tail. A new block is always created for the exit, and if a
    //             break statement is present, the exit block will forward
    //             directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numPredecessors() < 2) {
            JS_ASSERT(block->phisEmpty());
            continue;
        }

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock *pred = block->getPredecessor(j);

#ifdef DEBUG
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock *successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            JS_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}
Example #11
ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph, bool optimistic)
  : mir(mir),
    graph_(graph),
    values(graph.alloc()),
    pessimisticPass_(!optimistic),
    count_(0)
{ }
Example #12
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // Check that every block is visited after all its predecessors (except backedges).
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JS_ASSERT(!block->isMarked());

        for (size_t i = 0; i < block->numPredecessors(); i++) {
            MBasicBlock *pred = block->getPredecessor(i);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }

        block->mark();
    }

    graph.unmarkBlocks();
}
Example #13
bool
FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph)
{
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)"))
            return false;

        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph.alloc().ensureBallast())
                return false;

            if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)"))
                return false;

            if (i->isAdd())
                AnalyzeAdd(graph.alloc(), i->toAdd());
        }
    }
    return true;
}
Example #14
// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool
ion::EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph)
{
    // Traverse in postorder so that we hit uses before definitions.
    // Traverse instruction list backwards for the same reason.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
            return false;

        // Remove unused instructions.
        for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
            if (!inst->isEffectful() && !inst->hasUses() && !inst->isGuard() &&
                !inst->isControlInstruction()) {
                inst = block->discardAt(inst);
            } else {
                inst++;
            }
        }
    }

    return true;
}
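As a minimal, self-contained sketch (not SpiderMonkey code), here is the same idea on a toy instruction list: walking backwards, a pure instruction with no remaining uses is dropped, and dropping it can make its operands dead in turn, which is why the pass visits uses before definitions.

#include <cstdio>
#include <vector>

struct Ins {
    bool effectful;              // stands in for "observable side effects"
    std::vector<int> operands;   // indices of earlier instructions
};

int main()
{
    // 0: load  (effectful)   1: add 0,0 (pure)   2: mul 1,1 (pure, unused)
    // 3: store 0 (effectful)
    std::vector<Ins> code = {
        {true, {}}, {false, {0, 0}}, {false, {1, 1}}, {true, {0}},
    };

    std::vector<int> useCount(code.size(), 0);
    for (const Ins& ins : code)
        for (int op : ins.operands)
            useCount[op]++;

    std::vector<bool> dead(code.size(), false);
    // Reverse traversal: every use is seen (and possibly removed) before the
    // definition it points at, mirroring the postorder walk above.
    for (int i = int(code.size()) - 1; i >= 0; i--) {
        if (code[i].effectful || useCount[i] > 0)
            continue;
        dead[i] = true;
        for (int op : code[i].operands)
            useCount[op]--;      // removing this use may make the operand dead
    }

    for (size_t i = 0; i < code.size(); i++)
        printf("ins %zu: %s\n", i, dead[i] ? "eliminated" : "kept");
    // Expected: 0 kept, 1 eliminated, 2 eliminated, 3 kept.
    return 0;
}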
Example #15
// Test whether any instruction in the loop possiblyCalls().
static bool
LoopContainsPossibleCall(MIRGraph &graph, MBasicBlock *header, MBasicBlock *backedge)
{
    for (auto i(graph.rpoBegin(header)); ; ++i) {
        MOZ_ASSERT(i != graph.rpoEnd(), "Reached end of graph searching for blocks in loop");
        MBasicBlock *block = *i;
        if (!block->isMarked())
            continue;

        for (auto insIter(block->begin()), insEnd(block->end()); insIter != insEnd; ++insIter) {
            MInstruction *ins = *insIter;
            if (ins->possiblyCalls()) {
                JitSpew(JitSpew_LICM, "    Possible call found at %s%u", ins->opName(), ins->id());
                return true;
            }
        }

        if (block == backedge)
            break;
    }
    return false;
}
Example #16
static void
UnboxSimdPhi(const JitCompartment* jitCompartment, MIRGraph& graph, MPhi* phi, SimdType unboxType)
{
    TempAllocator& alloc = graph.alloc();

    // Unbox and replace all operands.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* op = phi->getOperand(i);
        MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
        op->block()->insertAtEnd(unbox);
        phi->replaceOperand(i, unbox);
    }

    // Change the MIRType of the Phi.
    MIRType mirType = SimdTypeToMIRType(unboxType);
    phi->setResultType(mirType);

    MBasicBlock* phiBlock = phi->block();
    MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
    MInstruction* at = phiBlock->safeInsertTop(atRecover);

    // Note, we capture the uses-list now, as new instructions are not visited.
    MUseIterator i(phi->usesBegin()), e(phi->usesEnd());

    // Add a MSimdBox, and replace all the Phi uses with it.
    JSObject* templateObject = jitCompartment->maybeGetSimdTemplateObjectFor(unboxType);
    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
    MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
    recoverBox->setRecoveredOnBailout();
    phiBlock->insertBefore(atRecover, recoverBox);

    MSimdBox* box = nullptr;
    while (i != e) {
        MUse* use = *i++;
        MNode* ins = use->consumer();

        if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
            (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
        {
            use->replaceProducer(recoverBox);
            continue;
        }

        if (!box) {
            box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
            phiBlock->insertBefore(at, box);
        }

        use->replaceProducer(box);
    }
}
Example #17
// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
bool
ion::SplitCriticalEdges(MIRGraph &graph)
{
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numSuccessors() < 2)
            continue;
        for (size_t i = 0; i < block->numSuccessors(); i++) {
            MBasicBlock *target = block->getSuccessor(i);
            if (target->numPredecessors() < 2)
                continue;

            // Create a new block inheriting from the predecessor.
            MBasicBlock *split = MBasicBlock::NewSplitEdge(graph, block->info(), *block);
            split->setLoopDepth(block->loopDepth());
            graph.insertBlockAfter(*block, split);
            split->end(MGoto::New(target));

            block->replaceSuccessor(i, split);
            target->replacePredecessor(*block, split);
        }
    }
    return true;
}
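A minimal standalone sketch (not SpiderMonkey code) of the definition above, on a toy CFG where block 0 branches to blocks 1 and 2 and block 1 falls through to block 2; only the edge 0 -> 2 is critical and would receive a split block.

#include <cstdio>
#include <vector>

int main()
{
    // succ[u] lists the successors of block u: 0 -> {1, 2}, 1 -> {2}.
    std::vector<std::vector<int>> succ = {{1, 2}, {2}, {}};

    std::vector<int> numPreds(succ.size(), 0);
    for (const auto& out : succ)
        for (int v : out)
            numPreds[v]++;

    for (size_t u = 0; u < succ.size(); u++) {
        for (int v : succ[u]) {
            // An edge is critical when its source has several successors and
            // its target has several predecessors.
            bool critical = succ[u].size() > 1 && numPreds[v] > 1;
            printf("edge %zu -> %d: %s\n", u, v,
                   critical ? "critical (split it)" : "not critical");
        }
    }
    return 0;
}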
Example #18
static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->immediateDominator() != NULL)
                    newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == NULL) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != NULL);
    }
#endif
}
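For illustration only, here is a self-contained toy version of the same fixed-point scheme (not the SpiderMonkey code): blocks are processed in RPO, the dominators of already-processed predecessors are intersected by walking up the tentative dominator tree, and the loop repeats until nothing changes.

#include <cstdio>
#include <vector>

// Walk both nodes up the tentative dominator tree until they meet; indices
// are RPO numbers, so the deeper node always has the larger index.
static int Intersect(const std::vector<int>& idom, int a, int b)
{
    while (a != b) {
        while (a > b)
            a = idom[a];
        while (b > a)
            b = idom[b];
    }
    return a;
}

int main()
{
    // Diamond CFG in RPO: 0 -> {1, 2}, 1 -> 3, 2 -> 3. preds[b] lists the
    // predecessors of block b.
    std::vector<std::vector<int>> preds = {{}, {0}, {0}, {1, 2}};
    std::vector<int> idom(preds.size(), -1);
    idom[0] = 0;   // The root only self-dominates.

    bool changed = true;
    while (changed) {
        changed = false;
        for (size_t b = 1; b < preds.size(); b++) {
            int newIdom = -1;
            for (int p : preds[b]) {
                if (idom[p] == -1)
                    continue;   // Predecessor not processed yet (e.g. a backedge).
                newIdom = (newIdom == -1) ? p : Intersect(idom, newIdom, p);
            }
            if (newIdom != -1 && newIdom != idom[b]) {
                idom[b] = newIdom;
                changed = true;
            }
        }
    }

    for (size_t b = 0; b < preds.size(); b++)
        printf("idom(block %zu) = block %d\n", b, idom[b]);
    // Expected: blocks 1, 2 and 3 are all immediately dominated by block 0.
    return 0;
}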
Example #19
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph.  A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a.  An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later.  When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another.  In that case, we must be more conservative.  The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability.  The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated.  Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable.  Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused; only observable phis will be marked as used
            // when processed by the worklist.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
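A minimal standalone sketch (not SpiderMonkey's IsPhiRedundant) of the redundancy test described in the comment above, over a phi represented as its own id plus a list of operand ids.

#include <cstdio>
#include <vector>

// Return the single incoming value id if the phi with result id `self` is
// redundant (e.g. b = phi(a, a) or b = phi(a, b)), or -1 if it genuinely
// merges distinct values.
static int RedundantOperand(int self, const std::vector<int>& operands)
{
    int first = -1;
    for (int op : operands) {
        if (op == self)
            continue;            // A self-reference adds no new value.
        if (first == -1)
            first = op;
        else if (op != first)
            return -1;           // Two distinct incoming values: keep the phi.
    }
    return first;
}

int main()
{
    printf("%d\n", RedundantOperand(3, {1, 1}));   // 1: b = phi(a, a) -> a
    printf("%d\n", RedundantOperand(3, {1, 3}));   // 1: b = phi(a, b) -> a
    printf("%d\n", RedundantOperand(3, {1, 2}));   // -1: a real merge point
    return 0;
}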
Example #20
// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
ion::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isFolded())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPassArg() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->node()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->node()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // Store an undefined value in place of all dead resume point
                // operands. Making any such substitution can in general alter
                // the interpreter's behavior, even though the code is dead, as
                // the interpreter will still execute opcodes whose effects
                // cannot be observed. If the undefined value were to flow to,
                // say, a dead property access the interpreter could throw an
                // exception; we avoid this problem by removing dead operands
                // before removing dead code.
                MConstant *constant = MConstant::New(UndefinedValue());
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}
Example #21
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantBoundsChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediately dominated blocks to the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            if (!iter->isBoundsCheck()) {
                iter++;
                continue;
            }

            MBoundsCheck *check = iter->toBoundsCheck();

            // Replace all uses of the bounds check with the actual index.
            // This is (a) necessary, because we can coalesce two different
            // bounds checks and would otherwise use the wrong index and
            // (b) helps register allocation. Note that this is safe since
            // no other pass after bounds check elimination moves instructions.
            check->replaceAllUsesWith(check->index());

            if (!check->isMovable()) {
                iter++;
                continue;
            }

            MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, check, index);
            if (!dominating)
                return false;

            if (dominating == check) {
                // We didn't find a dominating bounds check.
                iter++;
                continue;
            }

            bool eliminated = false;
            if (!TryEliminateBoundsCheck(dominating, check, &eliminated))
                return false;

            if (eliminated)
                iter = check->block()->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
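The widening that TryEliminateBoundsCheck performs is not shown in this listing; the toy model below (an illustration, not the SpiderMonkey helper) captures the idea from the comment above: two checks of index + c against the same length, differing only in the constant c, collapse into one check whose offset range covers both.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Toy model of a bounds check on (index + offset) against a shared length,
// widened to cover a whole range of constant offsets [minOffset, maxOffset].
struct Check {
    int32_t minOffset;
    int32_t maxOffset;
};

// Fold the dominated check into the dominating one so that the surviving
// check covers the ranges of both.
static void MergeChecks(Check& dominating, const Check& redundant)
{
    dominating.minOffset = std::min(dominating.minOffset, redundant.minOffset);
    dominating.maxOffset = std::max(dominating.maxOffset, redundant.maxOffset);
}

int main()
{
    Check first = {1, 1};    // checks index + 1 < length
    Check second = {3, 3};   // checks index + 3 < length, dominated by `first`
    MergeChecks(first, second);
    printf("surviving check covers offsets [%d, %d]\n",
           first.minOffset, first.maxOffset);   // [1, 3]
    return 0;
}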
Example #22
bool
jit::ReorderInstructions(MIRGraph& graph)
{
    // Renumber all instructions in the graph as we go.
    size_t nextId = 0;

    // List of the headers of any loops we are in.
    Vector<MBasicBlock*, 4, SystemAllocPolicy> loopHeaders;

    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        // Renumber all definitions inside the basic blocks.
        for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
            iter->setId(nextId++);

        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
            iter->setId(nextId++);

        // Don't reorder instructions within entry blocks, which have special requirements.
        if (*block == graph.entryBlock() || *block == graph.osrBlock())
            continue;

        if (block->isLoopHeader()) {
            if (!loopHeaders.append(*block))
                return false;
        }

        MBasicBlock* innerLoop = loopHeaders.empty() ? nullptr : loopHeaders.back();

        MInstruction* top = block->safeInsertTop();
        MInstructionReverseIterator rtop = ++block->rbegin(top);
        for (MInstructionIterator iter(block->begin(top)); iter != block->end(); ) {
            MInstruction* ins = *iter;

            // Filter out some instructions which are never reordered.
            if (ins->isEffectful() ||
                !ins->isMovable() ||
                ins->resumePoint() ||
                ins == block->lastIns())
            {
                iter++;
                continue;
            }

            // Move constants with a single use in the current block to the
            // start of the block. Constants won't be reordered by the logic
            // below, as they have no inputs. Moving them up as high as
            // possible can allow their use to be moved up further, though,
            // and has no cost if the constant is emitted at its use.
            if (ins->isConstant() &&
                ins->hasOneUse() &&
                ins->usesBegin()->consumer()->block() == *block &&
                !IsFloatingPointType(ins->type()))
            {
                iter++;
                MInstructionIterator targetIter = block->begin();
                while (targetIter->isConstant() || targetIter->isInterruptCheck()) {
                    if (*targetIter == ins)
                        break;
                    targetIter++;
                }
                MoveBefore(*block, *targetIter, ins);
                continue;
            }

            // Look for inputs where this instruction is the last use of that
            // input. If we move this instruction up, the input's lifetime will
            // be shortened, modulo resume point uses (which don't need to be
            // stored in a register, and can be handled by the register
            // allocator by just spilling at some point with no reload).
            Vector<MDefinition*, 4, SystemAllocPolicy> lastUsedInputs;
            for (size_t i = 0; i < ins->numOperands(); i++) {
                MDefinition* input = ins->getOperand(i);
                if (!input->isConstant() && IsLastUse(ins, input, innerLoop)) {
                    if (!lastUsedInputs.append(input))
                        return false;
                }
            }

            // Don't try to move instructions which aren't the last use of any
            // of their inputs (we really ought to move these down instead).
            if (lastUsedInputs.length() < 2) {
                iter++;
                continue;
            }

            MInstruction* target = ins;
            for (MInstructionReverseIterator riter = ++block->rbegin(ins); riter != rtop; riter++) {
                MInstruction* prev = *riter;
                if (prev->isInterruptCheck())
                    break;

                // The instruction can't be moved before any of its uses.
                bool isUse = false;
                for (size_t i = 0; i < ins->numOperands(); i++) {
                    if (ins->getOperand(i) == prev) {
                        isUse = true;
                        break;
                    }
                }
                if (isUse)
                    break;

                // The instruction can't be moved before an instruction that
                // stores to a location read by the instruction.
                if (prev->isEffectful() &&
                    (ins->getAliasSet().flags() & prev->getAliasSet().flags()) &&
                    ins->mightAlias(prev) != MDefinition::AliasType::NoAlias)
                {
                    break;
                }

                // Make sure the instruction will still be the last use of one
                // of its inputs when moved up this far.
                for (size_t i = 0; i < lastUsedInputs.length(); ) {
                    bool found = false;
                    for (size_t j = 0; j < prev->numOperands(); j++) {
                        if (prev->getOperand(j) == lastUsedInputs[i]) {
                            found = true;
                            break;
                        }
                    }
                    if (found) {
                        lastUsedInputs[i] = lastUsedInputs.back();
                        lastUsedInputs.popBack();
                    } else {
                        i++;
                    }
                }
                if (lastUsedInputs.length() < 2)
                    break;

                // We can move the instruction before this one.
                target = prev;
            }

            iter++;
            MoveBefore(*block, target, ins);
        }

        if (block->isLoopBackedge())
            loopHeaders.popBack();
    }

    return true;
}
Example #23
bool
ion::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }
        index++;
    }

    return true;
}
Example #24
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph)
{
    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused; only observable phis will be marked as used
            // when processed by the worklist.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
Example #25
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the instruction is a Phi, then we should dominate the
                // predecessor from which the value is coming from.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // We have no live uses, so sink this instruction into all the
            // bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, "  No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarily placed here because the code above
            // performs a form of dead code elimination that was folded into
            // this Sink phase: dead code elimination used to move instructions
            // with no live uses into the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that the
            // side-effect is not observed. In the meantime, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is itself inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevents moving the
            // computation of the arguments into an inline function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // corresponds to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // Only instructions which can be recovered on bailout and which are
            // sinkable can be moved into the blocks below, while filling the
            // resume points with a clone which is recovered on bailout.

            // If the instruction has live uses and if it is clonable, then we
            // can clone the instruction for all non-dominated uses and move the
            // instruction into the block which is dominating all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors.  In such a case, we cannot safely move
            // bailing instructions to these blocks as we have no way to bail out.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, "  Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, "  into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block except
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block in which the
                // instruction would be moved into.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction in a different block, we should
            // verify that we do not carry over a resume point which would refer
            // to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now, that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
Example #26
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
ion::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks;

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist;

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediately dominated blocks to the worklist.
        for (size_t i = 0; i < block->numImmediatelyDominatedBlocks(); i++) {
            if (!worklist.append(block->getImmediatelyDominatedBlock(i)))
                return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}