Example 1
void
JSONSpewer::spewMResumePoint(MResumePoint *rp)
{
    if (!rp)
        return;

    beginObjectProperty("resumePoint");

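    // If this resume point was inlined, record the id of the block holding
    // the caller's resume point.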
    if (rp->caller())
        integerProperty("caller", rp->caller()->block()->id());

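    // Emit the resume mode as a string.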
    property("mode");
    switch (rp->mode()) {
      case MResumePoint::ResumeAt:
        fprintf(fp_, "\"At\"");
        break;
      case MResumePoint::ResumeAfter:
        fprintf(fp_, "\"After\"");
        break;
      case MResumePoint::Outer:
        fprintf(fp_, "\"Outer\"");
        break;
    }

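    // Walk from the innermost resume point out through its callers; within
    // each frame the operand ids are listed in reverse order, and frames are
    // separated by a "|" marker.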
    beginListProperty("operands");
    for (MResumePoint *iter = rp; iter; iter = iter->caller()) {
        for (int i = iter->numOperands() - 1; i >= 0; i--)
            integerValue(iter->getOperand(i)->id());
        if (iter->caller())
            stringValue("|");
    }
    endList();

    endObject();
}
Example 2
// Do not optimize any Phi instruction which has conflicting Unbox operations,
// as this might imply some intended polymorphism.
static bool
CanUnboxSimdPhi(const JitCompartment* jitCompartment, MPhi* phi, SimdType unboxType)
{
    MOZ_ASSERT(phi->type() == MIRType::Object);

    // If we are unboxing, we have more than likely boxed this SIMD type once
    // in baseline; otherwise, we cannot create an MSimdBox, as we have no
    // template object to use.
    if (!jitCompartment->maybeGetSimdTemplateObjectFor(unboxType))
        return false;

    MResumePoint* entry = phi->block()->entryResumePoint();
    MIRType mirType = SimdTypeToMIRType(unboxType);
    for (MUseIterator i(phi->usesBegin()), e(phi->usesEnd()); i != e; i++) {
        // If we cannot recover the SIMD object at the entry of the basic
        // block, then we would have to box the contents anyway.
        if ((*i)->consumer() == entry && !entry->isRecoverableOperand(*i))
            return false;

        if (!(*i)->consumer()->isDefinition())
            continue;

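        // An unbox to a different SIMD type hints at intended polymorphism;
        // give up on unboxing this phi.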
        MDefinition* def = (*i)->consumer()->toDefinition();
        if (def->isSimdUnbox() && def->toSimdUnbox()->type() != mirType)
            return false;
    }

    return true;
}
Example 3
void
MBasicBlock::inheritPhis(MBasicBlock *header)
{
    MResumePoint *headerRp = header->entryResumePoint();
    size_t stackDepth = headerRp->numOperands();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        MDefinition *exitDef = getSlot(slot);
        MDefinition *loopDef = headerRp->getOperand(slot);
        if (!loopDef->isPhi()) {
            MOZ_ASSERT(loopDef->block()->id() < header->id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi *phi = loopDef->toPhi();
        MOZ_ASSERT(phi->numOperands() == 2);

        // The entry definition is always the leftmost input to the phi.
        MDefinition *entryDef = phi->getOperand(0);

        if (entryDef != exitDef)
            continue;

        // If the entryDef is the same as exitDef, then we must propagate the
        // phi down to this successor. This chance was missed as part of
        // setBackedge() because exits are not captured in resume points.
        setSlot(slot, phi);
    }
}
Example 4
void
MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins)
{
    // Find the previous resume point which would be used for bailing out.
    MResumePoint* rp = nullptr;
    for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
        rp = iter->resumePoint();
        if (rp)
            break;
    }

    // If none, take the entry resume point.
    if (!rp)
        rp = entryResumePoint();

    // The only blocks which do not have any entryResumePoint in Ion are the
    // SplitEdge blocks.  SplitEdge blocks only have a Goto instruction before
    // the Range Analysis phase.  In adjustInputs, we are manipulating
    // instructions which have a TypePolicy.  So, as a Goto has no operands and
    // no type policy, the entry resume point should exist.
    MOZ_ASSERT(rp);

    // Flag all operands as being potentially used.
    while (rp) {
        for (size_t i = 0, end = rp->numOperands(); i < end; i++)
            rp->getOperand(i)->setUseRemovedUnchecked();
        rp = rp->caller();
    }
}
Example 5
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
    if (!snapshot)
        return NULL;

    FlattenedMResumePointIter iter(rp);
    if (!iter.init())
        return NULL;

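    // Fill one snapshot entry per operand of every resume point in the
    // flattened chain.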
    size_t i = 0;
    for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
        MResumePoint *mir = *it;
        for (size_t j = 0; j < mir->numOperands(); ++i, ++j) {
            MDefinition *def = mir->getOperand(j);

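            // Look through MPassArg wrappers and record the underlying
            // argument definition instead.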
            if (def->isPassArg())
                def = def->toPassArg()->getArgument();

            LAllocation *a = snapshot->getEntry(i);

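            // Dead (unused) definitions get a placeholder constant index;
            // live ones get a keepalive use or a constant.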
            if (def->isUnused()) {
                *a = LConstantIndex::Bogus();
                continue;
            }

            *a = useKeepaliveOrConstant(def);
        }
    }

    return snapshot;
}
Example 6
MResumePoint *
MResumePoint::New(MBasicBlock *block, jsbytecode *pc, MResumePoint *parent, Mode mode)
{
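    // Allocate the resume point, then populate its operands from the block's
    // current stack via init() and inherit().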
    MResumePoint *resume = new MResumePoint(block, pc, parent, mode);
    if (!resume->init(block))
        return NULL;
    resume->inherit(block);
    return resume;
}
Example 7
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
    if (!snapshot)
        return NULL;

    FlattenedMResumePointIter iter(rp);
    if (!iter.init())
        return NULL;

    size_t i = 0;
    for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
        MResumePoint *mir = *it;
        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
            MDefinition *ins = mir->getOperand(j);

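            // Each snapshot slot is tracked as two allocations: one for the
            // type tag and one for the payload.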
            LAllocation *type = snapshot->typeOfSlot(i);
            LAllocation *payload = snapshot->payloadOfSlot(i);

            if (ins->isPassArg())
                ins = ins->toPassArg()->getArgument();
            JS_ASSERT(!ins->isPassArg());

            if (ins->isBox())
                ins = ins->toBox()->getOperand(0);

            // Guards should never be eliminated.
            JS_ASSERT_IF(ins->isUnused(), !ins->isGuard());

            // Snapshot operands other than constants should never be
            // emitted-at-uses. Try-catch support depends on there being no
            // code between an instruction and the LOsiPoint that follows it.
            JS_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());

            // The register allocation will fill these fields in with actual
            // register/stack assignments. During code generation, we can restore
            // interpreter state with the given information. Note that for
            // constants, including known types, we record a dummy placeholder,
            // since we can recover the same information, much cleaner, from MIR.
            if (ins->isConstant() || ins->isUnused()) {
                *type = LConstantIndex::Bogus();
                *payload = LConstantIndex::Bogus();
            } else if (ins->type() != MIRType_Value) {
                *type = LConstantIndex::Bogus();
                *payload = use(ins, LUse::KEEPALIVE);
            } else {
                *type = useType(ins, LUse::KEEPALIVE);
                *payload = usePayload(ins, LUse::KEEPALIVE);
            }
        }
    }

    return snapshot;
}
Example 8
void
MBasicBlock::discardAllResumePoints(bool discardEntry)
{
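    // Drop the uses of every resume point attached to this block; the entry
    // resume point is kept unless discardEntry is set.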
    for (MResumePointIterator iter = resumePointsBegin(); iter != resumePointsEnd(); ) {
        MResumePoint *rp = *iter;
        if (rp == entryResumePoint() && !discardEntry) {
            iter++;
        } else {
            rp->discardUses();
            iter = resumePoints_.removeAt(iter);
        }
    }
}
Example 9
static bool
CheckUseImpliesOperand(MInstruction *ins, MUse *use)
{
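    // The consumer of a use is either a definition or a resume point; in both
    // cases it must list |ins| as the operand at the use's recorded index.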
    MNode *consumer = use->consumer();
    uint32_t index = use->index();

    if (consumer->isDefinition()) {
        MDefinition *def = consumer->toDefinition();
        return (def->getOperand(index) == ins);
    }

    JS_ASSERT(consumer->isResumePoint());
    MResumePoint *res = consumer->toResumePoint();
    return (res->getOperand(index) == ins);
}
Example 10
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
    if (!snapshot)
        return NULL;

    FlattenedMResumePointIter iter(rp);
    if (!iter.init())
        return NULL;

    size_t i = 0;
    for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
        MResumePoint *mir = *it;
        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
            MDefinition *def = mir->getOperand(j);

            if (def->isPassArg())
                def = def->toPassArg()->getArgument();
            JS_ASSERT(!def->isPassArg());

            if (def->isBox())
                def = def->toBox()->getOperand(0);

            // Guards should never be eliminated.
            JS_ASSERT_IF(def->isUnused(), !def->isGuard());

            // Snapshot operands other than constants should never be
            // emitted-at-uses. Try-catch support depends on there being no
            // code between an instruction and the LOsiPoint that follows it.
            JS_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

            LAllocation *a = snapshot->getEntry(i);

            if (def->isUnused()) {
                *a = LConstantIndex::Bogus();
                continue;
            }

            *a = useKeepaliveOrConstant(def);
        }
    }

    return snapshot;
}
Example 11
bool
LRecoverInfo::init(MResumePoint *rp)
{
    MResumePoint *it = rp;

    // Sort operations in the order in which we need to restore the stack. This
    // implies that outer frames, as well as operations needed to recover the
    // current frame, are located before the current frame. The inner-most
    // resume point should be the last element in the list.
    do {
        if (!instructions_.append(it))
            return false;
        it = it->caller();
    } while (it);

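    // Entries were appended from the innermost resume point outward; reverse
    // them so that outer frames come first and the innermost resume point is
    // the last element (checked by the assertion below).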
    Reverse(instructions_.begin(), instructions_.end());
    MOZ_ASSERT(mir() == rp);
    return true;
}
Example 12
// Visit |def|.
bool
ValueNumberer::visitDefinition(MDefinition* def)
{
    // Nop does not fit in any of the previous optimizations, as its only
    // purpose is to reduce register pressure by keeping an additional resume
    // point. Still, there is no need for a consecutive list of MNop
    // instructions, as that would only slow down every other iteration over
    // the graph.
    if (def->isNop()) {
        MNop* nop = def->toNop();
        MBasicBlock* block = nop->block();

        // We look backward to know whether we can remove the previous Nop; we
        // do not look forward, as we would not benefit from the folding made
        // by GVN.
        MInstructionReverseIterator iter = ++block->rbegin(nop);

        // This Nop is at the beginning of the basic block; just replace the
        // entry resume point of the basic block with the one attached to this
        // Nop.
        if (iter == block->rend()) {
            JitSpew(JitSpew_GVN, "      Removing Nop%u", nop->id());
            nop->moveResumePointAsEntry();
            block->discard(nop);
            return true;
        }

        // The previous instruction is also a Nop, no need to keep it anymore.
        MInstruction* prev = *iter;
        if (prev->isNop()) {
            JitSpew(JitSpew_GVN, "      Removing Nop%u", prev->id());
            block->discard(prev);
            return true;
        }

        // The Nop is introduced to capture the result and make sure the
        // operands are not live anymore when there are no further uses. Though,
        // when all operands are still needed, the Nop does not decrease
        // liveness and can be removed.
        MResumePoint* rp = nop->resumePoint();
        if (rp && rp->numOperands() > 0 &&
            rp->getOperand(rp->numOperands() - 1) == prev &&
            !nop->block()->lastIns()->isThrow())
        {
            size_t numOperandsLive = 0;
            for (size_t j = 0; j < prev->numOperands(); j++) {
                for (size_t i = 0; i < rp->numOperands(); i++) {
                    if (prev->getOperand(j) == rp->getOperand(i)) {
                        numOperandsLive++;
                        break;
                    }
                }
            }

            if (numOperandsLive == prev->numOperands()) {
                JitSpew(JitSpew_GVN, "      Removing Nop%u", nop->id());
                block->discard(nop);
            }
        }

        return true;
    }

    // Skip optimizations on instructions which are recovered on bailout, to
    // avoid mixing instructions which are recovered on bailouts with
    // instructions which are not.
    if (def->isRecoveredOnBailout())
        return true;

    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MDefinition* dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, "      AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, "        Will recompute!");
            dependenciesBroken_ = true;
        }
        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        def->setDependency(def->toInstruction());
    } else {
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition* sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        bool isNewInstruction = sim->block() == nullptr;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (isNewInstruction)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef JS_JITSPEW
        JitSpew(JitSpew_GVN, "      Folded %s%u to %s%u",
                def->opName(), def->id(), sim->opName(), sim->id());
#endif
        MOZ_ASSERT(!sim->isDiscarded());
        ReplaceAllUsesWith(def, sim);

        // The node's foldsTo said |def| can be replaced by |sim|. If |def| is a
        // guard, then either |sim| is also a guard, or a guard isn't actually
        // needed, so we can clear |def|'s guard flag and let it be discarded.
        def->setNotGuardUnchecked();

        if (DeadIfUnused(def)) {
            if (!discardDefsRecursively(def))
                return false;

            // If that ended up discarding |sim|, then we're done here.
            if (sim->isDiscarded())
                return true;
        }

        if (!rerun_ && def->isPhi() && !sim->isPhi()) {
            rerun_ = true;
            JitSpew(JitSpew_GVN, "      Replacing phi%u may have enabled cascading optimisations; "
                                 "will re-run", def->id());
        }

        // Otherwise, proceed to optimize with |sim| in place of |def|.
        def = sim;

        // If the simplified instruction was already part of the graph, then we
        // probably already visited and optimized this instruction.
        if (!isNewInstruction)
            return true;
    }

    // Now that foldsTo is done, re-enable the original dependency. Even though
    // it may be pointing into a discarded block, it's still valid for the
    // purposes of detecting congruent loads.
    if (dep != nullptr)
        def->setDependency(dep);

    // Look for a dominating def which makes |def| redundant.
    MDefinition* rep = leader(def);
    if (rep != def) {
        if (rep == nullptr)
            return false;
        if (rep->updateForReplacement(def)) {
#ifdef JS_JITSPEW
            JitSpew(JitSpew_GVN,
                    "      Replacing %s%u with %s%u",
                    def->opName(), def->id(), rep->opName(), rep->id());
#endif
            ReplaceAllUsesWith(def, rep);

            // The node's congruentTo said |def| is congruent to |rep|, and it's
            // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
            // so we can clear |def|'s guard flag and let it be discarded.
            def->setNotGuardUnchecked();

            if (DeadIfUnused(def)) {
                // discardDef should not add anything to the deadDefs, as the
                // redundant operation should have the same input operands.
                mozilla::DebugOnly<bool> r = discardDef(def);
                MOZ_ASSERT(r, "discardDef shouldn't have tried to add anything to the worklist, "
                              "so it shouldn't have failed");
                MOZ_ASSERT(deadDefs_.empty(),
                           "discardDef shouldn't have added anything to the worklist");
            }
            def = rep;
        }
    }

    return true;
}
Example 13
MBasicBlock*
MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred, size_t predEdgeIdx, MBasicBlock* succ)
{
    MBasicBlock* split = nullptr;
    if (!succ->pc()) {
        // The successor has no PC; this is a Wasm compilation.
        split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
        if (!split)
            return nullptr;
    } else {
        // The successor has a PC; this is an IonBuilder compilation.
        MResumePoint* succEntry = succ->entryResumePoint();

        BytecodeSite* site = new(graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
        split = new(graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

        if (!split->init())
            return nullptr;

        // A split edge is used to simplify the graph to avoid having a
        // predecessor with multiple successors as well as a successor with
        // multiple predecessors.  As instructions can be moved in this
        // split-edge block, we need to give this block a resume point. To do
        // so, we copy the entry resume point of the successor and filter the
        // phis to keep inputs from the current edge.

        // Propagate the caller resume point from the inherited block.
        split->callerResumePoint_ = succ->callerResumePoint();

        // Split-edge blocks are created after the interpreter stack emulation.
        // Thus, there is no need to create slots.
        split->stackPosition_ = succEntry->stackDepth();

        // Create a resume point using our initial stack position.
        MResumePoint* splitEntry = new(graph.alloc()) MResumePoint(split, succEntry->pc(),
                                                                   MResumePoint::ResumeAt);
        if (!splitEntry->init(graph.alloc()))
            return nullptr;
        split->entryResumePoint_ = splitEntry;

        // The target's entry resume point might have phi operands; keep the
        // phi operands that come from our edge.
        size_t succEdgeIdx = succ->indexForPredecessor(pred);

        for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
            MDefinition* def = succEntry->getOperand(i);
            // This early in the pipeline, we have no recover instructions in
            // any entry resume point.
            MOZ_ASSERT_IF(def->block() == succ, def->isPhi());
            if (def->block() == succ)
                def = def->toPhi()->getOperand(succEdgeIdx);

            splitEntry->initOperand(i, def);
        }

        // For wasm this is already done in the New variant above, so we cannot
        // move this line below, where the rest of the graph is modified.
        if (!split->predecessors_.append(pred))
            return nullptr;
    }

    split->setLoopDepth(succ->loopDepth());

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));

    graph.insertBlockAfter(pred, split);

    pred->replaceSuccessor(predEdgeIdx, split);
    succ->replacePredecessor(pred, split);
    return split;
}
Example 14
// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
ion::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isFolded())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPassArg() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->node()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->node()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // Store an undefined value in place of all dead resume point
                // operands. Making any such substitution can in general alter
                // the interpreter's behavior, even though the code is dead, as
                // the interpreter will still execute opcodes whose effects
                // cannot be observed. If the undefined value were to flow to,
                // say, a dead property access the interpreter could throw an
                // exception; we avoid this problem by removing dead operands
                // before removing dead code.
                MConstant *constant = MConstant::New(UndefinedValue());
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}