Example #1
MBasicBlock::MBasicBlock(MIRGraph &graph, CompileInfo &info, const BytecodeSite &site, Kind kind)
  : unreachable_(false),
    graph_(graph),
    info_(info),
    predecessors_(graph.alloc()),
    stackPosition_(info_.firstStackSlot()),
    lastIns_(nullptr),
    pc_(site.pc()),
    lir_(nullptr),
    start_(nullptr),
    entryResumePoint_(nullptr),
    successorWithPhis_(nullptr),
    positionInPhiSuccessor_(0),
    kind_(kind),
    loopDepth_(0),
    mark_(false),
    immediatelyDominated_(graph.alloc()),
    immediateDominator_(nullptr),
    numDominated_(0),
    trackedSite_(site)
#if defined (JS_ION_PERF)
    , lineno_(0u),
    columnIndex_(0u)
#endif
{
}
static void
AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
{
    // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
    // since the users of the BitAnd include heap accesses. This will expose
    // the redundancy for GVN when expressions like this:
    //   a&m
    //   (a+1)&m
    //   (a+2)&m
    // are transformed into this:
    //   a&m
    //   (a&m)+1
    //   (a&m)+2
    // and it will allow the constants to be folded by the
    // EffectiveAddressAnalysis pass.
    //
    // Putting the add on the outside might seem like it exposes other users of
    // the expression to the possibility of i32 overflow, if we aren't in wasm
    // and they aren't naturally truncating. However, since we use MAdd::New
    // with MIRType::Int32, we make sure that the value is truncated, just as it
    // would be by the MBitAnd.

    MOZ_ASSERT(IsCompilingWasm());

    if (!ptr->isBitAnd())
        return;

    MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
    MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
    if (lhs->isConstant())
        mozilla::Swap(lhs, rhs);
    if (!lhs->isAdd() || !rhs->isConstant())
        return;

    MDefinition* op0 = lhs->toAdd()->getOperand(0);
    MDefinition* op1 = lhs->toAdd()->getOperand(1);
    if (op0->isConstant())
        mozilla::Swap(op0, op1);
    if (!op1->isConstant())
        return;

    uint32_t i = op1->toConstant()->toInt32();
    uint32_t m = rhs->toConstant()->toInt32();
    if (!IsAlignmentMask(m) || (i & m) != i)
        return;

    // The pattern was matched! Produce the replacement expression.
    MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), and_);
    MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), add);
    ptr->replaceAllUsesWith(add);
    ptr->block()->discard(ptr->toBitAnd());
}
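
The precondition in the last two checks is what makes the fold sound: IsAlignmentMask(m) requires m to be leading ones followed by trailing zeros (e.g. ~7 for 8-byte alignment), and (i & m) == i requires all of i's set bits to lie inside m, i.e. i is a multiple of the alignment. Under those two premises, (a + i) & m == (a & m) + i for every a, even with int32 wraparound. Below is a small standalone check of the identity; IsAlignmentMaskSketch is a stand-in written for this sketch, not the SpiderMonkey helper itself.

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Stand-in predicate: true when m is leading ones followed by trailing
// zeros, such as 0xFFFFFFF8.
static bool IsAlignmentMaskSketch(uint32_t m)
{
    return (-m & ~m) == 0;
}

int main()
{
    const uint32_t m = ~uint32_t(7); // 8-byte alignment mask, 0xFFFFFFF8
    const uint32_t i = 16;           // multiple of the alignment: (i & m) == i
    assert(IsAlignmentMaskSketch(m) && (i & m) == i);

    // The identity the pass relies on, including wraparound cases.
    for (uint32_t a : {0u, 1u, 7u, 8u, 0x12345678u, 0xFFFFFFFFu})
        assert(((a + i) & m) == ((a & m) + i));
    return 0;
}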
Example #3
ValueNumberer::ValueNumberer(MIRGenerator* mir, MIRGraph& graph)
  : mir_(mir), graph_(graph),
    values_(graph.alloc()),
    deadDefs_(graph.alloc()),
    remainingBlocks_(graph.alloc()),
    nextDef_(nullptr),
    totalNumVisited_(0),
    rerun_(false),
    blocksRemoved_(false),
    updateAliasAnalysis_(false),
    dependenciesBroken_(false)
{}
Example #4
ValueNumberer::ValueNumberer(MIRGenerator *mir, MIRGraph &graph, bool optimistic)
  : mir(mir),
    graph_(graph),
    values(graph.alloc()),
    pessimisticPass_(!optimistic),
    count_(0)
{ }
bool
FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph)
{
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)"))
            return false;

        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph.alloc().ensureBallast())
                return false;

            if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)"))
                return false;

            if (i->isAdd())
                AnalyzeAdd(graph.alloc(), i->toAdd());
        }
    }
    return true;
}
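
AnalyzeAdd itself is not part of this excerpt. As a rough illustration of the rewrite such a pass builds on, folding nested constants in a linear sum is plain reassociation, which is unconditionally valid under the truncating modulo-2^32 semantics of MIRType::Int32 adds:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main()
{
    // (x + c1) + c2 == x + (c1 + c2) for every x under modulo-2^32
    // arithmetic, so a chain of constant offsets can always be collapsed
    // into a single constant.
    const uint32_t c1 = 40, c2 = 2;
    for (uint32_t x : {0u, 1u, 0x7FFFFFFFu, 0xFFFFFFFFu})
        assert((x + c1) + c2 == x + (c1 + c2));
    return 0;
}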
Example #6
static void
UnboxSimdPhi(const JitCompartment* jitCompartment, MIRGraph& graph, MPhi* phi, SimdType unboxType)
{
    TempAllocator& alloc = graph.alloc();

    // Unbox and replace all operands.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* op = phi->getOperand(i);
        MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
        op->block()->insertAtEnd(unbox);
        phi->replaceOperand(i, unbox);
    }

    // Change the MIRType of the Phi.
    MIRType mirType = SimdTypeToMIRType(unboxType);
    phi->setResultType(mirType);

    MBasicBlock* phiBlock = phi->block();
    MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
    MInstruction* at = phiBlock->safeInsertTop(atRecover);

    // Note: we capture the uses list now, as newly added instructions are not visited.
    MUseIterator i(phi->usesBegin()), e(phi->usesEnd());

    // Add an MSimdBox, and replace all the Phi uses with it.
    JSObject* templateObject = jitCompartment->maybeGetSimdTemplateObjectFor(unboxType);
    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
    MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
    recoverBox->setRecoveredOnBailout();
    phiBlock->insertBefore(atRecover, recoverBox);

    MSimdBox* box = nullptr;
    while (i != e) {
        MUse* use = *i++;
        MNode* ins = use->consumer();

        if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
            (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
        {
            use->replaceProducer(recoverBox);
            continue;
        }

        if (!box) {
            box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
            phiBlock->insertBefore(at, box);
        }

        use->replaceProducer(box);
    }
}
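
One detail worth noting in the loop above: recoverBox is created unconditionally, but the executable box is materialized lazily, so a phi whose every use is a resume point or a recovered instruction never costs an actual boxing at runtime. Here is a minimal standalone sketch of that pattern, with hypothetical stand-in types rather than the Ion classes:

#include <memory>
#include <vector>

struct UseSite { bool bailoutOnly; };

// Walk the uses; allocate the costly replacement only when the first use
// that actually executes at runtime shows up.
void ReplaceProducers(std::vector<UseSite>& uses)
{
    std::unique_ptr<int> box; // stands in for the executable MSimdBox
    for (UseSite& use : uses) {
        if (use.bailoutOnly)
            continue;                       // keeps the recover-only box
        if (!box)
            box = std::make_unique<int>(0); // first live use pays the cost
        // ... rewire the use to *box here
    }
}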
Example #7
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the instruction is a Phi, then we should dominate the
                // predecessor from which the value comes.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // The instruction has no live uses (they are all resume points or
            // recovered instructions), so sink it into the bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, "  No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarily placed here because the code above
            // performs dead-code elimination work that moved into this Sink
            // phase: DCE used to move instructions with no live uses onto the
            // bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that
            // the side-effect is not observed. In the meantime, we simply
            // inhibit this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // Skip to the next instruction if we cannot find a common dominator
            // for all the uses of this instruction, or if the common dominator
            // corresponds to the block of the current instruction.
            if (!usesDominator || usesDominator == *block)
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is itself inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevents moving
            // the computation of the arguments into an inline function when
            // there is no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Only instructions which can be recovered on bailout and which
            // are sinkable can be moved into blocks below, while filling the
            // resume points with a clone which is recovered on bailout.

            // If the instruction has live uses and is clonable, then we can
            // clone it for all non-dominated uses and move the original into
            // the block which dominates all live uses.
            if (!ins->canClone())
                continue;

            // If the block is a split-edge block, which is created for folding
            // test conditions, then the block has no resume point and has
            // multiple predecessors. In such a case, we cannot safely move a
            // bailing instruction into these blocks, as we have no way to bail
            // out.
            if (!usesDominator->entryResumePoint() && usesDominator->numPredecessors() != 1)
                continue;

            JitSpewDef(JitSpew_Sink, "  Can Clone & Recover, sink instruction\n", ins);
            JitSpew(JitSpew_Sink, "  into Block %u", usesDominator->id());

            // Copy the arguments and clone the instruction.
            MDefinitionVector operands(alloc);
            for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
                if (!operands.append(ins->getOperand(i)))
                    return false;
            }

            MInstruction* clone = ins->clone(alloc, operands);
            ins->block()->insertBefore(ins, clone);
            clone->setRecoveredOnBailout();

            // We should not update the producer of the entry resume point, as
            // it cannot refer to any instruction within the basic block except
            // for Phi nodes.
            MResumePoint* entry = usesDominator->entryResumePoint();

            // Replace the instruction by its clone in all the resume points /
            // recovered-on-bailout instructions which are not in blocks which
            // are dominated by the usesDominator block.
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; ) {
                MUse* use = *i++;
                MNode* consumer = use->consumer();

                // If the consumer is a Phi, then we look for the index of the
                // use to find the corresponding predecessor block, which is
                // then used as the consumer block.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isDefinition() && consumer->toDefinition()->isPhi()) {
                    consumerBlock = consumerBlock->getPredecessor(
                        consumer->toDefinition()->toPhi()->indexOf(use));
                }

                // Keep the current instruction for all dominated uses, except
                // for the entry resume point of the block into which the
                // instruction is moved.
                if (usesDominator->dominates(consumerBlock) &&
                    (!consumer->isResumePoint() || consumer->toResumePoint() != entry))
                {
                    continue;
                }

                use->replaceProducer(clone);
            }

            // As we move this instruction to a different block, we should
            // verify that we do not carry over a resume point which would
            // refer to an outdated state of the control flow.
            if (ins->resumePoint())
                ins->clearResumePoint();

            // Now that all uses which are not dominated by usesDominator are
            // using the cloned instruction, we can safely move the instruction
            // into the usesDominator block.
            MInstruction* at = usesDominator->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
            block->moveBefore(at, ins);
        }
    }

    return true;
}
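
The CommonDominator helper called in the use-scan is not included in this excerpt. A plausible shape, consistent with how the loop uses it (and relying only on MBasicBlock methods that already appear above), walks the running dominator up the dominator tree until it dominates the new use's block:

// Sketch of the helper assumed by the loop above; not taken verbatim from
// the excerpt. Returns the nearest block dominating both arguments.
static MBasicBlock*
CommonDominator(MBasicBlock* commonDominator, MBasicBlock* defBlock)
{
    // First live use seen: its block is trivially the common dominator.
    if (!commonDominator)
        return defBlock;

    // Climb the dominator tree until the candidate dominates defBlock as
    // well. The entry block dominates everything, so this terminates.
    while (!commonDominator->dominates(defBlock)) {
        MBasicBlock* next = commonDominator->immediateDominator();
        MOZ_ASSERT(commonDominator != next);
        commonDominator = next;
    }
    return commonDominator;
}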