Example #1
void
MBasicBlock::linkOsrValues(MStart *start)
{
    JS_ASSERT(start->startType() == MStart::StartType_Osr);

    MResumePoint *res = start->resumePoint();

    for (uint32_t i = 0; i < stackDepth(); i++) {
        MDefinition *def = slots_[i];
        if (i == info().scopeChainSlot()) {
            if (def->isOsrScopeChain())
                def->toOsrScopeChain()->setResumePoint(res);
        } else if (info().hasArguments() && i == info().argsObjSlot()) {
            JS_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
            JS_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
            if (def->isOsrArgumentsObject())
                def->toOsrArgumentsObject()->setResumePoint(res);
        } else {
            JS_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() || def->isConstant());
            // A constant Undefined can show up here for an argument slot when the function uses
            // a heavyweight argsobj, but the argument in question is stored on the scope chain.
            JS_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());

            if (def->isOsrValue())
                def->toOsrValue()->setResumePoint(res);
            else if (def->isGetArgumentsObjectArg())
                def->toGetArgumentsObjectArg()->setResumePoint(res);
        }
    }
}
Example #2
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LRecoverInfo *recoverInfo = getRecoverInfo(rp);
    if (!recoverInfo)
        return nullptr;

    LSnapshot *snapshot = LSnapshot::New(gen, recoverInfo, kind);
    if (!snapshot)
        return nullptr;

    size_t index = 0;
    LRecoverInfo::OperandIter it(recoverInfo->begin());
    LRecoverInfo::OperandIter end(recoverInfo->end());
    for (; it != end; ++it) {
        // Check that optimized out operands are in eliminable slots.
        MOZ_ASSERT(it.canOptimizeOutIfUnused());

        MDefinition *def = *it;

        if (def->isRecoveredOnBailout())
            continue;

        LAllocation *type = snapshot->typeOfSlot(index);
        LAllocation *payload = snapshot->payloadOfSlot(index);
        ++index;

        if (def->isBox())
            def = def->toBox()->getOperand(0);

        // Guards should never be eliminated.
        MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());

        // Snapshot operands other than constants should never be
        // emitted-at-uses. Try-catch support depends on there being no
        // code between an instruction and the LOsiPoint that follows it.
        MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

        // The register allocation will fill these fields in with actual
        // register/stack assignments. During code generation, we can restore
        // interpreter state with the given information. Note that for
        // constants, including known types, we record a dummy placeholder,
        // since we can recover the same information, much cleaner, from MIR.
        if (def->isConstant() || def->isUnused()) {
            *type = LConstantIndex::Bogus();
            *payload = LConstantIndex::Bogus();
        } else if (def->type() != MIRType_Value) {
            *type = LConstantIndex::Bogus();
            *payload = use(def, LUse(LUse::KEEPALIVE));
        } else {
            *type = useType(def, LUse::KEEPALIVE);
            *payload = usePayload(def, LUse::KEEPALIVE);
        }
    }

    return snapshot;
}
Example #3
// Transform:
//
//   [AddI]
//   addl       $9, %esi
//   [LoadUnboxedScalar]
//   movsd      0x0(%rbx,%rsi,8), %xmm4
//
// into:
//
//   [LoadUnboxedScalar]
//   movsd      0x48(%rbx,%rsi,8), %xmm4
//
// This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
static void
AnalyzeLoadUnboxedScalar(TempAllocator& alloc, MLoadUnboxedScalar* load)
{
    if (load->isRecoveredOnBailout())
        return;

    if (!load->getOperand(1)->isAdd())
        return;

    JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id());

    MAdd* add = load->getOperand(1)->toAdd();

    if (add->specialization() != MIRType::Int32 || !add->hasUses() ||
        add->truncateKind() != MDefinition::TruncateKind::Truncate)
    {
        return;
    }

    MDefinition* lhs = add->lhs();
    MDefinition* rhs = add->rhs();
    MDefinition* constant = nullptr;
    MDefinition* node = nullptr;

    if (lhs->isConstant()) {
        constant = lhs;
        node = rhs;
    } else if (rhs->isConstant()) {
        constant = rhs;
        node = lhs;
    } else
        return;

    MOZ_ASSERT(constant->type() == MIRType::Int32);

    size_t storageSize = Scalar::byteSize(load->storageType());
    int32_t c1 = load->offsetAdjustment();
    int32_t c2 = 0;
    if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2))
        return;

    int32_t offset = 0;
    if (!SafeAdd(c1, c2, &offset))
        return;

    JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset,
            load->opName(), load->id());
    load->setOffsetAdjustment(offset);
    load->replaceOperand(1, node);

    if (!add->hasLiveDefUses() && DeadIfUnused(add) && add->canRecoverOnBailout()) {
        JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u",
                add->opName(), add->id());
        add->setRecoveredOnBailoutUnchecked();
    }
}
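// A minimal standalone sketch of the overflow-checked fold performed above,
// with hypothetical CheckedMul/CheckedAdd helpers (GCC/Clang builtins)
// standing in for jit's SafeMul/SafeAdd: the add's constant 9, scaled by
// sizeof(double) == 8, yields the 0x48 displacement shown in the transform
// comment.
#include <cstdint>
#include <cstdio>

static bool CheckedMul(int32_t a, int32_t b, int32_t* out) {
    return !__builtin_mul_overflow(a, b, out);
}
static bool CheckedAdd(int32_t a, int32_t b, int32_t* out) {
    return !__builtin_add_overflow(a, b, out);
}

int main() {
    int32_t indexConstant = 9; // the AddI's constant operand
    int32_t storageSize = 8;   // byte size of a Float64 element
    int32_t oldAdjustment = 0; // the load's existing offset adjustment
    int32_t scaled, offset;
    if (CheckedMul(indexConstant, storageSize, &scaled) &&
        CheckedAdd(oldAdjustment, scaled, &offset))
        printf("new displacement: 0x%x\n", offset); // prints 0x48
    return 0;
}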
Example #4
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
    if (!snapshot)
        return NULL;

    FlattenedMResumePointIter iter(rp);
    if (!iter.init())
        return NULL;

    size_t i = 0;
    for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
        MResumePoint *mir = *it;
        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
            MDefinition *def = mir->getOperand(j);

            LAllocation *type = snapshot->typeOfSlot(i);
            LAllocation *payload = snapshot->payloadOfSlot(i);

            if (def->isPassArg())
                def = def->toPassArg()->getArgument();
            JS_ASSERT(!def->isPassArg());

            if (def->isBox())
                def = def->toBox()->getOperand(0);

            // Guards should never be eliminated.
            JS_ASSERT_IF(def->isUnused(), !def->isGuard());

            // Snapshot operands other than constants should never be
            // emitted-at-uses. Try-catch support depends on there being no
            // code between an instruction and the LOsiPoint that follows it.
            JS_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

            // The register allocation will fill these fields in with actual
            // register/stack assignments. During code generation, we can restore
            // interpreter state with the given information. Note that for
            // constants, including known types, we record a dummy placeholder,
            // since we can recover the same information, much cleaner, from MIR.
            if (def->isConstant() || def->isUnused()) {
                *type = LConstantIndex::Bogus();
                *payload = LConstantIndex::Bogus();
            } else if (def->type() != MIRType_Value) {
                *type = LConstantIndex::Bogus();
                *payload = use(def, LUse::KEEPALIVE);
            } else {
                *type = useType(def, LUse::KEEPALIVE);
                *payload = usePayload(def, LUse::KEEPALIVE);
            }
        }
    }

    return snapshot;
}
Example #5
static void
AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
{
    // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
    // since the users of the BitAnd include heap accesses. This will expose
    // the redundancy for GVN when expressions like this:
    //   a&m
    //   (a+1)&m,
    //   (a+2)&m,
    // are transformed into this:
    //   a&m
    //   (a&m)+1
    //   (a&m)+2
    // and it will allow the constants to be folded by the
    // EffectiveAddressAnalysis pass.
    //
    // Putting the add on the outside might seem like it exposes other users of
    // the expression to the possibility of i32 overflow, if we aren't in wasm
    // and they aren't naturally truncating. However, since we use MAdd::New
    // with MIRType::Int32, we make sure that the value is truncated, just as it
    // would be by the MBitAnd.

    MOZ_ASSERT(IsCompilingWasm());

    if (!ptr->isBitAnd())
        return;

    MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
    MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
    if (lhs->isConstant())
        mozilla::Swap(lhs, rhs);
    if (!lhs->isAdd() || !rhs->isConstant())
        return;

    MDefinition* op0 = lhs->toAdd()->getOperand(0);
    MDefinition* op1 = lhs->toAdd()->getOperand(1);
    if (op0->isConstant())
        mozilla::Swap(op0, op1);
    if (!op1->isConstant())
        return;

    uint32_t i = op1->toConstant()->toInt32();
    uint32_t m = rhs->toConstant()->toInt32();
    if (!IsAlignmentMask(m) || (i & m) != i)
        return;

    // The pattern was matched! Produce the replacement expression.
    MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), and_);
    MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
    ptr->block()->insertBefore(ptr->toBitAnd(), add);
    ptr->replaceAllUsesWith(add);
    ptr->block()->discard(ptr->toBitAnd());
}
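// A standalone sanity check (hypothetical IsAlignMask helper, not the pass's
// IsAlignmentMask) of the identity the comment above relies on: when m is an
// alignment mask and (i & m) == i, the rewrite (a+i)&m => (a&m)+i preserves
// the result in wraparound uint32 arithmetic.
#include <cassert>
#include <cstdint>
#include <initializer_list>

// m is an alignment mask when ~m is of the form 2^k - 1, i.e. m keeps the
// high bits of an address and clears the low k bits.
static bool IsAlignMask(uint32_t m) {
    uint32_t low = ~m;
    return (low & (low + 1)) == 0;
}

int main() {
    uint32_t m = 0xFFFFFFF8; // align to 8 bytes
    assert(IsAlignMask(m));
    for (uint32_t i : {8u, 16u, 0x40u}) {
        assert((i & m) == i); // the precondition checked by the pass
        for (uint32_t a : {0u, 1u, 7u, 0x12345u, 0xFFFFFFFFu}) {
            // Same wrapped result on both sides of the rewrite.
            assert(((a + i) & m) == ((a & m) + i));
        }
    }
    return 0;
}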
Example #6
bool
LIRGeneratorX64::visitBox(MBox *box)
{
    MDefinition *opd = box->getOperand(0);

    // If the operand is a constant, emit near its uses.
    if (opd->isConstant() && box->canEmitAtUses())
        return emitAtUses(box);

    if (opd->isConstant())
        return define(new LValue(opd->toConstant()->value()), box, LDefinition(LDefinition::BOX));

    LBox *ins = new LBox(opd->type(), useRegister(opd));
    return define(ins, box, LDefinition(LDefinition::BOX));
}
Example #7
bool
MBasicBlock::linkOsrValues(MStart* start)
{
    MOZ_ASSERT(start->startType() == MStart::StartType_Osr);

    MResumePoint* res = start->resumePoint();

    for (uint32_t i = 0; i < stackDepth(); i++) {
        MDefinition* def = slots_[i];
        MInstruction* cloneRp = nullptr;
        if (i == info().scopeChainSlot()) {
            if (def->isOsrScopeChain())
                cloneRp = def->toOsrScopeChain();
        } else if (i == info().returnValueSlot()) {
            if (def->isOsrReturnValue())
                cloneRp = def->toOsrReturnValue();
        } else if (info().hasArguments() && i == info().argsObjSlot()) {
            MOZ_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
            MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
            if (def->isOsrArgumentsObject())
                cloneRp = def->toOsrArgumentsObject();
        } else {
            MOZ_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() || def->isConstant() ||
                       def->isParameter());

            // A constant Undefined can show up here for an argument slot when
            // the function has an arguments object, but the argument in
            // question is stored on the scope chain.
            MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());

            if (def->isOsrValue())
                cloneRp = def->toOsrValue();
            else if (def->isGetArgumentsObjectArg())
                cloneRp = def->toGetArgumentsObjectArg();
            else if (def->isParameter())
                cloneRp = def->toParameter();
        }

        if (cloneRp) {
            MResumePoint* clone = MResumePoint::Copy(graph().alloc(), res);
            if (!clone)
                return false;
            cloneRp->setResumePoint(clone);
        }
    }

    return true;
}
Example #8
bool
LIRGeneratorARM::visitBox(MBox *box)
{
    MDefinition *inner = box->getOperand(0);

    // If the box wrapped a double, it needs a new register.
    if (inner->type() == MIRType_Double)
        return defineBox(new LBoxDouble(useRegisterAtStart(inner), tempCopy(inner, 0)), box);

    if (box->canEmitAtUses())
        return emitAtUses(box);

    if (inner->isConstant())
        return defineBox(new LValue(inner->toConstant()->value()), box);

    LBox *lir = new LBox(use(inner), inner->type());

    // Otherwise, we should not define a new register for the payload portion
    // of the output, so bypass defineBox().
    uint32_t vreg = getVirtualRegister();
    if (vreg >= MAX_VIRTUAL_REGISTERS)
        return false;

    // Note that because we're using PASSTHROUGH, we do not change the type of
    // the definition. We also do not define the first output as "TYPE",
    // because it has no corresponding payload at (vreg + 1). Also note that
    // although we copy the input's original type for the payload half of the
    // definition, this is only for clarity. PASSTHROUGH definitions are
    // ignored.
    lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
    lir->setDef(1, LDefinition(inner->virtualRegister(), LDefinition::TypeFrom(inner->type()),
                               LDefinition::PASSTHROUGH));
    box->setVirtualRegister(vreg);
    return add(lir);
}
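// Background for the two-slot definition above: on 32-bit targets a Value is
// a "nunbox", a (type tag, payload) pair held in two 32-bit slots, which is
// why a box defines two consecutive virtual registers. A rough illustrative
// layout (an assumption for exposition, not the real JS::Value definition):
#include <cstdint>

struct Nunbox32 {
    uint32_t payload; // low word: the int32/boolean/pointer bits
    uint32_t tag;     // high word: the type tag
};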
Example #9
// Determine whether the value of start (a phi node within the loop) can
// never become smaller than the initial value at loop entry, i.e. whether
// the induction variable is non-decreasing.
bool
Loop::nonDecreasing(MDefinition *initial, MDefinition *start)
{
    MDefinitionVector worklist;
    MDefinitionVector seen;

    if (!worklist.append(start))
        return false;

    while (!worklist.empty()) {
        MDefinition *def = worklist.popCopy();
        bool duplicate = false;
        for (size_t i = 0; i < seen.length() && !duplicate; i++) {
            if (seen[i] == def)
                duplicate = true;
        }
        if (duplicate)
            continue;
        if (!seen.append(def))
            return false;

        if (def->type() != MIRType_Int32)
            return false;

        if (!isInLoop(def)) {
            if (def != initial)
                return false;
            continue;
        }

        if (def->isPhi()) {
            MPhi *phi = def->toPhi();
            for (size_t i = 0; i < phi->numOperands(); i++) {
                if (!worklist.append(phi->getOperand(i)))
                    return false;
            }
            continue;
        }

        if (def->isAdd()) {
            if (def->toAdd()->specialization() != MIRType_Int32)
                return false;
            MDefinition *lhs = def->toAdd()->getOperand(0);
            MDefinition *rhs = def->toAdd()->getOperand(1);
            if (!rhs->isConstant())
                return false;
            Value v = rhs->toConstant()->value();
            if (!v.isInt32() || v.toInt32() < 0)
                return false;
            if (!worklist.append(lhs))
                return false;
            continue;
        }

        return false;
    }

    return true;
}
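// A toy standalone model of the walk above (hypothetical Def type, not MIR):
// a value is non-decreasing if every path back from `start` reaches `initial`
// only through phis and adds of a non-negative constant.
#include <cstdint>
#include <vector>

struct Def {
    enum Kind { Initial, Phi, AddConst } kind;
    std::vector<Def*> operands; // phi inputs, or the add's non-constant side
    int32_t constant = 0;       // for AddConst
};

static bool NonDecreasing(Def* initial, Def* start) {
    std::vector<Def*> worklist{start};
    std::vector<Def*> seen;
    while (!worklist.empty()) {
        Def* def = worklist.back();
        worklist.pop_back();
        bool duplicate = false;
        for (Def* s : seen)
            duplicate |= (s == def);
        if (duplicate)
            continue;
        seen.push_back(def);
        switch (def->kind) {
          case Def::Initial:
            if (def != initial) // an unknown value from outside the loop
                return false;
            break;
          case Def::Phi:
            for (Def* op : def->operands)
                worklist.push_back(op);
            break;
          case Def::AddConst:
            if (def->constant < 0)
                return false;
            worklist.push_back(def->operands[0]);
            break;
        }
    }
    return true;
}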
Example #10
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* opd = box->getOperand(0);

  // If the operand is a constant, emit near its uses.
  if (opd->isConstant() && box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (opd->isConstant()) {
    define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
           LDefinition(LDefinition::BOX));
  } else {
    LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
    define(ins, box, LDefinition(LDefinition::BOX));
  }
}
Example #11
void
MRsh::computeRange()
{
    MDefinition *right = getOperand(1);
    if (!right->isConstant())
        return;

    int32_t c = right->toConstant()->value().toInt32();
    Range other(getOperand(0));
    setRange(Range::shr(&other, c));
}
Example #12
void
MLsh::computeRange()
{
    MDefinition *right = getOperand(1);
    if (!right->isConstant())
        return;

    int32_t c = right->toConstant()->value().toInt32();
    const Range *other = getOperand(0)->range();
    setRange(Range::shl(other, c));
}
Example #13
void
LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    LAsmJSStoreHeap *lir;
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
        MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
        LAllocation ptrAlloc = LAllocation(ptr->toConstant()->vp());
        switch (ins->accessType()) {
          case Scalar::Int8: case Scalar::Uint8:
            // See comment below.
            lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
            break;
          case Scalar::Int16: case Scalar::Uint16:
          case Scalar::Int32: case Scalar::Uint32:
          case Scalar::Float32: case Scalar::Float64:
          case Scalar::Float32x4: case Scalar::Int32x4:
            // See comment below.
            lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
            break;
          case Scalar::Uint8Clamped:
          case Scalar::MaxTypedArrayViewType:
            MOZ_CRASH("unexpected array type");
        }
        add(lir, ins);
        return;
    }

    switch (ins->accessType()) {
      case Scalar::Int8: case Scalar::Uint8:
        // See comment for LIRGeneratorX86::useByteOpRegister.
        lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
        break;
      case Scalar::Int16: case Scalar::Uint16:
      case Scalar::Int32: case Scalar::Uint32:
      case Scalar::Float32: case Scalar::Float64:
      case Scalar::Float32x4: case Scalar::Int32x4:
        // For now, don't allow constant values. The immediate operand
        // affects instruction layout which affects patching.
        lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }

    add(lir, ins);
}
Example #14
MDefinition *
MTruncateToInt32::foldsTo(bool useValueNumbers)
{
    MDefinition *input = getOperand(0);
    if (input->type() == MIRType_Int32)
        return input;

    if (input->type() == MIRType_Double && input->isConstant()) {
        const Value &v = input->toConstant()->value();
        int32_t ret = ToInt32(v.toDouble());
        return MConstant::New(Int32Value(ret));
    }

    return this;
}
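// The ToInt32 above follows ECMAScript semantics: truncate toward zero, then
// wrap modulo 2^32 into the signed 32-bit range. A self-contained sketch of
// those semantics (an illustration, not js::ToInt32 itself):
#include <cmath>
#include <cstdint>

static int32_t ToInt32Sketch(double d) {
    if (!std::isfinite(d)) // NaN and infinities map to 0
        return 0;
    double t = std::trunc(d);              // round toward zero
    double m = std::fmod(t, 4294967296.0); // reduce modulo 2^32
    if (m < 0)
        m += 4294967296.0;
    // Reinterpret the residue as a two's-complement int32,
    // e.g. -1.5 -> -1 and 2147483648.0 -> INT32_MIN.
    return static_cast<int32_t>(static_cast<uint32_t>(m));
}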
Example #15
void
LIRGeneratorPPC::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
        MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
}
Example #16
bool
LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    JS_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    if (ptr->isConstant() && ins->skipBoundsCheck()) {
        JS_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    return add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
}
Example #17
bool
LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    JS_ASSERT(ptr->type() == MIRType_Int32);

    // The X64 does not inline an explicit bounds check, so it has no need to
    // keep the index in a register. However, only a positive index is
    // accepted, because a negative offset encoded in the addressing mode
    // would not wrap back into the protected area reserved for the heap.
    if (ptr->isConstant() && ptr->toConstant()->value().toInt32() >= 0) {
        LAsmJSLoadHeap *lir = new LAsmJSLoadHeap(LAllocation(ptr->toConstant()->vp()));
        return define(lir, ins);
    }
    return define(new LAsmJSLoadHeap(useRegisterAtStart(ptr)), ins);
}
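// The "protected area" mentioned above refers to reserving the entire 32-bit
// index space (plus headroom for encoded offsets) as inaccessible pages, so
// any nonnegative index+offset faults safely instead of touching unrelated
// memory. A simplified POSIX illustration (assumes a 64-bit host; sizes and
// flags are illustrative, not SpiderMonkey's actual reservation code):
#include <sys/mman.h>
#include <cstddef>

static void* ReserveGuardedHeap() {
    // 4 GiB covers any uint32 index; the extra 2 GiB covers offsets.
    size_t span = (size_t(1) << 32) + (size_t(1) << 31);
    void* base = mmap(nullptr, span, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    return base == MAP_FAILED ? nullptr : base;
}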
Example #18
void
LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
{
    MOZ_ASSERT(ins->offset() == 0);

    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);
    LAllocation baseAlloc;

    if (base->isConstant() && !ins->needsBoundsCheck()) {
        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
        baseAlloc = LAllocation(base->toConstant());
    } else
        baseAlloc = useRegisterAtStart(base);

    add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
}
Example #19
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LRecoverInfo *recoverInfo = getRecoverInfo(rp);
    if (!recoverInfo)
        return nullptr;

    LSnapshot *snapshot = LSnapshot::New(gen, recoverInfo, kind);
    if (!snapshot)
        return nullptr;

    size_t index = 0;
    LRecoverInfo::OperandIter it(recoverInfo->begin());
    LRecoverInfo::OperandIter end(recoverInfo->end());
    for (; it != end; ++it) {
        // Check that optimized out operands are in eliminable slots.
        MOZ_ASSERT(it.canOptimizeOutIfUnused());

        MDefinition *def = *it;

        if (def->isRecoveredOnBailout())
            continue;

        if (def->isBox())
            def = def->toBox()->getOperand(0);

        // Guards should never be eliminated.
        MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());

        // Snapshot operands other than constants should never be
        // emitted-at-uses. Try-catch support depends on there being no
        // code between an instruction and the LOsiPoint that follows it.
        MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

        LAllocation *a = snapshot->getEntry(index++);

        if (def->isUnused()) {
            *a = LConstantIndex::Bogus();
            continue;
        }

        *a = useKeepaliveOrConstant(def);
    }

    return snapshot;
}
Example #20
LSnapshot *
LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
{
    LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
    if (!snapshot)
        return NULL;

    FlattenedMResumePointIter iter(rp);
    if (!iter.init())
        return NULL;

    size_t i = 0;
    for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
        MResumePoint *mir = *it;
        for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
            MDefinition *def = mir->getOperand(j);

            if (def->isPassArg())
                def = def->toPassArg()->getArgument();
            JS_ASSERT(!def->isPassArg());

            if (def->isBox())
                def = def->toBox()->getOperand(0);

            // Guards should never be eliminated.
            JS_ASSERT_IF(def->isUnused(), !def->isGuard());

            // Snapshot operands other than constants should never be
            // emitted-at-uses. Try-catch support depends on there being no
            // code between an instruction and the LOsiPoint that follows it.
            JS_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());

            LAllocation *a = snapshot->getEntry(i);

            if (def->isUnused()) {
                *a = LConstantIndex::Bogus();
                continue;
            }

            *a = useKeepaliveOrConstant(def);
        }
    }

    return snapshot;
}
Example #21
bool
LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    JS_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    // For the ARM it is best to keep the 'ptr' in a register if a bounds check is needed.
    if (ptr->isConstant() && ins->skipBoundsCheck()) {
        int32_t ptrValue = ptr->toConstant()->value().toInt32();
        // A bounds check is only skipped for a positive index.
        JS_ASSERT(ptrValue >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    return define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}
Example #22
MDefinition *
MBitNot::foldsTo(bool useValueNumbers)
{
    if (specialization_ != MIRType_Int32)
        return this;

    MDefinition *input = getOperand(0);

    if (input->isConstant()) {
        js::Value v = Int32Value(~(input->toConstant()->value().toInt32()));
        return MConstant::New(v);
    }

    if (input->isBitNot())
        return input->getOperand(0); // ~~x => x

    return this;
}
Example #23
void
MBasicBlock::linkOsrValues(MStart *start)
{
    JS_ASSERT(start->startType() == MStart::StartType_Osr);

    MResumePoint *res = start->resumePoint();

    for (uint32_t i = 0; i < stackDepth(); i++) {
        MDefinition *def = slots_[i];
        if (!def->isConstant()) {
            if (i == info().scopeChainSlot())
                def->toOsrScopeChain()->setResumePoint(res);
            else
                def->toOsrValue()->setResumePoint(res);
        }
    }
}
Example #24
void
LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    LAllocation ptrAlloc;
    MOZ_ASSERT(ptr->type() == MIRType_Int32);

    // For the x86 it is best to keep the 'ptr' in a register if a bounds check is needed.
    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
        // A bounds check is only skipped for a positive index.
        MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else {
        ptrAlloc = useRegisterAtStart(ptr);
    }
    LAsmJSLoadHeap *lir = new(alloc()) LAsmJSLoadHeap(ptrAlloc);
    define(lir, ins);
}
Example #25
void
MBasicBlock::linkOsrValues(MStart *start)
{
    JS_ASSERT(start->startType() == MStart::StartType_Osr);

    MResumePoint *res = start->resumePoint();

    for (uint32_t i = 0; i < stackDepth(); i++) {
        MDefinition *def = slots_[i];
        if (i == info().scopeChainSlot()) {
            if (def->isOsrScopeChain())
                def->toOsrScopeChain()->setResumePoint(res);
        } else if (info().hasArguments() && i == info().argsObjSlot()) {
            JS_ASSERT(def->isConstant() && def->toConstant()->value() == UndefinedValue());
        } else {
            def->toOsrValue()->setResumePoint(res);
        }
    }
}
Example #26
0
void
LIRGeneratorPPC::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
    MDefinition *ptr = ins->ptr();
    MOZ_ASSERT(ptr->type() == MIRType_Int32);
    LAllocation ptrAlloc;

    // For PowerPC it would be better to keep the pointer in a register
    // if bounds checking is needed.
    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
        int32_t ptrValue = ptr->toConstant()->value().toInt32();
        // A bounds check is only skipped for a positive index.
        MOZ_ASSERT(ptrValue >= 0);
        ptrAlloc = LAllocation(ptr->toConstant()->vp());
    } else
        ptrAlloc = useRegisterAtStart(ptr);

    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}
Example #27
MBasicBlock *
UnreachableCodeElimination::optimizableSuccessor(MBasicBlock *block)
{
    // If the last instruction in `block` is a test instruction of a
    // constant value, returns the successor that the branch will
    // always branch to at runtime. Otherwise, returns nullptr.

    MControlInstruction *ins = block->lastIns();
    if (!ins->isTest())
        return nullptr;

    MTest *testIns = ins->toTest();
    MDefinition *v = testIns->getOperand(0);
    if (!v->isConstant())
        return nullptr;

    BranchDirection bdir = v->toConstant()->valueToBoolean() ? TRUE_BRANCH : FALSE_BRANCH;
    return testIns->branchSuccessor(bdir);
}
Example #28
bool
LIRGeneratorX86Shared::lowerUrshD(MUrsh *mir)
{
    MDefinition *lhs = mir->lhs();
    MDefinition *rhs = mir->rhs();

    JS_ASSERT(lhs->type() == MIRType_Int32);
    JS_ASSERT(rhs->type() == MIRType_Int32);
    JS_ASSERT(mir->type() == MIRType_Double);

#ifdef JS_CPU_X64
    JS_ASSERT(ecx == rcx);
#endif

    LUse lhsUse = useRegisterAtStart(lhs);
    LAllocation rhsAlloc = rhs->isConstant() ? useOrConstant(rhs) : useFixed(rhs, ecx);

    LUrshD *lir = new LUrshD(lhsUse, rhsAlloc, tempCopy(lhs, 0));
    return define(lir, mir);
}
Example #29
void
LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
{
    MOZ_ASSERT(ins->offset() == 0);

    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType_Int32);
    LAllocation baseAlloc;

    // For the ARM it is best to keep the 'base' in a register if a bounds check is needed.
    if (base->isConstant() && !ins->needsBoundsCheck()) {
        // A bounds check is only skipped for a positive index.
        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
        baseAlloc = LAllocation(base->toConstant());
    } else {
        baseAlloc = useRegisterAtStart(base);
    }

    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
}
Example #30
void
LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
{
    MDefinition* base = ins->base();
    MOZ_ASSERT(base->type() == MIRType::Int32);

    MDefinition* value = ins->value();
    LAllocation valueAlloc;
    switch (ins->accessType()) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
      case Scalar::Uint32:
        valueAlloc = useRegisterOrConstantAtStart(value);
        break;
      case Scalar::Int64:
        // No way to encode an int64-to-memory move on x64.
        if (value->isConstant() && value->type() != MIRType::Int64)
            valueAlloc = useOrConstantAtStart(value);
        else
            valueAlloc = useRegisterAtStart(value);
        break;
      case Scalar::Float32:
      case Scalar::Float64:
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:
        valueAlloc = useRegisterAtStart(value);
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }

    LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
    auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
    add(lir, ins);
}
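// The Int64 restriction above stems from x64 instruction encoding: stores
// accept at most a 32-bit immediate, which the CPU sign-extends to 64 bits.
// A hypothetical helper showing the representability test such an encoding
// would require:
#include <cstdint>

static bool FitsInSignExtendedImm32(int64_t v) {
    // True iff v survives a round-trip through a sign-extended imm32.
    return v == static_cast<int64_t>(static_cast<int32_t>(v));
}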