// x86: load an element from a typed array whose base address and length
// are known at compile time.
void
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
{
    const MLoadTypedArrayElementStatic* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType_Float32);

    Register ptr = ToRegister(ins->ptr());
    const LDefinition* out = ins->output();
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t offset = mir->offset();

    if (mir->needsBoundsCheck()) {
        MOZ_ASSERT(offset == 0);
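        // If the MIR node is fallible we may bail out on an out-of-bounds
        // index; otherwise jump to an out-of-line path that produces the
        // default value (0 for integer loads, NaN for float loads).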
        if (!mir->fallible()) {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
            addOutOfLineCode(ool, ins->mir());
        }

        masm.cmpPtr(ptr, ImmWord(mir->length()));
        if (ool)
            masm.j(Assembler::AboveOrEqual, ool->entry());
        else
            bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
    }

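    // The array's base address is a compile-time constant, so it folds
    // directly into the operand's displacement.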
    Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
    load(accessType, srcAddr, out);
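    // Canonicalize loaded NaNs so arbitrary NaN bit patterns never escape
    // into JS values.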
    if (accessType == Scalar::Float64)
        masm.canonicalizeDouble(ToFloatRegister(out));
    if (accessType == Scalar::Float32)
        masm.canonicalizeFloat(ToFloatRegister(out));
    if (ool)
        masm.bind(ool->rejoin());
}
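
// A later, wasm-era revision of the x86 heap load: the memory base and the
// bounds-check limit arrive as explicit operands, and masm.wasmBoundsCheck /
// masm.wasmLoad do the heavy lifting.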
void
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);

    const LAllocation* ptr = ins->ptr();
    const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
    const LAllocation* memoryBase = ins->memoryBase();
    AnyRegister out = ToAnyRegister(ins->output());

    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT(!Scalar::isSimdType(accessType));

    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    if (mir->needsBoundsCheck()) {
        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
        addOutOfLineCode(ool, mir);

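        // Jump to the out-of-line default-value path when
        // ptr >= boundsCheckLimit.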
        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
                             ool->entry());
    }

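    // A bogus pointer allocation means no index register is needed; the
    // access is at offset 0 from the memory base.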
    Operand srcAddr = ptr->isBogus()
                      ? Operand(ToRegister(memoryBase), 0)
                      : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);

    masm.wasmLoad(mir->access(), srcAddr, out);

    if (ool)
        masm.bind(ool->rejoin());
}
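
// An x64 revision of the asm.js heap load. Addresses are formed off the
// dedicated HeapReg, and the instruction range of the load is recorded in
// an AsmJSHeapAccess so the out-of-bounds machinery can identify it.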
void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    if (Scalar::isSimdType(accessType))
        return emitSimdLoad(ins);

    const LAllocation* ptr = ins->ptr();
    const LDefinition* out = ins->output();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

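    // Emit any fence required before an atomic access.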
    memoryBarrier(mir->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir)) {
        Label* jumpTo = nullptr;
        if (mir->isAtomicAccess()) {
            jumpTo = gen->outOfBoundsLabel();
        } else {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
            addOutOfLineCode(ool, mir);
            jumpTo = ool->entry();
        }
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
    }

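    // Bracket the load with masm.size() so the exact instruction range can
    // be verified and recorded for the heap-access table.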
    uint32_t before = masm.size();
    switch (accessType) {
      case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
      case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
      case Scalar::Int32:
      case Scalar::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
      case Scalar::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float32x4:
      case Scalar::Int32x4:   MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
          MOZ_CRASH("unexpected array type");
    }
    uint32_t after = masm.size();
    verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
    if (ool) {
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
        masm.bind(ool->rejoin());
    }
    memoryBarrier(mir->barrierAfter());
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::CarryOn, maybeCmpOffset));
}
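
// An earlier x64 revision: when a bounds check is needed, the heap length
// is not yet known, so the comparison is emitted against a placeholder
// immediate (cmp32WithPatch) and patched at link time.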
void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    MAsmJSLoadHeap *mir = ins->mir();
    Scalar::Type vt = mir->accessType();
    const LAllocation *ptr = ins->ptr();
    const LDefinition *out = ins->output();
    Operand srcAddr(HeapReg);

    if (Scalar::isSimdType(vt))
        return emitSimdLoad(ins);

    if (ptr->isConstant()) {
        int32_t ptrImm = ptr->toConstant()->toInt32();
        MOZ_ASSERT(ptrImm >= 0);
        srcAddr = Operand(HeapReg, ptrImm);
    } else {
        srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
    }

    memoryBarrier(ins->mir()->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
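        // Compare against a dummy length; the real heap length is patched
        // in later via maybeCmpOffset.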
        CodeOffsetLabel cmp = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0));
        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
        addOutOfLineCode(ool, ins->mir());
        masm.j(Assembler::AboveOrEqual, ool->entry());
        maybeCmpOffset = cmp.offset();
    }

    uint32_t before = masm.size();
    switch (vt) {
      case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
      case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
      case Scalar::Int32:
      case Scalar::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
      case Scalar::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float32x4:
      case Scalar::Int32x4:   MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
          MOZ_CRASH("unexpected array type");
    }
    uint32_t after = masm.size();
    verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, vt, srcAddr, *out->output());
    if (ool)
        masm.bind(ool->rejoin());
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
}
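
// A later x64 revision: bounds-check emission is factored out into
// maybeEmitAsmJSLoadBoundsCheck, and the access is recorded as an
// AsmJSMemoryAccess.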
void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    if (Scalar::isSimdType(accessType))
        return emitSimdLoad(ins);

    const LAllocation* ptr = ins->ptr();
    const LDefinition* out = ins->output();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(HeapReg, mir->offset())
                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    memoryBarrier(mir->barrierBefore());

    OutOfLineLoadTypedArrayOutOfBounds* ool;
    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);

    uint32_t before = masm.size();
    switch (accessType) {
      case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
      case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
      case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
      case Scalar::Int32:
      case Scalar::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
      case Scalar::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
      case Scalar::Float32x4:
      case Scalar::Int8x16:
      case Scalar::Int16x8:
      case Scalar::Int32x4:   MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
          MOZ_CRASH("unexpected array type");
    }
    uint32_t after = masm.size();

    verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());

    if (ool) {
        MOZ_ASSERT(hasBoundsCheck);
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
        masm.bind(ool->rejoin());
    }

    memoryBarrier(mir->barrierAfter());

    masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
}
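
// The oldest revision shown: codegen methods still return bool to
// propagate OOM, access types are AsmJSHeapAccess::ViewType rather than
// Scalar::Type, and SIMD loads are emitted inline instead of via
// emitSimdLoad.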
bool
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    MAsmJSLoadHeap *mir = ins->mir();
    AsmJSHeapAccess::ViewType vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    const LDefinition *out = ins->output();
    Operand srcAddr(HeapReg);

    if (ptr->isConstant()) {
        int32_t ptrImm = ptr->toConstant()->toInt32();
        MOZ_ASSERT(ptrImm >= 0);
        srcAddr = Operand(HeapReg, ptrImm);
    } else {
        srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
    }

    memoryBarrier(ins->mir()->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
        if (!addOutOfLineCode(ool, ins->mir()))
            return false;

        CodeOffsetLabel cmp = masm.cmplWithPatch(ToRegister(ptr), Imm32(0));
        masm.j(Assembler::AboveOrEqual, ool->entry());
        maybeCmpOffset = cmp.offset();
    }

    uint32_t before = masm.size();
    switch (vt) {
      case AsmJSHeapAccess::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
      case AsmJSHeapAccess::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
      case AsmJSHeapAccess::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
      case AsmJSHeapAccess::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
      case AsmJSHeapAccess::Int32:
      case AsmJSHeapAccess::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
      case AsmJSHeapAccess::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
      case AsmJSHeapAccess::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
      case AsmJSHeapAccess::Float32x4: masm.loadUnalignedFloat32x4(srcAddr, ToFloatRegister(out)); break;
      case AsmJSHeapAccess::Int32x4:   masm.loadUnalignedInt32x4(srcAddr, ToFloatRegister(out)); break;
      case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
    }
    uint32_t after = masm.size();
    if (ool)
        masm.bind(ool->rejoin());
    memoryBarrier(ins->mir()->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
    return true;
}
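
// An x86 revision of the static typed-array load that emits the patchable
// load instructions inline instead of going through a load() helper.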
void
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
{
    const MLoadTypedArrayElementStatic* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);

    Register ptr = ToRegister(ins->ptr());
    AnyRegister out = ToAnyRegister(ins->output());
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t offset = mir->offset();

    if (mir->needsBoundsCheck()) {
        MOZ_ASSERT(offset == 0);
        if (!mir->fallible()) {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
            addOutOfLineCode(ool, ins->mir());
        }

        masm.cmpPtr(ptr, ImmWord(mir->length()));
        if (ool)
            masm.j(Assembler::AboveOrEqual, ool->entry());
        else
            bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
    }

    Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
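    // The *WithPatch load forms record the instruction offset so the
    // absolute address baked into each load can be patched later.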
    switch (accessType) {
      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Uint8Clamped:
      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Int32:
      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Float32:      masm.vmovssWithPatch(srcAddr, out.fpu()); break;
      case Scalar::Float64:      masm.vmovsdWithPatch(srcAddr, out.fpu()); break;
      default:                   MOZ_CRASH("Unexpected type");
    }

    if (accessType == Scalar::Float64)
        masm.canonicalizeDouble(out.fpu());
    if (accessType == Scalar::Float32)
        masm.canonicalizeFloat(out.fpu());

    if (ool)
        masm.bind(ool->rejoin());
}
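
// An x86 revision of the asm.js heap load: x86 has no spare heap base
// register, so constant addresses are emitted as PatchedAbsoluteAddress
// operands and fixed up when the heap is attached.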
void
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
{
    const MAsmJSLoadHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    if (Scalar::isSimdType(accessType))
        return emitSimdLoad(ins);

    const LAllocation* ptr = ins->ptr();
    const LDefinition* out = ins->output();
    Operand srcAddr = ptr->isBogus()
                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
                      : Operand(ToRegister(ptr), mir->offset());

    memoryBarrier(mir->barrierBefore());
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (gen->needsAsmJSBoundsCheckBranch(mir)) {
        Label* jumpTo = nullptr;
        if (mir->isAtomicAccess()) {
            jumpTo = gen->outOfBoundsLabel();
        } else {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
            addOutOfLineCode(ool, mir);
            jumpTo = ool->entry();
        }
        maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
    }

    uint32_t before = masm.size();
    load(accessType, srcAddr, out);
    uint32_t after = masm.size();
    if (ool) {
        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
        masm.bind(ool->rejoin());
    }
    memoryBarrier(mir->barrierAfter());
    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}