Example #1
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();

    Register ptr = ToRegister(ins->ptr());
    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    // Bounds checking for atomic accesses must be done with an explicit
    // check here: signal-handler bounds checking is not yet implemented
    // for atomics (hence the offset-0 assertion below).
    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);

    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
}
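A detail shared by every version below: Scalar::Uint32 accesses are dispatched as Scalar::Int32. A 32-bit compare-exchange is signedness-agnostic, since both the comparison and the swap act on the raw bits; only the JS-level reinterpretation of the returned old value distinguishes the two view types. A minimal standalone illustration in plain C++ (nothing here is SpiderMonkey API):

#include <atomic>
#include <cstdint>
#include <cstdio>

int main()
{
    // 0xFFFFFFFF is -1 as Int32 and 4294967295 as Uint32; the cmpxchg
    // itself only sees the 32 raw bits.
    uint32_t before = 0xFFFFFFFFu;
    std::atomic<int32_t> cell{static_cast<int32_t>(before)};

    int32_t expected = static_cast<int32_t>(before);
    bool ok = cell.compare_exchange_strong(expected, 7);

    // Only the final reinterpretation of the old value differs.
    printf("swap %s; old as Int32 = %d, old as Uint32 = %u\n",
           ok ? "succeeded" : "failed",
           expected, static_cast<uint32_t>(expected));
    return 0;
}

This prints "swap succeeded; old as Int32 = -1, old as Uint32 = 4294967295".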
Example #2
bool
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();

    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
    Scalar::Type vt = Scalar::Type(mir->viewType());

    const LAllocation *ptr = ins->ptr();

    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);

    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::LessThan, &goahead);
        // Out of bounds: execute a full memory barrier and return zero
        // instead of performing the access.
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    masm.append(AsmJSHeapAccess(after, after, mir->viewType(), maybeCmpOffset));
    return true;
}
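The needsBoundsCheck block above gives out-of-bounds atomics an inline fallback path: a full barrier, a zeroed output register, and a jump over the cmpxchg to rejoin. Roughly, in C++, with heap and heapLength as illustrative stand-ins for the asm.js heap and its patched length (not SpiderMonkey API), and assuming a 4-byte-aligned index:

#include <atomic>
#include <cstdint>

static uint32_t
compareExchangeHeapUint32(uint8_t* heap, uint32_t heapLength,
                          uint32_t ptr, uint32_t oldval, uint32_t newval)
{
    // Stand-in for the patched length check (the real cmp immediate is
    // rewritten to hold the heap length once the heap is attached).
    if (heapLength < sizeof(uint32_t) || ptr > heapLength - sizeof(uint32_t)) {
        std::atomic_thread_fence(std::memory_order_seq_cst); // memoryBarrier(MembarFull)
        return 0;                                            // masm.xorl(out, out)
    }
    auto* cell = reinterpret_cast<std::atomic<uint32_t>*>(heap + ptr);
    cell->compare_exchange_strong(oldval, newval);           // lock cmpxchg
    return oldval; // holds the previously stored value on success and failure
}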
Example #3
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);

    Register ptr = ToRegister(ins->ptr());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    Scalar::Type accessType = mir->access().type();
    BaseIndex srcAddr(HeapReg, ptr, TimesOne);

    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
}
Example #4
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    const LAllocation* ptr = ins->ptr();

    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
        masm.j(Assembler::Above, gen->outOfBoundsLabel());
    }
    uint32_t before = masm.size();
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
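Where Example #2 materializes a zero result on failure, this version branches to a shared out-of-bounds label and records the access with AsmJSHeapAccess::Throw, so an out-of-bounds index traps instead. The cmp32WithPatch immediate starts as -endOffset and is later patched with the heap length, making the test effectively ptr > heapLength - endOffset. A rough C++ analogue of the control flow (throwOutOfBounds is an illustrative placeholder for the trap, not SpiderMonkey API):

#include <atomic>
#include <cstdint>
#include <stdexcept>

[[noreturn]] static void throwOutOfBounds()
{
    throw std::out_of_range("asm.js atomic access out of bounds");
}

static uint32_t
compareExchangeOrTrap(uint8_t* heap, uint32_t heapLength,
                      uint32_t ptr, uint32_t endOffset,
                      uint32_t oldval, uint32_t newval)
{
    // cmp32WithPatch(ptr, Imm32(-endOffset)) + j(Above, outOfBoundsLabel),
    // assuming endOffset <= heapLength so the subtraction cannot wrap.
    if (ptr > heapLength - endOffset)
        throwOutOfBounds();

    auto* cell = reinterpret_cast<std::atomic<uint32_t>*>(heap + ptr);
    cell->compare_exchange_strong(oldval, newval);
    return oldval;
}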
Example #5
void
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    MOZ_ASSERT(mir->access().offset() == 0);

    Scalar::Type accessType = mir->access().type();
    Register ptrReg = ToRegister(ins->ptr());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());
    Register addrTemp = ToRegister(ins->addrTemp());
    Register memoryBase = ToRegister(ins->memoryBase());

    // Fold memoryBase + ptrReg into addrTemp so the compare-exchange below
    // can use a plain Address operand.
    asmJSAtomicComputeAddress(addrTemp, ptrReg, memoryBase);

    Address memAddr(addrTemp, 0);
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        memAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
}
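The x64 versions above address the heap with a BaseIndex built from the pinned HeapReg, while this x86 version receives the heap base as an ordinary memoryBase operand and must fold memoryBase + ptrReg into addrTemp before the access. A trivial model of that address computation (names illustrative):

#include <cstdint>

// Model of asmJSAtomicComputeAddress: addrTemp = memoryBase + ptr,
// after which the atomic uses a plain [addrTemp] operand.
static inline uint8_t*
computeAtomicAddress(uint8_t* memoryBase, uint32_t ptr)
{
    return memoryBase + ptr;
}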
Example #6
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    const LAllocation *ptr = ins->ptr();

    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());

    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
        Label goahead;
        masm.j(Assembler::BelowOrEqual, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(after, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
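Both bounds-checking versions end by appending an AsmJSHeapAccess record, which is what lets the cmp's length immediate be rewritten once the actual heap is attached. A minimal model of that bookkeeping, under the assumption (suggested by the Imm32(-mir->endOffset()) above) that patching adds the heap length to the stored immediate; the types and names are illustrative, not the real jit:: structures:

#include <cstdint>
#include <cstring>
#include <vector>

static constexpr uint32_t NoLengthCheck = UINT32_MAX;

struct HeapAccessRecord {
    uint32_t insnOffset;   // code offset of the access (masm.size() above)
    uint32_t cmpImmOffset; // offset of the cmp's length immediate, or NoLengthCheck
};

static void
patchHeapLength(uint8_t* code, const std::vector<HeapAccessRecord>& accesses,
                uint32_t heapLength)
{
    for (const HeapAccessRecord& a : accesses) {
        if (a.cmpImmOffset == NoLengthCheck)
            continue;
        // The immediate was emitted as -endOffset (or 0); adding the heap
        // length turns the check into ptr > heapLength - endOffset.
        uint32_t imm;
        std::memcpy(&imm, code + a.cmpImmOffset, sizeof(imm));
        imm += heapLength;
        std::memcpy(code + a.cmpImmOffset, &imm, sizeof(imm));
    }
}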