void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    // Emit an atomic compare-exchange against the asm.js heap. Uint32 is
    // lowered as Int32: the machine-level CAS is identical, only the
    // interpretation of the result differs.
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type type = mir->accessType();

    const LAllocation* ptrAlloc = ins->ptr();
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
    MOZ_ASSERT(ptrAlloc->isRegister());
    Register ptrReg = ToRegister(ptrAlloc);

    BaseIndex memAddr(HeapReg, ptrReg, TimesOne, mir->offset());
    Register oldVal = ToRegister(ins->oldValue());
    Register newVal = ToRegister(ins->newValue());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        // Record the offset of the patchable length compare; jump to the
        // shared out-of-bounds exit when the access would fall past the heap.
        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
        masm.j(Assembler::Above, gen->outOfBoundsLabel());
    }

    // Record where the access starts so the heap-access table entry below
    // covers the emitted CAS.
    uint32_t before = masm.size();
    masm.compareExchangeToTypedIntArray(type == Scalar::Uint32 ? Scalar::Int32 : type,
                                        memAddr,
                                        oldVal,
                                        newVal,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    // Atomic compare-exchange on the asm.js heap; no address temp is used on
    // x64 since the heap base lives in HeapReg.
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type type = mir->accessType();

    Register ptrReg = ToRegister(ins->ptr());
    Register oldVal = ToRegister(ins->oldValue());
    Register newVal = ToRegister(ins->newValue());
    BaseIndex memAddr(HeapReg, ptrReg, TimesOne, mir->offset());

    // Emit the (possibly elided) bounds check for this atomic access.
    MaybeAddAtomicsBoundsCheck(masm, mir, ptrReg);

    // Uint32 is emitted as Int32: the machine CAS is the same operation.
    masm.compareExchangeToTypedIntArray(type == Scalar::Uint32 ? Scalar::Int32 : type,
                                        memAddr,
                                        oldVal,
                                        newVal,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));

    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
}
// Emit an atomic compare-exchange against the asm.js heap. When a bounds
// check is required, the out-of-bounds case is emulated inline: a full
// memory barrier is issued, the output is zeroed, and the access is skipped.
void CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();
    Scalar::Type vt = mir->accessType();
    const LAllocation *ptr = ins->ptr();
    MOZ_ASSERT(ptr->isRegister());
    // Heap accesses on x64 are HeapReg + index; no constant offset here.
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        // Patchable compare of the index against the heap length; Imm32(0)
        // is a placeholder patched later (offset recorded in maybeCmpOffset).
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
        Label goahead;
        // Strictly below the patched length: in bounds, perform the access.
        masm.j(Assembler::Below, &goahead);
        // Out-of-bounds path: preserve the atomic's barrier semantics,
        // produce 0 as the result, and jump over the access itself.
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    // Uint32 is emitted as Int32: the machine-level CAS is identical.
    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    // Record the end of the access before binding the rejoin label so the
    // heap-access entry covers only the emitted CAS.
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    masm.append(AsmJSHeapAccess(after, after, mir->accessType(), maybeCmpOffset));
}
// Emit an atomic compare-exchange against the asm.js heap. The bounds check,
// when needed, emulates the out-of-bounds case inline (barrier + zero result)
// rather than relying on the signal handler.
void CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
    MAsmJSCompareExchangeHeap *mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    const LAllocation *ptr = ins->ptr();
    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
    Register oldval = ToRegister(ins->oldValue());
    Register newval = ToRegister(ins->newValue());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        // Patchable compare of the index against (length - endOffset); the
        // recorded offset lets the length be patched in later.
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
        Label goahead;
        // BelowOrEqual: the whole access fits, go ahead with the CAS.
        masm.j(Assembler::BelowOrEqual, &goahead);
        // Out-of-bounds path: honor the atomic's full-barrier semantics,
        // produce 0 as the result, and skip the access.
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    // Uint32 is emitted as Int32: the machine-level CAS is identical.
    masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        oldval,
                                        newval,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    // Capture the end of the access before binding rejoin, so the recorded
    // heap access covers only the CAS instruction(s).
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(after, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
{
    // Atomic compare-exchange on the asm.js heap for x86: the effective
    // address is materialized into a temp register first, with the bounds
    // check folded into that computation.
    MAsmJSCompareExchangeHeap* mir = ins->mir();
    Scalar::Type type = mir->accessType();

    Register ptr = ToRegister(ins->ptr());
    Register oldVal = ToRegister(ins->oldValue());
    Register newVal = ToRegister(ins->newValue());
    Register temp = ToRegister(ins->addrTemp());

    asmJSAtomicComputeAddress(temp, ptr, mir->needsBoundsCheck(),
                              mir->offset(), mir->endOffset());
    Address target(temp, mir->offset());

    // Uint32 is emitted as Int32: the machine-level CAS is identical.
    masm.compareExchangeToTypedIntArray(type == Scalar::Uint32 ? Scalar::Int32 : type,
                                        target,
                                        oldVal,
                                        newVal,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
}