void
CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
{
    MOZ_ASSERT(!ins->mir()->hasUses());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
        masm.j(Assembler::Above, gen->outOfBoundsLabel());
    }

    uint32_t before = masm.size();
    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
    else
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);

    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    Register addrTemp = ToRegister(ins->addrTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   Imm32(ToInt32(value)),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
    } else {
        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                   ToRegister(value),
                                   memAddr,
                                   temp,
                                   InvalidReg,
                                   ToAnyRegister(ins->output()));
    }
}
void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
{
    MOZ_ASSERT(ins->mir()->hasUses());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    accessType = accessType == Scalar::Uint32 ? Scalar::Int32 : accessType;
    AtomicOp op = mir->operation();
    Register ptr = ToRegister(ins->ptr());
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
    const LAllocation* value = ins->value();

    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);

    AnyRegister output = ToAnyRegister(ins->output());
    if (value->isConstant()) {
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp,
                                   InvalidReg, output);
    } else {
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp,
                                   InvalidReg, output);
    }

    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
}
void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
    MAsmJSAtomicBinopHeap *mir = ins->mir();
    Scalar::Type vt = mir->accessType();
    const LAllocation *ptr = ins->ptr();
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    MOZ_ASSERT(ptr->isRegister());
    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);

    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
        Label goahead;
        masm.j(Assembler::Below, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    if (value->isConstant()) {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        Imm32(ToInt32(value)),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    } else {
        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        ToRegister(value),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    }
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);
    masm.append(AsmJSHeapAccess(after, after, mir->accessType(), maybeCmpOffset));
}
void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
    MOZ_ASSERT(ins->mir()->hasUses());
    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());

    MAsmJSAtomicBinopHeap *mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();

    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());

    // Note that we can't use
    // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
    // since signal-handler bounds checking is not yet implemented for atomic accesses.
    Label rejoin;
    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
    if (mir->needsBoundsCheck()) {
        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
        Label goahead;
        masm.j(Assembler::BelowOrEqual, &goahead);
        memoryBarrier(MembarFull);
        Register out = ToRegister(ins->output());
        masm.xorl(out, out);
        masm.jmp(&rejoin);
        masm.bind(&goahead);
    }
    if (value->isConstant()) {
        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        Imm32(ToInt32(value)),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    } else {
        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        ToRegister(value),
                                        srcAddr,
                                        temp,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
    }
    uint32_t after = masm.size();
    if (rejoin.used())
        masm.bind(&rejoin);

    MOZ_ASSERT(mir->offset() == 0,
               "The AsmJS signal handler doesn't yet support emulating "
               "atomic accesses in the case of a fault from an unwrapped offset");
    masm.append(AsmJSHeapAccess(after, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
{
    MAsmJSAtomicBinopHeap* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    Register ptrReg = ToRegister(ins->ptr());
    Register addrTemp = ToRegister(ins->addrTemp());
    const LAllocation* value = ins->value();
    AtomicOp op = mir->operation();
    MOZ_ASSERT(!mir->hasUses());

    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir->needsBoundsCheck(), mir->offset(),
                              mir->endOffset());

    Address memAddr(addrTemp, mir->offset());
    if (value->isConstant())
        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
    else
        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
}