void
MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;

    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t doubleIndex;
    if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
        doubleIndex = p->value();
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return;
    }
    Double& dbl = doubles_[doubleIndex];
    MOZ_ASSERT(!dbl.uses.bound());

    // The constants will be stored in a pool appended to the text (see
    // finish()), so they will always be a fixed distance from the
    // instructions which reference them. This allows the instructions to use
    // PC-relative addressing. Use "jump" label support code, because we need
    // the same PC-relative address patching that jumps use.
    JmpSrc j = masm.vmovsd_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(dbl.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
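// Hypothetical usage (illustration only, not from this file): a code
// generator materializes a double constant by asking the macro assembler for
// it; the pool and patching machinery above is invisible to the caller, and
// repeated loads of the same value share one pool entry via doubleMap_.
// ScratchDoubleReg is the scratch XMM register SpiderMonkey conventionally
// reserves; any free FloatRegister works the same way.
//
//   masm.loadConstantDouble(4.0, ScratchDoubleReg);
//   masm.loadConstantDouble(4.0, ScratchDoubleReg); // reuses the same entry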
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;

    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return;
    }
    size_t floatIndex;
    if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
        floatIndex = p->value();
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return;
    }
    Float& flt = floats_[floatIndex];
    MOZ_ASSERT(!flt.uses.bound());

    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(flt.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
void
MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest)
{
    if (maybeInlineSimd128Float(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;

    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float* flt = getFloat(f);
    if (!flt)
        return;

    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    propagateOOM(flt->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);

    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float* flt = getFloat(f);
    if (!flt)
        return;

    // See comment in loadConstantDouble
    JmpSrc j = masm.vmovss_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);

    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
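// Hypothetical caller (illustration only): SIMD constants go through the
// same pool, keyed by the 128-bit value. SimdConstant::SplatX4 is the
// constructor SpiderMonkey uses for uniform lanes; any SimdConstant whose
// type() is Float32x4 works identically here.
//
//   masm.loadConstantFloat32x4(SimdConstant::SplatX4(1.0f), dest);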
void
MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;
    Double* dbl = getDouble(d);
    if (!dbl)
        return;

    // The constants will be stored in a pool appended to the text (see
    // finish()), so they will always be a fixed distance from the
    // instructions which reference them. This allows the instructions to use
    // PC-relative addressing. Use "jump" label support code, because we need
    // the same PC-relative address patching that jumps use.
    JmpSrc j = masm.vmovsd_ripr(dest.encoding());
    propagateOOM(dbl->uses.append(CodeOffset(j.offset())));
}
size_t
Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
{
    // This jump is patchable at runtime so we always need to make sure the
    // jump table is emitted.
    writeRelocation(src, reloc);

    size_t index = jumps_.length();
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
    return index;
}
void
Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
{
    MOZ_ASSERT(target.value != nullptr);

    // Emit reloc before modifying the jump table, since it computes a 0-based
    // index. This jump is not patchable at runtime.
    if (reloc == Relocation::JITCODE)
        writeRelocation(src, reloc);
    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
}
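// Hypothetical call site (illustration only), modeled on how the x64
// assembler emits a call into another piece of JIT code: the call is emitted
// first, producing a JmpSrc, then registered so the target can be patched in
// when the code is finalized. `target` here stands for some JitCode*.
//
//   JmpSrc src = masm.call();
//   addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);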
void
Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
{
    if (!jumpRelocations_.length()) {
        // The jump relocation table starts with a fixed-width integer pointing
        // to the start of the extended jump table. But, we don't know the
        // actual extended jump table offset yet, so write a 0 which we'll
        // patch later.
        jumpRelocations_.writeFixedUint32_t(0);
    }
    if (reloc == Relocation::JITCODE) {
        jumpRelocations_.writeUnsigned(src.offset());
        jumpRelocations_.writeUnsigned(jumps_.length());
    }
}
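// A minimal sketch of how the table written above can be decoded, assuming a
// CompactBufferReader (jit/CompactBuffer.h) whose read calls mirror the
// writer used here. This is illustrative, not the decoder SpiderMonkey
// actually ships.
static void
decodeJumpRelocations(CompactBufferReader& reader)
{
    // Fixed-width header: the offset of the extended jump table, patched in
    // after assembly completes (written as 0 above).
    uint32_t extendedTableOffset = reader.readFixedUint32_t();
    (void)extendedTableOffset;

    // Followed by one (code offset, jump index) pair per JITCODE jump, in
    // emission order.
    while (reader.more()) {
        uint32_t codeOffset = reader.readUnsigned(); // offset of the jump instruction
        uint32_t jumpIndex = reader.readUnsigned();  // index into jumps_
        // A real consumer would use these to locate the jump and trace or
        // repatch its target; here they are only decoded.
        (void)codeOffset;
        (void)jumpIndex;
    }
}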