// Emit "vaddsd [pool slot for d], dest, dest", folding the constant add into
// the instruction. Uses a linked-list patching scheme: the memory operand is
// temporarily the offset of the previous use of this constant, and the new
// use offset is recorded so the chain can be patched with the real pool
// address later (presumably during finish() — confirm against the emitter).
void MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest) {
  // getDouble() returns the pool entry for d, or nullptr on OOM (already
  // recorded elsewhere) — in that case silently bail out.
  Double* dbl = getDouble(d);
  if (!dbl) return;
  // NOTE(review): the "address" here is not a real pointer — it is the
  // previous-use offset smuggled through the displacement field.
  masm.vaddsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()),
                 dest.encoding(), dest.encoding());
  // Link this instruction into the use chain: remember the buffer offset
  // just past the emitted instruction.
  dbl->uses.setPrev(masm.size());
}
// Float32 counterpart of addConstantDouble: emit
// "vaddss [pool slot for f], dest, dest" and thread this use onto the
// constant's linked patch chain (displacement field holds the previous use
// offset until the pool address is patched in).
void MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest) {
  // Pool entry for f; nullptr means OOM was already flagged — bail quietly.
  Float* flt = getFloat(f);
  if (!flt) return;
  // Previous-use offset reinterpreted as the memory operand; patched later.
  masm.vaddss_mr(reinterpret_cast<const void*>(flt->uses.prev()),
                 dest.encoding(), dest.encoding());
  // Record this use's end-of-instruction offset as the new chain head.
  flt->uses.setPrev(masm.size());
}
// Load the double constant |d| into |dest|, deduplicating constants through
// doubleMap_ and emitting a RIP-relative vmovsd whose target is patched once
// the constant pool is laid out. OOM is accumulated in enoughMemory_ rather
// than reported immediately.
void MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest) {
  // Cheap special cases (e.g. materializing via xorpd) need no pool entry.
  if (maybeInlineDouble(d, dest)) return;
  // Lazily initialize the dedup map; on failure just record the OOM.
  if (!doubleMap_.initialized()) {
    enoughMemory_ &= doubleMap_.init();
    if (!enoughMemory_) return;
  }
  size_t doubleIndex;
  if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
    // Constant already pooled — reuse its slot.
    doubleIndex = p->value();
  } else {
    // New constant: append to the pool vector and register it in the map.
    doubleIndex = doubles_.length();
    enoughMemory_ &= doubles_.append(Double(d));
    enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
    if (!enoughMemory_) return;
  }
  Double& dbl = doubles_[doubleIndex];
  MOZ_ASSERT(!dbl.uses.bound());
  // The constants will be stored in a pool appended to the text (see
  // finish()), so they will always be a fixed distance from the
  // instructions which reference them. This allows the instructions to use
  // PC-relative addressing. Use "jump" label support code, because we need
  // the same PC-relative address patching that jumps use.
  JmpSrc j = masm.vmovsd_ripr(dest.encoding());
  // Chain this use onto the constant's pending-patch list, exactly like a
  // forward jump: uses.use() returns the previous head, setNextJump links.
  JmpSrc prev = JmpSrc(dbl.uses.use(j.offset()));
  masm.setNextJump(j, prev);
}
// Float32 counterpart of loadConstantDouble: deduplicate |f| through
// floatMap_, emit a RIP-relative vmovss, and thread the use onto the
// constant's jump-style patch chain. OOM accumulates in enoughMemory_.
void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
  // Fast path: constants materializable inline need no pool entry.
  if (maybeInlineFloat(f, dest)) return;
  // Lazily initialize the dedup map; record OOM and bail on failure.
  if (!floatMap_.initialized()) {
    enoughMemory_ &= floatMap_.init();
    if (!enoughMemory_) return;
  }
  size_t floatIndex;
  if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
    // Already pooled — reuse the existing slot index.
    floatIndex = p->value();
  } else {
    // First use of this constant: append to pool and register in the map.
    floatIndex = floats_.length();
    enoughMemory_ &= floats_.append(Float(f));
    enoughMemory_ &= floatMap_.add(p, f, floatIndex);
    if (!enoughMemory_) return;
  }
  Float& flt = floats_[floatIndex];
  MOZ_ASSERT(!flt.uses.bound());
  // See comment in loadConstantDouble
  JmpSrc j = masm.vmovss_ripr(dest.encoding());
  // Link this use into the constant's patch chain (jump-label machinery).
  JmpSrc prev = JmpSrc(flt.uses.use(j.offset()));
  masm.setNextJump(j, prev);
}
// Convert the unsigned 64-bit integer in |src| (a high/low 32-bit register
// pair on x86) to a double in |dest|. Two strategies:
//  - without SSE3: double(high) * 2^32 + double(low), via the
//    TO_DOUBLE_HIGH_SCALE constant loaded through |temp|;
//  - with SSE3: the classic punpckldq/subpd/haddpd bit-trick documented
//    step by step below, which avoids any int64 arithmetic.
// |temp| is clobbered as an address register for the constant tables.
void MacroAssemblerX86::convertUInt64ToDouble(Register64 src, Register temp,
                                              FloatRegister dest) {
  // SUBPD needs SSE2, HADDPD needs SSE3.
  if (!HasSSE3()) {
    // dest = double(src.high) * 2^32 + double(src.low), using the scratch
    // double register for the scale constant and the low part.
    convertUInt32ToDouble(src.high, dest);
    movePtr(ImmPtr(&TO_DOUBLE_HIGH_SCALE), temp);
    loadDouble(Address(temp, 0), ScratchDoubleReg);
    asMasm().mulDouble(ScratchDoubleReg, dest);
    convertUInt32ToDouble(src.low, ScratchDoubleReg);
    asMasm().addDouble(ScratchDoubleReg, dest);
    return;
  }

  // Following operation uses entire 128-bit of dest XMM register.
  // Currently higher 64-bit is free when we have access to lower 64-bit.
  MOZ_ASSERT(dest.size() == 8);
  FloatRegister dest128 =
      FloatRegister(dest.encoding(), FloatRegisters::Simd128);

  // Assume that src is represented as following:
  //   src      = 0x HHHHHHHH LLLLLLLL

  // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
  //   dest     = 0x 00000000 00000000 00000000 LLLLLLLL
  //   scratch  = 0x 00000000 00000000 00000000 HHHHHHHH
  vmovd(src.low, dest128);
  vmovd(src.high, ScratchSimd128Reg);

  // Unpack and interleave dest and scratch to dest:
  //   dest     = 0x 00000000 00000000 HHHHHHHH LLLLLLLL
  vpunpckldq(ScratchSimd128Reg, dest128, dest128);

  // Unpack and interleave dest and a constant C1 to dest:
  //   C1       = 0x 00000000 00000000 45300000 43300000
  //   dest     = 0x 45300000 HHHHHHHH 43300000 LLLLLLLL
  // here, each 64-bit part of dest represents following double:
  //   HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
  //   LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
  movePtr(ImmPtr(TO_DOUBLE), temp);
  vpunpckldq(Operand(temp, 0), dest128, dest128);

  // Subtract a constant C2 from dest, for each 64-bit part:
  //   C2       = 0x 45300000 00000000 43300000 00000000
  // here, each 64-bit part of C2 represents following double:
  //   HI(C2)   = 0x 1.0000000000000 * 2**84 == 2**84
  //   LO(C2)   = 0x 1.0000000000000 * 2**52 == 2**52
  // after the operation each 64-bit part of dest represents following:
  //   HI(dest) = double(0x HHHHHHHH 00000000)
  //   LO(dest) = double(0x 00000000 LLLLLLLL)
  vsubpd(Operand(temp, sizeof(uint64_t) * 2), dest128, dest128);

  // Add HI(dest) and LO(dest) in double and store it into LO(dest),
  //   LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
  //            = double(0x HHHHHHHH LLLLLLLL)
  //            = double(src)
  vhaddpd(dest128, dest128);
}
// Load the SIMD128 float constant |v| into |dest| with a RIP-relative
// vmovaps. The use offset is appended to the constant's use list so the
// displacement can be patched once the constant pool is placed; append
// failure is routed through propagateOOM.
void MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant&v, FloatRegister dest) {
  // Constants synthesizable inline (e.g. all-zero) skip the pool.
  if (maybeInlineSimd128Float(v, dest)) return;
  // Pool entry for v; nullptr signals OOM already recorded.
  SimdData* val = getSimdData(v);
  if (!val) return;
  // Emit the load; j.offset() identifies the displacement to patch later.
  JmpSrc j = masm.vmovaps_ripr(dest.encoding());
  propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
// Load the float constant |f| into |dest| via a vmovss from the constant
// pool. The address operand is emitted as nullptr and the instruction's end
// offset is recorded so the real pool address can be patched in later.
void MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest) {
  // Inline-materializable constants need no pool entry.
  if (maybeInlineFloat(f, dest)) return;
  // Pool entry for f; nullptr means OOM already flagged — bail quietly.
  Float* flt = getFloat(f);
  if (!flt) return;
  // Placeholder address; masm.size() right after emission marks this use
  // for patching. Order matters: size() must be read after vmovss_mr.
  masm.vmovss_mr(nullptr, dest.encoding());
  propagateOOM(flt->uses.append(CodeOffset(masm.size())));
}
// Load the double constant |d| into |dest| via a vmovsd from the constant
// pool. Emitted with a nullptr placeholder address; the recorded offset is
// patched with the pool address later.
void MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest) {
  // Fast path for constants that can be synthesized without memory.
  if (maybeInlineDouble(d, dest)) return;
  // Pool entry for d; nullptr signals an already-recorded OOM.
  Double* dbl = getDouble(d);
  if (!dbl) return;
  // Placeholder load; capture masm.size() immediately after emission so the
  // patcher knows where this use's displacement ends.
  masm.vmovsd_mr(nullptr, dest.encoding());
  propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
}
// Load the SIMD128 float constant |v| into |dest| via vmovaps from the
// constant pool (nullptr placeholder address, patched later using the
// recorded use offset).
void MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest) {
  // Inline-materializable SIMD constants skip the pool entirely.
  if (maybeInlineSimd128Float(v, dest)) return;
  // Pool entry; nullptr means OOM was already flagged.
  SimdData* f4 = getSimdData(v);
  if (!f4) return;
  // Placeholder address; masm.size() after emission marks the patch point.
  masm.vmovaps_mr(nullptr, dest.encoding());
  propagateOOM(f4->uses.append(CodeOffset(masm.size())));
}
// Load the float constant |f| into |dest| with a RIP-relative vmovss; the
// use offset is appended to the pool entry's use list for later patching.
void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
  // Constants materializable inline need no pool slot.
  if (maybeInlineFloat(f, dest)) return;
  // Pool entry; nullptr signals an already-recorded OOM.
  Float* flt = getFloat(f);
  if (!flt) return;
  // See comment in loadConstantDouble
  JmpSrc j = masm.vmovss_ripr(dest.encoding());
  // Record this use's displacement offset; append failure goes to OOM state.
  propagateOOM(flt->uses.append(CodeOffset(j.offset())));
}
// Load the Float32x4 SIMD constant |v| into |dest| via vmovaps from the
// constant pool (nullptr placeholder, use offset recorded for patching).
void MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest) {
  MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
  // Fast path for inline-materializable vectors.
  if (maybeInlineFloat32x4(v, dest)) return;
  // Pool entry; nullptr means OOM already flagged.
  SimdData* f4 = getSimdData(v);
  if (!f4) return;
  MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
  // Placeholder address; masm.size() after emission is the patch marker.
  masm.vmovaps_mr(nullptr, dest.encoding());
  propagateOOM(f4->uses.append(CodeOffset(masm.size())));
}
// Load the Float32x4 SIMD constant |v| into |dest| using the linked-list
// patching scheme: the displacement temporarily carries the previous use's
// offset, and this use's offset becomes the new chain head.
void MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest) {
  MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
  // Fast path for inline-materializable vectors.
  if (maybeInlineFloat32x4(v, dest)) return;
  // Pool entry; nullptr means OOM already flagged.
  SimdData* f4 = getSimdData(v);
  if (!f4) return;
  MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
  // NOTE(review): not a real pointer — the previous-use offset is smuggled
  // through the address operand until the pool address is patched in.
  masm.vmovaps_mr(reinterpret_cast<const void*>(f4->uses.prev()),
                  dest.encoding());
  // Record this use's end-of-instruction offset as the new chain head.
  f4->uses.setPrev(masm.size());
}
// Load the Float32x4 SIMD constant |v| into |dest| with a RIP-relative
// vmovaps; the use offset is appended to the pool entry for later patching.
void MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest) {
  MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
  // Fast path for inline-materializable vectors.
  if (maybeInlineFloat32x4(v, dest)) return;
  // Pool entry; nullptr signals an already-recorded OOM.
  SimdData* val = getSimdData(v);
  if (!val) return;
  MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
  // j.offset() identifies the RIP-relative displacement to patch later.
  JmpSrc j = masm.vmovaps_ripr(dest.encoding());
  propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
// Load the float constant |f| into |dest| with a RIP-relative vmovss,
// chaining the use through the jump-label patch machinery (uses.use()
// returns the previous chain head; setNextJump links this use to it).
void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
  // Constants materializable inline need no pool slot.
  if (maybeInlineFloat(f, dest)) return;
  // Pool entry; nullptr signals an already-recorded OOM.
  Float* flt = getFloat(f);
  if (!flt) return;
  // See comment in loadConstantDouble
  JmpSrc j = masm.vmovss_ripr(dest.encoding());
  JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
  masm.setNextJump(j, prev);
}
// Load the Float32x4 SIMD constant |v| into |dest| with a RIP-relative
// vmovaps, chaining this use through the jump-label patch machinery so the
// displacement is fixed up when the constant pool is placed.
void MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest) {
  MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
  // Fast path for inline-materializable vectors.
  if (maybeInlineFloat32x4(v, dest)) return;
  // Pool entry; nullptr signals an already-recorded OOM.
  SimdData* val = getSimdData(v);
  if (!val) return;
  MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
  JmpSrc j = masm.vmovaps_ripr(dest.encoding());
  // uses.use() returns the previous chain head; link this use behind it.
  JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
  masm.setNextJump(j, prev);
}
// Load the double constant |d| into |dest| with a RIP-relative vmovsd; the
// use offset is appended to the pool entry's use list for later patching.
void MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest) {
  // Fast path for constants synthesizable without a memory load.
  if (maybeInlineDouble(d, dest)) return;
  // Pool entry; nullptr signals an already-recorded OOM.
  Double* dbl = getDouble(d);
  if (!dbl) return;
  // The constants will be stored in a pool appended to the text (see
  // finish()), so they will always be a fixed distance from the
  // instructions which reference them. This allows the instructions to use
  // PC-relative addressing. Use "jump" label support code, because we need
  // the same PC-relative address patching that jumps use.
  JmpSrc j = masm.vmovsd_ripr(dest.encoding());
  propagateOOM(dbl->uses.append(CodeOffset(j.offset())));
}
// Populate the VMReg slot-name table: walk every GPR, then every FPR, then
// fill any remaining slots with a placeholder. Slot counts per register are
// dictated by ConcreteRegisterImpl (not visible here).
void VMRegImpl::set_regName() {
  Register reg = ::as_Register(0);
  int i;
  // Each GPR occupies two consecutive VMReg slots — both get the same name.
  for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) {
    regName[i++ ] = reg->name();
    regName[i++ ] = reg->name();
    reg = reg->successor();
  }
  FloatRegister freg = ::as_FloatRegister(0);
  // FPRs get one slot each, except registers with encoding > 31 which get
  // two — presumably wider registers spanning two slots; confirm against
  // ConcreteRegisterImpl's layout for this platform.
  for ( ; i < ConcreteRegisterImpl::max_fpr ; ) {
    regName[i++] = freg->name();
    if (freg->encoding() > 31) {
      regName[i++] = freg->name();
    }
    freg = freg->successor();
  }
  // Remaining slots (flags/special registers, if any) get a placeholder.
  for ( ; i < ConcreteRegisterImpl::number_of_registers ; i ++ ) {
    regName[i] = "NON-GPR-FPR";
  }
}