void
SnapshotWriter::addFloat32Slot(const FloatRegister &reg)
{
    JS_ASSERT(reg.code() < MIN_REG_FIELD_ESC);
    IonSpew(IonSpew_Snapshots, " slot %u: float32 (reg %s)", slotsWritten_, reg.name());
    writeSlotHeader(JSVAL_TYPE_NULL, ESC_REG_FIELD_FLOAT32_REG);
    writer_.writeUnsigned(reg.code());
}
void
SnapshotWriter::addSlot(const FloatRegister &reg)
{
    JS_ASSERT(reg.code() < MIN_REG_FIELD_ESC);
    IonSpew(IonSpew_Snapshots, " slot %u: double (reg %s)", slotsWritten_, reg.name());
    writeSlotHeader(JSVAL_TYPE_DOUBLE, reg.code());
}
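// The two snapshot writers above rely on an escape-code scheme: a double
// register's code is packed straight into the slot header's register field,
// while a float32 register is flagged with ESC_REG_FIELD_FLOAT32_REG (a value
// at or above MIN_REG_FIELD_ESC, hence the asserts) and its real code follows
// as a separate unsigned. The sketch below only illustrates that idea; the
// constant values, the Writer type, and the omission of the JSVAL type tag
// are assumptions, not the real snapshot buffer layout.
#include <cassert>
#include <cstdint>
#include <vector>

namespace sketch {

static const uint32_t MIN_REG_FIELD_ESC = 30;          // hypothetical threshold
static const uint32_t ESC_REG_FIELD_FLOAT32_REG = 31;  // hypothetical escape code

struct Writer {
    std::vector<uint32_t> words;
    void writeUnsigned(uint32_t v) { words.push_back(v); }
};

// Double register: the code fits in the header's register field directly.
void writeDoubleRegSlot(Writer &w, uint32_t regCode) {
    assert(regCode < MIN_REG_FIELD_ESC);
    w.writeUnsigned(regCode);
}

// Float32 register: the header carries the escape code, the register index follows.
void writeFloat32RegSlot(Writer &w, uint32_t regCode) {
    assert(regCode < MIN_REG_FIELD_ESC);
    w.writeUnsigned(ESC_REG_FIELD_FLOAT32_REG);
    w.writeUnsigned(regCode);
}

} // namespace sketch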
void
MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
{
    Double* dbl = getDouble(d);
    if (!dbl)
        return;
    masm.vaddsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.code(), dest.code());
    dbl->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
{
    Float* flt = getFloat(f);
    if (!flt)
        return;
    masm.vaddss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.code(), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantDouble(double d, const FloatRegister &dest)
{
    if (maybeInlineDouble(d, dest))
        return;

    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return;
    }

    size_t doubleIndex;
    DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
    if (p) {
        doubleIndex = p->value;
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return;
    }

    Double &dbl = doubles_[doubleIndex];
    JS_ASSERT(!dbl.uses.bound());

    masm.movsd_mr(reinterpret_cast<const void *>(dbl.uses.prev()), dest.code());
    dbl.uses.setPrev(masm.size());
}
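// The map-based loaders (loadConstantDouble/loadConstantFloat32) deduplicate
// constants: the first time a value is seen it is appended to doubles_/floats_
// and its pool index is recorded in a lazily initialised hash map; later
// requests reuse the same entry, so every distinct constant occupies exactly
// one slot in the pool. A minimal stand-alone sketch of that pooling idea,
// using std::unordered_map instead of js::HashMap and ignoring out-of-memory
// propagation (the class and method names are illustrative only):
#include <cstddef>
#include <unordered_map>
#include <vector>

namespace sketch {

class DoublePool {
  public:
    // Return the pool index for d, appending it only if it has not been seen.
    size_t indexOf(double d) {
        auto p = map_.find(d);
        if (p != map_.end())
            return p->second;
        size_t index = doubles_.size();
        doubles_.push_back(d);
        map_.emplace(d, index);
        return index;
    }

    const std::vector<double> &entries() const { return doubles_; }

  private:
    std::vector<double> doubles_;
    std::unordered_map<double, size_t> map_;
};

} // namespace sketch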
void
MacroAssemblerX64::loadConstantFloat32(float f, const FloatRegister &dest)
{
    if (maybeInlineFloat(f, dest))
        return;

    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return;
    }

    size_t floatIndex;
    if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
        floatIndex = p->value();
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return;
    }

    Float &flt = floats_[floatIndex];
    JS_ASSERT(!flt.uses.bound());

    // See comment in loadConstantDouble.
    JmpSrc j = masm.movss_ripr(dest.code());
    JmpSrc prev = JmpSrc(flt.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
void
MacroAssemblerX64::loadConstantDouble(double d, const FloatRegister &dest)
{
    if (maybeInlineDouble(d, dest))
        return;

    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return;
    }

    size_t doubleIndex;
    if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
        doubleIndex = p->value();
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return;
    }

    Double &dbl = doubles_[doubleIndex];
    JS_ASSERT(!dbl.uses.bound());

    // The constants will be stored in a pool appended to the text (see
    // finish()), so they will always be a fixed distance from the
    // instructions which reference them. This allows the instructions to use
    // PC-relative addressing. Use "jump" label support code, because we need
    // the same PC-relative address patching that jumps use.
    JmpSrc j = masm.movsd_ripr(dest.code());
    JmpSrc prev = JmpSrc(dbl.uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
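// On x64 the constants end up in a pool appended after the code, so each
// movsd_ripr/movss_ripr above can address its constant RIP-relatively: the
// 32-bit displacement is measured from the end of the loading instruction
// (i.e. the address of the next instruction) to the constant's slot in the
// pool. A minimal sketch of that patch computation over byte offsets in a
// finished code buffer; the function and variable names are illustrative,
// not the real finish()/patching code:
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

namespace sketch {

// Patch a RIP-relative disp32 located at dispOffset (the last 4 bytes of the
// loading instruction) so that it points at constantOffset in the same buffer.
inline void patchRipRelative(std::vector<uint8_t> &code,
                             size_t dispOffset, size_t constantOffset) {
    size_t instructionEnd = dispOffset + 4;           // RIP points past the instruction
    int64_t disp = int64_t(constantOffset) - int64_t(instructionEnd);
    assert(disp >= INT32_MIN && disp <= INT32_MAX);   // must fit in disp32
    int32_t disp32 = int32_t(disp);
    std::memcpy(&code[dispOffset], &disp32, sizeof(disp32));
}

} // namespace sketch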
void
MacroAssemblerX86::loadConstantFloat32(float f, const FloatRegister &dest)
{
    // Unlike loadConstantDouble, this has no maybeInlineFloat fast path;
    // it might be worth adding one in the future.
    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return;
    }

    size_t floatIndex;
    FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
    if (p) {
        floatIndex = p->value;
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return;
    }

    Float &flt = floats_[floatIndex];
    JS_ASSERT(!flt.uses.bound());

    masm.movss_mr(reinterpret_cast<const void *>(flt.uses.prev()), dest.code());
    flt.uses.setPrev(masm.size());
}
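// The 32-bit x86 loaders cannot use RIP-relative addressing, so each
// instruction that references a not-yet-emitted constant embeds the offset of
// the previous use in its absolute-address field (uses.prev()) and then
// records its own end-of-instruction offset (uses.setPrev(masm.size())).
// That threads a singly linked list of patch sites through the emitted code;
// once the constant pool's final address is known (presumably in finish()),
// the chain can be walked and every placeholder replaced with the real
// address. The sketch below only demonstrates the chaining/patching idea with
// plain byte offsets; the names and the terminator value are assumptions.
#include <cstdint>
#include <cstring>
#include <vector>

namespace sketch {

static const uint32_t kChainEnd = 0;  // hypothetical "no previous use" marker

struct UseChain {
    uint32_t last = kChainEnd;  // offset just past the most recent use, or kChainEnd

    // Called when emitting an instruction: returns the placeholder to embed
    // (the previous link) and remembers this use as the new head of the chain.
    uint32_t addUse(uint32_t endOfInstructionOffset) {
        uint32_t prev = last;
        last = endOfInstructionOffset;
        return prev;
    }

    // Walk the chain backwards and write the constant's absolute address into
    // the 4-byte field that ends at each recorded offset.
    void patchAll(std::vector<uint8_t> &code, uint32_t constantAddress) const {
        uint32_t use = last;
        while (use != kChainEnd) {
            uint32_t fieldOffset = use - 4;              // field is the last 4 bytes
            uint32_t next;
            std::memcpy(&next, &code[fieldOffset], 4);   // read the previous link
            std::memcpy(&code[fieldOffset], &constantAddress, 4);
            use = next;
        }
    }
};

} // namespace sketch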
void
MacroAssemblerMIPS::addConstantDouble(double d, const FloatRegister &dest)
{
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    // masm.addsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    // Needs to be modified for MIPS (wangqing).
    mcss.loadDouble(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}
void
MacroAssemblerMIPS::addConstantFloat32(float f, const FloatRegister &dest)
{
    Float *flt = getFloat(f);
    if (!flt)
        return;
    // masm.addss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    // Needs to be modified for MIPS (wangqing).
    mcss.loadFloat(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float *flt = getFloat(f);
    if (!flt)
        return;
    masm.movss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
{
    if (maybeInlineDouble(d, dest))
        return;
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    masm.movsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData *f4 = getSimdData(v);
    if (!f4)
        return;
    MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
    masm.movaps_mr(reinterpret_cast<const void *>(f4->uses.prev()), dest.code());
    f4->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
    if (maybeInlineInt32x4(v, dest))
        return;
    SimdData* i4 = getSimdData(v);
    if (!i4)
        return;
    MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
    masm.vmovdqa_mr(reinterpret_cast<const void*>(i4->uses.prev()), dest.code());
    i4->uses.setPrev(masm.size());
}