void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData *f4 = getSimdData(v);
    if (!f4)
        return;
    MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
    // Thread this use onto the constant's use list: the displacement field of
    // the movaps temporarily stores the offset of the previous use, and every
    // link is rewritten to the constant's real address when the pool is emitted.
    masm.movaps_mr(reinterpret_cast<const void *>(f4->uses.prev()), dest.code());
    f4->uses.setPrev(masm.size());
}
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
    // Emit a RIP-relative vmovaps and record its offset so the displacement can
    // be patched once the constant pool is laid out. Appending to the use list
    // can fail under OOM; propagateOOM records that failure on the assembler.
    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    propagateOOM(val->uses.append(CodeOffset(j.offset())));
}
void
MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
    if (maybeInlineInt32x4(v, dest))
        return;
    SimdData* i4 = getSimdData(v);
    if (!i4)
        return;
    MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
    // Same use-list threading as the Float32x4 case, but with an aligned
    // integer load: the displacement holds the previous use until patching.
    masm.vmovdqa_mr(reinterpret_cast<const void*>(i4->uses.prev()), dest.code());
    i4->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* f4 = getSimdData(v);
    if (!f4)
        return;
    MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
    // Emit the load with a dummy (null) absolute address and record the offset
    // just past the instruction; the real address is patched in at finish time.
    masm.vmovaps_mr(nullptr, dest.encoding());
    propagateOOM(f4->uses.append(CodeOffset(masm.size())));
}
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
{
    MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
    if (maybeInlineFloat32x4(v, dest))
        return;
    SimdData* val = getSimdData(v);
    if (!val)
        return;
    MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
    // Chain this RIP-relative load onto the constant's use list through the
    // jump-linking machinery: use() returns the previous use, and setNextJump
    // links the new load to it so every use can be patched during finish().
    JmpSrc j = masm.vmovaps_ripr(dest.encoding());
    JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
    masm.setNextJump(j, prev);
}
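// The variants above differ only in how each pending use of the SIMD constant
// is remembered until the constant pool is laid out. Below is a minimal,
// self-contained sketch of the two bookkeeping schemes; CodeBuffer,
// ThreadedUses, and VectorUses are hypothetical illustration types, not
// SpiderMonkey classes.
#include <cstdint>
#include <cstring>
#include <vector>

struct CodeBuffer {
    std::vector<uint8_t> bytes;
    size_t size() const { return bytes.size(); }
    // Emit a fake load whose final 4 bytes are a patchable displacement.
    void emitLoadWithDisp(uint32_t disp) {
        bytes.push_back(0x0F); // stand-in opcode byte
        for (int i = 0; i < 4; i++)
            bytes.push_back(uint8_t(disp >> (8 * i)));
    }
    // `end` is the offset just past an emitted load, as masm.size() is above.
    uint32_t readDisp(size_t end) const {
        uint32_t d;
        std::memcpy(&d, &bytes[end - 4], 4);
        return d;
    }
    void patchDisp(size_t end, uint32_t value) {
        std::memcpy(&bytes[end - 4], &value, 4);
    }
};

// Scheme 1 (the uses.prev()/setPrev() variants): a linked list of uses is
// threaded through the displacement bytes themselves; `prev` is the offset
// just past the most recent use, with 0 meaning "no uses yet".
struct ThreadedUses {
    uint32_t prev = 0;
    void addUse(CodeBuffer& buf) {
        buf.emitLoadWithDisp(prev);  // displacement links to the previous use
        prev = uint32_t(buf.size());
    }
    void patchAll(CodeBuffer& buf, uint32_t constantAddr) {
        uint32_t use = prev;
        while (use != 0) {           // walk the chain, rewriting each link
            uint32_t next = buf.readDisp(use);
            buf.patchDisp(use, constantAddr);
            use = next;
        }
    }
};

// Scheme 2 (the uses.append(CodeOffset(...)) variants): each use's offset is
// simply appended to a vector, and all of them are patched at finish time.
struct VectorUses {
    std::vector<uint32_t> offsets;
    void addUse(CodeBuffer& buf) {
        buf.emitLoadWithDisp(0);     // dummy displacement, patched later
        offsets.push_back(uint32_t(buf.size()));
    }
    void patchAll(CodeBuffer& buf, uint32_t constantAddr) {
        for (uint32_t use : offsets)
            buf.patchDisp(use, constantAddr);
    }
};

int main() {
    CodeBuffer buf;
    ThreadedUses uses;
    uses.addUse(buf);
    uses.addUse(buf);
    uses.patchAll(buf, 0x1000);      // both displacements now hold 0x1000
    return buf.readDisp(buf.size()) == 0x1000 ? 0 : 1;
}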