// Emit a single-precision (32-bit) float move between any combination of
// FPU register, general register (MIPS O32 float argument passing), and
// stack memory.
void
MoveEmitterMIPS::emitFloat32Move(const MoveOperand &from, const MoveOperand &to)
{
    // Ensure that we can use ScratchFloatReg in memory move.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveFloat32(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // This should only be used when passing float parameter in a1,a2,a3
            MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
            // mfc1 copies the raw 32-bit pattern from the FPU register.
            masm.as_mfc1(to.reg(), from.floatReg());
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        // This should only be used when passing float parameter in a1,a2,a3
        MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
        // An integer load moves the float's raw bits into the GPR
        // (loadPtr is a 4-byte load on MIPS32, matching float width).
        masm.loadPtr(getAdjustedAddress(from), to.reg());
    } else {
        // Memory-to-memory: stage the value through the scratch FPU register.
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadFloat32(getAdjustedAddress(from), ScratchFloatReg);
        masm.storeFloat32(ScratchFloatReg, getAdjustedAddress(to));
    }
}
void MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to) { // Register pairs are used to store Double values during calls. MOZ_ASSERT(!from.isGeneralRegPair()); MOZ_ASSERT(!to.isGeneralRegPair()); if (from.isFloatReg()) { if (to.isFloatReg()) masm.ma_vmov_f32(from.floatReg(), to.floatReg()); else if (to.isGeneralReg()) masm.ma_vxfer(from.floatReg(), to.reg()); else masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to)); } else if (from.isGeneralReg()) { if (to.isFloatReg()) masm.ma_vxfer(from.reg(), to.floatReg()); else if (to.isGeneralReg()) masm.ma_mov(from.reg(), to.reg()); else masm.ma_str(from.reg(), toAddress(to)); } else if (to.isFloatReg()) { masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay()); } else if (to.isGeneralReg()) { masm.ma_ldr(toAddress(from), to.reg()); } else { // Memory to memory move. MOZ_ASSERT(from.isMemory()); FloatRegister reg = ScratchFloat32Reg; masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay()); masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to)); } }
// Emit a 64-bit double move between FPU registers, the a2/a3 GPR pair
// (MIPS O32 double argument passing), and stack memory.
void
MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    // Ensure that we can use ScratchDoubleReg in memory move.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // Used for passing double parameter in a2,a3 register pair.
            // Two moves are added for one double parameter by
            // MacroAssemblerMIPSCompat::passABIArg
            if (to.reg() == a2)
                masm.moveFromDoubleLo(from.floatReg(), a2);  // low 32 bits
            else if (to.reg() == a3)
                masm.moveFromDoubleHi(from.floatReg(), a3);  // high 32 bits
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadDouble(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        // Used for passing double parameter in a2,a3 register pair.
        // Two moves are added for one double parameter by
        // MacroAssemblerMIPSCompat::passABIArg
        if (from.isMemory()) {
            // Each GPR receives one 32-bit half of the double; the a3 half
            // is loaded from offset +4.
            if (to.reg() == a2)
                masm.loadPtr(getAdjustedAddress(from), a2);
            else if (to.reg() == a3)
                masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        } else {
            // Used for moving a double parameter from the same source. See Bug 1123874.
            if (to.reg() == a2 || to.reg() == a3)
                masm.ma_move(to.reg(), from.reg());
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        }
    } else {
        // Memory-to-memory: stage through the scratch double register.
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
        masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
    }
}
void MoveEmitterX86::emitDoubleMove(const MoveOperand &from, const MoveOperand &to) { if (from.isFloatReg()) { masm.movsd(from.floatReg(), toOperand(to)); } else if (to.isFloatReg()) { masm.movsd(toOperand(from), to.floatReg()); } else { // Memory to memory float move. JS_ASSERT(from.isMemory()); masm.movsd(toOperand(from), ScratchFloatReg); masm.movsd(ScratchFloatReg, toOperand(to)); } }
// Emit a 64-bit double move between VFP double registers, even/odd core
// register pairs (ARM ABI double passing), and stack memory.
void
MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    // Registers are used to store pointers / int32 / float32 values.
    MOZ_ASSERT(!from.isGeneralReg());
    MOZ_ASSERT(!to.isGeneralReg());

    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.ma_vmov(from.floatReg(), to.floatReg());
        else if (to.isGeneralRegPair())
            // Transfer the double into an even/odd core register pair.
            masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
        else
            masm.ma_vstr(from.floatReg(), toAddress(to));
    } else if (from.isGeneralRegPair()) {
        if (to.isFloatReg())
            masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
        else if (to.isGeneralRegPair()) {
            // Pair-to-pair: an overlap would clobber one half mid-move.
            MOZ_ASSERT(!from.aliases(to));
            masm.ma_mov(from.evenReg(), to.evenReg());
            masm.ma_mov(from.oddReg(), to.oddReg());
        } else {
            // Pair-to-memory: stage the value through the scratch double.
            FloatRegister reg = ScratchDoubleReg;
            masm.ma_vxfer(from.evenReg(), from.oddReg(), reg);
            masm.ma_vstr(reg, toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.ma_vldr(toAddress(from), to.floatReg());
    } else if (to.isGeneralRegPair()) {
        MOZ_ASSERT(from.isMemory());
        Address src = toAddress(from);
        // Note: We can safely use the MoveOperand's displacement here,
        // even if the base is SP: MoveEmitter::toOperand adjusts
        // SP-relative operands by the difference between the current
        // stack usage and stackAdjust, which emitter.finish() resets to
        // 0.
        //
        // Warning: if the offset isn't within [-255,+255] then this
        // will assert-fail (or, if non-debug, load the wrong words).
        // Nothing uses such an offset at the time of this writing.
        masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(), to.oddReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchDoubleScope scratch(masm);
        masm.ma_vldr(toAddress(from), scratch);
        masm.ma_vstr(scratch, toAddress(to));
    }
}
// Record a move of |from| into the next x64 ABI argument location (register
// if one is free, otherwise the outgoing stack area). The move itself is
// deferred to the MoveResolver; argument counters are advanced here.
void
MacroAssemblerX64::passABIArg(const MoveOperand &from)
{
    MoveOperand to;
    if (from.isDouble()) {
        FloatRegister dest;
        // Note: passedFloatArgs_ is post-incremented so each call claims
        // the next float argument slot.
        if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &dest)) {
            if (from.isFloatReg() && from.floatReg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            // Out of float argument registers: spill to the outgoing stack
            // area and grow the recorded call frame size.
            to = MoveOperand(StackPointer, stackForCall_);
            stackForCall_ += sizeof(double);
        }
        // addMove can fail under OOM; record its success in enoughMemory_.
        enoughMemory_ = moveResolver_.addMove(from, to, Move::DOUBLE);
    } else {
        Register dest;
        if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
            if (from.isGeneralReg() && from.reg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            // Stack slots for general arguments are 8 bytes on x64.
            to = MoveOperand(StackPointer, stackForCall_);
            stackForCall_ += sizeof(int64_t);
        }
        enoughMemory_ = moveResolver_.addMove(from, to, Move::GENERAL);
    }
}
void MoveEmitterX86::emitDoubleMove(const MoveOperand &from, const MoveOperand &to) { if (from.isFloatReg()) { if (to.isFloatReg()) masm.moveDouble(from.floatReg(), to.floatReg()); else masm.storeDouble(from.floatReg(), toAddress(to)); } else if (to.isFloatReg()) { masm.loadDouble(toAddress(from), to.floatReg()); } else { // Memory to memory move. JS_ASSERT(from.isMemory()); masm.loadDouble(toAddress(from), ScratchFloatReg); masm.storeDouble(ScratchFloatReg, toAddress(to)); } }
void MoveEmitterX86::emitFloat32X4Move(const MoveOperand &from, const MoveOperand &to) { if (from.isFloatReg()) { if (to.isFloatReg()) masm.moveFloat32x4(from.floatReg(), to.floatReg()); else masm.storeAlignedFloat32x4(from.floatReg(), toAddress(to)); } else if (to.isFloatReg()) { masm.loadAlignedFloat32x4(toAddress(from), to.floatReg()); } else { // Memory to memory move. MOZ_ASSERT(from.isMemory()); masm.loadAlignedFloat32x4(toAddress(from), ScratchSimdReg); masm.storeAlignedFloat32x4(ScratchSimdReg, toAddress(to)); } }
// Emit a 64-bit double move between FPU registers, the a2/a3 GPR pair
// (MIPS O32 double argument passing), and stack memory.
void
MoveEmitterMIPS::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
    // Ensure that we can use ScratchFloatReg in memory move.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // Used for passing double parameter in a2,a3 register pair.
            // Two moves are added for one double parameter by
            // MacroAssemblerMIPSCompat::passABIArg
            // as_mfc1 reads the even half of the pair; as_mfc1_Odd the odd
            // half (presumably low/high words respectively — per the MIPS
            // FPU register pairing).
            if (to.reg() == a2)
                masm.as_mfc1(a2, from.floatReg());
            else if (to.reg() == a3)
                masm.as_mfc1_Odd(a3, from.floatReg());
            else
                MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadDouble(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        // Used for passing double parameter in a2,a3 register pair.
        // Two moves are added for one double parameter by
        // MacroAssemblerMIPSCompat::passABIArg
        // Each GPR receives one 32-bit half; the a3 half is loaded from
        // offset +4.
        if (to.reg() == a2)
            masm.loadPtr(getAdjustedAddress(from), a2);
        else if (to.reg() == a3)
            masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
        else
            MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
    } else {
        // Memory-to-memory: stage through the scratch FPU register.
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadDouble(getAdjustedAddress(from), ScratchFloatReg);
        masm.storeDouble(ScratchFloatReg, getAdjustedAddress(to));
    }
}
void MoveEmitterARM::emitDoubleMove(const MoveOperand &from, const MoveOperand &to) { if (from.isFloatReg()) { if (to.isFloatReg()) masm.ma_vmov(from.floatReg(), to.floatReg()); else masm.ma_vstr(from.floatReg(), toOperand(to, true)); } else if (to.isFloatReg()) { masm.ma_vldr(toOperand(from, true), to.floatReg()); } else { // Memory to memory float move. JS_ASSERT(from.isMemory()); FloatRegister reg = ScratchFloatReg; masm.ma_vldr(toOperand(from, true), reg); masm.ma_vstr(reg, toOperand(to, true)); } }
void MoveEmitterX86::emitFloat32Move(const MoveOperand& from, const MoveOperand& to) { MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSingle()); MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSingle()); if (from.isFloatReg()) { if (to.isFloatReg()) masm.moveFloat32(from.floatReg(), to.floatReg()); else masm.storeFloat32(from.floatReg(), toAddress(to)); } else if (to.isFloatReg()) { masm.loadFloat32(toAddress(from), to.floatReg()); } else { // Memory to memory move. MOZ_ASSERT(from.isMemory()); masm.loadFloat32(toAddress(from), ScratchFloat32Reg); masm.storeFloat32(ScratchFloat32Reg, toAddress(to)); } }
// Warning, do not use the resulting operand with pop instructions, since they // compute the effective destination address after altering the stack pointer. // Use toPopOperand if an Operand is needed for a pop. Operand MoveEmitterMIPS::toOperand(const MoveOperand &operand) const { if (operand.isMemory() || operand.isEffectiveAddress() || operand.isFloatAddress()) return Operand(toAddress(operand)); if (operand.isGeneralReg()) return Operand(operand.reg()); JS_ASSERT(operand.isFloatReg()); return Operand(operand.floatReg()); }
// Warning, do not use the resulting operand with pop instructions, since they // compute the effective destination address after altering the stack pointer. // Use toPopOperand if an Operand is needed for a pop. Operand MoveEmitterX86::toOperand(const MoveOperand& operand) const { if (operand.isMemoryOrEffectiveAddress()) return Operand(toAddress(operand)); if (operand.isGeneralReg()) return Operand(operand.reg()); MOZ_ASSERT(operand.isFloatReg()); return Operand(operand.floatReg()); }
void MoveEmitterARM64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to) { if (from.isFloatReg()) { if (to.isFloatReg()) { masm.Fmov(toFPReg(to, MoveOp::DOUBLE), toFPReg(from, MoveOp::DOUBLE)); } else { masm.Str(toFPReg(from, MoveOp::DOUBLE), toMemOperand(to)); } return; } if (to.isFloatReg()) { masm.Ldr(toFPReg(to, MoveOp::DOUBLE), toMemOperand(from)); return; } vixl::UseScratchRegisterScope temps(&masm.asVIXL()); const ARMFPRegister scratch = temps.AcquireD(); masm.Ldr(scratch, toMemOperand(from)); masm.Str(scratch, toMemOperand(to)); }
// Record a move of |from| into the next x64 ABI argument location for the
// given type (register if available, otherwise the outgoing stack area).
// The move is deferred to the MoveResolver; argument counters advance here.
void
MacroAssemblerX64::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
    MoveOperand to;
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE: {
        FloatRegister dest;
        // passedFloatArgs_ is post-incremented so each call claims the
        // next float argument slot.
        if (GetFloatArgReg(passedIntArgs_, passedFloatArgs_++, &dest)) {
            // Convert to the right type of register.
            if (type == MoveOp::FLOAT32)
                dest = dest.asSingle();
            if (from.isFloatReg() && from.floatReg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            // Out of float argument registers: spill to the outgoing stack
            // area, sized by the argument's width.
            to = MoveOperand(StackPointer, stackForCall_);
            switch (type) {
              case MoveOp::FLOAT32: stackForCall_ += sizeof(float);  break;
              case MoveOp::DOUBLE:  stackForCall_ += sizeof(double); break;
              default: MOZ_CRASH("Unexpected float register class argument type");
            }
        }
        break;
      }
      case MoveOp::GENERAL: {
        Register dest;
        if (GetIntArgReg(passedIntArgs_++, passedFloatArgs_, &dest)) {
            if (from.isGeneralReg() && from.reg() == dest) {
                // Nothing to do; the value is in the right register already
                return;
            }
            to = MoveOperand(dest);
        } else {
            // Stack slots for general arguments are 8 bytes on x64.
            to = MoveOperand(StackPointer, stackForCall_);
            stackForCall_ += sizeof(int64_t);
        }
        break;
      }
      default:
        MOZ_CRASH("Unexpected argument type");
    }

    // addMove can fail under OOM; record its success in enoughMemory_.
    enoughMemory_ = moveResolver_.addMove(from, to, type);
}
// Warning, do not use the resulting operand with pop instructions, since they // compute the effective destination address after altering the stack pointer. // Use toPopOperand if an Operand is needed for a pop. Operand MoveEmitterX86::toOperand(const MoveOperand &operand) const { if (operand.isMemory() || operand.isEffectiveAddress() || operand.isFloatAddress()) { if (operand.base() != StackPointer) return Operand(operand.base(), operand.disp()); JS_ASSERT(operand.disp() >= 0); // Otherwise, the stack offset may need to be adjusted. return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_)); } if (operand.isGeneralReg()) return Operand(operand.reg()); JS_ASSERT(operand.isFloatReg()); return Operand(operand.floatReg()); }
// This is the same as toOperand except that it computes an Operand suitable for // use in a pop. Operand MoveEmitterX86::toPopOperand(const MoveOperand& operand) const { if (operand.isMemory()) { if (operand.base() != StackPointer) return Operand(operand.base(), operand.disp()); MOZ_ASSERT(operand.disp() >= 0); // Otherwise, the stack offset may need to be adjusted. // Note the adjustment by the stack slot here, to offset for the fact that pop // computes its effective address after incrementing the stack pointer. return Operand(StackPointer, operand.disp() + (masm.framePushed() - sizeof(void*) - pushedAtStart_)); } if (operand.isGeneralReg()) return Operand(operand.reg()); MOZ_ASSERT(operand.isFloatReg()); return Operand(operand.floatReg()); }