Example #1
void
MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::INT32X4:
        if (to.isMemory()) {
            masm.loadAlignedInt32x4(toAddress(to), ScratchSimdReg);
            masm.storeAlignedInt32x4(ScratchSimdReg, cycleSlot());
        } else {
            masm.storeAlignedInt32x4(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::FLOAT32X4:
        if (to.isMemory()) {
            masm.loadAlignedFloat32x4(toAddress(to), ScratchSimdReg);
            masm.storeAlignedFloat32x4(ScratchSimdReg, cycleSlot());
        } else {
            masm.storeAlignedFloat32x4(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            masm.loadFloat32(toAddress(to), ScratchFloat32Reg);
            masm.storeFloat32(ScratchFloat32Reg, cycleSlot());
        } else {
            masm.storeFloat32(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            masm.loadDouble(toAddress(to), ScratchDoubleReg);
            masm.storeDouble(ScratchDoubleReg, cycleSlot());
        } else {
            masm.storeDouble(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::INT32:
#ifdef JS_CODEGEN_X64
        // x64 can't pop to a 32-bit destination, so don't push.
        if (to.isMemory()) {
            masm.load32(toAddress(to), ScratchReg);
            masm.store32(ScratchReg, cycleSlot());
        } else {
            masm.store32(to.reg(), cycleSlot());
        }
        break;
#endif
        // On 32-bit x86, INT32 is word-sized, so fall through and treat it
        // as a GENERAL move.
      case MoveOp::GENERAL:
        masm.Push(toOperand(to));
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}
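The breakCycle/completeCycle pair implements the standard trick for resolving a move cycle: before the first move clobbers its destination, the destination's old value is parked in a spare slot, and the last move of the cycle is completed from that slot. The following standalone sketch models the order of operations on plain ints; the names (cycleSlot, A, B) are illustrative only and nothing here is MacroAssembler code.

#include <cassert>

// Standalone model of breakCycle()/completeCycle(): resolve the cycle
// (A -> B), (B -> A) by parking B's old value in a spare slot.
int main() {
    int A = 1, B = 2;
    int cycleSlot;      // plays the role of the emitter's cycle slot

    cycleSlot = B;      // breakCycle: save B before it is overwritten
    B = A;              // the original (A -> B) move proceeds normally
    A = cycleSlot;      // completeCycle: finish (B -> A) from the slot

    assert(A == 2 && B == 1);
    return 0;
}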
Example #2
void
MoveEmitterMIPS::breakCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    if (kind == Move::DOUBLE) {
        if (to.isMemory()) {
            masm.movsd(toOperand(to), ScratchFloatReg);
            masm.movsd(ScratchFloatReg, cycleSlot());
        } else {
            masm.movsd(to.floatReg(), cycleSlot());
        }
    } else {
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.mov(toOperand(to), temp);
            masm.mov(temp, cycleSlot());
        } else {
            if (to.reg() == spilledReg_) {
                // If the destination was spilled, restore it first.
                masm.mov(spillSlot(), spilledReg_);
                spilledReg_ = InvalidReg;
            }
            masm.mov(to.reg(), cycleSlot());
        }
    }
}
Example #3
void
MoveEmitterMIPS::emitFloat32Move(const MoveOperand &from, const MoveOperand &to)
{
    // Ensure that we can use ScratchFloatReg in memory moves.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveFloat32(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // This should only be used when passing a float parameter in a1, a2, or a3.
            MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
            masm.as_mfc1(to.reg(), from.floatReg());
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        // This should only be used when passing a float parameter in a1, a2, or a3.
        MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
        masm.loadPtr(getAdjustedAddress(from), to.reg());
    } else {
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadFloat32(getAdjustedAddress(from), ScratchFloatReg);
        masm.storeFloat32(ScratchFloatReg, getAdjustedAddress(to));
    }
}
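as_mfc1 copies the raw bit pattern of a single-precision float register into a general-purpose register; no numeric conversion happens. A host-side analogue of that bit-for-bit transfer (an illustration, not emitter code):

#include <cassert>
#include <cstdint>
#include <cstring>

// Host-side analogue of mfc1: transfer a float's raw bits into a 32-bit
// integer "register" without converting the value.
static uint32_t bitsOfFloat(float f) {
    uint32_t bits;
    static_assert(sizeof bits == sizeof f, "float must be 32 bits wide");
    std::memcpy(&bits, &f, sizeof bits);
    return bits;
}

int main() {
    assert(bitsOfFloat(1.0f) == 0x3f800000u);  // IEEE-754 encoding of 1.0f
    return 0;
}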
Example #4
void
MoveEmitterMIPS::completeCycle(const MoveOperand &from, const MoveOperand &to, Move::Kind kind)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B, to A.
    if (kind == Move::DOUBLE) {
        if (to.isMemory()) {
            masm.movsd(cycleSlot(), ScratchFloatReg);
            masm.movsd(ScratchFloatReg, toOperand(to));
        } else {
            masm.movsd(cycleSlot(), to.floatReg());
        }
    } else {
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.mov(cycleSlot(), temp);
            masm.mov(temp, toOperand(to));
        } else {
            if (to.reg() == spilledReg_) {
                // Make sure we don't re-clobber the spilled register later.
                spilledReg_ = InvalidReg;
            }
            masm.mov(cycleSlot(), to.reg());
        }
    }
}
Example #5
void
MoveEmitterMIPS::emitMove(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isGeneralReg()) {
        // Second scratch register should not be moved by MoveEmitter.
        MOZ_ASSERT(from.reg() != spilledReg_);

        if (to.isGeneralReg())
            masm.movePtr(from.reg(), to.reg());
        else if (to.isMemory())
            masm.storePtr(from.reg(), getAdjustedAddress(to));
        else
            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
    } else if (from.isMemory()) {
        if (to.isGeneralReg()) {
            masm.loadPtr(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            masm.loadPtr(getAdjustedAddress(from), tempReg());
            masm.storePtr(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
        }
    } else if (from.isEffectiveAddress()) {
        if (to.isGeneralReg()) {
            masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
            masm.storePtr(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
        }
    } else {
        MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
    }
}
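The isEffectiveAddress() branches above materialize the address base + disp itself into the destination (what lea/computeEffectiveAddress do) instead of loading the word stored at that address. A small model of the distinction, with invented helper names:

#include <cassert>
#include <cstdint>

// Model of the two move flavors: load the value stored at base+index, versus
// materialize the address base+index itself (the lea case).
static uint64_t loadFrom(const uint64_t* base, int index) {
    return base[index];                               // memory -> register
}
static uintptr_t effectiveAddress(const uint64_t* base, int index) {
    return reinterpret_cast<uintptr_t>(base + index); // address -> register
}

int main() {
    uint64_t frame[4] = {10, 20, 30, 40};
    assert(loadFrom(frame, 2) == 30);
    assert(effectiveAddress(frame, 2) == reinterpret_cast<uintptr_t>(&frame[2]));
    return 0;
}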
Example #6
void
MoveEmitterX86::completeCycle(const MoveOperand &to, MoveOp::Type type)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B, to A.
    switch (type) {
      case MoveOp::FLOAT32:
        JS_ASSERT(pushedAtCycle_ != -1);
        JS_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
        if (to.isMemory()) {
            masm.loadFloat32(cycleSlot(), ScratchFloatReg);
            masm.storeFloat32(ScratchFloatReg, toAddress(to));
        } else {
            masm.loadFloat32(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::DOUBLE:
        JS_ASSERT(pushedAtCycle_ != -1);
        JS_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
        if (to.isMemory()) {
            masm.loadDouble(cycleSlot(), ScratchFloatReg);
            masm.storeDouble(ScratchFloatReg, toAddress(to));
        } else {
            masm.loadDouble(cycleSlot(), to.floatReg());
        }
        break;
#ifdef JS_CPU_X64
      case MoveOp::INT32:
        JS_ASSERT(pushedAtCycle_ != -1);
        JS_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
        // x64 can't pop to a 32-bit destination.
        if (to.isMemory()) {
            masm.load32(cycleSlot(), ScratchReg);
            masm.store32(ScratchReg, toAddress(to));
        } else {
            masm.load32(cycleSlot(), to.reg());
        }
        break;
#endif
#ifndef JS_CPU_X64
      case MoveOp::INT32:
#endif
      case MoveOp::GENERAL:
        JS_ASSERT(masm.framePushed() - pushedAtStart_ >= sizeof(intptr_t));
        masm.Pop(toPopOperand(to));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
    }
}
Example #7
void
MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    // Ensure that we can use ScratchDoubleReg in memory moves.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // Used for passing a double parameter in the a2,a3 register pair.
            // MacroAssemblerMIPSCompat::passABIArg adds two moves for each
            // double parameter.
            if (to.reg() == a2)
                masm.moveFromDoubleLo(from.floatReg(), a2);
            else if (to.reg() == a3)
                masm.moveFromDoubleHi(from.floatReg(), a3);
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadDouble(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        // Used for passing a double parameter in the a2,a3 register pair.
        // MacroAssemblerMIPSCompat::passABIArg adds two moves for each
        // double parameter.
        if (from.isMemory()) {
            if (to.reg() == a2)
                masm.loadPtr(getAdjustedAddress(from), a2);
            else if (to.reg() == a3)
                masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        } else {
            // Used for moving a double parameter from the same source. See Bug 1123874.
            if (to.reg() == a2 || to.reg() == a3)
                masm.ma_move(to.reg(), from.reg());
            else
                MOZ_CRASH("Invalid emitDoubleMove arguments.");
        }
    } else {
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
        masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
    }
}
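When a double is passed in the a2,a3 pair, moveFromDoubleLo/moveFromDoubleHi split it into its low and high 32-bit words, and the memory path reads the halves at offsets 0 and sizeof(uint32_t). A host-side sketch of that split, assuming a little-endian layout like the MIPS ABI the code above targets:

#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch of splitting a double into the low word (a2) and high word (a3).
// Assumes a little-endian host so the halves land like the code above expects.
int main() {
    double d = 1.0;
    uint32_t lo, hi;
    std::memcpy(&lo, reinterpret_cast<const char*>(&d), sizeof lo);
    std::memcpy(&hi, reinterpret_cast<const char*>(&d) + sizeof lo, sizeof hi);

    // IEEE-754: 1.0 is 0x3FF0000000000000, so the exponent bits sit in hi.
    assert(lo == 0x00000000u && hi == 0x3ff00000u);
    return 0;
}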
Example #8
void MoveEmitterARM64::completeCycle(const MoveOperand& from,
                                     const MoveOperand& to, MoveOp::Type type) {
  switch (type) {
    case MoveOp::FLOAT32:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMFPRegister scratch32 = temps.AcquireS();
        masm.Ldr(scratch32, cycleSlot());
        masm.Str(scratch32, toMemOperand(to));
      } else {
        masm.Ldr(toFPReg(to, type), cycleSlot());
      }
      break;

    case MoveOp::DOUBLE:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMFPRegister scratch = temps.AcquireD();
        masm.Ldr(scratch, cycleSlot());
        masm.Str(scratch, toMemOperand(to));
      } else {
        masm.Ldr(toFPReg(to, type), cycleSlot());
      }
      break;

    case MoveOp::INT32:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMRegister scratch32 = temps.AcquireW();
        masm.Ldr(scratch32, cycleSlot());
        masm.Str(scratch32, toMemOperand(to));
      } else {
        masm.Ldr(toARMReg64(to), cycleSlot());
      }
      break;

    case MoveOp::GENERAL:
      if (to.isMemory()) {
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const ARMRegister scratch64 = temps.AcquireX();
        masm.Ldr(scratch64, cycleSlot());
        masm.Str(scratch64, toMemOperand(to));
      } else {
        masm.Ldr(toARMReg64(to), cycleSlot());
      }
      break;

    default:
      MOZ_CRASH("Unexpected move type");
  }
}
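vixl::UseScratchRegisterScope hands out temporaries from a pool and gives them back automatically when the scope closes, which is why each case above can acquire a scratch register without bookkeeping. Below is a minimal RAII model of that pattern; the types and register numbers are invented and this is not the VIXL API.

#include <cassert>
#include <vector>

// Minimal RAII scratch-register pool in the spirit of UseScratchRegisterScope.
class ScratchPool {
    std::vector<int> free_;
  public:
    explicit ScratchPool(std::vector<int> regs) : free_(std::move(regs)) {}
    int acquire() {
        assert(!free_.empty() && "out of scratch registers");
        int r = free_.back();
        free_.pop_back();
        return r;
    }
    void release(int r) { free_.push_back(r); }
};

class ScratchScope {
    ScratchPool& pool_;
    int reg_;
  public:
    explicit ScratchScope(ScratchPool& pool) : pool_(pool), reg_(pool.acquire()) {}
    ~ScratchScope() { pool_.release(reg_); }  // register returned on scope exit
    int reg() const { return reg_; }
};

int main() {
    ScratchPool pool({16, 17});
    {
        ScratchScope temp(pool);   // like temps.AcquireX()
        assert(temp.reg() == 17);
    }                              // handed back here
    ScratchScope again(pool);      // safe to reuse after the scope closed
    assert(again.reg() == 17);
    return 0;
}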
Example #9
void
MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
                            MoveOp::Type type, uint32_t slotId)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloat32Reg;
            masm.loadFloat32(getAdjustedAddress(to), temp);
            // Since it is uncertain whether the load will be aligned or not,
            // just fill both slots with the same value.
            masm.storeFloat32(temp, cycleSlot(slotId, 0));
            masm.storeFloat32(temp, cycleSlot(slotId, 4));
        } else {
            // Just always store the largest possible size.
            masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchDoubleReg;
            masm.loadDouble(getAdjustedAddress(to), temp);
            masm.storeDouble(temp, cycleSlot(slotId, 0));
        } else {
            masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
        }
        break;
      case MoveOp::INT32:
        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
        // Fall through: INT32 is word-sized on this target.
      case MoveOp::GENERAL:
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.loadPtr(getAdjustedAddress(to), temp);
            masm.storePtr(temp, cycleSlot(0, 0));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.storePtr(to.reg(), cycleSlot(0, 0));
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}
Example #10
void
MoveEmitterMIPS::completeCycle(const MoveOperand& from, const MoveOperand& to,
                               MoveOp::Type type, uint32_t slotId)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B, to A.
    switch (type) {
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloat32Reg;
            masm.loadFloat32(cycleSlot(slotId, 0), temp);
            masm.storeFloat32(temp, getAdjustedAddress(to));
        } else {
            uint32_t offset = 0;
            if (from.floatReg().numAlignedAliased() == 1)
                offset = sizeof(float);
            masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchDoubleReg;
            masm.loadDouble(cycleSlot(slotId, 0), temp);
            masm.storeDouble(temp, getAdjustedAddress(to));
        } else {
            masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
        }
        break;
      case MoveOp::INT32:
        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
        // Fall through: INT32 is word-sized on this target.
      case MoveOp::GENERAL:
        MOZ_ASSERT(slotId == 0);
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.loadPtr(cycleSlot(0, 0), temp);
            masm.storePtr(temp, getAdjustedAddress(to));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.loadPtr(cycleSlot(0, 0), to.reg());
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}
Example #11
void
MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    // Registers are used to store pointers / int32 / float32 values.
    MOZ_ASSERT(!from.isGeneralReg());
    MOZ_ASSERT(!to.isGeneralReg());

    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.ma_vmov(from.floatReg(), to.floatReg());
        else if (to.isGeneralRegPair())
            masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
        else
            masm.ma_vstr(from.floatReg(), toAddress(to));
    } else if (from.isGeneralRegPair()) {
        if (to.isFloatReg())
            masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
        else if (to.isGeneralRegPair()) {
            MOZ_ASSERT(!from.aliases(to));
            masm.ma_mov(from.evenReg(), to.evenReg());
            masm.ma_mov(from.oddReg(), to.oddReg());
        } else {
            FloatRegister reg = ScratchDoubleReg;
            masm.ma_vxfer(from.evenReg(), from.oddReg(), reg);
            masm.ma_vstr(reg, toAddress(to));
        }
    } else if (to.isFloatReg()) {
        masm.ma_vldr(toAddress(from), to.floatReg());
    } else if (to.isGeneralRegPair()) {
        MOZ_ASSERT(from.isMemory());
        Address src = toAddress(from);
        // Note: We can safely use the MoveOperand's displacement here,
        // even if the base is SP: MoveEmitter::toOperand adjusts
        // SP-relative operands by the difference between the current
        // stack usage and stackAdjust, which emitter.finish() resets to
        // 0.
        //
        // Warning: if the offset isn't within [-255,+255] then this
        // will assert-fail (or, if non-debug, load the wrong words).
        // Nothing uses such an offset at the time of this writing.
        masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(), to.oddReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        ScratchDoubleScope scratch(masm);
        masm.ma_vldr(toAddress(from), scratch);
        masm.ma_vstr(scratch, toAddress(to));
    }
}
Example #12
void
MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
{
    // Register pairs are used to store Double values during calls.
    MOZ_ASSERT(!from.isGeneralRegPair());
    MOZ_ASSERT(!to.isGeneralRegPair());

    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.ma_vmov_f32(from.floatReg(), to.floatReg());
        else if (to.isGeneralReg())
            masm.ma_vxfer(from.floatReg(), to.reg());
        else
            masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to));
    } else if (from.isGeneralReg()) {
        if (to.isFloatReg())
            masm.ma_vxfer(from.reg(), to.floatReg());
        else if (to.isGeneralReg())
            masm.ma_mov(from.reg(), to.reg());
        else
            masm.ma_str(from.reg(), toAddress(to));
    } else if (to.isFloatReg()) {
        masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay());
    } else if (to.isGeneralReg()) {
        masm.ma_ldr(toAddress(from), to.reg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        FloatRegister reg = ScratchFloat32Reg;
        masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay());
        masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to));
    }
}
Example #13
void
MoveEmitterARM::emitMove(const MoveOperand &from, const MoveOperand &to)
{
    if (to.isGeneralReg() && to.reg() == spilledReg_) {
        // If the destination is the spilled register, make sure we
        // don't re-clobber its value.
        spilledReg_ = InvalidReg;
    }

    if (from.isGeneralReg()) {
        if (from.reg() == spilledReg_) {
            // If the source is a register that has been spilled, make sure
            // to load the source back into that register.
            masm.ma_ldr(spillSlot(), spilledReg_);
            spilledReg_ = InvalidReg;
        }
        switch (toOperand(to, false).getTag()) {
          case Operand::OP2:
            // secretly must be a register
            masm.ma_mov(from.reg(), to.reg());
            break;
          case Operand::MEM:
            masm.ma_str(from.reg(), toOperand(to, false));
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("strange move!");
        }
    } else if (to.isGeneralReg()) {
        JS_ASSERT(from.isMemory() || from.isEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toOperand(from, false), to.reg());
        else
            masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
    } else {
        // Memory to memory gpr move.
        Register reg = tempReg();

        JS_ASSERT(from.isMemory() || from.isEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toOperand(from, false), reg);
        else
            masm.ma_add(from.base(), Imm32(from.disp()), reg);
        JS_ASSERT(to.base() != reg);
        masm.ma_str(reg, toOperand(to, false));
    }
}
Example #14
void
MoveEmitterMIPS::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
    // Ensure that we can use ScratchFloatReg in memory moves.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);

    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // Used for passing a double parameter in the a2,a3 register pair.
            // MacroAssemblerMIPSCompat::passABIArg adds two moves for each
            // double parameter.
            if (to.reg() == a2)
                masm.as_mfc1(a2, from.floatReg());
            else if (to.reg() == a3)
                masm.as_mfc1_Odd(a3, from.floatReg());
            else
                MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.loadDouble(getAdjustedAddress(from), to.floatReg());
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        // Used for passing a double parameter in the a2,a3 register pair.
        // MacroAssemblerMIPSCompat::passABIArg adds two moves for each
        // double parameter.
        if (to.reg() == a2)
            masm.loadPtr(getAdjustedAddress(from), a2);
        else if (to.reg() == a3)
            masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
        else
            MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
    } else {
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadDouble(getAdjustedAddress(from), ScratchFloatReg);
        masm.storeDouble(ScratchFloatReg, getAdjustedAddress(to));
    }
}
Example #15
void
MoveEmitterMIPS::breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloatReg;
            masm.loadFloat32(getAdjustedAddress(to), temp);
            masm.storeFloat32(temp, cycleSlot());
        } else {
            masm.storeFloat32(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloatReg;
            masm.loadDouble(getAdjustedAddress(to), temp);
            masm.storeDouble(temp, cycleSlot());
        } else {
            masm.storeDouble(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::INT32:
        MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
        // Fall through: INT32 is word-sized on this target.
      case MoveOp::GENERAL:
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.loadPtr(getAdjustedAddress(to), temp);
            masm.storePtr(temp, cycleSlot());
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.storePtr(to.reg(), cycleSlot());
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
    }
}
Example #16
MemOperand MoveEmitterARM64::toMemOperand(const MoveOperand& operand) const {
  MOZ_ASSERT(operand.isMemory());
  ARMRegister base(operand.base(), 64);
  if (operand.base() == masm.getStackPointer()) {
    return MemOperand(base,
                      operand.disp() + (masm.framePushed() - pushedAtStart_));
  }
  return MemOperand(base, operand.disp());
}
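The SP-relative case rebases the recorded displacement by however much the frame has grown since the moves were recorded: disp + (framePushed - pushedAtStart_). A worked model of that arithmetic with made-up numbers:

#include <cassert>
#include <cstdint>

// Worked model of the rebasing in toMemOperand(): a slot recorded at `disp`
// bytes from SP drifts further away as the emitter pushes more on the stack.
static int64_t adjustedDisp(int64_t disp, uint32_t framePushed, uint32_t pushedAtStart) {
    return disp + (framePushed - pushedAtStart);
}

int main() {
    // Recorded at SP+8 when framePushed was 32; the emitter has since pushed
    // 16 more bytes, so the same slot is now SP+24.
    assert(adjustedDisp(8, 48, 32) == 24);
    return 0;
}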
Example #17
void
MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
{
    // Register pairs are used to store Double values during calls.
    MOZ_ASSERT(!from.isGeneralRegPair());
    MOZ_ASSERT(!to.isGeneralRegPair());

    if (to.isGeneralReg() && to.reg() == spilledReg_) {
        // If the destination is the spilled register, make sure we
        // don't re-clobber its value.
        spilledReg_ = InvalidReg;
    }

    if (from.isGeneralReg()) {
        if (from.reg() == spilledReg_) {
            // If the source is a register that has been spilled, make sure
            // to load the source back into that register.
            masm.ma_ldr(spillSlot(), spilledReg_);
            spilledReg_ = InvalidReg;
        }
        if (to.isMemoryOrEffectiveAddress())
            masm.ma_str(from.reg(), toAddress(to));
        else
            masm.ma_mov(from.reg(), to.reg());
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toAddress(from), to.reg());
        else
            masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
    } else {
        // Memory to memory gpr move.
        Register reg = tempReg();

        MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toAddress(from), reg);
        else
            masm.ma_add(from.base(), Imm32(from.disp()), reg);
        MOZ_ASSERT(to.base() != reg);
        masm.ma_str(reg, toAddress(to));
    }
}
Example #18
// Warning, do not use the resulting operand with pop instructions, since they
// compute the effective destination address after altering the stack pointer.
// Use toPopOperand if an Operand is needed for a pop.
Operand
MoveEmitterX86::toOperand(const MoveOperand &operand) const
{
    if (operand.isMemory() || operand.isEffectiveAddress() || operand.isFloatAddress())
        return Operand(toAddress(operand));
    if (operand.isGeneralReg())
        return Operand(operand.reg());

    JS_ASSERT(operand.isFloatReg());
    return Operand(operand.floatReg());
}
Example #19
void MoveEmitterARM64::emitGeneralMove(const MoveOperand& from,
                                       const MoveOperand& to) {
  if (from.isGeneralReg()) {
    MOZ_ASSERT(to.isGeneralReg() || to.isMemory());
    if (to.isGeneralReg()) {
      masm.Mov(toARMReg64(to), toARMReg64(from));
    } else {
      masm.Str(toARMReg64(from), toMemOperand(to));
    }
    return;
  }

  // {Memory OR EffectiveAddress} -> Register move.
  if (to.isGeneralReg()) {
    MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
    if (from.isMemory()) {
      masm.Ldr(toARMReg64(to), toMemOperand(from));
    } else {
      masm.Add(toARMReg64(to), toARMReg64(from), Operand(from.disp()));
    }
    return;
  }

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMRegister scratch64 = temps.AcquireX();

  // Memory -> Memory move.
  if (from.isMemory()) {
    MOZ_ASSERT(to.isMemory());
    masm.Ldr(scratch64, toMemOperand(from));
    masm.Str(scratch64, toMemOperand(to));
    return;
  }

  // EffectiveAddress -> Memory move.
  MOZ_ASSERT(from.isEffectiveAddress());
  MOZ_ASSERT(to.isMemory());
  masm.Add(scratch64, toARMReg64(from), Operand(from.disp()));
  masm.Str(scratch64, toMemOperand(to));
}
Example #20
void
MoveEmitterX86::emitInt32Move(const MoveOperand& from, const MoveOperand& to)
{
    if (from.isGeneralReg()) {
        masm.move32(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemory());
        masm.load32(toAddress(from), to.reg());
    } else {
        // Memory to memory gpr move.
        MOZ_ASSERT(from.isMemory());
        if (hasScratchRegister()) {
            Register reg = scratchRegister();
            masm.load32(toAddress(from), reg);
            masm.move32(reg, toOperand(to));
        } else {
            // No scratch register available; bounce it off the stack.
            masm.Push(toOperand(from));
            masm.Pop(toPopOperand(to));
        }
    }
}
Example #21
void
MoveEmitterX86::emitGeneralMove(const MoveOperand& from, const MoveOperand& to)
{
    if (from.isGeneralReg()) {
        masm.mov(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.loadPtr(toAddress(from), to.reg());
        else
            masm.lea(toOperand(from), to.reg());
    } else if (from.isMemory()) {
        // Memory to memory gpr move.
        if (hasScratchRegister()) {
            Register reg = scratchRegister();
            masm.loadPtr(toAddress(from), reg);
            masm.mov(reg, toOperand(to));
        } else {
            // No scratch register available; bounce it off the stack.
            masm.Push(toOperand(from));
            masm.Pop(toPopOperand(to));
        }
    } else {
        // Effective address to memory move.
        MOZ_ASSERT(from.isEffectiveAddress());
        if (hasScratchRegister()) {
            Register reg = scratchRegister();
            masm.lea(toOperand(from), reg);
            masm.mov(reg, toOperand(to));
        } else {
            // This is tricky without a scratch reg. We can't do an lea. Bounce the
            // base register off the stack, then add the offset in place. Note that
            // this clobbers FLAGS!
            masm.Push(from.base());
            masm.Pop(toPopOperand(to));
            masm.addPtr(Imm32(from.disp()), toOperand(to));
        }
    }
}
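When no scratch register is free, the fallback paths above bounce the value off the machine stack: push the source operand, pop into the destination, and for the effective-address case add the displacement in place afterwards. The standalone model below mimics that bounce with an explicit stack; all names are invented.

#include <cassert>
#include <vector>

// Standalone model of the no-scratch-register fallback: route a value
// through the stack with push/pop instead of through a register.
int main() {
    std::vector<long> stack;    // models the machine stack
    long fromSlot = 42;         // source memory
    long toSlot = 0;            // destination memory

    stack.push_back(fromSlot);  // masm.Push(toOperand(from))
    toSlot = stack.back();      // masm.Pop(toPopOperand(to))
    stack.pop_back();
    assert(toSlot == 42);

    // Effective-address variant: bounce the base, then add the displacement
    // in place (the step that clobbers FLAGS on x86).
    long base = 1000, disp = 16;
    stack.push_back(base);      // masm.Push(from.base())
    toSlot = stack.back();      // masm.Pop(toPopOperand(to))
    stack.pop_back();
    toSlot += disp;             // masm.addPtr(Imm32(from.disp()), toOperand(to))
    assert(toSlot == 1016);
    return 0;
}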
Example #22
void
MoveEmitterX86::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isGeneralReg()) {
        masm.move32(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        JS_ASSERT(from.isMemory());
        masm.load32(toAddress(from), to.reg());
    } else {
        // Memory to memory gpr move.
        JS_ASSERT(from.isMemory());
#ifdef JS_CODEGEN_X64
        // x64 has a ScratchReg. Use it.
        masm.load32(toAddress(from), ScratchReg);
        masm.move32(ScratchReg, toOperand(to));
#else
        // No ScratchReg; bounce it off the stack.
        masm.Push(toOperand(from));
        masm.Pop(toPopOperand(to));
#endif
    }
}
Example #23
void
MoveEmitterX86::emitGeneralMove(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isGeneralReg()) {
        masm.mov(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        JS_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.loadPtr(toAddress(from), to.reg());
        else
            masm.lea(toOperand(from), to.reg());
    } else if (from.isMemory()) {
        // Memory to memory gpr move.
#ifdef JS_CODEGEN_X64
        // x64 has a ScratchReg. Use it.
        masm.loadPtr(toAddress(from), ScratchReg);
        masm.mov(ScratchReg, toOperand(to));
#else
        // No ScratchReg; bounce it off the stack.
        masm.Push(toOperand(from));
        masm.Pop(toPopOperand(to));
#endif
    } else {
        // Effective address to memory move.
        JS_ASSERT(from.isEffectiveAddress());
#ifdef JS_CODEGEN_X64
        // x64 has a ScratchReg. Use it.
        masm.lea(toOperand(from), ScratchReg);
        masm.mov(ScratchReg, toOperand(to));
#else
        // This is tricky without a ScratchReg. We can't do an lea. Bounce the
        // base register off the stack, then add the offset in place. Note that
        // this clobbers FLAGS!
        masm.Push(from.base());
        masm.Pop(toPopOperand(to));
        masm.addPtr(Imm32(from.disp()), toOperand(to));
#endif
    }
}
Example #24
void
MoveEmitterMIPS::emitMove(const MoveOperand &from, const MoveOperand &to)
{
    if (to.isGeneralReg() && to.reg() == spilledReg_) {
        // If the destination is the spilled register, make sure we
        // don't re-clobber its value.
        spilledReg_ = InvalidReg;
    }

    if (from.isGeneralReg()) {
        if (from.reg() == spilledReg_) {
            // If the source is a register that has been spilled, make sure
            // to load the source back into that register.
            masm.mov(spillSlot(), spilledReg_);
            spilledReg_ = InvalidReg;
        }
        masm.mov(from.reg(), toOperand(to));
    } else if (to.isGeneralReg()) {
        JS_ASSERT(from.isMemory() || from.isEffectiveAddress());
        if (from.isMemory())
            masm.mov(toOperand(from), to.reg());
        else
            masm.lea(toOperand(from), to.reg());
    } else {
        // Memory to memory gpr move.
        Register reg = tempReg();
        // If the temp register aliases the base register, reload the base's
        // previous value from its spill slot first.
        if (reg == from.base())
            masm.mov(spillSlot(), from.base());

        JS_ASSERT(from.isMemory() || from.isEffectiveAddress());
        if (from.isMemory())
            masm.mov(toOperand(from), reg);
        else
            masm.lea(toOperand(from), reg);
        JS_ASSERT(to.base() != reg);
        masm.mov(reg, toOperand(to));
    }
}
Example #25
void
MoveEmitterX86::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isFloatReg()) {
        masm.movsd(from.floatReg(), toOperand(to));
    } else if (to.isFloatReg()) {
        masm.movsd(toOperand(from), to.floatReg());
    } else {
        // Memory to memory float move.
        JS_ASSERT(from.isMemory());
        masm.movsd(toOperand(from), ScratchFloatReg);
        masm.movsd(ScratchFloatReg, toOperand(to));
    }
}
Example #26
void
MoveEmitterX86::breakCycle(const MoveOperand &to, Move::Kind kind)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    if (kind == Move::DOUBLE) {
        if (to.isMemory()) {
            masm.movsd(toOperand(to), ScratchFloatReg);
            masm.movsd(ScratchFloatReg, cycleSlot());
        } else {
            masm.movsd(to.floatReg(), cycleSlot());
        }
    } else {
        if (to.isMemory())
            masm.Push(toOperand(to));
        else
            masm.Push(to.reg());
    }
}
Example #27
void
MoveEmitterX86::completeCycle(const MoveOperand &to, Move::Kind kind)
{
    // Consider this pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B, to A.
    if (kind == Move::DOUBLE) {
        if (to.isMemory()) {
            masm.movsd(cycleSlot(), ScratchFloatReg);
            masm.movsd(ScratchFloatReg, toOperand(to));
        } else {
            masm.movsd(cycleSlot(), to.floatReg());
        }
    } else {
        if (to.isMemory()) {
            masm.Pop(toPopOperand(to));
        } else {
            masm.Pop(to.reg());
        }
    }
}
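The reason pops need toPopOperand rather than toOperand is that the CPU computes a pop's destination address after SP has already been bumped by one word, so an SP-relative displacement must shrink by that word to keep naming the same slot. A worked model with made-up offsets, assuming an 8-byte word:

#include <cassert>
#include <cstdint>

// Worked model of why pop needs toPopOperand(): the destination address of a
// pop is computed *after* SP is incremented by one word.
int main() {
    const int64_t wordSize = 8;         // assuming a 64-bit target
    int64_t sp = 0x1000;                // SP before the pop
    int64_t slot = sp + 24;             // absolute address of the destination

    int64_t spAfterPop = sp + wordSize; // pop bumps SP first...
    int64_t popDisp = slot - spAfterPop;
    assert(popDisp == 24 - wordSize);   // ...so the displacement is one word less
    return 0;
}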
Example #28
void
MoveEmitterX86::emitFloat32X4Move(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.moveFloat32x4(from.floatReg(), to.floatReg());
        else
            masm.storeAlignedFloat32x4(from.floatReg(), toAddress(to));
    } else if (to.isFloatReg()) {
        masm.loadAlignedFloat32x4(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        MOZ_ASSERT(from.isMemory());
        masm.loadAlignedFloat32x4(toAddress(from), ScratchSimdReg);
        masm.storeAlignedFloat32x4(ScratchSimdReg, toAddress(to));
    }
}
Example #29
void
MoveEmitterX86::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.moveDouble(from.floatReg(), to.floatReg());
        else
            masm.storeDouble(from.floatReg(), toAddress(to));
    } else if (to.isFloatReg()) {
        masm.loadDouble(toAddress(from), to.floatReg());
    } else {
        // Memory to memory move.
        JS_ASSERT(from.isMemory());
        masm.loadDouble(toAddress(from), ScratchFloatReg);
        masm.storeDouble(ScratchFloatReg, toAddress(to));
    }
}
Example #30
// Warning, do not use the resulting operand with pop instructions, since they
// compute the effective destination address after altering the stack pointer.
// Use toPopOperand if an Operand is needed for a pop.
Operand
MoveEmitterX86::toOperand(const MoveOperand &operand) const
{
    if (operand.isMemory() || operand.isEffectiveAddress() || operand.isFloatAddress()) {
        if (operand.base() != StackPointer)
            return Operand(operand.base(), operand.disp());

        JS_ASSERT(operand.disp() >= 0);

        // Otherwise, the stack offset may need to be adjusted.
        return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
    }
    if (operand.isGeneralReg())
        return Operand(operand.reg());

    JS_ASSERT(operand.isFloatReg());
    return Operand(operand.floatReg());
}