Example #1
void FPURegCache::SpillLockV(int vec, VectorSize sz) {
	// Expand the vector into its component registers, then lock each
	// one via the array-based overload.
	u8 r[4];
	GetVectorRegs(r, sz, vec);
	SpillLockV(r, sz);
}
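For context: this overload expands a vector register id into its component registers via GetVectorRegs, then forwards to the array-based SpillLockV. A hypothetical call site might pair the lock with mapping and release, as in the sketch below (the MapRegsV flags and the ReleaseSpillLocks call are borrowed from Example #2; the exact pairing is an assumption, not code from the repository):

	// Sketch: lock a quad's component registers so the allocator cannot
	// evict them while they are mapped and used, then release the locks.
	fpr.SpillLockV(vt, V_Quad);           // lock all four components of vt
	u8 vregs[4];
	GetVectorRegs(vregs, V_Quad, vt);
	fpr.MapRegsV(vregs, V_Quad, MAP_DIRTY | MAP_NOINIT);
	// ... emit code that touches all four lanes ...
	fpr.ReleaseSpillLocks();              // drop the locks once codegen is done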
Example #2
	void Jit::Comp_SVQ(u32 op)
	{
		CONDITIONAL_DISABLE;

		// Decode: imm is the low 16 bits, sign-extended, with the bottom two
		// bits cleared (quad-word alignment); vt combines bits 20..16 with
		// bit 0 as its high bit; rs is the standard MIPS base register field.
		int imm = (signed short)(op & 0xFFFC);
		int vt = ((op >> 16) & 0x1f) | ((op & 1) << 5);
		int rs = _RS;

		bool doCheck = false;
		switch (op >> 26)
		{
		case 54: //lv.q
			{
				// CC might be set by slow path below, so load regs first.
				u8 vregs[4];
				GetVectorRegs(vregs, V_Quad, vt);
				// MAP_NOINIT: all four lanes are overwritten below, so there is
				// no need to load their current values first.
				fpr.MapRegsV(vregs, V_Quad, MAP_DIRTY | MAP_NOINIT);
				fpr.ReleaseSpillLocks();

				if (gpr.IsImm(rs)) {
					// Base register holds a known constant: fold base + offset
					// into an absolute host address at compile time.
					u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
					MOVI2R(R0, addr + (u32)Memory::base);
				} else {
					gpr.MapReg(rs);
					if (g_Config.bFastMemory) {
						SetR0ToEffectiveAddress(rs, imm);
					} else {
						// Slow path: range-check the address. This can leave CC
						// set, predicating the instructions emitted below (see
						// the comment at the top of this case).
						SetCCAndR0ForSafeAddress(rs, imm, R1);
						doCheck = true;
					}
					ADD(R0, R0, R11);
				}

				// Load the four single-precision lanes from consecutive words.
				for (int i = 0; i < 4; i++)
					VLDR(fpr.V(vregs[i]), R0, i * 4);

				if (doCheck) {
					// The address check failed (EQ): zero-fill the destination
					// registers, then restore unconditional code generation.
					SetCC(CC_EQ);
					MOVI2R(R0, 0);
					for (int i = 0; i < 4; i++)
						VMOV(fpr.V(vregs[i]), R0);
					SetCC(CC_AL);
				}
			}
			break;

		case 62: //sv.q
			{
				// CC might be set by slow path below, so load regs first.
				u8 vregs[4];
				GetVectorRegs(vregs, V_Quad, vt);
				fpr.MapRegsV(vregs, V_Quad, 0);  // No MAP_DIRTY: the registers are only read.
				fpr.ReleaseSpillLocks();

				if (gpr.IsImm(rs)) {
					u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
					MOVI2R(R0, addr + (u32)Memory::base);
				} else {
					gpr.MapReg(rs);
					if (g_Config.bFastMemory) {
						SetR0ToEffectiveAddress(rs, imm);
					} else {
						SetCCAndR0ForSafeAddress(rs, imm, R1);
						doCheck = true;
					}
					ADD(R0, R0, R11);
				}

				// Store the four lanes to consecutive words.
				for (int i = 0; i < 4; i++)
					VSTR(fpr.V(vregs[i]), R0, i * 4);

				if (doCheck) {
					// Restore unconditional code generation after the predicated store.
					SetCC(CC_AL);
				}
			}
			break;

		default:
			DISABLE;
			break;
		}
	}
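For reference, the bit-field decoding at the top of Comp_SVQ can be exercised in isolation. Below is a minimal self-contained sketch derived directly from the shifts and masks above; Decoded and DecodeSVQ are hypothetical names, not part of the codebase:

	#include <cstdint>
	#include <cstdio>

	struct Decoded {
		int opcode;  // bits 31..26: 54 = lv.q, 62 = sv.q
		int rs;      // bits 25..21: base GPR (the _RS field)
		int vt;      // bits 20..16, plus bit 0 as bit 5: VFPU quad register
		int imm;     // low 16 bits, sign-extended, bottom two bits cleared
	};

	static Decoded DecodeSVQ(uint32_t op) {
		Decoded d;
		d.opcode = op >> 26;
		d.rs = (op >> 21) & 0x1f;
		d.vt = ((op >> 16) & 0x1f) | ((op & 1) << 5);
		d.imm = (int16_t)(op & 0xFFFC);
		return d;
	}

	int main() {
		// 0xD8000010: opcode 54 (lv.q), rs = 0, vt = 0, imm = 16.
		Decoded d = DecodeSVQ(0xD8000010u);
		printf("op=%d rs=%d vt=%d imm=%d\n", d.opcode, d.rs, d.vt, d.imm);
	}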