// ps_rsqrte: PowerPC paired-single reciprocal square-root estimate.
// For each half of the pair the input is compared against zero; the
// exception-classification paths set FPSCR bits (VXSQRT for the NaN path,
// ZX if either lane was zero), then both lanes are estimated at once with
// NEON VRSQRTE on D0 (whose singles S0/S1 hold the narrowed inputs).
//
// NOTE(review): two oddities are left untouched here and should be checked
// against upstream: (1) the CC_LT branch polarity means the NaN/VXSQRT path
// runs when the input is NOT less than zero, which looks inverted for a
// square-root domain error; (2) the final VCVTs write vD0/vD1
// unconditionally, clobbering the NaN stored on the exception paths.
void JitArm::ps_rsqrte(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITPairedOff);
	FALLBACK_IF(inst.Rc);

	u32 b = inst.FB, d = inst.FD;

	ARMReg vB0 = fpr.R0(b);
	ARMReg vB1 = fpr.R1(b);
	ARMReg vD0 = fpr.R0(d, false);
	ARMReg vD1 = fpr.R1(d, false);
	ARMReg fpscrReg = gpr.GetReg();
	ARMReg V0 = D1; // scratch double; holds the default PPC NaN
	ARMReg rA = gpr.GetReg();

	// BUGFIX: rA accumulates "this lane was zero" flags via ORR below, so it
	// must start cleared. It was previously used uninitialized, so the final
	// CMP(rA, 0) read garbage and could raise a spurious ZX exception.
	MOV(rA, 0);

	MOVI2R(fpscrReg, (u32)&PPC_NAN);
	VLDR(V0, fpscrReg, 0); // V0 = default PPC NaN
	LDR(fpscrReg, R9, PPCSTATE_OFF(fpscr));

	// Lane 0: classify vB0 against zero.
	VCMP(vB0);
	VMRS(_PC); // copy VFP status flags into APSR
	FixupBranch Less0 = B_CC(CC_LT);
	VMOV(vD0, V0); // result = NaN
	SetFPException(fpscrReg, FPSCR_VXSQRT);
	FixupBranch SkipOrr0 = B();
	SetJumpTarget(Less0);
	SetCC(CC_EQ);
	ORR(rA, rA, 1); // remember: lane 0 was zero
	SetCC();
	SetJumpTarget(SkipOrr0);

	// Lane 1: same classification for vB1.
	VCMP(vB1);
	VMRS(_PC);
	FixupBranch Less1 = B_CC(CC_LT);
	VMOV(vD1, V0);
	SetFPException(fpscrReg, FPSCR_VXSQRT);
	FixupBranch SkipOrr1 = B();
	SetJumpTarget(Less1);
	SetCC(CC_EQ);
	ORR(rA, rA, 2); // remember: lane 1 was zero
	SetCC();
	SetJumpTarget(SkipOrr1);

	// If either lane was zero, raise the zero-divide exception.
	CMP(rA, 0);
	FixupBranch noException = B_CC(CC_EQ);
	SetFPException(fpscrReg, FPSCR_ZX);
	SetJumpTarget(noException);

	// Narrow both doubles into S0/S1 (= D0), estimate both lanes with one
	// NEON VRSQRTE, then widen the results back to doubles.
	VCVT(S0, vB0, 0);
	VCVT(S1, vB1, 0);
	NEONXEmitter nemit(this);
	nemit.VRSQRTE(F_32, D0, D0);
	VCVT(vD0, S0, 0);
	VCVT(vD1, S1, 0);

	STR(fpscrReg, R9, PPCSTATE_OFF(fpscr));
	gpr.Unlock(fpscrReg, rA);
}
// Compiles mfv/mfvc (VFPU -> GPR) and mtv/mtvc (GPR -> VFPU) transfers.
// Moves go through the MIPS context in memory rather than reg-to-reg;
// the VFPU control-register variants (imm >= 128) are not jitted yet.
void Jit::Comp_Mftv(u32 op)
{
	CONDITIONAL_DISABLE;
	int imm = op & 0xFF;  // VFPU register / control-register index
	int rt = _RT;
	switch ((op >> 21) & 0x1f)
	{
	case 3: //mfv / mfvc
		// rt = 0, imm = 255 appears to be used as a CPU interlock by some games.
		if (rt != 0) {
			if (imm < 128) {  //R(rt) = VI(imm);
				// Flush the VFPU reg to the context, then load its bits into rt.
				fpr.FlushV(imm);
				gpr.MapReg(rt, MAP_NOINIT | MAP_DIRTY);
				LDR(gpr.R(rt), CTXREG, fpr.GetMipsRegOffsetV(imm));
			} else if (imm < 128 + VFPU_CTRL_MAX) { //mfvc
				DISABLE;
				// In case we have a saved prefix.
				//FlushPrefixV();
				//gpr.BindToRegister(rt, false, true);
				//MOV(32, gpr.R(rt), M(&currentMIPS->vfpuCtrl[imm - 128]));
			} else {
				//ERROR - maybe need to make this value too an "interlock" value?
				_dbg_assert_msg_(CPU,0,"mfv - invalid register");
			}
		}
		break;

	case 7: //mtv
		if (imm < 128) {
			// Flush rt to the context, then load its bits into the VFPU reg.
			gpr.FlushR(rt);
			fpr.MapRegV(imm, MAP_DIRTY | MAP_NOINIT);
			VLDR(fpr.V(imm), CTXREG, gpr.GetMipsRegOffset(rt));
		} else if (imm < 128 + VFPU_CTRL_MAX) { //mtvc
			//currentMIPS->vfpuCtrl[imm - 128] = R(rt);
			DISABLE;
			//gpr.BindToRegister(rt, true, false);
			//MOV(32, M(&currentMIPS->vfpuCtrl[imm - 128]), gpr.R(rt));
			// TODO: Optimization if rt is Imm?
			//if (imm - 128 == VFPU_CTRL_SPREFIX) {
			//js.prefixSFlag = JitState::PREFIX_UNKNOWN;
			//} else if (imm - 128 == VFPU_CTRL_TPREFIX) {
			//	js.prefixTFlag = JitState::PREFIX_UNKNOWN;
			//} else if (imm - 128 == VFPU_CTRL_DPREFIX) {
			//	js.prefixDFlag = JitState::PREFIX_UNKNOWN;
			//}
		} else {
			//ERROR
			_dbg_assert_msg_(CPU,0,"mtv - invalid register");
		}
		break;

	default:
		DISABLE;
	}
}
// Compiles lwc1/swc1 (FPU load/store word).
// Address is either computed at compile time (imm base reg) or at runtime.
// With fastmem the address is used directly; otherwise
// SetCCAndR0ForSafeAddress leaves the condition flags set so the memory op
// and the cleanup below are predicated (EQ = address failed the check).
void Jit::Comp_FPULS(u32 op)
{
	CONDITIONAL_DISABLE;
	s32 offset = (s16)(op & 0xFFFF);
	int ft = _FT;
	int rs = _RS;
	// u32 addr = R(rs) + offset;
	// logBlocks = 1;
	bool doCheck = false;
	switch(op >> 26)
	{
	case 49: //FI(ft) = Memory::Read_U32(addr); break; //lwc1
		fpr.MapReg(ft, MAP_NOINIT | MAP_DIRTY);
		if (gpr.IsImm(rs)) {
			// Base register is a known constant: fold the full host address.
			u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
			MOVI2R(R0, addr + (u32)Memory::base);
		} else {
			gpr.MapReg(rs);
			if (g_Config.bFastMemory) {
				SetR0ToEffectiveAddress(rs, offset);
			} else {
				SetCCAndR0ForSafeAddress(rs, offset, R1);
				doCheck = true;
			}
			ADD(R0, R0, R11); // R11 = Memory::base
		}
		VLDR(fpr.R(ft), R0, 0);
		if (doCheck) {
			// Bad address: load 0.0 into the destination instead.
			SetCC(CC_EQ);
			MOVI2R(R0, 0);
			VMOV(fpr.R(ft), R0);
			SetCC(CC_AL);
		}
		break;

	case 57: //Memory::Write_U32(FI(ft), addr); break; //swc1
		fpr.MapReg(ft);
		if (gpr.IsImm(rs)) {
			u32 addr = (offset + gpr.GetImm(rs)) & 0x3FFFFFFF;
			MOVI2R(R0, addr + (u32)Memory::base);
		} else {
			gpr.MapReg(rs);
			if (g_Config.bFastMemory) {
				SetR0ToEffectiveAddress(rs, offset);
			} else {
				SetCCAndR0ForSafeAddress(rs, offset, R1);
				doCheck = true;
			}
			ADD(R0, R0, R11);
		}
		VSTR(fpr.R(ft), R0, 0); // predicated: skipped on a bad address
		if (doCheck) {
			SetCC(CC_AL); // restore unconditional emission
		}
		break;

	default:
		Comp_Generic(op);
		return;
	}
}
// Compiles the coprocessor-1 move instructions mfc1/cfc1/mtc1/ctc1.
// mfc1/mtc1 move the raw bits through the MIPS context in memory.
// cfc1/ctc1 handle the control registers: fcr31's condition bit (bit 23)
// is mirrored in the separate fpcond field of MIPSState.
void Jit::Comp_mxc1(u32 op)
{
	CONDITIONAL_DISABLE;
	int fs = _FS;
	int rt = _RT;
	switch((op >> 21) & 0x1f)
	{
	case 0: // R(rt) = FI(fs); break; //mfc1
		// Let's just go through RAM for now.
		fpr.FlushR(fs);
		gpr.MapReg(rt, MAP_DIRTY | MAP_NOINIT);
		LDR(gpr.R(rt), CTXREG, fpr.GetMipsRegOffset(fs));
		return;

	case 2: //cfc1
		if (fs == 31) {
			// rt = fcr31 with bit 23 replaced by the live fpcond flag.
			gpr.MapReg(rt, MAP_DIRTY | MAP_NOINIT);
			LDR(R0, CTXREG, offsetof(MIPSState, fpcond));
			AND(R0,R0, Operand2(1)); // Just in case
			LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
			BIC(gpr.R(rt), gpr.R(rt), Operand2(0x1 << 23));
			ORR(gpr.R(rt), gpr.R(rt), Operand2(R0, ST_LSL, 23));
		} else if (fs == 0) {
			gpr.MapReg(rt, MAP_DIRTY | MAP_NOINIT);
			LDR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr0));
		}
		// NOTE: other control registers are silently ignored here.
		return;

	case 4: //FI(fs) = R(rt); break; //mtc1
		// Let's just go through RAM for now.
		gpr.FlushR(rt);
		fpr.MapReg(fs, MAP_DIRTY | MAP_NOINIT);
		VLDR(fpr.R(fs), CTXREG, gpr.GetMipsRegOffset(rt));
		return;

	case 6: //ctc1
		if (fs == 31) {
			gpr.MapReg(rt, 0);
			// Hardware rounding method.
			// Left here in case it is faster than conditional method.
			/*
			AND(R0, gpr.R(rt), Operand2(3));
			// MIPS Rounding Mode <-> ARM Rounding Mode
			// 0, 1, 2, 3 <-> 0, 3, 1, 2
			CMP(R0, Operand2(1)); SetCC(CC_EQ); ADD(R0, R0, Operand2(2)); SetCC(CC_GT); SUB(R0, R0, Operand2(1)); SetCC(CC_AL);
			// Load and Store RM to FPSCR
			VMRS(R1);
			BIC(R1, R1, Operand2(0x3 << 22));
			ORR(R1, R1, Operand2(R0, ST_LSL, 22));
			VMSR(R1);
			*/
			// Update MIPS state: store fcr31 and mirror its bit 23 into fpcond.
			STR(gpr.R(rt), CTXREG, offsetof(MIPSState, fcr31));
			MOV(R0, Operand2(gpr.R(rt), ST_LSR, 23));
			AND(R0, R0, Operand2(1));
			STR(R0, CTXREG, offsetof(MIPSState, fpcond));
		}
		return;
	}
}
// fctiwx: convert double in FB to a signed 32-bit integer in FD using the
// FPSCR rounding mode. Inputs above/below the representable range are
// clamped via the minmaxFloat table (raising VXCVI); inexact conversions
// set XX/FI and, when rounded up in magnitude, FR. The integer result is
// OR'd with the `doublenum` pattern to form the PPC "integer in double" box.
void JitArm::fctiwx(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITFloatingPointOff)
	u32 b = inst.FB;
	u32 d = inst.FD;

	ARMReg vB = fpr.R0(b);
	ARMReg vD = fpr.R0(d);
	ARMReg V0 = fpr.GetReg();
	ARMReg V1 = fpr.GetReg();
	ARMReg V2 = fpr.GetReg();
	ARMReg rA = gpr.GetReg();
	ARMReg fpscrReg = gpr.GetReg();

	FixupBranch DoneMax, DoneMin;
	LDR(fpscrReg, R9, PPCSTATE_OFF(fpscr));
	MOVI2R(rA, (u32)minmaxFloat); // table: [min, max] clamp values

	// Check if greater than max float
	{
		VLDR(V0, rA, 8); // Load Max
		VCMPE(vB, V0);
		VMRS(_PC); // Loads in to APSR
		FixupBranch noException = B_CC(CC_LE);
		VMOV(vD, V0); // Set to max
		SetFPException(fpscrReg, FPSCR_VXCVI);
		DoneMax = B();
		SetJumpTarget(noException);
	}
	// Check if less than min float
	{
		VLDR(V0, rA, 0);
		VCMPE(vB, V0);
		VMRS(_PC);
		FixupBranch noException = B_CC(CC_GE);
		VMOV(vD, V0);
		SetFPException(fpscrReg, FPSCR_VXCVI);
		DoneMin = B();
		SetJumpTarget(noException);
	}

	// Within ranges, convert to integer
	// Set rounding mode first
	// PPC <-> ARM rounding modes
	// 0, 1, 2, 3 <-> 0, 3, 1, 2
	ARMReg rB = gpr.GetReg();
	VMRS(rA); // read FPSCR into rA
	// Bits 22-23 (ARM RMode field)
	BIC(rA, rA, Operand2(3, 5));
	LDR(rB, R9, PPCSTATE_OFF(fpscr));
	AND(rB, rB, 0x3); // Get the FPSCR rounding bits
	CMP(rB, 1);
	SetCC(CC_EQ); // zero
		ORR(rA, rA, Operand2(3, 5));
	SetCC(CC_NEQ);
		CMP(rB, 2); // +inf
	SetCC(CC_EQ);
		ORR(rA, rA, Operand2(1, 5));
	SetCC(CC_NEQ);
		CMP(rB, 3); // -inf
	SetCC(CC_EQ);
		ORR(rA, rA, Operand2(2, 5));
	SetCC();
	VMSR(rA); // install the mapped rounding mode
	// NOTE(review): the following ORR+VMSR re-writes FPSCR with the RMode
	// field forced to 0b11 (round toward zero) AFTER the conversion below;
	// presumably this leaves FPSCR in a known state for later code, but it
	// does not restore the pre-call mode — confirm against upstream.
	ORR(rA, rA, Operand2(3, 5));
	VCVT(vD, vB, TO_INT | IS_SIGNED);
	VMSR(rA);
	gpr.Unlock(rB);

	// Compare the round-tripped value with the input to set FI/FR/XX.
	VCMPE(vD, vB);
	VMRS(_PC);
	SetCC(CC_EQ);
		BIC(fpscrReg, fpscrReg, FRFIMask); // exact: clear FR and FI
		FixupBranch DoneEqual = B();
	SetCC();
	SetFPException(fpscrReg, FPSCR_XX); // inexact
	ORR(fpscrReg, fpscrReg, FIMask);
	VABS(V1, vB);
	VABS(V2, vD);
	VCMPE(V2, V1);
	VMRS(_PC);
	SetCC(CC_GT);
		ORR(fpscrReg, fpscrReg, FRMask); // rounded up in magnitude
	SetCC();
	SetJumpTarget(DoneEqual);

	SetJumpTarget(DoneMax);
	SetJumpTarget(DoneMin);

	// Box the 32-bit integer into the PPC double format.
	MOVI2R(rA, (u32)&doublenum);
	VLDR(V0, rA, 0);
	NEONXEmitter nemit(this);
	nemit.VORR(vD, vD, V0);

	if (inst.Rc) Helper_UpdateCR1(fpscrReg, rA);

	STR(fpscrReg, R9, PPCSTATE_OFF(fpscr));
	gpr.Unlock(rA);
	gpr.Unlock(fpscrReg);
	fpr.Unlock(V0);
	fpr.Unlock(V1);
	fpr.Unlock(V2);
}
// fctiwzx: like fctiwx, but always rounds toward zero (ROUND_TO_ZERO on
// the VCVT), so no FPSCR rounding-mode juggling is needed. Out-of-range
// inputs are clamped via minmaxFloat (raising VXCVI); inexact conversions
// set XX/FI and possibly FR; the result is boxed with `doublenum`.
void JitArm::fctiwzx(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITFloatingPointOff)
	u32 b = inst.FB;
	u32 d = inst.FD;

	ARMReg vB = fpr.R0(b);
	ARMReg vD = fpr.R0(d);
	ARMReg V0 = fpr.GetReg();
	ARMReg V1 = fpr.GetReg();
	ARMReg V2 = fpr.GetReg();
	ARMReg rA = gpr.GetReg();
	ARMReg fpscrReg = gpr.GetReg();

	FixupBranch DoneMax, DoneMin;
	LDR(fpscrReg, R9, PPCSTATE_OFF(fpscr));
	MOVI2R(rA, (u32)minmaxFloat); // table: [min, max] clamp values

	// Check if greater than max float
	{
		VLDR(V0, rA, 8); // Load Max
		VCMPE(vB, V0);
		VMRS(_PC); // Loads in to APSR
		FixupBranch noException = B_CC(CC_LE);
		VMOV(vD, V0); // Set to max
		SetFPException(fpscrReg, FPSCR_VXCVI);
		DoneMax = B();
		SetJumpTarget(noException);
	}
	// Check if less than min float
	{
		VLDR(V0, rA, 0);
		VCMPE(vB, V0);
		VMRS(_PC);
		FixupBranch noException = B_CC(CC_GE);
		VMOV(vD, V0);
		SetFPException(fpscrReg, FPSCR_VXCVI);
		DoneMin = B();
		SetJumpTarget(noException);
	}

	// Within ranges, convert to integer (truncating).
	VCVT(vD, vB, TO_INT | IS_SIGNED | ROUND_TO_ZERO);

	// Compare the round-tripped value with the input to set FI/FR/XX.
	VCMPE(vD, vB);
	VMRS(_PC);
	SetCC(CC_EQ);
		BIC(fpscrReg, fpscrReg, FRFIMask); // exact: clear FR and FI
		FixupBranch DoneEqual = B();
	SetCC();
	SetFPException(fpscrReg, FPSCR_XX); // inexact
	ORR(fpscrReg, fpscrReg, FIMask);
	VABS(V1, vB);
	VABS(V2, vD);
	VCMPE(V2, V1);
	VMRS(_PC);
	SetCC(CC_GT);
		ORR(fpscrReg, fpscrReg, FRMask); // rounded up in magnitude
	SetCC();
	SetJumpTarget(DoneEqual);

	SetJumpTarget(DoneMax);
	SetJumpTarget(DoneMin);

	// Box the 32-bit integer into the PPC double format.
	MOVI2R(rA, (u32)&doublenum);
	VLDR(V0, rA, 0);
	NEONXEmitter nemit(this);
	nemit.VORR(vD, vD, V0);

	if (inst.Rc) Helper_UpdateCR1(fpscrReg, rA);

	STR(fpscrReg, R9, PPCSTATE_OFF(fpscr));
	gpr.Unlock(rA);
	gpr.Unlock(fpscrReg);
	fpr.Unlock(V0);
	fpr.Unlock(V1);
	fpr.Unlock(V2);
}
// Compiles lv.q/sv.q (VFPU quad load/store: four consecutive words).
// On ARMv7-S the slow-path check branches around the memory ops (EQ =
// address failed the check); elsewhere the ops are emitted predicated and
// a conditional zero-fill handles the failing load case.
void Jit::Comp_SVQ(MIPSOpcode op)
{
	CONDITIONAL_DISABLE;

	int imm = (signed short)(op&0xFFFC);
	int vt = (((op >> 16) & 0x1f)) | ((op&1) << 5); // quad register index
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 54: //lv.q
		{
			// CC might be set by slow path below, so load regs first.
			u8 vregs[4];
			GetVectorRegs(vregs, V_Quad, vt);
			fpr.MapRegsAndSpillLockV(vregs, V_Quad, MAP_DIRTY | MAP_NOINIT);

			if (gpr.IsImm(rs)) {
				// Constant base: fold the full host address at compile time.
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11); // R11 = Memory::base
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ); // bad address: skip the loads entirely
			}
			for (int i = 0; i < 4; i++)
				VLDR(fpr.V(vregs[i]), R0, i * 4);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			for (int i = 0; i < 4; i++)
				VLDR(fpr.V(vregs[i]), R0, i * 4);
			if (doCheck) {
				// Bad address: fill the quad with zeros instead.
				SetCC(CC_EQ);
				MOVI2R(R0, 0);
				for (int i = 0; i < 4; i++)
					VMOV(fpr.V(vregs[i]), R0);
				SetCC(CC_AL);
			}
#endif
		}
		break;

	case 62: //sv.q
		{
			// CC might be set by slow path below, so load regs first.
			u8 vregs[4];
			GetVectorRegs(vregs, V_Quad, vt);
			fpr.MapRegsAndSpillLockV(vregs, V_Quad, 0);

			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11);
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			for (int i = 0; i < 4; i++)
				VSTR(fpr.V(vregs[i]), R0, i * 4);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			for (int i = 0; i < 4; i++)
				VSTR(fpr.V(vregs[i]), R0, i * 4);
			if (doCheck) {
				SetCC(CC_AL); // restore unconditional emission
			}
#endif
		}
		break;

	default:
		DISABLE;
		break;
	}
	fpr.ReleaseSpillLocksAndDiscardTemps();
}
// Compiles lv.s/sv.s (VFPU single-word load/store).
// Same addressing scheme as Comp_SVQ: constant fold when the base is an
// immediate, fastmem direct access otherwise, or a safe-address check that
// leaves CC set (EQ = invalid address) for the predicated fallback.
void Jit::Comp_SV(MIPSOpcode op)
{
	CONDITIONAL_DISABLE;

	s32 imm = (signed short)(op&0xFFFC);
	int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
	MIPSGPReg rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 50: //lv.s  // VI(vt) = Memory::Read_U32(addr);
		{
			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt, MAP_DIRTY | MAP_NOINIT);
			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11); // R11 = Memory::base
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ); // bad address: skip the load
			}
			VLDR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			VLDR(fpr.V(vt), R0, 0);
			if (doCheck) {
				// Bad address: load 0.0 instead.
				SetCC(CC_EQ);
				MOVI2F(fpr.V(vt), 0.0f, R0);
				SetCC(CC_AL);
			}
#endif
		}
		break;

	case 58: //sv.s  // Memory::Write_U32(VI(vt), addr);
		{
			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt);
			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11);
			}

#ifdef __ARM_ARCH_7S__
			FixupBranch skip;
			if (doCheck) {
				skip = B_CC(CC_EQ);
			}
			VSTR(fpr.V(vt), R0, 0);
			if (doCheck) {
				SetJumpTarget(skip);
				SetCC(CC_AL);
			}
#else
			VSTR(fpr.V(vt), R0, 0); // predicated: skipped on a bad address
			if (doCheck) {
				SetCC(CC_AL);
			}
#endif
		}
		break;

	default:
		DISABLE;
	}
}
// Shared emitter for the PPC floating-point load family:
// lfs/lfsu/lfsx/lfsux (single) and lfd/lfdu/lfdx/lfdux (double).
// Decodes the addressing variant (immediate vs. register offset, with/
// without update, RA==0 meaning zero base), skips the load if a DSI
// exception is pending, then loads either via fastmem (with byte swap via
// NEON VREV) or through the slow Memory::Read_* helpers.
void JitArm::lfXX(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITLoadStoreFloatingOff);

	ARMReg rA = gpr.GetReg();
	ARMReg rB = gpr.GetReg();
	ARMReg RA;

	u32 a = inst.RA, b = inst.RB;

	s32 offset = inst.SIMM_16;
	bool single = false; // single-precision form (result duplicated into both paired slots)
	bool update = false; // writes the effective address back to RA
	bool zeroA = false;  // RA==0 means base address 0
	s32 offsetReg = -1;  // -1: use SIMM_16; otherwise index of the offset GPR

	// Decode which lfXX variant this is.
	switch (inst.OPCD)
	{
	case 31:
		switch (inst.SUBOP10)
		{
		case 567: // lfsux
			single = true;
			update = true;
			offsetReg = b;
			break;
		case 535: // lfsx
			single = true;
			zeroA = true;
			offsetReg = b;
			break;
		case 631: // lfdux
			update = true;
			offsetReg = b;
			break;
		case 599: // lfdx
			zeroA = true;
			offsetReg = b;
			break;
		}
		break;
	case 49: // lfsu
		update = true;
		single = true;
		break;
	case 48: // lfs
		single = true;
		zeroA = true;
		break;
	case 51: // lfdu
		update = true;
		break;
	case 50: // lfd
		zeroA = true;
		break;
	}

	ARMReg v0 = fpr.R0(inst.FD), v1;
	if (single)
		v1 = fpr.R1(inst.FD);

	// Compute the effective address into rB.
	if (update)
	{
		RA = gpr.R(a); // Update path /always/ uses RA
		if (offsetReg == -1) // uses SIMM_16
		{
			MOVI2R(rB, offset);
			ADD(rB, rB, RA);
		}
		else
		{
			ADD(rB, gpr.R(offsetReg), RA);
		}
	}
	else
	{
		if (zeroA)
		{
			if (offsetReg == -1)
			{
				if (a)
				{
					RA = gpr.R(a);
					MOVI2R(rB, offset);
					ADD(rB, rB, RA);
				}
				else
				{
					MOVI2R(rB, (u32)offset);
				}
			}
			else
			{
				ARMReg RB = gpr.R(offsetReg);
				if (a)
				{
					RA = gpr.R(a);
					ADD(rB, RB, RA);
				}
				else
				{
					MOV(rB, RB);
				}
			}
		}
	}

	// Skip the load entirely if a DSI exception is already pending.
	// NOTE(review): this CMP/EQ test only matches when Exceptions ==
	// EXCEPTION_DSI exactly (no other bits set) — confirm that's intended.
	LDR(rA, R9, PPCSTATE_OFF(Exceptions));
	CMP(rA, EXCEPTION_DSI);
	FixupBranch DoNotLoad = B_CC(CC_EQ);

	if (update)
		MOV(RA, rB); // write back the effective address

	if (Core::g_CoreStartupParameter.bFastmem)
	{
		// Fastmem: mask to the emulated address space and add the host base.
		Operand2 mask(2, 1); // ~(Memory::MEMVIEW32_MASK)
		BIC(rB, rB, mask); // 1
		MOVI2R(rA, (u32)Memory::base, false); // 2-3
		ADD(rB, rB, rA); // 4

		NEONXEmitter nemit(this);
		if (single)
		{
			VLDR(S0, rB, 0);
			nemit.VREV32(I_8, D0, D0); // Byte swap to result
			VCVT(v0, S0, 0); // widen into both halves of the pair
			VCVT(v1, S0, 0);
		}
		else
		{
			VLDR(v0, rB, 0);
			nemit.VREV64(I_8, v0, v0); // Byte swap to result
		}
	}
	else
	{
		// Slow path: call into the Memory::Read_* helpers.
		PUSH(4, R0, R1, R2, R3);
		MOV(R0, rB);
		if (single)
		{
			MOVI2R(rA, (u32)&Memory::Read_U32);
			BL(rA);
			VMOV(S0, R0);
			VCVT(v0, S0, 0);
			VCVT(v1, S0, 0);
		}
		else
		{
			MOVI2R(rA, (u32)&Memory::Read_F64);
			BL(rA);
#if !defined(__ARM_PCS_VFP) // SoftFP returns in R0 and R1
			// NOTE(review): a softfp double return lives in the R0:R1 pair;
			// verify this VMOV overload moves both words, not just R0.
			VMOV(v0, R0);
#else
			VMOV(v0, D0);
#endif
		}
		POP(4, R0, R1, R2, R3);
	}
	gpr.Unlock(rA, rB);
	SetJumpTarget(DoNotLoad);
}
// Compiles lv.s/sv.s (VFPU single-word load/store) — u32-opcode variant.
// Constant-folds the address when the base register is an immediate,
// uses fastmem direct access otherwise, or emits a safe-address check that
// leaves CC set (EQ = invalid address) so the memory op and the fallback
// below are predicated.
void Jit::Comp_SV(u32 op)
{
	CONDITIONAL_DISABLE;

	s32 imm = (signed short)(op&0xFFFC);
	int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
	int rs = _RS;

	bool doCheck = false;
	switch (op >> 26)
	{
	case 50: //lv.s  // VI(vt) = Memory::Read_U32(addr);
		{
			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt, MAP_DIRTY | MAP_NOINIT);
			fpr.ReleaseSpillLocks();
			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11); // R11 = Memory::base
			}
			VLDR(fpr.V(vt), R0, 0);
			if (doCheck) {
				// Bad address: load 0 instead.
				SetCC(CC_EQ);
				MOVI2R(R0, 0);
				VMOV(fpr.V(vt), R0);
				SetCC(CC_AL);
			}
		}
		break;

	case 58: //sv.s  // Memory::Write_U32(VI(vt), addr);
		{
			// CC might be set by slow path below, so load regs first.
			fpr.MapRegV(vt);
			fpr.ReleaseSpillLocks();
			if (gpr.IsImm(rs)) {
				u32 addr = (imm + gpr.GetImm(rs)) & 0x3FFFFFFF;
				MOVI2R(R0, addr + (u32)Memory::base);
			} else {
				gpr.MapReg(rs);
				if (g_Config.bFastMemory) {
					SetR0ToEffectiveAddress(rs, imm);
				} else {
					SetCCAndR0ForSafeAddress(rs, imm, R1);
					doCheck = true;
				}
				ADD(R0, R0, R11);
			}
			VSTR(fpr.V(vt), R0, 0); // predicated: skipped on a bad address
			if (doCheck) {
				SetCC(CC_AL);
			}
		}
		break;

	default:
		DISABLE;
	}
}