// MTSAB: set the SA register to (rs & 0xF) ^ (imm & 0xF) — a byte shift
// amount. Folds to a single immediate store when rs is a known constant.
void recMTSAB( void )
{
	if( GPR_IS_CONST1(_Rs_) ) {
		// rs is constant-propagated: compute the whole expression at
		// recompile time and emit one immediate store.
		MOV32ItoM((uptr)&cpuRegs.sa, ((g_cpuConstRegs[_Rs_].UL[0] & 0xF) ^ (_Imm_ & 0xF)) );
	}
	else {
		// Runtime path: (rs & 0xF) ^ (imm & 0xF) computed in EAX.
		_eeMoveGPRtoR(EAX, _Rs_);
		AND32ItoR(EAX, 0xF);
		XOR32ItoR(EAX, _Imm_ & 0xF);
		MOV32RtoM((uptr)&cpuRegs.sa, EAX);
	}
}
void recMFSA( void ) { int mmreg; if (!_Rd_) return; mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rd_, MODE_WRITE); if( mmreg >= 0 ) { SSE_MOVLPS_M64_to_XMM(mmreg, (uptr)&cpuRegs.sa); } else if( (mmreg = _checkMMXreg(MMX_GPR+_Rd_, MODE_WRITE)) >= 0 ) { MOVDMtoMMX(mmreg, (uptr)&cpuRegs.sa); SetMMXstate(); } else { MOV32MtoR(EAX, (u32)&cpuRegs.sa); _deleteEEreg(_Rd_, 0); MOV32RtoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[0], EAX); MOV32ItoM((uptr)&cpuRegs.GPR.r[_Rd_].UL[1], 0); } }
// SA is 4-bit and contains the amount of bytes to shift void recMTSA( void ) { if( GPR_IS_CONST1(_Rs_) ) { MOV32ItoM((uptr)&cpuRegs.sa, g_cpuConstRegs[_Rs_].UL[0] & 0xf ); } else { int mmreg; if( (mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rs_, MODE_READ)) >= 0 ) { SSE_MOVSS_XMM_to_M32((uptr)&cpuRegs.sa, mmreg); } else if( (mmreg = _checkMMXreg(MMX_GPR+_Rs_, MODE_READ)) >= 0 ) { MOVDMMXtoM((uptr)&cpuRegs.sa, mmreg); SetMMXstate(); } else { MOV32MtoR(EAX, (uptr)&cpuRegs.GPR.r[_Rs_].UL[0]); MOV32RtoM((uptr)&cpuRegs.sa, EAX); } AND32ItoM((uptr)&cpuRegs.sa, 0xf); } }
// EE XMM allocation code int eeRecompileCodeXMM(int xmminfo) { int info = PROCESS_EE_XMM; // flush consts if( xmminfo & XMMINFO_READT ) { if( GPR_IS_CONST1( _Rt_ ) && !(g_cpuFlushedConstReg&(1<<_Rt_)) ) { MOV32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 0 ], g_cpuConstRegs[_Rt_].UL[0]); MOV32ItoM((int)&cpuRegs.GPR.r[ _Rt_ ].UL[ 1 ], g_cpuConstRegs[_Rt_].UL[1]); g_cpuFlushedConstReg |= (1<<_Rt_); } } if( xmminfo & XMMINFO_READS) { if( GPR_IS_CONST1( _Rs_ ) && !(g_cpuFlushedConstReg&(1<<_Rs_)) ) { MOV32ItoM((int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ], g_cpuConstRegs[_Rs_].UL[0]); MOV32ItoM((int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 1 ], g_cpuConstRegs[_Rs_].UL[1]); g_cpuFlushedConstReg |= (1<<_Rs_); } } if( xmminfo & XMMINFO_WRITED ) { GPR_DEL_CONST(_Rd_); } // add needed if( xmminfo & (XMMINFO_READLO|XMMINFO_WRITELO) ) { _addNeededGPRtoXMMreg(XMMGPR_LO); } if( xmminfo & (XMMINFO_READHI|XMMINFO_WRITEHI) ) { _addNeededGPRtoXMMreg(XMMGPR_HI); } if( xmminfo & XMMINFO_READS) _addNeededGPRtoXMMreg(_Rs_); if( xmminfo & XMMINFO_READT) _addNeededGPRtoXMMreg(_Rt_); if( xmminfo & XMMINFO_WRITED ) _addNeededGPRtoXMMreg(_Rd_); // allocate if( xmminfo & XMMINFO_READS) { int reg = _allocGPRtoXMMreg(-1, _Rs_, MODE_READ); info |= PROCESS_EE_SET_S(reg)|PROCESS_EE_SETMODES(reg); } if( xmminfo & XMMINFO_READT) { int reg = _allocGPRtoXMMreg(-1, _Rt_, MODE_READ); info |= PROCESS_EE_SET_T(reg)|PROCESS_EE_SETMODET(reg); } if( xmminfo & XMMINFO_WRITED ) { int readd = MODE_WRITE|((xmminfo&XMMINFO_READD)?((xmminfo&XMMINFO_READD_LO)?(MODE_READ|MODE_READHALF):MODE_READ):0); int regd = _checkXMMreg(XMMTYPE_GPRREG, _Rd_, readd); if( regd < 0 ) { if( !(xmminfo&XMMINFO_READD) && (xmminfo & XMMINFO_READT) && (_Rt_ == 0 || (g_pCurInstInfo->regs[_Rt_] & EEINST_LASTUSE) || !EEINST_ISLIVEXMM(_Rt_)) ) { _freeXMMreg(EEREC_T); _deleteMMXreg(MMX_GPR+_Rd_, 2); xmmregs[EEREC_T].inuse = 1; xmmregs[EEREC_T].reg = _Rd_; xmmregs[EEREC_T].mode = readd; regd = EEREC_T; } else if( !(xmminfo&XMMINFO_READD) && (xmminfo & XMMINFO_READS) && 
(_Rs_ == 0 || (g_pCurInstInfo->regs[_Rs_] & EEINST_LASTUSE) || !EEINST_ISLIVEXMM(_Rs_)) ) { _freeXMMreg(EEREC_S); _deleteMMXreg(MMX_GPR+_Rd_, 2); xmmregs[EEREC_S].inuse = 1; xmmregs[EEREC_S].reg = _Rd_; xmmregs[EEREC_S].mode = readd; regd = EEREC_S; } else regd = _allocGPRtoXMMreg(-1, _Rd_, readd); } info |= PROCESS_EE_SET_D(regd); } if( xmminfo & (XMMINFO_READLO|XMMINFO_WRITELO) ) { info |= PROCESS_EE_SET_LO(_allocGPRtoXMMreg(-1, XMMGPR_LO, ((xmminfo&XMMINFO_READLO)?MODE_READ:0)|((xmminfo&XMMINFO_WRITELO)?MODE_WRITE:0))); info |= PROCESS_EE_LO; } if( xmminfo & (XMMINFO_READHI|XMMINFO_WRITEHI) ) { info |= PROCESS_EE_SET_HI(_allocGPRtoXMMreg(-1, XMMGPR_HI, ((xmminfo&XMMINFO_READHI)?MODE_READ:0)|((xmminfo&XMMINFO_WRITEHI)?MODE_WRITE:0))); info |= PROCESS_EE_HI; } return info; }