void recMicroVU1::Execute(u32 cycles)
{
	// Dispatches into the recompiled VU1 program at the current TPC.
	// (The 'cycles' parameter is unused here; the run length comes from vu1RunCycles.)
	pxAssert(mvu1_allocated); // please allocate me first! :|

	// VPU_STAT bit 8 indicates VU1 is running; do nothing when it is clear.
	const bool vu1Busy = (VU0.VI[REG_VPU_STAT].UL & 0x100) != 0;
	if (vu1Busy)
		((mVUrecCall)microVU1.startFunct)(VU1.VI[REG_TPC].UL, vu1RunCycles);
}
void xImpl_DwordShift::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, const xRegisterCL& /* clreg */ ) const
{
	// Double-precision shift, count taken from CL.  CL is implied by the encoding,
	// so the register parameter exists only for overload selection.
	pxAssert( to->GetOperandSize() == from->GetOperandSize() );

	// The shift-by-CL encoding is one opcode above the shift-by-immediate form.
	const int opcode = OpcodeBase + 1;
	xOpWrite0F( from->GetPrefix16(), opcode, to, from );
}
wxString pxGetAppName()
{
	// The global wxApp instance must exist before its name can be queried.
	pxAssert( wxTheApp );
	const wxString appname( wxTheApp->GetAppName() );
	return appname;
}
void recMicroVU1::ResumeXGkick() { pxAssert(m_Reserved); // please allocate me first! :| if(!(VU0.VI[REG_VPU_STAT].UL & 0x100)) return; ((mVUrecCallXG)microVU1.startFunctXG)(); }
// Generates a 'reduced' ModSib form, which has valid Base, Index, and Scale values.
// Necessary because by default ModSib compounds registers into Index when possible.
//
// If the ModSib is in illegal form ([Base + Index*5] for example) then an assertion
// followed by an InvalidParameter Exception will be tossed around in haphazard
// fashion.
//
// Optimization Note: Currently VC does a piss poor job of inlining this, even though
// constant propagation *should* resolve it to little or no code (VC's constprop fails
// on C++ class initializers). There is a work around [using array initializers instead]
// but it's too much trouble for code that isn't performance critical anyway.
// And, with luck, maybe VC10 will optimize it better and make it a non-issue. :D
//
void xIndirectVoid::Reduce()
{
	if( Index.IsStackPointer() )
	{
		// esp cannot be encoded as the index, so move it to the Base, if possible.
		// note: intentionally leave index assigned to esp also (generates correct
		// encoding later, since ESP cannot be encoded 'alone')

		pxAssert( Scale == 0 );		// esp can't have an index modifier!
		pxAssert( Base.IsEmpty() );	// base must be empty or else!

		Base = Index;
		return;
	}

	// If no index reg, then load the base register into the index slot.
	if( Index.IsEmpty() )
	{
		Index = Base;
		Scale = 0;
		if( !Base.IsStackPointer() )	// prevent ESP from being encoded 'alone'
			Base = xEmptyReg;
		return;
	}

	// The Scale has a series of valid forms, all shown here:
	// (x86 SIB encodes only power-of-two scales 1/2/4/8, stored as 0..3; the
	// odd multipliers are synthesized as [reg*pow2 + reg] when Base is free.)
	switch( Scale )
	{
		case 0: break;				// no scale -- leave as-is
		case 1: Scale = 0; break;	// *1 encodes as scale field 0
		case 2: Scale = 1; break;	// *2 encodes as scale field 1

		case 3:				// becomes [reg*2+reg]
			pxAssertDev( Base.IsEmpty(), "Cannot scale an Index register by 3 when Base is not empty!" );
			Base = Index;
			Scale = 1;
		break;

		case 4: Scale = 2; break;	// *4 encodes as scale field 2

		case 5:				// becomes [reg*4+reg]
			pxAssertDev( Base.IsEmpty(), "Cannot scale an Index register by 5 when Base is not empty!" );
			Base = Index;
			Scale = 2;
		break;

		case 6:				// invalid!
			pxAssumeDev( false, "x86 asm cannot scale a register by 6." );
		break;

		case 7:				// so invalid!
			pxAssumeDev( false, "x86 asm cannot scale a register by 7." );
		break;

		case 8: Scale = 3; break;	// *8 encodes as scale field 3

		case 9:				// becomes [reg*8+reg]
			pxAssertDev( Base.IsEmpty(), "Cannot scale an Index register by 9 when Base is not empty!" );
			Base = Index;
			Scale = 3;
		break;

		jNO_DEFAULT
	}
}
void operator+=( wxWindow& target, const pxStretchSpacer& spacer )
{
	// A window without a sizer cannot take a stretch spacer; assert and bail.
	wxSizer* sizer = target.GetSizer();
	if( !pxAssert( sizer != NULL ) ) return;
	sizer->AddStretchSpacer( spacer.proportion );
}
static void psxCheckEndGate32(int gate)
{
	// The 32-bit end-gate path services gate 3 exclusively.
	pxAssert(gate == 3);
	_psxCheckEndGate( gate );
}
// -------------------------------------------------------------------------------------- // EventListener_PageFault (implementations) // -------------------------------------------------------------------------------------- EventListener_PageFault::EventListener_PageFault() { pxAssert(Source_PageFault); Source_PageFault->Add( *this ); }
void psxCheckEndGate16(int gate)
{
	// 16-bit end gates exist only for counters 0 through 2.
	pxAssert(gate < 3);
	_psxCheckEndGate( gate );
}
static void psxCheckStartGate32(int i) { // 32 bit gate is called for gate 3 only. Ever. pxAssert(i == 3); _psxCheckStartGate( i ); }
bool Pcsx2Config::MultitapEnabled( uint port ) const
{
	// Only two multitap-capable ports exist; anything else is a caller bug.
	pxAssert( port < 2 );
	if( port == 0 )
		return MultitapPort0_Enabled;
	return MultitapPort1_Enabled;
}
// Recompiles the JALR (Jump And Link Register) instruction: jumps to GPR[rs],
// writing the link address into GPR[rd].
void recJALR()
{
	// Link value.  At this point 'pc' has already advanced past the current
	// instruction, so pc+4 skips the delay slot -- presumably the address of
	// the instruction following it (TODO confirm against interpreter JALR).
	int newpc = pc + 4;

	// Stash the jump target in ESI so it survives recompilation of the delay slot.
	_allocX86reg(esi, X86TYPE_PCWRITEBACK, 0, MODE_WRITE);
	_eeMoveGPRtoR(esi, _Rs_);

	// Goemon games store virtual addresses in rs; translate virtual -> physical.
	if (EmuConfig.Gamefixes.GoemonTlbHack) {
		xMOV(ecx, esi);
		vtlb_DynV2P();
		xMOV(esi, eax);
	}

	// uncomment when there are NO instructions that need to call interpreter
//	int mmreg;
//	if( GPR_IS_CONST1(_Rs_) )
//		xMOV(ptr32[&cpuRegs.pc], g_cpuConstRegs[_Rs_].UL[0] );
//	else {
//		int mmreg;
//
//		if( (mmreg = _checkXMMreg(XMMTYPE_GPRREG, _Rs_, MODE_READ)) >= 0 ) {
//			xMOVSS(ptr[&cpuRegs.pc], xRegisterSSE(mmreg));
//		}
//		else if( (mmreg = _checkMMXreg(MMX_GPR+_Rs_, MODE_READ)) >= 0 ) {
//			xMOVD(ptr[&cpuRegs.pc], xRegisterMMX(mmreg));
//			SetMMXstate();
//		}
//		else {
//			xMOV(eax, ptr[(void*)((int)&cpuRegs.GPR.r[ _Rs_ ].UL[ 0 ] )]);
//			xMOV(ptr[&cpuRegs.pc], eax);
//		}
//	}

	// Write the link address into rd (constant-propagated when enabled).
	if ( _Rd_ )
	{
		_deleteEEreg(_Rd_, 0);
		if(EE_CONST_PROP)
		{
			GPR_SET_CONST(_Rd_);
			g_cpuConstRegs[_Rd_].UL[0] = newpc;
			g_cpuConstRegs[_Rd_].UL[1] = 0;
		}
		else
		{
			xMOV(ptr32[&cpuRegs.GPR.r[_Rd_].UL[0]], newpc);
			xMOV(ptr32[&cpuRegs.GPR.r[_Rd_].UL[1]], 0);
		}
	}

	_clearNeededMMXregs();
	_clearNeededXMMregs();

	// Recompile the branch delay slot before committing the new pc.
	recompileNextInstruction(1);

	// If ESI still holds the writeback target, commit it directly; otherwise
	// the delay slot evicted it and the target was spilled to g_recWriteback.
	if( x86regs[esi.GetId()].inuse ) {
		pxAssert( x86regs[esi.GetId()].type == X86TYPE_PCWRITEBACK );
		xMOV(ptr[&cpuRegs.pc], esi);
		x86regs[esi.GetId()].inuse = 0;
	}
	else {
		xMOV(eax, ptr[&g_recWriteback]);
		xMOV(ptr[&cpuRegs.pc], eax);
	}

	// 0xffffffff = register-indirect branch (target not known at compile time).
	SetBranchReg(0xffffffff);
}
void recMicroVU0::Clear(u32 addr, u32 size)
{
	// Invalidates recompiled VU0 micro-program blocks overlapping the given
	// address range, forcing recompilation on next execution.
	pxAssert(mvu0_allocated); // please allocate me first! :|

	// Fixed: the argument had been mangled to the mojibake 'µVU0' (an HTML
	// '&micro;' entity swallowing the '&micro' prefix of '&microVU0').
	mVUclear(&microVU0, addr, size);
}
void xImpl_DwordShift::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from, u8 shiftcnt ) const
{
	// Double-precision shift by an immediate count.
	pxAssert( to->GetOperandSize() == from->GetOperandSize() );

	// A zero count emits no instruction at all (the shift would be a no-op).
	if( shiftcnt == 0 )
		return;

	xOpWrite0F( from->GetPrefix16(), OpcodeBase, to, from, shiftcnt );
}
// Allocates (or re-uses) an XMM register holding EE GPR 'gprreg'.
//   xmmreg - preferred hardware register, or -1 to pick a free one.
//   mode   - MODE_READ and/or MODE_WRITE flags describing intended access.
// Returns the index of the XMM register now mapped to the GPR.
int _allocGPRtoXMMreg(int xmmreg, int gprreg, int mode)
{
	int i;

	// First pass: the GPR may already live in an XMM register -- re-use it.
	for (i=0; (uint)i<iREGCNT_XMM; i++)
	{
		if (xmmregs[i].inuse == 0) continue;
		if (xmmregs[i].type != XMMTYPE_GPRREG) continue;
		if (xmmregs[i].reg != gprreg) continue;

		// The same GPR must never be cached in MMX and XMM simultaneously.
		pxAssert( _checkMMXreg(MMX_GPR|gprreg, mode) == -1 );

		g_xmmtypes[i] = XMMT_INT;

		// Upgrade a write-only mapping to readable by loading current contents.
		if (!(xmmregs[i].mode & MODE_READ) && (mode & MODE_READ))
		{
			if (gprreg == 0 )
			{
				// GPR 0 is hardwired to zero on MIPS.
				SSEX_PXOR_XMM_to_XMM(i, i);
			}
			else
			{
				//pxAssert( !(g_cpuHasConstReg & (1<<gprreg)) || (g_cpuFlushedConstReg & (1<<gprreg)) );
				_flushConstReg(gprreg);
				SSEX_MOVDQA_M128_to_XMM(i, (uptr)&cpuRegs.GPR.r[gprreg].UL[0]);
			}
			xmmregs[i].mode |= MODE_READ;
		}

		// Writing invalidates any constant-propagated value for this GPR.
		if ((mode & MODE_WRITE) && (gprreg < 32))
		{
			g_cpuHasConstReg &= ~(1<<gprreg);
			//pxAssert( !(g_cpuHasConstReg & (1<<gprreg)) );
		}

		xmmregs[i].counter = g_xmmAllocCounter++; // update counter
		xmmregs[i].needed = 1;
		xmmregs[i].mode|= mode;
		return i;
	}

	// currently only gpr regs are const
	// fixme - do we really need to execute this both here and in the loop?
	if ((mode & MODE_WRITE) && gprreg < 32)
	{
		//pxAssert( !(g_cpuHasConstReg & (1<<gprreg)) );
		g_cpuHasConstReg &= ~(1<<gprreg);
	}

	// No existing mapping: grab a register and set up fresh bookkeeping.
	if (xmmreg == -1)
		xmmreg = _getFreeXMMreg();

	g_xmmtypes[xmmreg] = XMMT_INT;
	xmmregs[xmmreg].inuse = 1;
	xmmregs[xmmreg].type = XMMTYPE_GPRREG;
	xmmregs[xmmreg].reg = gprreg;
	xmmregs[xmmreg].mode = mode;
	xmmregs[xmmreg].needed = 1;
	xmmregs[xmmreg].counter = g_xmmAllocCounter++;

	if (mode & MODE_READ)
	{
		if (gprreg == 0 )
		{
			// GPR 0 reads as zero.
			SSEX_PXOR_XMM_to_XMM(xmmreg, xmmreg);
		}
		else
		{
			// DOX86
			int mmxreg;

			if (mode & MODE_READ) _flushConstReg(gprreg);

			// The low 64 bits may currently be cached in an MMX register.
			mmxreg = _checkMMXreg(MMX_GPR+gprreg, 0);

			if (mmxreg >= 0 )
			{
				// transfer: low qword from MMX, high qword from memory.
				SetMMXstate();
				SSE2_MOVQ2DQ_MM_to_XMM(xmmreg, mmxreg);
				SSE2_PUNPCKLQDQ_XMM_to_XMM(xmmreg, xmmreg);
				SSE2_PUNPCKHQDQ_M128_to_XMM(xmmreg, (u32)&cpuRegs.GPR.r[gprreg].UL[0]);

				if (mmxregs[mmxreg].mode & MODE_WRITE )
				{
					// instead of setting to write, just flush to mem
					if (!(mode & MODE_WRITE))
					{
						SetMMXstate();
						MOVQRtoM((u32)&cpuRegs.GPR.r[gprreg].UL[0], mmxreg);
					}
					//xmmregs[xmmreg].mode |= MODE_WRITE;
				}

				// don't flush
				mmxregs[mmxreg].inuse = 0;
			}
			else
				SSEX_MOVDQA_M128_to_XMM(xmmreg, (uptr)&cpuRegs.GPR.r[gprreg].UL[0]);
		}
	}
	else
		// Write-only: drop any stale MMX mapping without flushing it.
		_deleteMMXreg(MMX_GPR+gprreg, 0);

	return xmmreg;
}
// Services the IPU0 (from-IPU) DMA channel: drains the IPU output FIFO into
// memory at MADR, handles DMAC stall-control bookkeeping, and schedules the
// completion interrupt.
void IPU0dma()
{
	// Nothing in the output FIFO yet: re-schedule and let the IPU catch up.
	if(!ipuRegs.ctrl.OFC)
	{
		IPU_INT_FROM( 64 );
		IPUProcessInterrupt();
		return;
	}

	int readsize;
	tDMA_TAG* pMem;

	// Channel not started, transfer already pending, or nothing left to move --
	// none of these should normally reach this point.
	if ((!(ipu0ch.chcr.STR) || (cpuRegs.interrupt & (1 << DMAC_FROM_IPU))) || (ipu0ch.qwc == 0))
	{
		DevCon.Warning("How??");
		return;
	}

	// Tag transfer is not supported on this channel.
	pxAssert(!(ipu0ch.chcr.TTE));

	IPU_LOG("dmaIPU0 chcr = %lx, madr = %lx, qwc = %lx", ipu0ch.chcr._u32, ipu0ch.madr, ipu0ch.qwc);

	pxAssert(ipu0ch.chcr.MOD == NORMAL_MODE);

	pMem = dmaGetAddr(ipu0ch.madr, true);

	// Move at most as many qwords as the FIFO currently holds.
	readsize = std::min(ipu0ch.qwc, (u16)ipuRegs.ctrl.OFC);
	ipu_fifo.out.read(pMem, readsize);

	ipu0ch.madr += readsize << 4;	// madr advances in bytes (16 per qword)
	ipu0ch.qwc -= readsize; // note: qwc is u16

	// Stall control: publish our progress and wake any channel stalled on us.
	if (dmacRegs.ctrl.STS == STS_fromIPU)   // STS == fromIPU
	{
		dmacRegs.stadr.ADDR = ipu0ch.madr;
		switch (dmacRegs.ctrl.STD)
		{
			case NO_STD:
				break;
			case STD_GIF: // GIF
				//DevCon.Warning("GIFSTALL");
				g_nDMATransfer.GIFSTALL = true;
				break;
			case STD_VIF1: // VIF
				//DevCon.Warning("VIFSTALL");
				g_nDMATransfer.VIFSTALL = true;
				break;
			case STD_SIF1:
				// DevCon.Warning("SIFSTALL");
				g_nDMATransfer.SIFSTALL = true;
				break;
		}
	}

	//Fixme ( voodoocycles ):
	//This was IPU_INT_FROM(readsize*BIAS );
	//This broke vids in Digital Devil Saga
	//Note that interrupting based on totalsize is just guessing..
	IPU_INT_FROM( readsize * BIAS );

	if(ipuRegs.ctrl.IFC > 0) IPUProcessInterrupt();

	//return readsize;
}
// Releases the XMM register caching VU vector-float register 'reg'.
//   vu    - 0 selects VU0, nonzero selects VU1.
//   flush - 0: free (flushing via _freeXMMreg); 1: write back if dirty but keep
//           the mapping as read-only; 2: discard without writing back.
void _deleteVFtoXMMreg(int reg, int vu, int flush)
{
	int i;
	VURegs *VU = vu ? &VU1 : &VU0;	// used by the VU_VFx_ADDR() address macro

	for (i=0; (uint)i<iREGCNT_XMM; i++)
	{
		if (xmmregs[i].inuse && (xmmregs[i].type == XMMTYPE_VFREG) && (xmmregs[i].reg == reg) && (xmmregs[i].VU == vu))
		{
			switch(flush)
			{
				case 0:
					_freeXMMreg(i);
					break;

				case 1:
					if( xmmregs[i].mode & MODE_WRITE )
					{
						// VF0 is constant and must never be marked dirty.
						pxAssert( reg != 0 );

						if( xmmregs[i].mode & MODE_VUXYZ )
						{
							if( xmmregs[i].mode & MODE_VUZ )
							{
								// xyz, don't destroy w
								uint t0reg;
								// Borrow any free XMM reg to extract z without touching w.
								for (t0reg = 0; t0reg < iREGCNT_XMM; ++t0reg)
								{
									if (!xmmregs[t0reg].inuse ) break;
								}

								if (t0reg < iREGCNT_XMM )
								{
									SSE_MOVHLPS_XMM_to_XMM(t0reg, i);
									SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[i].reg), i);
									SSE_MOVSS_XMM_to_M32(VU_VFx_ADDR(xmmregs[i].reg)+8, t0reg);
								}
								else
								{
									// no free reg -- shuffle z into lane 0, store, shuffle back.
									SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[i].reg), i);
									SSE_SHUFPS_XMM_to_XMM(i, i, 0xc6);
									SSE_MOVSS_XMM_to_M32(VU_VFx_ADDR(xmmregs[i].reg)+8, i);
									SSE_SHUFPS_XMM_to_XMM(i, i, 0xc6);
								}
							}
							else
							{
								// xy
								SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[i].reg), i);
							}
						}
						else SSE_MOVAPS_XMM_to_M128(VU_VFx_ADDR(xmmregs[i].reg), i);

						// get rid of MODE_WRITE since don't want to flush again
						xmmregs[i].mode &= ~MODE_WRITE;
						xmmregs[i].mode |= MODE_READ;
					}
					break;

				case 2:
					xmmregs[i].inuse = 0;
					break;
			}

			return;
		}
	}
}
////////////////////////////////////////////////////////////////////////////////////////// // Conditionally generates Sib encoding information! // // regfield - register field to be written to the ModRm. This is either a register specifier // or an opcode extension. In either case, the instruction determines the value for us. // void EmitSibMagic( uint regfield, const xIndirectVoid& info ) { pxAssertDev( regfield < 8, "Invalid x86 register identifier." ); int displacement_size = (info.Displacement == 0) ? 0 : ( ( info.IsByteSizeDisp() ) ? 1 : 2 ); pxAssert(!info.Base.IsEmpty() || !info.Index.IsEmpty() || displacement_size == 2); if( !NeedsSibMagic( info ) ) { // Use ModRm-only encoding, with the rm field holding an index/base register, if // one has been specified. If neither register is specified then use Disp32 form, // which is encoded as "EBP w/o displacement" (which is why EBP must always be // encoded *with* a displacement of 0, if it would otherwise not have one). if( info.Index.IsEmpty() ) { EmitSibMagic( regfield, (void*)info.Displacement ); return; } else { if( info.Index == ebp && displacement_size == 0 ) displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]! ModRM( displacement_size, regfield, info.Index.Id ); } } else { // In order to encode "just" index*scale (and no base), we have to encode // it as a special [index*scale + displacement] form, which is done by // specifying EBP as the base register and setting the displacement field // to zero. (same as ModRm w/o SIB form above, basically, except the // ModRm_UseDisp flag is specified in the SIB instead of the ModRM field). if( info.Base.IsEmpty() ) { ModRM( 0, regfield, ModRm_UseSib ); SibSB( info.Scale, info.Index.Id, ModRm_UseDisp32 ); xWrite<s32>( info.Displacement ); return; } else { if( info.Base == ebp && displacement_size == 0 ) displacement_size = 1; // forces [ebp] to be encoded as [ebp+0]! 
ModRM( displacement_size, regfield, ModRm_UseSib ); SibSB( info.Scale, info.Index.Id, info.Base.Id ); } } if( displacement_size != 0 ) { if( displacement_size == 1 ) xWrite<s8>( info.Displacement ); else xWrite<s32>( info.Displacement ); } }
// Frees hardware XMM register 'xmmreg', writing its contents back to the
// appropriate emulated register (VU VF/ACC, EE GPR, FPU reg/ACC) first when
// the register is dirty (MODE_WRITE).
void _freeXMMreg(u32 xmmreg)
{
	pxAssert( xmmreg < iREGCNT_XMM );

	if (!xmmregs[xmmreg].inuse) return;

	if (xmmregs[xmmreg].mode & MODE_WRITE) {
		switch (xmmregs[xmmreg].type)
		{
			case XMMTYPE_VFREG:
			{
				const VURegs *VU = xmmregs[xmmreg].VU ? &VU1 : &VU0;	// used by VU_VFx_ADDR()
				if( xmmregs[xmmreg].mode & MODE_VUXYZ )
				{
					if( xmmregs[xmmreg].mode & MODE_VUZ )
					{
						// don't destroy w
						uint t0reg;
						// Borrow any free XMM register to store z without clobbering w.
						for(t0reg = 0; t0reg < iREGCNT_XMM; ++t0reg ) {
							if( !xmmregs[t0reg].inuse ) break;
						}

						if( t0reg < iREGCNT_XMM )
						{
							SSE_MOVHLPS_XMM_to_XMM(t0reg, xmmreg);
							SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[xmmreg].reg), xmmreg);
							SSE_MOVSS_XMM_to_M32(VU_VFx_ADDR(xmmregs[xmmreg].reg)+8, t0reg);
						}
						else
						{
							// no free reg -- shuffle z into lane 0, store it, shuffle back.
							SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[xmmreg].reg), xmmreg);
							SSE_SHUFPS_XMM_to_XMM(xmmreg, xmmreg, 0xc6);
							//SSE_MOVHLPS_XMM_to_XMM(xmmreg, xmmreg);
							SSE_MOVSS_XMM_to_M32(VU_VFx_ADDR(xmmregs[xmmreg].reg)+8, xmmreg);
							SSE_SHUFPS_XMM_to_XMM(xmmreg, xmmreg, 0xc6);
						}
					}
					else
					{
						SSE_MOVLPS_XMM_to_M64(VU_VFx_ADDR(xmmregs[xmmreg].reg), xmmreg);
					}
				}
				else
				{
					SSE_MOVAPS_XMM_to_M128(VU_VFx_ADDR(xmmregs[xmmreg].reg), xmmreg);
				}
			}
			break;

			case XMMTYPE_ACC:
			{
				const VURegs *VU = xmmregs[xmmreg].VU ? &VU1 : &VU0;	// used by VU_ACCx_ADDR
				if( xmmregs[xmmreg].mode & MODE_VUXYZ )
				{
					if( xmmregs[xmmreg].mode & MODE_VUZ )
					{
						// don't destroy w
						uint t0reg;

						for(t0reg = 0; t0reg < iREGCNT_XMM; ++t0reg ) {
							if( !xmmregs[t0reg].inuse ) break;
						}

						if( t0reg < iREGCNT_XMM )
						{
							SSE_MOVHLPS_XMM_to_XMM(t0reg, xmmreg);
							SSE_MOVLPS_XMM_to_M64(VU_ACCx_ADDR, xmmreg);
							SSE_MOVSS_XMM_to_M32(VU_ACCx_ADDR+8, t0reg);
						}
						else
						{
							// no free reg
							SSE_MOVLPS_XMM_to_M64(VU_ACCx_ADDR, xmmreg);
							SSE_SHUFPS_XMM_to_XMM(xmmreg, xmmreg, 0xc6);
							//SSE_MOVHLPS_XMM_to_XMM(xmmreg, xmmreg);
							SSE_MOVSS_XMM_to_M32(VU_ACCx_ADDR+8, xmmreg);
							SSE_SHUFPS_XMM_to_XMM(xmmreg, xmmreg, 0xc6);
						}
					}
					else
					{
						SSE_MOVLPS_XMM_to_M64(VU_ACCx_ADDR, xmmreg);
					}
				}
				else
				{
					SSE_MOVAPS_XMM_to_M128(VU_ACCx_ADDR, xmmreg);
				}
			}
			break;

			case XMMTYPE_GPRREG:
				// GPR 0 is constant zero and must never be written back.
				pxAssert( xmmregs[xmmreg].reg != 0 );
				//pxAssert( g_xmmtypes[xmmreg] == XMMT_INT );
				SSEX_MOVDQA_XMM_to_M128((uptr)&cpuRegs.GPR.r[xmmregs[xmmreg].reg].UL[0], xmmreg);
				break;

			case XMMTYPE_FPREG:
				SSE_MOVSS_XMM_to_M32((uptr)&fpuRegs.fpr[xmmregs[xmmreg].reg], xmmreg);
				break;

			case XMMTYPE_FPACC:
				SSE_MOVSS_XMM_to_M32((uptr)&fpuRegs.ACC.f, xmmreg);
				break;

			default:
				break;
		}
	}

	xmmregs[xmmreg].mode &= ~(MODE_WRITE|MODE_VUXYZ);
	xmmregs[xmmreg].inuse = 0;
}
void operator+=( wxWindow& target, int spacer )
{
	// A window without a sizer cannot take a fixed spacer; assert and bail.
	wxSizer* sizer = target.GetSizer();
	if( !pxAssert( sizer != NULL ) ) return;
	sizer->AddSpacer( spacer );
}
// rd = rs op rt
// Shared register-allocation wrapper for EE FPU instructions: allocates/reuses
// XMM registers for Fs/Ft/Fd/ACC per the XMMINFO_* flags in 'xmminfo', encodes
// the resulting assignments into 'info', and invokes the XMM recompiler
// callback 'xmmcode'.  ('fpucode' is unused here -- presumably the interpreter
// fallback handled by the caller; TODO confirm.)
void eeFPURecompileCode(R5900FNPTR_INFO xmmcode, R5900FNPTR fpucode, int xmminfo)
{
	int mmregs=-1, mmregt=-1, mmregd=-1, mmregacc=-1;
	int info = PROCESS_EE_XMM;

	// Mark every register this instruction touches as needed so the allocator
	// doesn't evict them mid-setup.
	if( xmminfo & XMMINFO_READS ) _addNeededFPtoXMMreg(_Fs_);
	if( xmminfo & XMMINFO_READT ) _addNeededFPtoXMMreg(_Ft_);
	if( xmminfo & (XMMINFO_WRITED|XMMINFO_READD) ) _addNeededFPtoXMMreg(_Fd_);
	if( xmminfo & (XMMINFO_WRITEACC|XMMINFO_READACC) ) _addNeededFPACCtoXMMreg();

	// For last-use registers, only check for an existing mapping (no fresh alloc).
	if( xmminfo & XMMINFO_READT ) {
		if( g_pCurInstInfo->fpuregs[_Ft_] & EEINST_LASTUSE ) mmregt = _checkXMMreg(XMMTYPE_FPREG, _Ft_, MODE_READ);
		else mmregt = _allocFPtoXMMreg(-1, _Ft_, MODE_READ);
	}

	if( xmminfo & XMMINFO_READS ) {
		if( ( !(xmminfo & XMMINFO_READT) || (mmregt >= 0) ) && (g_pCurInstInfo->fpuregs[_Fs_] & EEINST_LASTUSE) ) {
			mmregs = _checkXMMreg(XMMTYPE_FPREG, _Fs_, MODE_READ);
		}
		else mmregs = _allocFPtoXMMreg(-1, _Fs_, MODE_READ);
	}

	if( mmregs >= 0 ) info |= PROCESS_EE_SETMODES_XMM(mmregs);
	if( mmregt >= 0 ) info |= PROCESS_EE_SETMODET_XMM(mmregt);

	if( xmminfo & XMMINFO_READD ) {
		pxAssert( xmminfo & XMMINFO_WRITED );
		mmregd = _allocFPtoXMMreg(-1, _Fd_, MODE_READ);
	}

	if( xmminfo & XMMINFO_READACC ) {
		if( !(xmminfo&XMMINFO_WRITEACC) && (g_pCurInstInfo->fpuregs[_Ft_] & EEINST_LASTUSE) )
			mmregacc = _checkXMMreg(XMMTYPE_FPACC, 0, MODE_READ);
		else mmregacc = _allocFPACCtoXMMreg(-1, MODE_READ);
	}

	if( xmminfo & XMMINFO_WRITEACC ) {

		// check for last used, if so don't alloc a new XMM reg
		int readacc = MODE_WRITE|((xmminfo&XMMINFO_READACC)?MODE_READ:0);

		mmregacc = _checkXMMreg(XMMTYPE_FPACC, 0, readacc);

		if( mmregacc < 0 ) {
			// Recycle a dying Ft or Fs register as the ACC destination when possible.
			if( (xmminfo&XMMINFO_READT) && mmregt >= 0 && (FPUINST_LASTUSE(_Ft_) || !FPUINST_ISLIVE(_Ft_)) ) {
				if( FPUINST_ISLIVE(_Ft_) ) {
					_freeXMMreg(mmregt);
					info &= ~PROCESS_EE_MODEWRITET;
				}
				_deleteMMXreg(MMX_FPU+XMMFPU_ACC, 2);
				xmmregs[mmregt].inuse = 1;
				xmmregs[mmregt].reg = 0;
				xmmregs[mmregt].mode = readacc;
				xmmregs[mmregt].type = XMMTYPE_FPACC;
				mmregacc = mmregt;
			}
			else if( (xmminfo&XMMINFO_READS) && mmregs >= 0 && (FPUINST_LASTUSE(_Fs_) || !FPUINST_ISLIVE(_Fs_)) ) {
				if( FPUINST_ISLIVE(_Fs_) ) {
					_freeXMMreg(mmregs);
					info &= ~PROCESS_EE_MODEWRITES;
				}
				_deleteMMXreg(MMX_FPU+XMMFPU_ACC, 2);
				xmmregs[mmregs].inuse = 1;
				xmmregs[mmregs].reg = 0;
				xmmregs[mmregs].mode = readacc;
				xmmregs[mmregs].type = XMMTYPE_FPACC;
				mmregacc = mmregs;
			}
			else mmregacc = _allocFPACCtoXMMreg(-1, readacc);
		}

		xmmregs[mmregacc].mode |= MODE_WRITE;
	}
	else if( xmminfo & XMMINFO_WRITED ) {
		// check for last used, if so don't alloc a new XMM reg
		int readd = MODE_WRITE|((xmminfo&XMMINFO_READD)?MODE_READ:0);
		if( xmminfo&XMMINFO_READD ) mmregd = _allocFPtoXMMreg(-1, _Fd_, readd);
		else mmregd = _checkXMMreg(XMMTYPE_FPREG, _Fd_, readd);

		if( mmregd < 0 ) {
			// Recycle a dying Ft, Fs, or ACC register as the Fd destination.
			if( (xmminfo&XMMINFO_READT) && mmregt >= 0 && (FPUINST_LASTUSE(_Ft_) || !FPUINST_ISLIVE(_Ft_)) ) {
				if( FPUINST_ISLIVE(_Ft_) ) {
					_freeXMMreg(mmregt);
					info &= ~PROCESS_EE_MODEWRITET;
				}
				_deleteMMXreg(MMX_FPU+_Fd_, 2);
				xmmregs[mmregt].inuse = 1;
				xmmregs[mmregt].reg = _Fd_;
				xmmregs[mmregt].mode = readd;
				mmregd = mmregt;
			}
			else if( (xmminfo&XMMINFO_READS) && mmregs >= 0 && (FPUINST_LASTUSE(_Fs_) || !FPUINST_ISLIVE(_Fs_)) ) {
				if( FPUINST_ISLIVE(_Fs_) ) {
					_freeXMMreg(mmregs);
					info &= ~PROCESS_EE_MODEWRITES;
				}
				_deleteMMXreg(MMX_FPU+_Fd_, 2);
				xmmregs[mmregs].inuse = 1;
				xmmregs[mmregs].reg = _Fd_;
				xmmregs[mmregs].mode = readd;
				mmregd = mmregs;
			}
			else if( (xmminfo&XMMINFO_READACC) && mmregacc >= 0 && (FPUINST_LASTUSE(XMMFPU_ACC) || !FPUINST_ISLIVE(XMMFPU_ACC)) ) {
				if( FPUINST_ISLIVE(XMMFPU_ACC) )
					_freeXMMreg(mmregacc);
				_deleteMMXreg(MMX_FPU+_Fd_, 2);
				xmmregs[mmregacc].inuse = 1;
				xmmregs[mmregacc].reg = _Fd_;
				xmmregs[mmregacc].mode = readd;
				xmmregs[mmregacc].type = XMMTYPE_FPREG;
				mmregd = mmregacc;
			}
			else mmregd = _allocFPtoXMMreg(-1, _Fd_, readd);
		}
	}

	pxAssert( mmregs >= 0 || mmregt >= 0 || mmregd >= 0 || mmregacc >= 0 );

	// Pack the final register assignments into the info word for the callback.
	if( xmminfo & XMMINFO_WRITED ) {
		pxAssert( mmregd >= 0 );
		info |= PROCESS_EE_SET_D(mmregd);
	}
	if( xmminfo & (XMMINFO_WRITEACC|XMMINFO_READACC) ) {
		if( mmregacc >= 0 ) info |= PROCESS_EE_SET_ACC(mmregacc)|PROCESS_EE_ACC;
		else pxAssert( !(xmminfo&XMMINFO_WRITEACC));
	}
	if( xmminfo & XMMINFO_READS ) {
		if( mmregs >= 0 ) info |= PROCESS_EE_SET_S(mmregs)|PROCESS_EE_S;
	}
	if( xmminfo & XMMINFO_READT ) {
		if( mmregt >= 0 ) info |= PROCESS_EE_SET_T(mmregt)|PROCESS_EE_T;
	}

	// at least one must be in xmm
	if( (xmminfo & (XMMINFO_READS|XMMINFO_READT)) == (XMMINFO_READS|XMMINFO_READT) ) {
		pxAssert( mmregs >= 0 || mmregt >= 0 );
	}

	xmmcode(info);
	_clearNeededXMMregs();
}
void operator+=( wxSizer* target, wxSizer& src )
{
	// A NULL target sizer is a programming error; assert and do nothing.
	if( pxAssert( target != NULL ) )
		target->Add( &src );
}
// ===================================================================================================== // TEST / INC / DEC // ===================================================================================================== void xImpl_Test::operator()( const xRegisterInt& to, const xRegisterInt& from ) const { pxAssert( to.GetOperandSize() == from.GetOperandSize() ); xOpWrite( to.GetPrefix16(), to.Is8BitOp() ? 0x84 : 0x85, from, to ); }
// Synchronizes the System menu items (Pause/Resume and the CDVD boot entries)
// with the current core thread and CDVD source configuration.
void MainEmuFrame::ApplyCoreStatus()
{
	wxMenuBar& menubar( *GetMenuBar() );

	// [TODO] : Ideally each of these items would bind a listener instance to the AppCoreThread
	// dispatcher, and modify their states accordingly.  This is just a hack (for now) -- air

	// Pause/Resume item: label and enabled state depend on whether the core
	// thread is running and whether a VM state exists at all.
	if (wxMenuItem* susres = menubar.FindItem(MenuId_Sys_SuspendResume))
	{
		if( !CoreThread.IsClosing() )
		{
			susres->Enable();
			susres->SetItemLabel(_("Paus&e"));
			susres->SetHelp(_("Safely pauses emulation and preserves the PS2 state."));
		}
		else
		{
			bool ActiveVM = SysHasValidState();
			susres->Enable(ActiveVM);
			if( ActiveVM )
			{
				susres->SetItemLabel(_("R&esume"));
				susres->SetHelp(_("Resumes the suspended emulation state."));
			}
			else
			{
				susres->SetItemLabel(_("Pause/Resume"));
				susres->SetHelp(_("No emulation state is active; cannot suspend or resume."));
			}
		}
	}

	const CDVD_SourceType Source = g_Conf->CdvdSource;
	const MenuIdentifiers fullboot_id = MenuId_Boot_CDVD;
	const MenuIdentifiers fastboot_id = MenuId_Boot_CDVD2;

	// Fast-boot item: removed entirely when no disc is selected, otherwise
	// created/relabeled to match the current source type.
	wxMenuItem *cdvd_fast = menubar.FindItem(fastboot_id);
	if (Source == CDVD_SourceType::NoDisc)
	{
		if(cdvd_fast)
			m_menuSys.Destroy(cdvd_fast);
	}
	else
	{
		wxString label;
		wxString help_text = _("Use fast boot to skip PS2 startup and splash screens");

		switch (Source)
		{
			case CDVD_SourceType::Iso:
				label = _("Boot ISO (&fast)");
				break;
			case CDVD_SourceType::Plugin:
				label = _("Boot CDVD (&fast)");
				break;
			//case CDVD_SourceType::NoDisc: (Fast boot menu item is destroyed when no disc is selected)
			default:
				pxAssert(false);
		}

		if (cdvd_fast)
		{
			cdvd_fast->SetItemLabel(label);
			cdvd_fast->SetHelp(help_text);
		}
		else
		{
			// Insert at position 1, directly below the full-boot item.
			m_menuSys.Insert(1, fastboot_id, label, help_text);
		}
	}

	// Full-boot item: always present; label reflects the source type.
	if (wxMenuItem *cdvd_full = menubar.FindItem(fullboot_id))
	{
		switch (Source)
		{
			case CDVD_SourceType::Iso:
				cdvd_full->SetItemLabel(_("Boo&t ISO (full)"));
				cdvd_full->SetHelp(_("Boot the VM using the current ISO source media"));
				break;
			case CDVD_SourceType::Plugin:
				cdvd_full->SetItemLabel(_("Boo&t CDVD (full)"));
				cdvd_full->SetHelp(_("Boot the VM using the current CD/DVD source media"));
				break;
			case CDVD_SourceType::NoDisc:
				cdvd_full->SetItemLabel(_("Boo&t BIOS"));
				cdvd_full->SetHelp(_("Boot the VM without any source media"));
				break;
			default:
				pxAssert(false);
		}
	}
}
void xImpl_BitScan::operator()( const xRegister16or32or64& to, const xRegister16or32or64& from ) const
{
	// Bit-scan family (BSF/BSR): source and destination widths must agree.
	pxAssert( from->GetOperandSize() == to->GetOperandSize() );
	xOpWrite0F( from->GetPrefix16(), Opcode, to, from );
}
// Invalidates recompiled VU1 micro-program blocks overlapping the given
// address range, forcing recompilation on next execution.
void recMicroVU1::Clear(u32 addr, u32 size)
{
	pxAssert(m_Reserved); // please allocate me first! :|
	mVUclear(microVU1, addr, size);
}
// mmx mem-compare implementation, size has to be a multiple of 8
// returns 0 is equal, nonzero value if not equal
// ~10 times faster than standard memcmp
// (zerofrog)
//
// Strategy: compare in 64/32-byte MMX chunks while >= 32 bytes remain, then
// dispatch on the exact leftover (24/16/8 bytes).  pcmpeqd+pand+pmovmskb
// yields 0xff in eax only when all compared bytes matched.
// NOTE(review): x86-32 MSVC inline asm only; EBX/ESI/EDI are normally
// callee-saved but MSVC tracks inline-asm register usage -- verify ESI
// preservation on other toolchains.
u8 memcmp_mmx(const void *src1, const void *src2, int cmpsize)
{
	pxAssert((cmpsize & 7) == 0);

	__asm {
		mov ecx, cmpsize
		mov edx, src1
		mov esi, src2

		cmp ecx, 32
		jl Done4

		// custom test first 8 to make sure things are ok
		movq mm0, [esi]
		movq mm1, [esi+8]
		pcmpeqd mm0, [edx]
		pcmpeqd mm1, [edx+8]
		pand mm0, mm1
		movq mm2, [esi+16]
		pmovmskb eax, mm0
		movq mm3, [esi+24]

		// check if eq
		cmp eax, 0xff
		je NextComp
		mov eax, 1
		jmp End

NextComp:
		pcmpeqd mm2, [edx+16]
		pcmpeqd mm3, [edx+24]
		pand mm2, mm3
		pmovmskb eax, mm2

		sub ecx, 32
		add esi, 32
		add edx, 32

		// check if eq
		cmp eax, 0xff
		je ContinueTest
		mov eax, 1
		jmp End

		// NOTE(review): the two instructions below sit after an unconditional
		// 'jmp End' with no label, so they appear unreachable -- the size
		// check is instead performed at ContinueTest.  Verify before touching.
		cmp ecx, 64
		jl Done8

Cmp8:	// main loop: compare 64 bytes per iteration using all 8 MMX regs
		movq mm0, [esi]
		movq mm1, [esi+8]
		movq mm2, [esi+16]
		movq mm3, [esi+24]
		movq mm4, [esi+32]
		movq mm5, [esi+40]
		movq mm6, [esi+48]
		movq mm7, [esi+56]
		pcmpeqd mm0, [edx]
		pcmpeqd mm1, [edx+8]
		pcmpeqd mm2, [edx+16]
		pcmpeqd mm3, [edx+24]
		pand mm0, mm1
		pcmpeqd mm4, [edx+32]
		pand mm0, mm2
		pcmpeqd mm5, [edx+40]
		pand mm0, mm3
		pcmpeqd mm6, [edx+48]
		pand mm0, mm4
		pcmpeqd mm7, [edx+56]
		pand mm0, mm5
		pand mm0, mm6
		pand mm0, mm7
		pmovmskb eax, mm0

		// check if eq
		cmp eax, 0xff
		je Continue
		mov eax, 1
		jmp End

Continue:
		sub ecx, 64
		add esi, 64
		add edx, 64
ContinueTest:
		cmp ecx, 64
		jge Cmp8

Done8:	// handle a remaining 32-byte chunk, if bit 5 of the count is set
		test ecx, 0x20
		jz Done4
		movq mm0, [esi]
		movq mm1, [esi+8]
		movq mm2, [esi+16]
		movq mm3, [esi+24]
		pcmpeqd mm0, [edx]
		pcmpeqd mm1, [edx+8]
		pcmpeqd mm2, [edx+16]
		pcmpeqd mm3, [edx+24]
		pand mm0, mm1
		pand mm0, mm2
		pand mm0, mm3
		pmovmskb eax, mm0
		sub ecx, 32
		add esi, 32
		add edx, 32

		// check if eq
		cmp eax, 0xff
		je Done4
		mov eax, 1
		jmp End

Done4:	// exactly 24 bytes left?
		cmp ecx, 24
		jne Done2
		movq mm0, [esi]
		movq mm1, [esi+8]
		movq mm2, [esi+16]
		pcmpeqd mm0, [edx]
		pcmpeqd mm1, [edx+8]
		pcmpeqd mm2, [edx+16]
		pand mm0, mm1
		pand mm0, mm2
		pmovmskb eax, mm0

		// check if eq
		cmp eax, 0xff
		setne al
		jmp End

Done2:	// exactly 16 bytes left?
		cmp ecx, 16
		jne Done1

		movq mm0, [esi]
		movq mm1, [esi+8]
		pcmpeqd mm0, [edx]
		pcmpeqd mm1, [edx+8]
		pand mm0, mm1
		pmovmskb eax, mm0

		// check if eq
		cmp eax, 0xff
		setne al
		jmp End

Done1:	// exactly 8 bytes left? (compared with plain 32-bit loads)
		cmp ecx, 8
		jne Done

		mov eax, [esi]
		mov esi, [esi+4]
		cmp eax, [edx]
		je Next
		mov eax, 1
		jmp End

Next:
		cmp esi, [edx+4]
		setne al
		jmp End

Done:	// nothing left: buffers matched
		xor eax, eax

End:	// emms restores the x87/MMX state before returning (al holds result)
		emms
	}
}