// Schedule (or bring forward) the GIF DMA completion event.
// When the DMAC is in GIF-MFIFO drain mode the MFIFO event is used,
// otherwise the regular GIF channel event. The event is only (re)armed
// when none is pending, or when the pending one is scheduled later than
// `cycles` would allow.
static __fi void GifDMAInt(int cycles)
{
	if (dmacRegs.ctrl.MFD == MFD_GIF)
	{
		const bool queued = (cpuRegs.interrupt & (1 << DMAC_MFIFO_GIF)) != 0;
		if (!queued || cpuRegs.eCycle[DMAC_MFIFO_GIF] < (u32)cycles)
			CPU_INT(DMAC_MFIFO_GIF, cycles);
	}
	else
	{
		const bool queued = (cpuRegs.interrupt & (1 << DMAC_GIF)) != 0;
		if (!queued || cpuRegs.eCycle[DMAC_GIF] < (u32)cycles)
			CPU_INT(DMAC_GIF, cycles);
	}
}
// Execute one toSPR chain step and schedule its completion interrupt.
// With the IPU-wait hack enabled the real transfer cost is ignored and a
// fixed 8-cycle delay is used; otherwise the cost is BIAS-scaled.
__fi void SPR1chain()
{
	if (CHECK_IPUWAITHACK)
	{
		_SPR1chain();
		CPU_INT(DMAC_TO_SPR, 8);
	}
	else
	{
		const int cost = _SPR1chain() * BIAS;
		CPU_INT(DMAC_TO_SPR, cost);
	}
}
// Handle a VIF1 "from GS" (download) transfer: copy up to QWC quadwords of
// previously queued GS download data into EE memory at MADR, zero-filling
// any tail the GS did not supply, then update MADR/FQC and clear QWC.
void vif1TransferToMemory()
{
	u128* pMem = (u128*)dmaGetAddr(vif1ch.madr, false); // VIF from gsMemory
	if (pMem == NULL)
	{ // Is vif0ptag empty?
		// MADR does not map to valid memory: raise a bus error and terminate
		// the transfer immediately.
		Console.WriteLn("Vif1 Tag BUSERR");
		dmacRegs.stat.BEIS = true; // Bus Error
		vif1Regs.stat.FQC = 0;
		vif1ch.qwc = 0;
		vif1.done = true;
		CPU_INT(DMAC_VIF1, 0);
		return; // An error has occurred.
	}

	// MTGS concerns: The MTGS is inherently disagreeable with the idea of downloading
	// stuff from the GS. The *only* way to handle this case safely is to flush the GS
	// completely and execute the transfer there-after.
	//Console.Warning("Real QWC %x", vif1ch.qwc);

	// Clamp the read to what the GS actually has queued for download.
	const u32 size = min(vif1.GSLastDownloadSize, (u32)vif1ch.qwc);
	// NOTE(review): the end pointer is derived from GSLastDownloadSize rather
	// than `size`/QWC — confirm this is intentional (the span past `size` is
	// zero-filled below).
	const u128* pMemEnd = vif1.GSLastDownloadSize + pMem;

	if (size)
	{ // Checking if any crazy game does a partial
	  // gs primitive and then does a gs download...
		Gif_Path& p1 = gifUnit.gifPath[GIF_PATH_1];
		Gif_Path& p2 = gifUnit.gifPath[GIF_PATH_2];
		Gif_Path& p3 = gifUnit.gifPath[GIF_PATH_3];
		pxAssert(p1.isDone() || !p1.gifTag.isValid);
		pxAssert(p2.isDone() || !p2.gifTag.isValid);
		pxAssert(p3.isDone() || !p3.gifTag.isValid);
	}

	// Flush the MTGS so the download data is actually available, then read it.
	GetMTGS().WaitGS();
	GSreadFIFO2((u64*)pMem, size);
	pMem += size;

	// Zero-fill whatever remains up to pMemEnd.
	if (pMem < pMemEnd)
	{
		//DevCon.Warning("GS Transfer < VIF QWC, Clearing end of space");
		__m128 zeroreg = _mm_setzero_ps();
		do
		{
			_mm_store_ps((float*)pMem, zeroreg);
		} while (++pMem < pMemEnd);
	}

	g_vif1Cycles += vif1ch.qwc * 2;
	vif1ch.madr += vif1ch.qwc * 16; // mgs3 scene changes
	// Consume the transferred amount from the pending download size and
	// reflect the remainder in FQC (capped at the 16-QW FIFO depth).
	if (vif1.GSLastDownloadSize >= vif1ch.qwc)
	{
		vif1.GSLastDownloadSize -= vif1ch.qwc;
		vif1Regs.stat.FQC = min((u32)16, vif1.GSLastDownloadSize);
	}
	else
	{
		vif1Regs.stat.FQC = 0;
		vif1.GSLastDownloadSize = 0;
	}

	vif1ch.qwc = 0;
}
// Finish the EE-side of a SIF0 transfer: clear the end/busy state and
// schedule the SIF0 DMAC interrupt.
void FinalizeEERead()
{
	SIF_LOG("Sif0: End EE");

	sif0.ee.end  = false;
	sif0.ee.busy = false;

	SIF_LOG("CPU INT FIRED SIF0");
	CPU_INT(DMAC_SIF0, 16);
}
// Returns true when the GIF unit can accept Path 3 data right now.
// Otherwise the given DMA channel is rescheduled for a later retry
// (unless Path 3 is masked, in which case it stays parked) and false
// is returned.
bool CheckPaths(EE_EventType Channel)
{
	// Can't do Path 3, so try dma again later...
	if (gifUnit.CanDoPath3())
		return true;

	if (!gifUnit.Path3Masked())
		CPU_INT(Channel, 128);

	return false;
}
// Stop transferring ee, and signal an interrupt. static __fi void EndEE() { SIF_LOG("Sif2: End EE"); sif2.ee.end = false; sif2.ee.busy = false; if (sif2.ee.cycles == 0) { SIF_LOG("SIF2 EE: cycles = 0"); sif2.ee.cycles = 1; } CPU_INT(DMAC_SIF2, sif2.ee.cycles*BIAS); }
// Kick off a VIF0 DMA transfer: decide the transfer mode (normal vs chain)
// from CHCR/QWC, warn on suspicious restart states, then schedule the first
// vif0Interrupt tick.
void dmaVIF0()
{
	VIF_LOG("dmaVIF0 chcr = %lx, madr = %lx, qwc = %lx\n"
	        "        tadr = %lx, asr0 = %lx, asr1 = %lx",
	        vif0ch.chcr._u32, vif0ch.madr, vif0ch.qwc,
	        vif0ch.tadr, vif0ch.asr0, vif0ch.asr1);

	g_vif0Cycles = 0;

	if ((vif0ch.chcr.MOD == NORMAL_MODE) || vif0ch.qwc > 0) // Normal Mode
	{
		vif0.dmamode = VIF_NORMAL_TO_MEM_MODE;

		// Starting a fresh transfer while a previous one left an IRQ offset
		// pending usually means the game force-stopped the channel.
		if (vif0.irqoffset.enabled == true && vif0.done == false)
		{
			if (vif0ch.chcr.MOD == NORMAL_MODE) DevCon.Warning("Warning! VIF0 starting a new Normal transfer with vif offset set (Possible force stop?)");
			else if (vif0ch.qwc == 0) DevCon.Warning("Warning! VIF0 starting a new Chain transfer with vif offset set (Possible force stop?)");
		}
		vif0.done = false;

		// Chain mode with a non-zero QWC: the pending QWC is processed first;
		// a REFE/END tag in CHCR means this is also the final block.
		if (vif0ch.chcr.MOD == CHAIN_MODE && vif0ch.qwc > 0)
		{
			vif0.dmamode = VIF_CHAIN_MODE;
			DevCon.Warning(L"VIF0 QWC on Chain CHCR " + vif0ch.chcr.desc());

			if ((vif0ch.chcr.tag().ID == TAG_REFE) || (vif0ch.chcr.tag().ID == TAG_END))
			{
				vif0.done = true;
			}
		}
	}
	else
	{
		vif0.dmamode = VIF_CHAIN_MODE;
		vif0.done = false;
	}

	// FQC mirrors the VIF0 FIFO occupancy, capped at its 8-QW depth.
	vif0Regs.stat.FQC = min((u16)0x8, vif0ch.qwc);

	//Using a delay as Beyond Good and Evil does the DMA twice with 2 different TADR's (no checks in the middle, all one block of code),
	//the first bit it sends isnt required for it to work.
	//Also being an end chain it ignores the second lot, this causes infinite loops ;p
	// Chain Mode
	CPU_INT(DMAC_VIF0, 4);
}
// Stop processing EE, and signal an interrupt. static __fi void EndEE() { sif1.ee.end = false; sif1.ee.busy = false; SIF_LOG("Sif 1: End EE"); // Voodoocycles : Okami wants around 100 cycles when booting up // Other games reach like 50k cycles here, but the EE will long have given up by then and just retry. // (Cause of double interrupts on the EE) if (sif1.ee.cycles == 0) { SIF_LOG("SIF1 EE: cycles = 0"); sif1.ee.cycles = 1; } CPU_INT(DMAC_SIF1, /*min((int)(*/sif1.ee.cycles*BIAS/*), 384)*/); }
// Event handler fired when VU1 is expected to have finished a program.
// If VU1 is still busy, run it to completion and reschedule; otherwise clear
// the VEW stall, release Path 1 if it was active, and resume any VIF1
// processing that was waiting on the VU.
__fi void vif1VUFinish()
{
	// VPU_STAT bit 8 = VU1 still running: finish it and retry after the
	// cycles it consumed.
	if (VU0.VI[REG_VPU_STAT].UL & 0x100)
	{
		int _cycles = VU1.cycle;
		//DevCon.Warning("Finishing VU1");
		vu1Finish();
		CPU_INT(VIF_VU1_FINISH, (VU1.cycle - _cycles) * BIAS);
		return;
	}

	vif1Regs.stat.VEW = false;
	VIF_LOG("VU1 finished");

	// Path 1 (VU1 XGKICK) was the active GIF path: release it and let a
	// lower-priority path take over.
	if (gifRegs.stat.APATH == 1)
	{
		VIF_LOG("Clear APATH1");
		gifRegs.stat.APATH = 0;
		gifRegs.stat.OPH = 0;
		vif1Regs.stat.VGW = false; //Let vif continue if it's stuck on a flush

		if (!vif1.waitforvu)
		{
			if (gifUnit.checkPaths(0, 1, 1))
				gifUnit.Execute(false, true);
		}
	}

	if (vif1.waitforvu == true)
	{
		vif1.waitforvu = false;
		ExecuteVU(1);
		//Check if VIF is already scheduled to interrupt, if it's waiting, kick it :P
		if ((cpuRegs.interrupt & (1 << DMAC_VIF1 | 1 << DMAC_MFIFO_VIF)) == 0 && vif1ch.chcr.STR == true && !vif1Regs.stat.INT)
		{
			if (dmacRegs.ctrl.MFD == MFD_VIF1)
				vifMFIFOInterrupt();
			else
				vif1Interrupt();
		}
	}
	//DevCon.Warning("VU1 state cleared");
}
void _SPR0interleave() { int qwc = spr0ch.qwc; int sqwc = dmacRegs.sqwc.SQWC; int tqwc = dmacRegs.sqwc.TQWC; tDMA_TAG *pMem; if (tqwc == 0) tqwc = qwc; //Console.WriteLn("dmaSPR0 interleave"); SPR_LOG("SPR0 interleave size=%d, tqwc=%d, sqwc=%d, addr=%lx sadr=%lx", spr0ch.qwc, tqwc, sqwc, spr0ch.madr, spr0ch.sadr); CPU_INT(DMAC_FROM_SPR, qwc * BIAS); while (qwc > 0) { spr0ch.qwc = std::min(tqwc, qwc); qwc -= spr0ch.qwc; pMem = SPRdmaGetAddr(spr0ch.madr, true); switch (dmacRegs.ctrl.MFD) { case MFD_VIF1: case MFD_GIF: hwMFIFOWrite(spr0ch.madr, &psSu128(spr0ch.sadr), spr0ch.qwc); mfifotransferred += spr0ch.qwc; break; case NO_MFD: case MFD_RESERVED: // clear VU mem also! TestClearVUs(spr0ch.madr, spr0ch.qwc); memcpy_qwc(pMem, &psSu128(spr0ch.sadr), spr0ch.qwc); break; } spr0ch.sadr += spr0ch.qwc * 16; spr0ch.madr += (sqwc + spr0ch.qwc) * 16; } spr0ch.qwc = 0; }
void _SPR1interleave() { int qwc = spr1ch.qwc; int sqwc = dmacRegs.sqwc.SQWC; int tqwc = dmacRegs.sqwc.TQWC; tDMA_TAG *pMem; if (tqwc == 0) tqwc = qwc; SPR_LOG("SPR1 interleave size=%d, tqwc=%d, sqwc=%d, addr=%lx sadr=%lx", spr1ch.qwc, tqwc, sqwc, spr1ch.madr, spr1ch.sadr); CPU_INT(DMAC_TO_SPR, qwc * BIAS); while (qwc > 0) { spr1ch.qwc = std::min(tqwc, qwc); qwc -= spr1ch.qwc; pMem = SPRdmaGetAddr(spr1ch.madr, false); memcpy_qwc(&psSu128(spr1ch.sadr), pMem, spr1ch.qwc); spr1ch.sadr += spr1ch.qwc * 16; spr1ch.madr += (sqwc + spr1ch.qwc) * 16; } spr1ch.qwc = 0; }
// Event handler fired when VU0 is expected to have finished a program.
// If VU0 is still busy, run it to completion and reschedule; otherwise clear
// the VEW stall and resume any VIF0 processing that was waiting on the VU.
__fi void vif0VUFinish()
{
	// VPU_STAT bit 0 = VU0 still running: finish it and retry after the
	// cycles it consumed.
	if ((VU0.VI[REG_VPU_STAT].UL & 1))
	{
		int _cycles = VU0.cycle;
		//DevCon.Warning("Finishing VU0");
		vu0Finish();
		_cycles = VU0.cycle - _cycles;
		//DevCon.Warning("Finishing VU0 %d cycles", _cycles);
		CPU_INT(VIF_VU0_FINISH, _cycles * BIAS);
		return;
	}

	vif0Regs.stat.VEW = false;
	VIF_LOG("VU0 finished");

	if (vif0.waitforvu == true)
	{
		vif0.waitforvu = false;
		ExecuteVU(0);
		//Make sure VIF0 isnt already scheduled to spin.
		// NOTE(review): the 0x1 mask presumes DMAC_VIF0 is interrupt bit 0 —
		// confirm against the EE_EventType enum.
		if (!(cpuRegs.interrupt & 0x1) && vif0ch.chcr.STR == true && !vif0Regs.stat.INT)
			vif0Interrupt();
	}
	//DevCon.Warning("VU0 state cleared");
}
// Main GIF DMA worker (non-MFIFO): honours the PSE pause bit and DMAC stall
// control, reads the next chain tag when needed, then transfers QWC data to
// the GIF and reschedules itself until the chain is done.
void GIFdma()
{
	tDMA_TAG *ptag;
	gscycles = prevcycles;

	// GIF transfers temporarily paused via GIF_CTRL.PSE: retry later.
	if (gifRegs.ctrl.PSE)
	{ // temporarily stop
		Console.WriteLn("Gif dma temp paused? (non MFIFO GIF)");
		CPU_INT(DMAC_GIF, 16);
		return;
	}

	// Resuming from a previous stall-control (STADR) stall.
	if ((dmacRegs.ctrl.STD == STD_GIF) && (prevcycles != 0))
	{
		//Console.WriteLn("GS Stall Control Source = %x, Drain = %x\n MADR = %x, STADR = %x", (psHu32(0xe000) >> 4) & 0x3, (psHu32(0xe000) >> 6) & 0x3, gifch.madr, psHu32(DMAC_STADR));
		// Still past the stall address: keep waiting.
		if ((gifch.madr + (gifch.qwc * 16)) > dmacRegs.stadr.ADDR)
		{
			CPU_INT(DMAC_GIF, 4);
			gscycles = 0;
			return;
		}
		prevcycles = 0;
		gifch.qwc = 0;
	}

	if ((gifch.chcr.MOD == CHAIN_MODE) && (!gspath3done) && gifch.qwc == 0) // Chain Mode
	{
		ptag = ReadTag();
		if (ptag == NULL) return;
		//DevCon.Warning("GIF Reading Tag MSK = %x", vif1Regs.mskpath3);
		GIF_LOG("gifdmaChain %8.8x_%8.8x size=%d, id=%d, addr=%lx tadr=%lx", ptag[1]._u32, ptag[0]._u32, gifch.qwc, ptag->ID, gifch.madr, gifch.tadr);
		gifRegs.stat.FQC = std::min((u16)0x10, gifch.qwc);// FQC=31, hack ;) (for values of 31 that equal 16) [ used to be 0xE00; // APATH=3]
		if (dmacRegs.ctrl.STD == STD_GIF)
		{
			// there are still bugs, need to also check if gifch.madr +16*qwc >= stadr, if not, stall
			if ((ptag->ID == TAG_REFS) && ((gifch.madr + (gifch.qwc * 16)) > dmacRegs.stadr.ADDR))
			{
				// stalled.
				// We really need to test this. Pay attention to prevcycles, as it used to trigger GIFchains in the code above. (rama)
				//Console.WriteLn("GS Stall Control start Source = %x, Drain = %x\n MADR = %x, STADR = %x", (psHu32(0xe000) >> 4) & 0x3, (psHu32(0xe000) >> 6) & 0x3,gifch.madr, psHu32(DMAC_STADR));
				// Remember how far we got, rewind TADR so the tag is re-read
				// on resume, and raise the stall interrupt.
				prevcycles = gscycles;
				gifch.tadr -= 16;
				gifch.qwc = 0;
				hwDmacIrq(DMAC_STALL_SIS);
				CPU_INT(DMAC_GIF, gscycles);
				gscycles = 0;
				return;
			}
		}
		checkTieBit(ptag);
	}
	else if (dmacRegs.ctrl.STD == STD_GIF && gifch.chcr.MOD == NORMAL_MODE)
	{
		Console.WriteLn("GIF DMA Stall in Normal mode not implemented - Report which game to PCSX2 Team");
	}

	clearFIFOstuff(true);
	gifRegs.stat.FQC = std::min((u16)0x10, gifch.qwc);// FQC=31, hack ;) (for values of 31 that equal 16) [ used to be 0xE00; // APATH=3]

#if USE_OLD_GIF == 1 // d
	if (vif1Regs.mskpath3 || gifRegs.mode.M3R)
	{
		if (GSTransferStatus.PTH3 == STOPPED_MODE)
		{
			MSKPATH3_LOG("Path3 Paused by VIF QWC %x", gifch.qwc);
			if (gifch.qwc == 0) CPU_INT(DMAC_GIF, 4);
			else gifRegs.stat.set_flags(GIF_STAT_P3Q);
			return;
		}
	}
#endif

	// Transfer Dn_QWC from Dn_MADR to GIF
	if (gifch.qwc > 0) // Normal Mode
	{
		gifRegs.stat.FQC = std::min((u16)0x10, gifch.qwc);// FQC=31, hack ;) (for values of 31 that equal 16) [ used to be 0xE00; // APATH=3]
		// Path 3 not available right now: CheckPaths already rescheduled us.
		if (!CheckPaths(DMAC_GIF)) return;
		GIFchain(); //Transfers the data set by the switch
		CPU_INT(DMAC_GIF, gscycles);
		return;
	}
	else if (!gspath3done)
		GIFdma(); //Loop round if there was a blank tag, causes hell otherwise with P3 masking games.

	prevcycles = 0;
	CPU_INT(DMAC_GIF, gscycles);
	gifRegs.stat.FQC = std::min((u16)0x10, gifch.qwc);// FQC=31, hack ;) (for values of 31 that equal 16) [ used to be 0xE00; // OPH=1 | APATH=3]
}
// VIF0 DMA event handler: fires pending VIF IRQs (honouring stall bits),
// continues an in-progress transfer or sets up the next chain tag, and when
// the whole transfer is done clears CHCR.STR and raises the DMAC interrupt.
__fi void vif0Interrupt()
{
	VIF_LOG("vif0Interrupt: %8.8x", cpuRegs.cycle);

	g_vif0Cycles = 0;

	// FQC mirrors the VIF0 FIFO occupancy, capped at its 8-QW depth.
	vif0Regs.stat.FQC = min(vif0ch.qwc, (u16)8);

	if (!(vif0ch.chcr.STR)) Console.WriteLn("vif0 running when CHCR == %x", vif0ch.chcr._u32);

	// Deliver a pending VIF IRQ once the current command has fully drained.
	if (vif0.irq && vif0.tag.size == 0 && vif0.cmd == 0)
	{
		vif0Regs.stat.INT = true;
		hwIntcIrq(VIF0intc);
		--vif0.irq;

		if (vif0Regs.stat.test(VIF0_STAT_VSS | VIF0_STAT_VIS | VIF0_STAT_VFS))
		{
			//vif0Regs.stat.FQC = 0;
			// One game doesn't like vif stalling at end, can't remember what. Spiderman isn't keen on it tho
			//vif0ch.chcr.STR = false;
			vif0Regs.stat.FQC = min((u16)0x8, vif0ch.qwc);
			if (vif0ch.qwc > 0 || !vif0.done)
			{
				VIF_LOG("VIF0 Stalled");
				return;
			}
		}
	}

	// Waiting on a VU0 program: vif0VUFinish will kick us when it ends.
	if (vif0.waitforvu == true)
	{
		//DevCon.Warning("Waiting on VU0");
		//CPU_INT(DMAC_VIF0, 16);
		return;
	}

	vif0.vifstalled.enabled = false;

	//Must go after the Stall, incase it's still in progress, GTC africa likes to see it still transferring.
	if (vif0.cmd)
	{
		if (vif0.done == true && vif0ch.qwc == 0) vif0Regs.stat.VPS = VPS_WAITING;
	}
	else
	{
		vif0Regs.stat.VPS = VPS_IDLE;
	}

	// A transfer block is in progress: push more data then loop.
	if (vif0.inprogress & 0x1)
	{
		_VIF0chain();
		vif0Regs.stat.FQC = min(vif0ch.qwc, (u16)8);
		CPU_INT(DMAC_VIF0, g_vif0Cycles);
		return;
	}

	// Chain not finished: fetch/setup the next tag and loop.
	if (!vif0.done)
	{
		if (!(dmacRegs.ctrl.DMAE))
		{
			Console.WriteLn("vif0 dma masked");
			return;
		}

		if ((vif0.inprogress & 0x1) == 0) vif0SetupTransfer();
		vif0Regs.stat.FQC = min(vif0ch.qwc, (u16)8);
		CPU_INT(DMAC_VIF0, g_vif0Cycles);
		return;
	}

	if (vif0.vifstalled.enabled && vif0.done)
	{
		DevCon.WriteLn("VIF0 looping on stall at end\n");
		CPU_INT(DMAC_VIF0, 0);
		return; //Dont want to end if vif is stalled.
	}

#ifdef PCSX2_DEVBUILD
	// BUGFIX: the format string previously had no argument for %x, printing
	// garbage (undefined varargs behavior). Pass the remaining QWC, matching
	// the equivalent message in vif1Interrupt.
	if (vif0ch.qwc > 0) Console.WriteLn("vif0 Ending with %x QWC left", vif0ch.qwc);
	if (vif0.cmd != 0) Console.WriteLn("vif0.cmd still set %x tag size %x", vif0.cmd, vif0.tag.size);
#endif

	// Transfer complete: stop the channel and signal the DMAC interrupt.
	vif0ch.chcr.STR = false;
	vif0Regs.stat.FQC = min((u16)0x8, vif0ch.qwc);
	vif0.vifstalled.enabled = false;
	vif0.irqoffset.enabled = false;
	if (vif0.queued_program == true) vifExecQueue(0);
	g_vif0Cycles = 0;
	hwDmacIrq(DMAC_VIF0);
	vif0Regs.stat.FQC = 0;
	DMA_LOG("VIF0 DMA End");
}
// VIF1 DMA event handler: releases Path 2 when its GIF packet is done,
// defers to the MFIFO handler when the DMAC is in VIF1-MFIFO mode, gates
// DIRECT/DIRECTHL commands on Path 2 availability, fires pending VIF IRQs,
// continues/sets up the transfer, and finally ends the channel.
__fi void vif1Interrupt()
{
	VIF_LOG("vif1Interrupt: %8.8x chcr %x, done %x, qwc %x", cpuRegs.cycle, vif1ch.chcr._u32, vif1.done, vif1ch.qwc);

	g_vif1Cycles = 0;

	// Path 2 was the active GIF path and its packet is complete: release it.
	if (gifRegs.stat.APATH == 2 && gifUnit.gifPath[GIF_PATH_2].isDone())
	{
		gifRegs.stat.APATH = 0;
		gifRegs.stat.OPH = 0;
		vif1Regs.stat.VGW = false; //Let vif continue if it's stuck on a flush

		if (gifUnit.checkPaths(1, 0, 1))
			gifUnit.Execute(false, true);
	}

	//Some games (Fahrenheit being one) start vif first, let it loop through blankness while it sets MFIFO mode, so we need to check it here.
	if (dmacRegs.ctrl.MFD == MFD_VIF1)
	{
		//Console.WriteLn("VIFMFIFO\n");
		// Test changed because the Final Fantasy 12 opening somehow has the tag in *Undefined* mode, which is not in the documentation that I saw.
		if (vif1ch.chcr.MOD == NORMAL_MODE) Console.WriteLn("MFIFO mode is normal (which isn't normal here)! %x", vif1ch.chcr._u32);
		vif1Regs.stat.FQC = min((u16)0x10, vif1ch.qwc);
		vifMFIFOInterrupt();
		return;
	}

	// We need to check the direction, if it is downloading
	// from the GS then we handle that separately (KH2 for testing)
	if (vif1ch.chcr.DIR)
	{
		bool isDirect   = (vif1.cmd & 0x7f) == 0x50;
		bool isDirectHL = (vif1.cmd & 0x7f) == 0x51;
		// DIRECT/DIRECTHL need Path 2: retry later if the GIF can't take it.
		if ((isDirect && !gifUnit.CanDoPath2()) || (isDirectHL && !gifUnit.CanDoPath2HL()))
		{
			GUNIT_WARN("vif1Interrupt() - Waiting for Path 2 to be ready");
			CPU_INT(DMAC_VIF1, 128);
			if (gifRegs.stat.APATH == 3) vif1Regs.stat.VGW = 1; //We're waiting for path 3. Gunslinger II
			return;
		}
		vif1Regs.stat.VGW = 0; //Path 3 isn't busy so we don't need to wait for it.

		vif1Regs.stat.FQC = min(vif1ch.qwc, (u16)16);
		//Simulated GS transfer time done, clear the flags
	}

	// Waiting on a VU1 program: vif1VUFinish will kick us when it ends.
	if (vif1.waitforvu == true)
	{
		//DevCon.Warning("Waiting on VU1");
		//CPU_INT(DMAC_VIF1, 16);
		return;
	}

	if (!vif1ch.chcr.STR) Console.WriteLn("Vif1 running when CHCR == %x", vif1ch.chcr._u32);

	// Deliver a pending VIF IRQ once the current command has fully drained.
	if (vif1.irq && vif1.tag.size == 0 && vif1.cmd == 0)
	{
		VIF_LOG("VIF IRQ Firing");
		vif1Regs.stat.INT = true;
		hwIntcIrq(VIF1intc);
		--vif1.irq;

		if (vif1Regs.stat.test(VIF1_STAT_VSS | VIF1_STAT_VIS | VIF1_STAT_VFS))
		{
			//vif1Regs.stat.FQC = 0;
			//NFSHPS stalls when the whole packet has gone across (it stalls in the last 32bit cmd)
			//In this case VIF will end
			vif1Regs.stat.FQC = min((u16)0x10, vif1ch.qwc);
			if ((vif1ch.qwc > 0 || !vif1.done) && !CHECK_VIF1STALLHACK)
			{
				VIF_LOG("VIF1 Stalled");
				return;
			}
		}
	}

	vif1.vifstalled.enabled = false;

	//Mirroring change to VIF0
	if (vif1.cmd)
	{
		if (vif1.done && (vif1ch.qwc == 0)) vif1Regs.stat.VPS = VPS_WAITING;
	}
	else
	{
		vif1Regs.stat.VPS = VPS_IDLE;
	}

	// A transfer block is in progress: push more data then loop.
	if (vif1.inprogress & 0x1)
	{
		_VIF1chain();
		// VIF_NORMAL_FROM_MEM_MODE is a very slow operation.
		// Timesplitters 2 depends on this beeing a bit higher than 128.
		if (vif1ch.chcr.DIR) vif1Regs.stat.FQC = min(vif1ch.qwc, (u16)16);

		if (!(vif1Regs.stat.VGW && gifUnit.gifPath[GIF_PATH_3].state != GIF_PATH_IDLE)) //If we're waiting on GIF, stop looping, (can be over 1000 loops!)
			CPU_INT(DMAC_VIF1, g_vif1Cycles);
		return;
	}

	// Chain not finished: fetch/setup the next tag and loop.
	if (!vif1.done)
	{
		if (!(dmacRegs.ctrl.DMAE))
		{
			Console.WriteLn("vif1 dma masked");
			return;
		}

		if ((vif1.inprogress & 0x1) == 0) vif1SetupTransfer();
		if (vif1ch.chcr.DIR) vif1Regs.stat.FQC = min(vif1ch.qwc, (u16)16);

		if (!(vif1Regs.stat.VGW && gifUnit.gifPath[GIF_PATH_3].state != GIF_PATH_IDLE)) //If we're waiting on GIF, stop looping, (can be over 1000 loops!)
			CPU_INT(DMAC_VIF1, g_vif1Cycles);
		return;
	}

	if (vif1.vifstalled.enabled && vif1.done)
	{
		DevCon.WriteLn("VIF1 looping on stall at end\n");
		CPU_INT(DMAC_VIF1, 0);
		return; //Dont want to end if vif is stalled.
	}

#ifdef PCSX2_DEVBUILD
	if (vif1ch.qwc > 0) Console.WriteLn("VIF1 Ending with %x QWC left", vif1ch.qwc);
	if (vif1.cmd != 0) Console.WriteLn("vif1.cmd still set %x tag size %x", vif1.cmd, vif1.tag.size);
#endif

	// NOTE(review): CHCR.DIR (a 1-bit field) is compared against
	// VIF_NORMAL_TO_MEM_MODE here — confirm that enum value is the intended
	// 0/1 encoding for "GS download" direction.
	if ((vif1ch.chcr.DIR == VIF_NORMAL_TO_MEM_MODE) && vif1.GSLastDownloadSize <= 16)
	{
		//Reverse fifo has finished and nothing is left, so lets clear the outputting flag
		gifRegs.stat.OPH = false;
	}

	if (vif1ch.chcr.DIR) vif1Regs.stat.FQC = min(vif1ch.qwc, (u16)16);

	// Transfer complete: stop the channel and signal the DMAC interrupt.
	vif1ch.chcr.STR = false;
	vif1.vifstalled.enabled = false;
	vif1.irqoffset.enabled = false;
	if (vif1.queued_program == true) vifExecQueue(1);
	g_vif1Cycles = 0;
	DMA_LOG("VIF1 DMA End");
	hwDmacIrq(DMAC_VIF1);
}
// Execute one fromSPR chain step and schedule its completion interrupt,
// with the transfer cost scaled by the EE cycle bias.
__fi void SPR0chain()
{
	const int cost = _SPR0chain() * BIAS;
	CPU_INT(DMAC_FROM_SPR, cost);
}
// Kick off a VIF1 DMA transfer: decide the transfer mode (normal, chain, or
// GS-download) from CHCR/QWC/DIR, warn on suspicious restart states, then
// schedule the first vif1Interrupt tick.
void dmaVIF1()
{
	VIF_LOG("dmaVIF1 chcr = %lx, madr = %lx, qwc = %lx\n"
	        "        tadr = %lx, asr0 = %lx, asr1 = %lx",
	        vif1ch.chcr._u32, vif1ch.madr, vif1ch.qwc,
	        vif1ch.tadr, vif1ch.asr0, vif1ch.asr1);

	g_vif1Cycles = 0;

#ifdef PCSX2_DEVBUILD
	if (dmacRegs.ctrl.STD == STD_VIF1)
	{
		//DevCon.WriteLn("VIF Stall Control Source = %x, Drain = %x", (psHu32(0xe000) >> 4) & 0x3, (psHu32(0xe000) >> 6) & 0x3);
	}
#endif

	if (vif1ch.qwc > 0) // Normal Mode
	{
		// ignore tag if it's a GS download (Def Jam Fight for NY)
		if (vif1ch.chcr.MOD == CHAIN_MODE && vif1ch.chcr.DIR)
		{
			vif1.dmamode = VIF_CHAIN_MODE;
			//DevCon.Warning(L"VIF1 QWC on Chain CHCR " + vif1ch.chcr.desc());

			// A REFE/END tag in CHCR means the pending QWC is the final block.
			if ((vif1ch.chcr.tag().ID == TAG_REFE) || (vif1ch.chcr.tag().ID == TAG_END))
			{
				vif1.done = true;
			}
			else
			{
				vif1.done = false;
			}
		}
		else //Assume normal mode for reverse FIFO and Normal.
		{
			if (dmacRegs.ctrl.STD == STD_VIF1)
				Console.WriteLn("DMA Stall Control on VIF1 normal");

			if (vif1ch.chcr.DIR) // to Memory
				vif1.dmamode = VIF_NORMAL_FROM_MEM_MODE;
			else
				vif1.dmamode = VIF_NORMAL_TO_MEM_MODE;

			// Starting fresh while a previous transfer left an IRQ offset
			// pending usually means the game force-stopped the channel.
			if (vif1.irqoffset.enabled == true && vif1.done == false)
				DevCon.Warning("Warning! VIF1 starting a Normal transfer with vif offset set (Possible force stop?)");

			vif1.done = true;
		}

		vif1.inprogress |= 1;
	}
	else
	{
		if (vif1.irqoffset.enabled == true && vif1.done == false)
			DevCon.Warning("Warning! VIF1 starting a new Chain transfer with vif offset set (Possible force stop?)");

		vif1.dmamode = VIF_CHAIN_MODE;
		vif1.done = false;
		vif1.inprogress &= ~0x1;
	}

	// FQC mirrors the VIF1 FIFO occupancy (16-QW depth); only meaningful for
	// the to-GS direction.
	if (vif1ch.chcr.DIR) vif1Regs.stat.FQC = min((u16)0x10, vif1ch.qwc);

	// Chain Mode
	CPU_INT(DMAC_VIF1, 4);
}
// GIF DMA event handler (variant with GIF FIFO hack support): wakes a VIF1
// channel that is waiting on Path 3, defers to the MFIFO handler when in
// GIF-MFIFO mode, drains the GIF FIFO when the hack is enabled, runs GIFdma
// while work remains, and finally ends the channel.
__fi void gifInterrupt()
{
	GIF_LOG("gifInterrupt caught!");

	gifCheckPathStatus();

	if (gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_IDLE)
	{
		if (vif1Regs.stat.VGW)
		{
			//Check if VIF is in a cycle or is currently "idle" waiting for GIF to come back.
			if (!(cpuRegs.interrupt & (1 << DMAC_VIF1)))
				CPU_INT(DMAC_VIF1, 1);

			//Make sure it loops if the GIF packet is empty to prepare for the next packet
			//or end if it was the end of a packet.
			//This must trigger after VIF retriggers as VIf might instantly mask Path3
			if (!gifUnit.Path3Masked() || gifch.qwc == 0)
			{
				GifDMAInt(16);
			}
			return;
		}
	}

	if (dmacRegs.ctrl.MFD == MFD_GIF)
	{ // GIF MFIFO
		//Console.WriteLn("GIF MFIFO");
		gifMFIFOInterrupt();
		return;
	}

	if (CHECK_GIFFIFOHACK)
	{
		// Drain the emulated GIF FIFO; reschedule proportionally to the
		// amount actually consumed.
		if (int amtRead = gif_fifo.read(true))
		{
			if (!gifUnit.Path3Masked() || gifRegs.stat.FQC < 16)
			{
				GifDMAInt(amtRead * BIAS);
				return;
			}
		}
		else
		{
			// Nothing read and the FIFO is full while Path 3 is unavailable:
			// park until Path 3 frees up (unless it is masked).
			if (!gifUnit.CanDoPath3() && gifRegs.stat.FQC == 16)
			{
				if (gifch.qwc > 0 || gspath3done == false)
				{
					if (!gifUnit.Path3Masked())
					{
						GifDMAInt(128);
					}
					return;
				}
			}
		}
	}

	// A GS SIGNAL is pending CPU acknowledgement: hold Path 3 until then.
	if (gifUnit.gsSIGNAL.queued)
	{
		GIF_LOG("Path 3 Paused");
		GifDMAInt(128);
		return;
	}

	if (!(gifch.chcr.STR)) return;

	if ((gifch.qwc > 0) || (!gspath3done))
	{
		if (!dmacRegs.ctrl.DMAE)
		{
			Console.Warning("gs dma masked, re-scheduling...");
			// re-raise the int shortly in the future
			GifDMAInt( 64 );
			return;
		}
		GIFdma();
		return;
	}

	//Double check as we might have read the fifo as it's ending the DMA
	gifCheckPathStatus();

	if (gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_IDLE)
	{
		if (vif1Regs.stat.VGW)
		{
			//Check if VIF is in a cycle or is currently "idle" waiting for GIF to come back.
			if (!(cpuRegs.interrupt & (1 << DMAC_VIF1)))
			{
				CPU_INT(DMAC_VIF1, 1);
			}
		}
	}

	if (!CHECK_GIFFIFOHACK)
	{
		gifRegs.stat.FQC = 0;
		clearFIFOstuff(false);
	}

	// Transfer complete: stop the channel and signal the DMAC interrupt.
	gscycles = 0;
	gspath3done = false;
	gifch.chcr.STR = false;
	hwDmacIrq(DMAC_GIF);
	GIF_LOG("GIF DMA End QWC in fifo %x APATH = %x OPH = %x state = %x", gifRegs.stat.FQC, gifRegs.stat.APATH, gifRegs.stat.OPH, gifUnit.gifPath[GIF_PATH_3].state);
}
// GIF DMA event handler (variant without the FIFO hack): releases Path 3
// when its packet is done, wakes a VIF1 channel waiting on Path 3, defers to
// the MFIFO handler when in GIF-MFIFO mode, runs GIFdma while work remains,
// and finally ends the channel.
__fi void gifInterrupt()
{
	GIF_LOG("gifInterrupt caught!");

	// Path 3 was the active GIF path and has gone idle/waiting: release it
	// and let another path execute.
	if (gifRegs.stat.APATH == 3)
	{
		gifRegs.stat.APATH = 0;
		gifRegs.stat.OPH = 0;

		if (gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_IDLE || gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_WAIT)
		{
			if (gifUnit.checkPaths(1, 1, 0))
				gifUnit.Execute(false, true);
		}
	}

	//Required for Path3 Masking timing!
	if (gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_WAIT)
		gifUnit.gifPath[GIF_PATH_3].state = GIF_PATH_IDLE;

	if (gifUnit.gifPath[GIF_PATH_3].state == GIF_PATH_IDLE)
	{
		if (vif1Regs.stat.VGW)
		{
			//Check if VIF is in a cycle or is currently "idle" waiting for GIF to come back.
			if (!(cpuRegs.interrupt & (1 << DMAC_VIF1)))
				CPU_INT(DMAC_VIF1, 1);

			//Make sure it loops if the GIF packet is empty to prepare for the next packet
			//or end if it was the end of a packet.
			if (!gifUnit.Path3Masked() || gifch.qwc == 0)
				CPU_INT(DMAC_GIF, 16);
			return;
		}
	}

	if (dmacRegs.ctrl.MFD == MFD_GIF)
	{ // GIF MFIFO
		//Console.WriteLn("GIF MFIFO");
		gifMFIFOInterrupt();
		return;
	}

	// A GS SIGNAL is pending CPU acknowledgement: hold Path 3 until then.
	if (gifUnit.gsSIGNAL.queued)
	{
		//DevCon.WriteLn("Path 3 Paused");
		CPU_INT(DMAC_GIF, 128);
		return;
	}

	if (!(gifch.chcr.STR)) return;

	if ((gifch.qwc > 0) || (!gspath3done))
	{
		if (!dmacRegs.ctrl.DMAE)
		{
			Console.Warning("gs dma masked, re-scheduling...");
			// re-raise the int shortly in the future
			CPU_INT( DMAC_GIF, 64 );
			return;
		}
		GIFdma();
		return;
	}

	// Transfer complete: stop the channel and signal the DMAC interrupt.
	gifRegs.stat.FQC = 0;
	gscycles = 0;
	gspath3done = false;
	gifch.chcr.STR = false;
	clearFIFOstuff(false);
	hwDmacIrq(DMAC_GIF);
	DMA_LOG("GIF DMA End");
}
void gbemu_cpu_run(int cycles) { gbemu_cpu_t CPU = GB.CPU; CPU.cycles = 0; GB.APU.cycles = 0; GB.APU.write_pos = gbemu_sound_buffer; int cycles_last = 0; static int h_cycles = 0; next_instruction: gbemu_ppu_draw(CPU.cycles); gbemu_apu_run(CPU.cycles); CPU.timer.ticks += CPU.cycles - cycles_last; while (CPU.timer.ticks_last < CPU.timer.ticks) { CPU.timer.ticks_last++; if (!(CPU.timer.ticks_last & 0x3F)) GB.DIV++; if (GB.TAC.active) { /* 0: 0xFF * 1: 0x03 * 2: 0x0F * 3: 0x3F */ static const uint8_t timer_masks[4] = {0xFF, 0x03, 0x0F, 0x3F}; if (!(CPU.timer.ticks_last & timer_masks[GB.TAC.clock_select])) { GB.TIMA++; if (!GB.TIMA) { GB.TIMA = GB.TMA; GB.IF.timer = 1; } } } } if (CPU.cycles > cycles) goto cpu_exit; h_cycles += CPU.cycles - cycles_last; cycles_last = CPU.cycles; if (h_cycles > GB_LINE_TICK_COUNT) { h_cycles -= GB_LINE_TICK_COUNT; GB.LY++; if (GB.LY >= GB_V_COUNT) { GB.LY = 0; goto cpu_exit; } } if (GB.LCDC.LCD_enable) { if (GB.LY == GB.LYC) { if(!GB.LCD_STAT.LCY_eq_LY_flag) { GB.LCD_STAT.LCY_eq_LY_flag = 1; if(GB.LCD_STAT.LCY_eq_LY_IE) GB.IF.LCD_stat = 1; } } else GB.LCD_STAT.LCY_eq_LY_flag = 0; if (GB.LY < 144) { if (h_cycles > (80 + 20)) { if(GB.LCD_STAT.mode_flag != GB_LCD_STAT_MODE0_HBLANK) { GB.LCD_STAT.mode_flag = GB_LCD_STAT_MODE0_HBLANK; if(GB.LCD_STAT.HBlank_IE) GB.IF.LCD_stat = 1; } } else if (h_cycles > 20) { if(GB.LCD_STAT.mode_flag != GB_LCD_STAT_MODE3_OAM_VRAM_busy) GB.LCD_STAT.mode_flag = GB_LCD_STAT_MODE3_OAM_VRAM_busy; } else { if (GB.LCD_STAT.mode_flag != GB_LCD_STAT_MODE2_OAM_busy) { GB.LCD_STAT.mode_flag = GB_LCD_STAT_MODE2_OAM_busy; if(GB.LCD_STAT.OAM_IE) GB.IF.LCD_stat = 1; } } } else if (GB.LCD_STAT.mode_flag != GB_LCD_STAT_MODE1_VBLANK) { GB.LCD_STAT.mode_flag = GB_LCD_STAT_MODE1_VBLANK; GB.IF.Vblank = 1; if(GB.LCD_STAT.VBlank_IE) GB.IF.LCD_stat = 1; } } // if((GB.LCD_STAT.VBlank_IE && (GB.LCD_STAT.mode_flag == GB_LCD_STAT_MODE1_VBLANK)) || // (GB.LCD_STAT.HBlank_IE && (GB.LCD_STAT.mode_flag == GB_LCD_STAT_MODE0_HBLANK)) || // 
(GB.LCD_STAT.OAM_IE && (GB.LCD_STAT.mode_flag == GB_LCD_STAT_MODE2_OAM_busy)) || // (GB.LCD_STAT.LCY_eq_LY_IE && GB.LCD_STAT.LCY_eq_LY_flag)) // GB.IF.LCD_stat = 1; if(CPU.HALT) { if (GB.IF.Vblank || GB.IF.LCD_stat || GB.IF.timer || GB.IF.serial || GB.IF.joypad ) CPU_disable_halt(); else { CPU_cycles_inc(); goto next_instruction; } } if (CPU.IME) { if ((GB.IF.Vblank && GB.IE.Vblank) && GB.LCDC.LCD_enable) { CPU.IME = 0; GB.IF.Vblank = 0; CPU_INT(0x40); } else if (GB.IF.LCD_stat && GB.IE.LCD_stat) { CPU.IME = 0; GB.IF.LCD_stat = 0; CPU_INT(0x48); } else if (GB.IF.timer && GB.IE.timer) { CPU.IME = 0; GB.IF.timer = 0; CPU_INT(0x50); } else if (GB.IF.serial && GB.IE.serial) { CPU.IME = 0; GB.IF.serial = 0; CPU_INT(0x58); } else if (GB.IF.joypad && GB.IE.joypad) { CPU.IME = 0; GB.IF.joypad = 0; CPU_INT(0x60); } } //#define DISASM #define SKIP_COUNT 0x12000 //#define SKIP_COUNT 0xEEE9 // #define SKIP_COUNT 0x00049B4C //#define SKIP_COUNT 0xFFFFFFFF //#define SKIP_COUNT 0x00000000 static int total_exec = 0; #ifdef DISASM static bool force_disasm = false; #endif next_instruction_nocheck: #ifdef DISASM // if(CPU.PC == 0xC2B5) // if (CPU.PC >= 0x4000) // force_disasm = true; if (total_exec > SKIP_COUNT) { force_disasm = true; printf("0x%08X: ", total_exec); } if (force_disasm) { gbemu_disasm_current(&CPU, true); fflush(stdout); } #endif total_exec++; // if(GB.MEMORY[0xFF44] == 0x94) // fflush(stdout); // if ((CPU.AF == 0x810b)) // fflush(stdout); // if ((CPU.PC == 0xDEF8)) // fflush(stdout); // if ((CPU.PC == 0xC4C2) && (CPU.A == 0xF1)) // fflush(stdout); #ifdef USE_BIOS if(CPU.PC == 0x100) memcpy(GB.MEMORY, GB.BIOS, 0x100); #endif switch (GB_READ_PC()) { // NOP // case 0x7F: // case 0x40: // case 0x49: // case 0x52: // case 0x5B: // case 0x64: // case 0x6D: case 0x00: CPU_NOP(); case 0x08: CPU_LD_addr16_SP(); case 0x10: CPU_STOP(); case 0x18: CPU_JR(CPU_COND_ALWAYS); case 0x20: CPU_JR(CPU_COND_NZ); case 0x28: CPU_JR(CPU_COND_Z); case 0x30: CPU_JR(CPU_COND_NC); case 0x38: 
CPU_JR(CPU_COND_C); case 0x01: CPU_LD_rr_imm16(REG_BC); case 0x11: CPU_LD_rr_imm16(REG_DE); case 0x21: CPU_LD_rr_imm16(REG_HL); case 0x31: CPU_LD_rr_imm16(REG_SP); case 0x09: CPU_ADD_rr_rr(REG_HL, REG_BC); case 0x19: CPU_ADD_rr_rr(REG_HL, REG_DE); case 0x29: CPU_ADD_rr_rr(REG_HL, REG_HL); case 0x39: CPU_ADD_rr_rr(REG_HL, REG_SP); case 0x02: CPU_LD_raddr_r(REG_BC, REG_A); case 0x0A: CPU_LD_r_raddr(REG_A, REG_BC); case 0x12: CPU_LD_raddr_r(REG_DE, REG_A); case 0x1A: CPU_LD_r_raddr(REG_A, REG_DE); case 0x22: CPU_LD_raddr_r(REG_HL++, REG_A); case 0x2A: CPU_LD_r_raddr(REG_A, REG_HL++); case 0x32: CPU_LD_raddr_r(REG_HL--, REG_A); case 0x3A: CPU_LD_r_raddr(REG_A, REG_HL--); case 0x03: CPU_INC_rr(REG_BC); case 0x0B: CPU_DEC_rr(REG_BC); case 0x13: CPU_INC_rr(REG_DE); case 0x1B: CPU_DEC_rr(REG_DE); case 0x23: CPU_INC_rr(REG_HL); case 0x2B: CPU_DEC_rr(REG_HL); case 0x33: CPU_INC_rr(REG_SP); case 0x3B: CPU_DEC_rr(REG_SP); case 0x04: CPU_INC_r(REG_B); case 0x0C: CPU_INC_r(REG_C); case 0x14: CPU_INC_r(REG_D); case 0x1C: CPU_INC_r(REG_E); case 0x24: CPU_INC_r(REG_H); case 0x2C: CPU_INC_r(REG_L); case 0x34: CPU_INC_raddr(REG_HL); case 0x3C: CPU_INC_r(REG_A); case 0x05: CPU_DEC_r(REG_B); case 0x0D: CPU_DEC_r(REG_C); case 0x15: CPU_DEC_r(REG_D); case 0x1D: CPU_DEC_r(REG_E); case 0x25: CPU_DEC_r(REG_H); case 0x2D: CPU_DEC_r(REG_L); case 0x35: CPU_DEC_raddr(REG_HL); case 0x3D: CPU_DEC_r(REG_A); case 0x06: CPU_LD_r_imm8(REG_B); case 0x0E: CPU_LD_r_imm8(REG_C); case 0x16: CPU_LD_r_imm8(REG_D); case 0x1E: CPU_LD_r_imm8(REG_E); case 0x26: CPU_LD_r_imm8(REG_H); case 0x2E: CPU_LD_r_imm8(REG_L); case 0x36: CPU_LD_raddr_imm8(REG_HL); case 0x3E: CPU_LD_r_imm8(REG_A); case 0x07: CPU_RLCA(); case 0x0F: CPU_RRCA(); case 0x17: CPU_RLA(); case 0x1F: CPU_RRA(); case 0x27: CPU_DAA(); case 0x2F: CPU_CPL(); case 0x37: CPU_SCF(); case 0x3F: CPU_CCF(); // LD r1, r2 case 0x40: CPU_LD_r0_r1(CPU.B, CPU.B); case 0x41: CPU_LD_r0_r1(CPU.B, CPU.C); case 0x42: CPU_LD_r0_r1(CPU.B, CPU.D); case 0x43: 
CPU_LD_r0_r1(CPU.B, CPU.E); case 0x44: CPU_LD_r0_r1(CPU.B, CPU.H); case 0x45: CPU_LD_r0_r1(CPU.B, CPU.L); case 0x46: CPU_LD_r_raddr(CPU.B, CPU.HL); case 0x47: CPU_LD_r0_r1(CPU.B, CPU.A); case 0x48: CPU_LD_r0_r1(CPU.C, CPU.B); case 0x49: CPU_LD_r0_r1(CPU.C, CPU.C); case 0x4A: CPU_LD_r0_r1(CPU.C, CPU.D); case 0x4B: CPU_LD_r0_r1(CPU.C, CPU.E); case 0x4C: CPU_LD_r0_r1(CPU.C, CPU.H); case 0x4D: CPU_LD_r0_r1(CPU.C, CPU.L); case 0x4E: CPU_LD_r_raddr(CPU.C, CPU.HL); case 0x4F: CPU_LD_r0_r1(CPU.C, CPU.A); case 0x50: CPU_LD_r0_r1(CPU.D, CPU.B); case 0x51: CPU_LD_r0_r1(CPU.D, CPU.C); case 0x52: CPU_LD_r0_r1(CPU.D, CPU.D); case 0x53: CPU_LD_r0_r1(CPU.D, CPU.E); case 0x54: CPU_LD_r0_r1(CPU.D, CPU.H); case 0x55: CPU_LD_r0_r1(CPU.D, CPU.L); case 0x56: CPU_LD_r_raddr(CPU.D, CPU.HL); case 0x57: CPU_LD_r0_r1(CPU.D, CPU.A); case 0x58: CPU_LD_r0_r1(CPU.E, CPU.B); case 0x59: CPU_LD_r0_r1(CPU.E, CPU.C); case 0x5A: CPU_LD_r0_r1(CPU.E, CPU.D); case 0x5B: CPU_LD_r0_r1(CPU.E, CPU.E); case 0x5C: CPU_LD_r0_r1(CPU.E, CPU.H); case 0x5D: CPU_LD_r0_r1(CPU.E, CPU.L); case 0x5E: CPU_LD_r_raddr(CPU.E, CPU.HL); case 0x5F: CPU_LD_r0_r1(CPU.E, CPU.A); case 0x60: CPU_LD_r0_r1(CPU.H, CPU.B); case 0x61: CPU_LD_r0_r1(CPU.H, CPU.C); case 0x62: CPU_LD_r0_r1(CPU.H, CPU.D); case 0x63: CPU_LD_r0_r1(CPU.H, CPU.E); case 0x64: CPU_LD_r0_r1(CPU.H, CPU.H); case 0x65: CPU_LD_r0_r1(CPU.H, CPU.L); case 0x66: CPU_LD_r_raddr(CPU.H, CPU.HL); case 0x67: CPU_LD_r0_r1(CPU.H, CPU.A); case 0x68: CPU_LD_r0_r1(CPU.L, CPU.B); case 0x69: CPU_LD_r0_r1(CPU.L, CPU.C); case 0x6A: CPU_LD_r0_r1(CPU.L, CPU.D); case 0x6B: CPU_LD_r0_r1(CPU.L, CPU.E); case 0x6C: CPU_LD_r0_r1(CPU.L, CPU.H); case 0x6D: CPU_LD_r0_r1(CPU.L, CPU.L); case 0x6E: CPU_LD_r_raddr(CPU.L, CPU.HL); case 0x6F: CPU_LD_r0_r1(CPU.L, CPU.A); case 0x70: CPU_LD_raddr_r(CPU.HL, CPU.B); case 0x71: CPU_LD_raddr_r(CPU.HL, CPU.C); case 0x72: CPU_LD_raddr_r(CPU.HL, CPU.D); case 0x73: CPU_LD_raddr_r(CPU.HL, CPU.E); case 0x74: CPU_LD_raddr_r(CPU.HL, CPU.H); case 0x75: 
CPU_LD_raddr_r(CPU.HL, CPU.L); case 0x76: CPU_HALT(); case 0x77: CPU_LD_raddr_r(CPU.HL, CPU.A); case 0x78: CPU_LD_r0_r1(CPU.A, CPU.B); case 0x79: CPU_LD_r0_r1(CPU.A, CPU.C); case 0x7A: CPU_LD_r0_r1(CPU.A, CPU.D); case 0x7B: CPU_LD_r0_r1(CPU.A, CPU.E); case 0x7C: CPU_LD_r0_r1(CPU.A, CPU.H); case 0x7D: CPU_LD_r0_r1(CPU.A, CPU.L); case 0x7E: CPU_LD_r_raddr(CPU.A, CPU.HL); case 0x7F: CPU_LD_r0_r1(CPU.A, CPU.A); /* ALU */ case 0x80: CPU_ADD_r_r(REG_A, REG_B); case 0x81: CPU_ADD_r_r(REG_A, REG_C); case 0x82: CPU_ADD_r_r(REG_A, REG_D); case 0x83: CPU_ADD_r_r(REG_A, REG_E); case 0x84: CPU_ADD_r_r(REG_A, REG_H); case 0x85: CPU_ADD_r_r(REG_A, REG_L); case 0x86: CPU_ADD_r_raddr(REG_A, REG_HL); case 0x87: CPU_ADD_r_r(REG_A, REG_A); case 0x88: CPU_ADC_r_r(REG_A, REG_B); case 0x89: CPU_ADC_r_r(REG_A, REG_C); case 0x8A: CPU_ADC_r_r(REG_A, REG_D); case 0x8B: CPU_ADC_r_r(REG_A, REG_E); case 0x8C: CPU_ADC_r_r(REG_A, REG_H); case 0x8D: CPU_ADC_r_r(REG_A, REG_L); case 0x8E: CPU_ADC_r_raddr(REG_A, REG_HL); case 0x8F: CPU_ADC_r_r(REG_A, REG_A); case 0x90: CPU_SUB_r_r(REG_A, REG_B); case 0x91: CPU_SUB_r_r(REG_A, REG_C); case 0x92: CPU_SUB_r_r(REG_A, REG_D); case 0x93: CPU_SUB_r_r(REG_A, REG_E); case 0x94: CPU_SUB_r_r(REG_A, REG_H); case 0x95: CPU_SUB_r_r(REG_A, REG_L); case 0x96: CPU_SUB_r_raddr(REG_A, REG_HL); case 0x97: CPU_SUB_r_r(REG_A, REG_A); case 0x98: CPU_SBC_r_r(REG_A, REG_B); case 0x99: CPU_SBC_r_r(REG_A, REG_C); case 0x9A: CPU_SBC_r_r(REG_A, REG_D); case 0x9B: CPU_SBC_r_r(REG_A, REG_E); case 0x9C: CPU_SBC_r_r(REG_A, REG_H); case 0x9D: CPU_SBC_r_r(REG_A, REG_L); case 0x9E: CPU_SBC_r_raddr(REG_A, REG_HL); case 0x9F: CPU_SBC_r_r(REG_A, REG_A); case 0xA0: CPU_AND_A_r(REG_B); case 0xA1: CPU_AND_A_r(REG_C); case 0xA2: CPU_AND_A_r(REG_D); case 0xA3: CPU_AND_A_r(REG_E); case 0xA4: CPU_AND_A_r(REG_H); case 0xA5: CPU_AND_A_r(REG_L); case 0xA6: CPU_AND_A_raddr(REG_HL); case 0xA7: CPU_AND_A_r(REG_A); case 0xA8: CPU_XOR_A_r(REG_B); case 0xA9: CPU_XOR_A_r(REG_C); case 0xAA: 
CPU_XOR_A_r(REG_D); case 0xAB: CPU_XOR_A_r(REG_E); case 0xAC: CPU_XOR_A_r(REG_H); case 0xAD: CPU_XOR_A_r(REG_L); case 0xAE: CPU_XOR_A_raddr(REG_HL); case 0xAF: CPU_XOR_A_r(REG_A); case 0xB0: CPU_OR_A_r(REG_B); case 0xB1: CPU_OR_A_r(REG_C); case 0xB2: CPU_OR_A_r(REG_D); case 0xB3: CPU_OR_A_r(REG_E); case 0xB4: CPU_OR_A_r(REG_H); case 0xB5: CPU_OR_A_r(REG_L); case 0xB6: CPU_OR_A_raddr(REG_HL); case 0xB7: CPU_OR_A_r(REG_A); case 0xB8: CPU_CP_A_r(REG_B); case 0xB9: CPU_CP_A_r(REG_C); case 0xBA: CPU_CP_A_r(REG_D); case 0xBB: CPU_CP_A_r(REG_E); case 0xBC: CPU_CP_A_r(REG_H); case 0xBD: CPU_CP_A_r(REG_L); case 0xBE: CPU_CP_A_raddr(REG_HL); case 0xBF: CPU_CP_A_r(REG_A); /*******/ case 0xC0: CPU_RET_cc(CPU_COND_NZ); case 0xC8: CPU_RET_cc(CPU_COND_Z); case 0xD0: CPU_RET_cc(CPU_COND_NC); case 0xD8: CPU_RET_cc(CPU_COND_C); case 0xE0: CPU_LD_addr8_r(REG_A); case 0xF0: CPU_LD_r_addr8(REG_A); case 0xE8: CPU_ADD_SP_off8(); case 0xF8: CPU_LD_HL_SP_off8(); // POP (rr) case 0xC1: CPU_POP_BC(); case 0xD1: CPU_POP_DE(); case 0xE1: CPU_POP_HL(); case 0xF1: CPU_POP_AF(); case 0xC9: CPU_RET(); case 0xD9: CPU_RETI(); case 0xE9: CPU_JP_HL(); case 0xC2: CPU_JP(CPU_COND_NZ); case 0xD2: CPU_JP(CPU_COND_NC); case 0xCA: CPU_JP(CPU_COND_Z); case 0xDA: CPU_JP(CPU_COND_C); case 0xC3: CPU_JP(CPU_COND_ALWAYS); case 0xF3: CPU_DI(); case 0xFB: CPU_EI(); case 0xC4: CPU_CALL(CPU_COND_NZ); case 0xCC: CPU_CALL(CPU_COND_Z); case 0xD4: CPU_CALL(CPU_COND_NC); case 0xDC: CPU_CALL(CPU_COND_C); case 0xCD: CPU_CALL(CPU_COND_ALWAYS); // PUSH (rr) case 0xC5: CPU_PUSH_BC(); case 0xD5: CPU_PUSH_DE(); case 0xE5: CPU_PUSH_HL(); case 0xF5: CPU_PUSH_AF(); case 0xC6: CPU_ADD_r_imm8(REG_A); case 0xCE: // 0x0210 0xC674 0xDEF8 // if(REG_PC == 0xDEF9) // fflush(stdout); CPU_ADC_r_imm8(REG_A); case 0xD6: CPU_SUB_r_imm8(REG_A); case 0xDE: CPU_SBC_r_imm8(REG_A); case 0xE6: CPU_AND_A_imm8(); case 0xEE: CPU_XOR_A_imm8(); case 0xF6: CPU_OR_A_imm8(); case 0xFE: CPU_CP_A_imm8(); case 0xFA: { uint16_t addr = GB_READ_U8(CPU.PC++); addr 
|= GB_READ_U8(CPU.PC++) << 8; CPU.A = GB_READ_U8(addr); CPU.cycles += 4; CPU_exec_next(); } case 0xEA: CPU_LD_addr16_A(); // LD A,(C) case 0xF2: CPU.A = GB_READ_U8(CPU.C | 0xFF00); CPU.cycles += 2; CPU_exec_next(); // LD (C), A case 0xE2: GB_WRITE_U8((CPU.C | 0xFF00), CPU.A); CPU.cycles += 2; CPU_exec_next(); // LD SP, HL case 0xF9: CPU.SP = CPU.HL; CPU.cycles += 2; CPU_exec_next(); case 0xC7: CPU_RST(0x00); case 0xD7: CPU_RST(0x10); case 0xE7: CPU_RST(0x20); case 0xF7: CPU_RST(0x30); case 0xCF: CPU_RST(0x08); case 0xDF: CPU_RST(0x18); case 0xEF: CPU_RST(0x28); case 0xFF: CPU_RST(0x38); case 0xCB: { switch (GB_READ_PC()) { case 0x00: CPU_RLC(REG_B); case 0x01: CPU_RLC(REG_C); case 0x02: CPU_RLC(REG_D); case 0x03: CPU_RLC(REG_E); case 0x04: CPU_RLC(REG_H); case 0x05: CPU_RLC(REG_L); case 0x06: CPU_RLC_HL(); case 0x07: CPU_RLC(REG_A); case 0x08: CPU_RRC(REG_B); case 0x09: CPU_RRC(REG_C); case 0x0A: CPU_RRC(REG_D); case 0x0B: CPU_RRC(REG_E); case 0x0C: CPU_RRC(REG_H); case 0x0D: CPU_RRC(REG_L); case 0x0E: CPU_RRC_HL(); case 0x0F: CPU_RRC(REG_A); case 0x10: CPU_RL(REG_B); case 0x11: CPU_RL(REG_C); case 0x12: CPU_RL(REG_D); case 0x13: CPU_RL(REG_E); case 0x14: CPU_RL(REG_H); case 0x15: CPU_RL(REG_L); case 0x16: CPU_RL_HL(); case 0x17: CPU_RL(REG_A); case 0x18: CPU_RR(REG_B); case 0x19: CPU_RR(REG_C); case 0x1A: CPU_RR(REG_D); case 0x1B: CPU_RR(REG_E); case 0x1C: CPU_RR(REG_H); case 0x1D: CPU_RR(REG_L); case 0x1E: CPU_RR_HL(); case 0x1F: CPU_RR(REG_A); case 0x20: CPU_SLA(REG_B); case 0x21: CPU_SLA(REG_C); case 0x22: CPU_SLA(REG_D); case 0x23: CPU_SLA(REG_E); case 0x24: CPU_SLA(REG_H); case 0x25: CPU_SLA(REG_L); case 0x26: CPU_SLA_HL(); case 0x27: CPU_SLA(REG_A); case 0x28: CPU_SRA(REG_B); case 0x29: CPU_SRA(REG_C); case 0x2A: CPU_SRA(REG_D); case 0x2B: CPU_SRA(REG_E); case 0x2C: CPU_SRA(REG_H); case 0x2D: CPU_SRA(REG_L); case 0x2E: CPU_SRA_HL(); case 0x2F: CPU_SRA(REG_A); case 0x30: CPU_SWAP(REG_B); case 0x31: CPU_SWAP(REG_C); case 0x32: CPU_SWAP(REG_D); case 0x33: 
CPU_SWAP(REG_E); case 0x34: CPU_SWAP(REG_H); case 0x35: CPU_SWAP(REG_L); case 0x36: CPU_SWAP_HL(); case 0x37: CPU_SWAP(REG_A); case 0x38: CPU_SRL(REG_B); case 0x39: CPU_SRL(REG_C); case 0x3A: CPU_SRL(REG_D); case 0x3B: CPU_SRL(REG_E); case 0x3C: CPU_SRL(REG_H); case 0x3D: CPU_SRL(REG_L); case 0x3E: CPU_SRL_HL(); case 0x3F: CPU_SRL(REG_A); case 0x40: CPU_BIT(0, REG_B); case 0x41: CPU_BIT(0, REG_C); case 0x42: CPU_BIT(0, REG_D); case 0x43: CPU_BIT(0, REG_E); case 0x44: CPU_BIT(0, REG_H); case 0x45: CPU_BIT(0, REG_L); case 0x46: CPU_BIT_HL(0); case 0x47: CPU_BIT(0, REG_A); case 0x48: CPU_BIT(1, REG_B); case 0x49: CPU_BIT(1, REG_C); case 0x4A: CPU_BIT(1, REG_D); case 0x4B: CPU_BIT(1, REG_E); case 0x4C: CPU_BIT(1, REG_H); case 0x4D: CPU_BIT(1, REG_L); case 0x4E: CPU_BIT_HL(1); case 0x4F: CPU_BIT(1, REG_A); case 0x50: CPU_BIT(2, REG_B); case 0x51: CPU_BIT(2, REG_C); case 0x52: CPU_BIT(2, REG_D); case 0x53: CPU_BIT(2, REG_E); case 0x54: CPU_BIT(2, REG_H); case 0x55: CPU_BIT(2, REG_L); case 0x56: CPU_BIT_HL(2); case 0x57: CPU_BIT(2, REG_A); case 0x58: CPU_BIT(3, REG_B); case 0x59: CPU_BIT(3, REG_C); case 0x5A: CPU_BIT(3, REG_D); case 0x5B: CPU_BIT(3, REG_E); case 0x5C: CPU_BIT(3, REG_H); case 0x5D: CPU_BIT(3, REG_L); case 0x5E: CPU_BIT_HL(3); case 0x5F: CPU_BIT(3, REG_A); case 0x60: CPU_BIT(4, REG_B); case 0x61: CPU_BIT(4, REG_C); case 0x62: CPU_BIT(4, REG_D); case 0x63: CPU_BIT(4, REG_E); case 0x64: CPU_BIT(4, REG_H); case 0x65: CPU_BIT(4, REG_L); case 0x66: CPU_BIT_HL(4); case 0x67: CPU_BIT(4, REG_A); case 0x68: CPU_BIT(5, REG_B); case 0x69: CPU_BIT(5, REG_C); case 0x6A: CPU_BIT(5, REG_D); case 0x6B: CPU_BIT(5, REG_E); case 0x6C: CPU_BIT(5, REG_H); case 0x6D: CPU_BIT(5, REG_L); case 0x6E: CPU_BIT_HL(5); case 0x6F: CPU_BIT(5, REG_A); case 0x70: CPU_BIT(6, REG_B); case 0x71: CPU_BIT(6, REG_C); case 0x72: CPU_BIT(6, REG_D); case 0x73: CPU_BIT(6, REG_E); case 0x74: CPU_BIT(6, REG_H); case 0x75: CPU_BIT(6, REG_L); case 0x76: CPU_BIT_HL(6); case 0x77: CPU_BIT(6, REG_A); case 
0x78: CPU_BIT(7, REG_B); case 0x79: CPU_BIT(7, REG_C); case 0x7A: CPU_BIT(7, REG_D); case 0x7B: CPU_BIT(7, REG_E); case 0x7C: CPU_BIT(7, REG_H); case 0x7D: CPU_BIT(7, REG_L); case 0x7E: CPU_BIT_HL(7); case 0x7F: CPU_BIT(7, REG_A); case 0x80: CPU_RES(0, REG_B); case 0x81: CPU_RES(0, REG_C); case 0x82: CPU_RES(0, REG_D); case 0x83: CPU_RES(0, REG_E); case 0x84: CPU_RES(0, REG_H); case 0x85: CPU_RES(0, REG_L); case 0x86: CPU_RES_HL(0); case 0x87: CPU_RES(0, REG_A); case 0x88: CPU_RES(1, REG_B); case 0x89: CPU_RES(1, REG_C); case 0x8A: CPU_RES(1, REG_D); case 0x8B: CPU_RES(1, REG_E); case 0x8C: CPU_RES(1, REG_H); case 0x8D: CPU_RES(1, REG_L); case 0x8E: CPU_RES_HL(1); case 0x8F: CPU_RES(1, REG_A); case 0x90: CPU_RES(2, REG_B); case 0x91: CPU_RES(2, REG_C); case 0x92: CPU_RES(2, REG_D); case 0x93: CPU_RES(2, REG_E); case 0x94: CPU_RES(2, REG_H); case 0x95: CPU_RES(2, REG_L); case 0x96: CPU_RES_HL(2); case 0x97: CPU_RES(2, REG_A); case 0x98: CPU_RES(3, REG_B); case 0x99: CPU_RES(3, REG_C); case 0x9A: CPU_RES(3, REG_D); case 0x9B: CPU_RES(3, REG_E); case 0x9C: CPU_RES(3, REG_H); case 0x9D: CPU_RES(3, REG_L); case 0x9E: CPU_RES_HL(3); case 0x9F: CPU_RES(3, REG_A); case 0xA0: CPU_RES(4, REG_B); case 0xA1: CPU_RES(4, REG_C); case 0xA2: CPU_RES(4, REG_D); case 0xA3: CPU_RES(4, REG_E); case 0xA4: CPU_RES(4, REG_H); case 0xA5: CPU_RES(4, REG_L); case 0xA6: CPU_RES_HL(4); case 0xA7: CPU_RES(4, REG_A); case 0xA8: CPU_RES(5, REG_B); case 0xA9: CPU_RES(5, REG_C); case 0xAA: CPU_RES(5, REG_D); case 0xAB: CPU_RES(5, REG_E); case 0xAC: CPU_RES(5, REG_H); case 0xAD: CPU_RES(5, REG_L); case 0xAE: CPU_RES_HL(5); case 0xAF: CPU_RES(5, REG_A); case 0xB0: CPU_RES(6, REG_B); case 0xB1: CPU_RES(6, REG_C); case 0xB2: CPU_RES(6, REG_D); case 0xB3: CPU_RES(6, REG_E); case 0xB4: CPU_RES(6, REG_H); case 0xB5: CPU_RES(6, REG_L); case 0xB6: CPU_RES_HL(6); case 0xB7: CPU_RES(6, REG_A); case 0xB8: CPU_RES(7, REG_B); case 0xB9: CPU_RES(7, REG_C); case 0xBA: CPU_RES(7, REG_D); case 0xBB: CPU_RES(7, 
REG_E); case 0xBC: CPU_RES(7, REG_H); case 0xBD: CPU_RES(7, REG_L); case 0xBE: CPU_RES_HL(7); case 0xBF: CPU_RES(7, REG_A); case 0xC0: CPU_SET(0, REG_B); case 0xC1: CPU_SET(0, REG_C); case 0xC2: CPU_SET(0, REG_D); case 0xC3: CPU_SET(0, REG_E); case 0xC4: CPU_SET(0, REG_H); case 0xC5: CPU_SET(0, REG_L); case 0xC6: CPU_SET_HL(0); case 0xC7: CPU_SET(0, REG_A); case 0xC8: CPU_SET(1, REG_B); case 0xC9: CPU_SET(1, REG_C); case 0xCA: CPU_SET(1, REG_D); case 0xCB: CPU_SET(1, REG_E); case 0xCC: CPU_SET(1, REG_H); case 0xCD: CPU_SET(1, REG_L); case 0xCE: CPU_SET_HL(1); case 0xCF: CPU_SET(1, REG_A); case 0xD0: CPU_SET(2, REG_B); case 0xD1: CPU_SET(2, REG_C); case 0xD2: CPU_SET(2, REG_D); case 0xD3: CPU_SET(2, REG_E); case 0xD4: CPU_SET(2, REG_H); case 0xD5: CPU_SET(2, REG_L); case 0xD6: CPU_SET_HL(2); case 0xD7: CPU_SET(2, REG_A); case 0xD8: CPU_SET(3, REG_B); case 0xD9: CPU_SET(3, REG_C); case 0xDA: CPU_SET(3, REG_D); case 0xDB: CPU_SET(3, REG_E); case 0xDC: CPU_SET(3, REG_H); case 0xDD: CPU_SET(3, REG_L); case 0xDE: CPU_SET_HL(3); case 0xDF: CPU_SET(3, REG_A); case 0xE0: CPU_SET(4, REG_B); case 0xE1: CPU_SET(4, REG_C); case 0xE2: CPU_SET(4, REG_D); case 0xE3: CPU_SET(4, REG_E); case 0xE4: CPU_SET(4, REG_H); case 0xE5: CPU_SET(4, REG_L); case 0xE6: CPU_SET_HL(4); case 0xE7: CPU_SET(4, REG_A); case 0xE8: CPU_SET(5, REG_B); case 0xE9: CPU_SET(5, REG_C); case 0xEA: CPU_SET(5, REG_D); case 0xEB: CPU_SET(5, REG_E); case 0xEC: CPU_SET(5, REG_H); case 0xED: CPU_SET(5, REG_L); case 0xEE: CPU_SET_HL(5); case 0xEF: CPU_SET(5, REG_A); case 0xF0: CPU_SET(6, REG_B); case 0xF1: CPU_SET(6, REG_C); case 0xF2: CPU_SET(6, REG_D); case 0xF3: CPU_SET(6, REG_E); case 0xF4: CPU_SET(6, REG_H); case 0xF5: CPU_SET(6, REG_L); case 0xF6: CPU_SET_HL(6); case 0xF7: CPU_SET(6, REG_A); case 0xF8: CPU_SET(7, REG_B); case 0xF9: CPU_SET(7, REG_C); case 0xFA: CPU_SET(7, REG_D); case 0xFB: CPU_SET(7, REG_E); case 0xFC: CPU_SET(7, REG_H); case 0xFD: CPU_SET(7, REG_L); case 0xFE: CPU_SET_HL(7); case 0xFF: 
CPU_SET(7, REG_A); default: gbemu_printf("(0xCB)"); goto unknown_opcode; } } case 0xD3: case 0xE3: case 0xE4: case 0xF4: case 0xDB: case 0xEB: case 0xEC: case 0xFC: case 0xDD: case 0xED: case 0xFD: invalid_opcode: { extern retro_environment_t environ_cb; retro_sleep(10); printf("invalid opcode : 0x%02X\n",GB_READ_U8(CPU.PC - 1)); fflush(stdout); // DEBUG_BREAK(); if (environ_cb) environ_cb(RETRO_ENVIRONMENT_SHUTDOWN, NULL); #ifdef PERF_TEST extern struct retro_perf_callback perf_cb; perf_cb.perf_log(); #endif // return; exit(0); } break; default: unknown_opcode: { extern retro_environment_t environ_cb; retro_sleep(10); printf("unknown opcode : 0x%02X\n", GB_READ_U8(CPU.PC - 1)); fflush(stdout); // DEBUG_BREAK(); if (environ_cb) environ_cb(RETRO_ENVIRONMENT_SHUTDOWN, NULL); #ifdef PERF_TEST extern struct retro_perf_callback perf_cb; perf_cb.perf_log(); #endif // return; exit(0); } break; } cpu_exit: GB.CPU = CPU; return; }