void CPU::dma_transfer(bool direction, uint8 bbus, uint32 abus) { if(direction == 0) { dma_add_clocks(4); regs.mdr = dma_read(abus); dma_add_clocks(4); dma_write(dma_transfer_valid(bbus, abus), 0x2100 | bbus, regs.mdr); } else { dma_add_clocks(4); regs.mdr = dma_transfer_valid(bbus, abus) ? bus.read(0x2100 | bbus) : 0x00; dma_add_clocks(4); dma_write(dma_addr_valid(abus), abus, regs.mdr); } }
//Reload HDMA table state for channel i at the start of its next table entry:
//fetches the line-counter byte and, for indirect mode, the 16-bit indirect
//address (low byte first). Each table fetch costs 8 master clocks.
//NOTE(review): this block appears truncated in this chunk — the closing braces
//for the indirect-mode branch and the function body are not visible here.
void sCPU::hdma_update(uint8 i) {
  //line-counter byte: bit7 = repeat flag, bits0-6 = line count
  channel[i].hdma_line_counter = dma_read(hdma_addr(i));
  dma_add_clocks(8);
  //a counter byte of 0x00 terminates this channel's table
  channel[i].hdma_completed = (channel[i].hdma_line_counter == 0);
  channel[i].hdma_do_transfer = !channel[i].hdma_completed;
  if(channel[i].hdma_indirect) {
    //low byte of the indirect address is parked in the high half for now;
    //it is shifted down when (and only when) the high byte is also fetched
    channel[i].hdma_iaddr = dma_read(hdma_addr(i)) << 8;
    dma_add_clocks(8);
    //the high byte is only fetched if the table has not just terminated,
    //or if a later channel is still active (hdma_active_after)
    if(!channel[i].hdma_completed || hdma_active_after(i)) {
      channel[i].hdma_iaddr >>= 8;
      channel[i].hdma_iaddr |= dma_read(hdma_addr(i)) << 8;
      dma_add_clocks(8);
    }
void sCPU::dma_run() { dma_add_clocks(8); cycle_edge(); for(unsigned i = 0; i < 8; i++) { if(channel[i].dma_enabled == false) continue; dma_add_clocks(8); cycle_edge(); unsigned index = 0; do { dma_transfer(channel[i].direction, dma_bbus(i, index++), dma_addr(i)); } while(channel[i].dma_enabled && --channel[i].xfersize); channel[i].dma_enabled = false; } status.irq_lock = true; event.enqueue(2, EventIrqLockRelease); }
void sCPU::dma_transfer(bool direction, uint8 bbus, uint32 abus) { if(direction == 0) { //a->b transfer (to $21xx) if(bbus == 0x80 && ((abus & 0xfe0000) == 0x7e0000 || (abus & 0x40e000) == 0x0000)) { //illegal WRAM->WRAM transfer (bus conflict) //read most likely occurs; no write occurs //read is irrelevent, as it cannot be observed by software dma_add_clocks(8); } else { dma_add_clocks(4); uint8 data = dma_read(abus); dma_add_clocks(4); bus.write(0x2100 | bbus, data); } } else { //b->a transfer (from $21xx) if(bbus == 0x80 && ((abus & 0xfe0000) == 0x7e0000 || (abus & 0x40e000) == 0x0000)) { //illegal WRAM->WRAM transfer (bus conflict) //no read occurs; write does occur dma_add_clocks(8); bus.write(abus, 0x00); //does not write S-CPU MDR } else { dma_add_clocks(4); uint8 data = bus.read(0x2100 | bbus); dma_add_clocks(4); if(dma_addr_valid(abus) == true) { bus.write(abus, data); } } } cycle_edge(); }
//Advance HDMA table state for channel i: fetch the next table byte (through
//regs.mdr) and, when the current entry has expired, reload the line counter
//and — in indirect mode — the 16-bit indirect address (low byte first).
//Each fetch costs 8 clocks (4 + 4) with a dummy dma_write(false) between.
//NOTE(review): this block appears truncated in this chunk — the closing
//braces for the inner branches and the function body are not visible here.
void CPU::hdma_update(unsigned i) {
  dma_add_clocks(4);
  regs.mdr = dma_read((channel[i].source_bank << 16) | channel[i].hdma_addr);
  dma_add_clocks(4);
  dma_write(false);  //dummy write slot; no bus write occurs
  //when the low 7 bits of the line counter hit zero, this table entry is
  //exhausted — the byte just read is the next entry's line-counter byte
  if((channel[i].line_counter & 0x7f) == 0) {
    channel[i].line_counter = regs.mdr;
    channel[i].hdma_addr++;
    //a counter byte of 0x00 terminates this channel's table
    channel[i].hdma_completed = (channel[i].line_counter == 0);
    channel[i].hdma_do_transfer = !channel[i].hdma_completed;
    if(channel[i].indirect) {
      dma_add_clocks(4);
      regs.mdr = dma_read(hdma_addr(i));
      //low byte is parked in the high half; shifted down only if the high
      //byte is also fetched below
      channel[i].indirect_addr = regs.mdr << 8;
      dma_add_clocks(4);
      dma_write(false);
      //high byte is fetched only if the table has not just terminated, or a
      //later channel is still active (hdma_active_after)
      if(!channel[i].hdma_completed || hdma_active_after(i)) {
        dma_add_clocks(4);
        regs.mdr = dma_read(hdma_addr(i));
        channel[i].indirect_addr >>= 8;
        channel[i].indirect_addr |= regs.mdr << 8;
        dma_add_clocks(4);
        dma_write(false);
      }
//Run every enabled general-purpose DMA channel, lowest channel number first.
//Costs 8 clocks of fixed overhead up front and 8 clocks of completion
//overhead per active channel (after its bytes move); each step is followed
//by a dummy dma_write(false) slot and a dma_edge() synchronization.
//IRQs are locked once all channels have finished.
void CPU::dma_run() {
  //fixed startup overhead before any channel runs
  dma_add_clocks(8);
  dma_write(false);
  dma_edge();

  for(unsigned n = 0; n < 8; n++) {
    if(!channel[n].dma_enabled) continue;

    unsigned unit = 0;
    do {
      dma_transfer(channel[n].direction, dma_bbus(n, unit++), dma_addr(n));
      dma_edge();
      //loop also ends early if the channel is disabled mid-transfer
    } while(channel[n].dma_enabled && --channel[n].transfer_size);

    //per-channel completion overhead
    dma_add_clocks(8);
    dma_write(false);
    dma_edge();

    channel[n].dma_enabled = false;
  }

  status.irq_lock = true;
}