void pvr_dma_transfer( ) { sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0; uint32_t count = MMIO_READ( ASIC, PVRDMACNT ); unsigned char data[8192]; uint32_t rcount; while( count ) { uint32_t chunksize = (count < 8192) ? count : 8192; rcount = DMAC_get_buffer( 2, data, chunksize ); pvr2_dma_write( destaddr, data, rcount ); destaddr += rcount; count -= rcount; if( rcount != chunksize ) { WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, chunksize ); break; } } MMIO_WRITE( ASIC, PVRDMACTL, 0 ); MMIO_WRITE( ASIC, PVRDMACNT, 0 ); if( destaddr & 0x01000000 ) { /* Write to texture RAM */ MMIO_WRITE( ASIC, PVRDMADEST, destaddr ); } asic_event( EVENT_PVR_DMA ); }
void g2_dma_transfer( int channel ) { uint32_t offset = channel << 5; if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) { if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) { uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset ); uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset ); uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF; uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset ); // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset ); unsigned char buf[length]; if( dir == 0 ) { /* SH4 to device */ mem_copy_from_sh4( buf, sh4addr, length ); mem_copy_to_sh4( extaddr, buf, length ); } else { /* Device to SH4 */ mem_copy_from_sh4( buf, extaddr, length ); mem_copy_to_sh4( sh4addr, buf, length ); } MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 ); asic_event( EVENT_G2_DMA0 + channel ); } else { MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 ); } } }
/**
 * Count the specified timer for a given number of nanoseconds.
 *
 * @param timer    timer index (0..2)
 * @param nanosecs absolute time (ns) within the current slice to count up to
 * @return the new TCNT value for the timer.
 */
uint32_t TMU_count( int timer, uint32_t nanosecs )
{
    /* Nanoseconds to account for this call: elapsed time since the last
     * update (nanosecs - timer_run), plus the sub-tick remainder carried
     * over from the previous update. */
    uint32_t run_ns = nanosecs + TMU_timers[timer].timer_remainder -
        TMU_timers[timer].timer_run;
    /* Carry the fraction of a tick that didn't complete */
    TMU_timers[timer].timer_remainder =
        run_ns % TMU_timers[timer].timer_period;
    TMU_timers[timer].timer_run = nanosecs;
    /* Whole ticks elapsed */
    uint32_t count = run_ns / TMU_timers[timer].timer_period;
    uint32_t value = MMIO_READ( TMU, TCNT0 + 12*timer );  /* registers are 12 bytes apart */
    uint32_t reset = MMIO_READ( TMU, TCOR0 + 12*timer );
//    if( timer == 2 )
//        WARN( "Counting timer %d: %d ns, %d ticks", timer, run_ns, count );
    if( count > value ) {
        /* Timer underflowed at least once: set the underflow flag, wrap
         * the count around the reload value, and raise the interrupt if
         * underflow interrupts are enabled. */
        uint32_t tcr = MMIO_READ( TMU, TCR0 + 12*timer );
        tcr |= TCR_UNF;
        count -= value;
        value = reset - (count % reset) + 1;
        MMIO_WRITE( TMU, TCR0 + 12*timer, tcr );
        if( tcr & TCR_UNIE )
            intc_raise_interrupt( INT_TMU_TUNI0 + timer );
        MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
//        if( timer == 2 )
//            WARN( "Underflowed timer %d", timer );
        TMU_schedule_timer(timer);
    } else {
        value -= count;
        MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
    }
    return value;
}
/**
 * Initialize the legacy (16550-compatible) UART: interrupts and FIFOs
 * disabled, 8-bit no-parity framing, DTR/RTS asserted, input buffer
 * drained. Leaves the firmware's baud rate in place unless overridden.
 */
static void mmio_uart_init( void )
{
    /* Disable hardware interrupts */
    MMIO_WRITE( MCR, 0 );
    MMIO_WRITE( IER, 0 );

    /* Disable FIFO's for 16550 devices */
    MMIO_WRITE( FCR, 0 );

    /* Set for 8-bit, no parity, DLAB bit cleared */
    MMIO_WRITE( LCR, UART_LCR_8BITS );

    /* Leave baud rate as set by firmware unless serialbaud boot-arg overrides */
    if (uart_baud_rate != DEFAULT_UART_BAUD_RATE) {
        gPESF->uart_set_baud_rate ( 0, uart_baud_rate );
    }

    /* Assert DTR# and RTS# lines (OUT2?) */
    MMIO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS );

    /* Clear any garbage in the input buffer by reading (and discarding)
     * the receive buffer register */
    MMIO_READ( RBR );

    uart_initted = 1;
}
/**
 * Probe for a UART by writing test patterns to the scratch register (SCR)
 * and verifying each reads back intact.
 * @return 1 if a UART responds correctly, 0 otherwise.
 */
static int mmio_uart_present( void )
{
    static const unsigned char probes[] = { 0x5a, 0xa5 };

    for( unsigned i = 0; i < sizeof(probes); i++ ) {
        MMIO_WRITE( SCR, probes[i] );
        if (MMIO_READ(SCR) != probes[i])
            return 0;
    }
    return 1;
}
/**
 * Program the UART divisor latch for the requested baud rate (a rate of
 * 0 defaults to 9600). The line-control register is restored with DLAB
 * cleared afterwards.
 */
static void mmio_uart_set_baud_rate( __unused int unit, __unused uint32_t baud_rate )
{
    const unsigned char saved_lcr = MMIO_READ( LCR );

    if (baud_rate == 0)
        baud_rate = 9600;
    unsigned long divisor = LEGACY_UART_CLOCK / 16 / baud_rate;

    /* Divisor latch registers (DLM/DLL) are only accessible while DLAB is set */
    MMIO_WRITE( LCR, saved_lcr | UART_LCR_DLAB );
    MMIO_WRITE( DLM, (unsigned char)(divisor >> 8) );
    MMIO_WRITE( DLL, (unsigned char) divisor );
    MMIO_WRITE( LCR, saved_lcr & ~UART_LCR_DLAB);
}
MMIO_REGION_WRITE_FN( TMU, reg, val ) { uint32_t oldval; int i; reg &= 0xFFF; switch( reg ) { case TSTR: oldval = MMIO_READ( TMU, TSTR ); for( i=0; i<3; i++ ) { uint32_t tmp = 1<<i; if( (oldval & tmp) != 0 && (val&tmp) == 0 ) TMU_stop(i); else if( (oldval&tmp) == 0 && (val&tmp) != 0 ) TMU_start(i); } break; case TCR0: TMU_set_timer_control( 0, val ); return; case TCR1: TMU_set_timer_control( 1, val ); return; case TCR2: TMU_set_timer_control( 2, val ); return; case TCNT0: MMIO_WRITE( TMU, reg, val ); if( TMU_IS_RUNNING(0) ) { // reschedule TMU_timers[0].timer_run = sh4r.slice_cycle; TMU_schedule_timer( 0 ); } return; case TCNT1: MMIO_WRITE( TMU, reg, val ); if( TMU_IS_RUNNING(1) ) { // reschedule TMU_timers[1].timer_run = sh4r.slice_cycle; TMU_schedule_timer( 1 ); } return; case TCNT2: MMIO_WRITE( TMU, reg, val ); if( TMU_IS_RUNNING(2) ) { // reschedule TMU_timers[2].timer_run = sh4r.slice_cycle; TMU_schedule_timer( 2 ); } return; } MMIO_WRITE( TMU, reg, val ); }
/**
 * Clear the pending bit for the given ASIC event. When the last event in
 * a secondary group (PIRQ1/PIRQ2) clears, the corresponding cascade bit
 * in PIRQ0 is dropped as well.
 */
void asic_clear_event( int event )
{
    /* Events are 32 per register; bits 5-6 of the event number select the
     * register (4 bytes apart), the low 5 bits select the bit. */
    int offset = (event & 0x60) >> 3;
    uint32_t mask = 1 << (event & 0x1F);
    uint32_t remaining = MMIO_READ( ASIC, PIRQ0 + offset ) & ~mask;
    MMIO_WRITE( ASIC, PIRQ0 + offset, remaining );

    if( remaining == 0 && event >= 32 ) {
        /* Group is now empty - clear its cascade bit in PIRQ0 */
        uint32_t cascade = (event >= 64) ? 0x80000000 : 0x40000000;
        MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & ~cascade );
    }

    asic_check_cleared_events();
}
/**
 * Setup the timers for the 3 FIFO status bits following a write through the G2
 * bus from the SH4 side. The timing is roughly as follows: (times are
 * approximate based on software readings - I wouldn't take this as gospel but
 * it seems to be enough to fool most programs).
 *   0ns: Bit 5 (Input fifo?) goes high immediately on the write
 *  40ns: Bit 5 goes low and bit 4 goes high
 * 120ns: Bit 4 goes low, bit 0 goes high
 * 240ns: Bit 0 goes low.
 *
 * Additional writes while the FIFO is in operation extend the time that the
 * bits remain high as one might expect, without altering the time at which
 * they initially go high.
 */
void asic_g2_write_word()
{
    /* Bit 5 off-timer: arm relative to the current slice time if expired,
     * otherwise extend the existing deadline */
    if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    } else {
        g2_state.bit5_off_timer += G2_BIT5_TICKS;
    }

    /* Bit 4 turns on when bit 5 turns off (hence G2_BIT5_TICKS); the
     * on-time is never extended by later writes, only the off-time */
    if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    }

    if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
    } else {
        g2_state.bit4_off_timer += G2_BIT4_TICKS;
    }

    /* Bit 0 follows the same pattern as bit 4 */
    if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
    }

    if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
    } else {
        g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
    }

    /* Bit 5 goes high immediately on the write */
    MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
}
/**
 * Update the control register (TCRn) for the given timer: manage the
 * underflow flag / interrupt, recompute the tick period from the clock
 * select bits, and reschedule the timer if it is running.
 */
void TMU_set_timer_control( int timer, int tcr )
{
    uint32_t period = 1;
    uint32_t oldtcr = MMIO_READ( TMU, TCR0 + (12*timer) );

    if( (oldtcr & TCR_UNF) == 0 ) {
        /* Underflow flag is clear and cannot be set by a register write -
         * strip it from the incoming value */
        tcr = tcr & (~TCR_UNF);
    } else {
        /* Flag was set: raise or clear the interrupt as the combined
         * flag+enable (IRQ-active) state changes */
        if( ((oldtcr & TCR_UNIE) == 0) &&
                (tcr & TCR_IRQ_ACTIVE) == TCR_IRQ_ACTIVE ) {
            intc_raise_interrupt( INT_TMU_TUNI0 + timer );
        } else if( (oldtcr & TCR_UNIE) != 0 &&
                (tcr & TCR_IRQ_ACTIVE) != TCR_IRQ_ACTIVE ) {
            intc_clear_interrupt( INT_TMU_TUNI0 + timer );
        }
    }

    /* Low 3 bits select the input clock (peripheral clock prescales,
     * RTC output, or external) */
    switch( tcr & 0x07 ) {
    case 0: period = sh4_peripheral_period << 2 ; break;   /* P/4 */
    case 1: period = sh4_peripheral_period << 4; break;    /* P/16 */
    case 2: period = sh4_peripheral_period << 6; break;    /* P/64 */
    case 3: period = sh4_peripheral_period << 8; break;    /* P/256 */
    case 4: period = sh4_peripheral_period << 10; break;   /* P/1024 */
    case 5:
        /* Illegal value. */
        ERROR( "TMU %d period set to illegal value (5)", timer );
        period = sh4_peripheral_period << 12; /* for something to do */
        break;
    case 6:
        period = rtc_output_period;
        break;
    case 7:
        /* External clock... Hrm? */
        period = sh4_peripheral_period; /* I dunno... */
        break;
    }

    if( period != TMU_timers[timer].timer_period ) {
        if( TMU_IS_RUNNING(timer) ) {
            /* If we're changing clock speed while counting, sync up and reschedule */
            TMU_count(timer, sh4r.slice_cycle);
            TMU_timers[timer].timer_period = period;
            TMU_schedule_timer(timer);
        } else {
            TMU_timers[timer].timer_period = period;
        }
    }
    MMIO_WRITE( TMU, TCR0 + (12*timer), tcr );
}
MMIO_REGION_WRITE_FN( CPG, reg, val ) { uint32_t div; uint32_t primary_clock = sh4_input_freq; reg &= 0xFFF; switch( reg ) { case FRQCR: /* Frequency control */ if( (val & FRQCR_PLL1EN) == 0 ) primary_clock /= 6; div = ifc_divider[(val >> 6) & 0x07]; sh4_cpu_freq = primary_clock / div; sh4_cpu_period = sh4_cpu_multiplier * div / sh4_input_freq; div = ifc_divider[(val >> 3) & 0x07]; sh4_bus_freq = primary_clock / div; sh4_bus_period = 1000 * div / sh4_input_freq; div = pfc_divider[val & 0x07]; sh4_peripheral_freq = primary_clock / div; sh4_peripheral_period = 1000 * div / sh4_input_freq; /* Update everything that depends on the peripheral frequency */ SCIF_update_line_speed(); break; case WTCSR: /* Watchdog timer */ break; } MMIO_WRITE( CPG, reg, val ); }
/**
 * Bring the G2 FIFO status bits in G2STATUS up to date for the given
 * time (nanoseconds). Each expired timer flips its bit and is disarmed
 * by setting it to -1.
 * @return the updated G2STATUS value.
 */
static uint32_t g2_update_fifo_status( uint32_t nanos )
{
    uint32_t status = MMIO_READ( ASIC, G2STATUS );

    if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
        status &= ~0x20;
        g2_state.bit5_off_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
        status |= 0x10;
        g2_state.bit4_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
        status &= ~0x10;
        g2_state.bit4_off_timer = -1;
    }
    if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
        status |= 0x01;
        g2_state.bit0_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
        status &= ~0x01;
        g2_state.bit0_off_timer = -1;
    }

    MMIO_WRITE( ASIC, G2STATUS, status );
    return status;
}
void asic_ide_dma_transfer( ) { if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) { if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) { MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 ); uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 ); uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ ); // int dir = MMIO_READ( EXTDMA, IDEDMADIR ); uint32_t xfer = ide_read_data_dma( addr, length ); MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer ); MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 ); asic_event( EVENT_IDE_DMA ); } else { /* 0 */ MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 ); } } }
/**
 * Execute a SORT-DMA transfer: walk the link table at SORTDMATBL, follow
 * each entry to a polygon context in the data area and feed it to the TA,
 * until an end-of-list marker (2) is reached. Entry value 1 advances to
 * the next table slot. The number of table slots consumed is written back
 * to SORTDMACNT and the control register is cleared.
 */
void sort_dma_transfer( )
{
    sh4addr_t table_addr = MMIO_READ( ASIC, SORTDMATBL );
    sh4addr_t data_addr = MMIO_READ( ASIC, SORTDMADATA );
    int table_size = MMIO_READ( ASIC, SORTDMATSIZ );  /* nonzero: 32-bit table entries, 0: 16-bit */
    int addr_shift = MMIO_READ( ASIC, SORTDMAASIZ ) ? 5 : 0;  /* entries in 32-byte units or bytes */
    int count = 1;

    /* table16 aliases the same region when 16-bit entries are selected */
    uint32_t *table32 = (uint32_t *)mem_get_region( table_addr );
    uint16_t *table16 = (uint16_t *)table32;
    uint32_t next = table_size ? (*table32++) : (uint32_t)(*table16++);
    while(1) {
        next &= 0x07FFFFFF;
        if( next == 1 ) {
            /* 1 = advance to the next table entry */
            next = table_size ? (*table32++) : (uint32_t)(*table16++);
            count++;
            continue;
        } else if( next == 2 ) {
            /* 2 = end of list */
            asic_event( EVENT_SORT_DMA );
            break;
        }
        uint32_t *data = (uint32_t *)mem_get_region(data_addr + (next<<addr_shift));
        if( data == NULL ) {
            break;
        }

        uint32_t *poly = pvr2_ta_find_polygon_context(data, 128);
        if( poly == NULL ) {
            asic_event( EVENT_SORT_DMA_ERR );
            break;
        }
        /* Size is in 32-byte units (see size<<5 below); 0 encodes the
         * maximum of 0x100 */
        uint32_t size = poly[6] & 0xFF;
        if( size == 0 ) {
            size = 0x100;
        }
        next = poly[7];  /* link word to the next polygon */
        pvr2_ta_write( (unsigned char *)data, size<<5 );
    }

    MMIO_WRITE( ASIC, SORTDMACNT, count );
    MMIO_WRITE( ASIC, SORTDMACTL, 0 );
}
/**
 * Update the Maple DMA state register. A 0->1 transition of the low bit
 * starts a new transfer from the buffer at MAPLE_DMA; a 1->0 transition
 * cancels the transfer in flight.
 */
void maple_set_dma_state( uint32_t val )
{
    gboolean active = (MMIO_READ( ASIC, MAPLE_STATE ) & 1) != 0;
    gboolean requested = (val & 1) != 0;

    if( requested && !active ) {
        /* Initiate new DMA transfer (address is 32-byte aligned) */
        maple_handle_buffer( MMIO_READ( ASIC, MAPLE_DMA ) & 0x1FFFFFE0 );
    } else if( active && !requested ) {
        /* Cancel current DMA transfer */
        event_cancel( EVENT_MAPLE_DMA );
    }

    MMIO_WRITE( ASIC, MAPLE_STATE, val );
}
void pvr_dma2_transfer() { if( MMIO_READ( EXTDMA, PVRDMA2CTL2 ) == 1 ) { if( MMIO_READ( EXTDMA, PVRDMA2CTL1 ) == 1 ) { sh4addr_t extaddr = MMIO_READ( EXTDMA, PVRDMA2EXT ); sh4addr_t sh4addr = MMIO_READ( EXTDMA, PVRDMA2SH4 ); int dir = MMIO_READ( EXTDMA, PVRDMA2DIR ); uint32_t length = MMIO_READ( EXTDMA, PVRDMA2SIZ ); unsigned char buf[length]; if( dir == 0 ) { /* SH4 to PVR */ mem_copy_from_sh4( buf, sh4addr, length ); mem_copy_to_sh4( extaddr, buf, length ); } else { /* PVR to SH4 */ mem_copy_from_sh4( buf, extaddr, length ); mem_copy_to_sh4( sh4addr, buf, length ); } MMIO_WRITE( EXTDMA, PVRDMA2CTL2, 0 ); asic_event( EVENT_PVR_DMA2 ); } } }
/**
 * Write handler for the EXTDMA register region: IDE task-file and DMA
 * registers, the four G2 DMA channels, and the PVR DMA2 channel. Writes
 * to IDE registers are ignored while the IDE interface is disabled.
 * Address/size/control registers are masked to their valid bits before
 * being stored; writing a DMA control register kicks the corresponding
 * transfer routine (which checks whether the transfer should run).
 */
MMIO_REGION_WRITE_FN( EXTDMA, reg, val )
{
    reg &= 0xFFF;
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return; /* disabled */
    }
    switch( reg ) {
    case IDEALTSTATUS: /* Device control */
        ide_write_control( val );
        break;
    case IDEDATA:
        ide_write_data_pio( val );
        break;
    /* Task-file registers are only writable when the device currently
     * accepts register writes */
    case IDEFEAT:
        if( ide_can_write_regs() )
            idereg.feature = (uint8_t)val;
        break;
    case IDECOUNT:
        if( ide_can_write_regs() )
            idereg.count = (uint8_t)val;
        break;
    case IDELBA0:
        if( ide_can_write_regs() )
            idereg.lba0 = (uint8_t)val;
        break;
    case IDELBA1:
        if( ide_can_write_regs() )
            idereg.lba1 = (uint8_t)val;
        break;
    case IDELBA2:
        if( ide_can_write_regs() )
            idereg.lba2 = (uint8_t)val;
        break;
    case IDEDEV:
        if( ide_can_write_regs() )
            idereg.device = (uint8_t)val;
        break;
    case IDECMD:
        /* NOP is accepted even when other register writes are blocked */
        if( ide_can_write_regs() || val == IDE_CMD_NOP ) {
            ide_write_command( (uint8_t)val );
        }
        break;
    case IDEDMASH4:
        MMIO_WRITE( EXTDMA, reg, val & 0x1FFFFFE0 ); /* 32-byte aligned address */
        break;
    case IDEDMASIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x01FFFFFE );
        break;
    case IDEDMADIR:
        MMIO_WRITE( EXTDMA, reg, val & 1 );
        break;
    case IDEDMACTL1:
    case IDEDMACTL2:
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        asic_ide_dma_transfer( );
        break;
    case IDEACTIVATE:
        if( val == 0x001FFFFF ) {
            idereg.interface_enabled = TRUE;
            /* Conventional wisdom says that this is necessary but not
             * sufficient to enable the IDE interface.
             */
        } else if( val == 0x000042FE ) {
            idereg.interface_enabled = FALSE;
        }
        break;
    /* G2 DMA address/size registers (all four channels) */
    case G2DMA0EXT: case G2DMA0SH4: case G2DMA0SIZ:
    case G2DMA1EXT: case G2DMA1SH4: case G2DMA1SIZ:
    case G2DMA2EXT: case G2DMA2SH4: case G2DMA2SIZ:
    case G2DMA3EXT: case G2DMA3SH4: case G2DMA3SIZ:
        MMIO_WRITE( EXTDMA, reg, val & 0x9FFFFFE0 );
        break;
    case G2DMA0MOD: case G2DMA1MOD: case G2DMA2MOD: case G2DMA3MOD:
        MMIO_WRITE( EXTDMA, reg, val & 0x07 );
        break;
    case G2DMA0DIR: case G2DMA1DIR: case G2DMA2DIR: case G2DMA3DIR:
        MMIO_WRITE( EXTDMA, reg, val & 0x01 );
        break;
    /* Writing either control register of a G2 channel kicks its transfer */
    case G2DMA0CTL1:
    case G2DMA0CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 0 );
        break;
    case G2DMA0STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA1CTL1:
    case G2DMA1CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1);
        g2_dma_transfer( 1 );
        break;
    case G2DMA1STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA2CTL1:
    case G2DMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 2 );
        break;
    case G2DMA2STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case G2DMA3CTL1:
    case G2DMA3CTL2:
        MMIO_WRITE( EXTDMA, reg, val &1 );
        g2_dma_transfer( 3 );
        break;
    case G2DMA3STOP:
        MMIO_WRITE( EXTDMA, reg, val & 0x37 );
        break;
    case PVRDMA2CTL1:
    case PVRDMA2CTL2:
        MMIO_WRITE( EXTDMA, reg, val & 1 );
        pvr_dma2_transfer();
        break;
    default:
        MMIO_WRITE( EXTDMA, reg, val );
    }
}
/**
 * Transmit one character by writing it to the UART transmit holding
 * register (THR).
 */
static void mmio_uart_td0( int c )
{
    MMIO_WRITE( THR, c );
}
/**
 * Write handler for the RTC register region - store the value directly,
 * with the register offset masked to the 4K region.
 */
MMIO_REGION_WRITE_FN( RTC, reg, val )
{
    MMIO_WRITE( RTC, reg &0xFFF, val );
}
/**
 * Write handler for the ASIC register region. Interrupt-pending registers
 * (PIRQn) are write-1-to-clear; the mask registers trigger a recompute of
 * the event masks; the DMA registers mask the value to their valid bits,
 * and writing 1 to a DMA control register starts the transfer.
 */
MMIO_REGION_WRITE_FN( ASIC, reg, val )
{
    reg &= 0xFFF;
    switch( reg ) {
    case PIRQ1:
        break; /* Treat this as read-only for the moment */
    case PIRQ0:
        /* Write-1-to-clear; top two bits (cascades) aren't clearable */
        val = val & 0x3FFFFFFF;
        MMIO_WRITE( ASIC, reg, MMIO_READ(ASIC, reg)&~val );
        asic_check_cleared_events();
        break;
    case PIRQ2:
        /* Clear any events (write-1-to-clear) */
        val = MMIO_READ(ASIC, reg)&(~val);
        MMIO_WRITE( ASIC, reg, val );
        if( val == 0 ) { /* all clear - clear the cascade bit in PIRQ0 */
            MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & 0x7FFFFFFF );
        }
        asic_check_cleared_events();
        break;
    case IRQA0: case IRQA1: case IRQA2:
    case IRQB0: case IRQB1: case IRQB2:
    case IRQC0: case IRQC1: case IRQC2:
        /* Interrupt mask registers */
        MMIO_WRITE( ASIC, reg, val );
        asic_event_mask_changed();
        break;
    case SYSRESET:
        if( val == 0x7611 ) {  /* magic reset value */
            dreamcast_reset();
        } else {
            WARN( "Unknown value %08X written to SYSRESET port", val );
        }
        break;
    case MAPLE_STATE:
        maple_set_dma_state( val );
        break;
    case PVRDMADEST:
        /* 32-byte aligned, forced into the 0x10000000 region */
        MMIO_WRITE( ASIC, reg, (val & 0x03FFFFE0) | 0x10000000 );
        break;
    case PVRDMACNT:
        MMIO_WRITE( ASIC, reg, val & 0x00FFFFE0 );
        break;
    case PVRDMACTL: /* Initiate PVR DMA transfer */
        val = val & 0x01;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            pvr_dma_transfer();
        }
        break;
    case SORTDMATBL:
    case SORTDMADATA:
        /* 32-byte aligned, forced into the 0x08000000 region */
        MMIO_WRITE( ASIC, reg, (val & 0x0FFFFFE0) | 0x08000000 );
        break;
    case SORTDMATSIZ:
    case SORTDMAASIZ:
        MMIO_WRITE( ASIC, reg, (val & 1) );
        break;
    case SORTDMACTL:
        /* Initiate SORT DMA transfer */
        val = val & 1;
        MMIO_WRITE( ASIC, reg, val );
        if( val == 1 ) {
            sort_dma_transfer();
        }
        break;
    case MAPLE_DMA:
        MMIO_WRITE( ASIC, reg, val );
        break;
    default:
        MMIO_WRITE( ASIC, reg, val );
    }
}