/**
 * Execute a PVR DMA transfer: pull data from SH4 DMAC channel 2 in chunks
 * of up to 8KB and write it through to the PVR2, then clear the DMA control
 * registers and raise the completion event.
 */
void pvr_dma_transfer( )
{
    /* Destination is masked to the external address space, 32-byte aligned */
    sh4addr_t destaddr = MMIO_READ( ASIC, PVRDMADEST) &0x1FFFFFE0;
    uint32_t count = MMIO_READ( ASIC, PVRDMACNT );
    unsigned char data[8192];
    uint32_t rcount;

    while( count ) {
        uint32_t chunksize = (count < 8192) ? count : 8192;
        rcount = DMAC_get_buffer( 2, data, chunksize );
        pvr2_dma_write( destaddr, data, rcount );
        destaddr += rcount;
        count -= rcount;
        if( rcount != chunksize ) {
            /* DMAC delivered less than requested - abort the transfer */
            WARN( "PVR received %08X bytes from DMA, expected %08X", rcount, chunksize );
            break;
        }
    }

    /* Transfer complete (or aborted): clear control/count registers */
    MMIO_WRITE( ASIC, PVRDMACTL, 0 );
    MMIO_WRITE( ASIC, PVRDMACNT, 0 );
    if( destaddr & 0x01000000 ) { /* Write to texture RAM */
        /* Destination register tracks progress only for texture-RAM writes */
        MMIO_WRITE( ASIC, PVRDMADEST, destaddr );
    }
    asic_event( EVENT_PVR_DMA );
}
/* Make sure there are more than num dwords left in the AGP queue.
 *
 * Fast path: the cached space counter already covers the request.
 * Otherwise, wrap the write pointer back to the start of the command buffer
 * if the request would run off the end, then spin re-reading the engine's
 * read pointer until enough of the buffer has been consumed.
 */
static void sisMakeRoomAGP( sisContextPtr smesa, GLint num )
{
    int size = num * 4;   /* request size in bytes (num is in dwords) */

    if (size <= AGP_SpaceLeft) {
        AGP_SpaceLeft -= size;
        return;
    }

    /* Wrapping */
    if (AGP_WritePtr + num >
        (GLfloat *)(smesa->AGPCmdBufBase + smesa->AGPCmdBufSize))
    {
        /* Flush what we have, then restart at the base of the buffer */
        sisFireVertsAGP( smesa );
        AGP_WritePtr = (GLfloat *)smesa->AGPCmdBufBase;
        AGP_StartPtr = AGP_WritePtr;
        sisUpdateAGP( smesa );
        WaitEngIdle( smesa );   /* XXX Why is this necessary? */
    }

    if (size > AGP_SpaceLeft) {
        /* Update the cached engine read pointer */
        AGP_ReadPtr = (GLfloat *)((long)MMIO_READ(REG_3D_AGPCmBase) -
            (long)smesa->AGPCmdBufAddr + (long)smesa->AGPCmdBufBase);
        sisUpdateAGP( smesa );
        while (size > AGP_SpaceLeft) {
            /* Spin until space is available. */
            AGP_ReadPtr = (GLfloat *)((long)MMIO_READ(REG_3D_AGPCmBase) -
                (long)smesa->AGPCmdBufAddr + (long)smesa->AGPCmdBufBase);
            sisUpdateAGP( smesa );
        }
    }
    AGP_SpaceLeft -= size;
}
/**
 * Count the specified timer for a given number of nanoseconds.
 *
 * Advances the timer from its last-counted point up to @nanosecs (absolute
 * time within the current slice), carrying sub-tick nanoseconds over in
 * timer_remainder. On underflow, sets TCR_UNF, reloads from TCOR, and raises
 * the timer's interrupt if enabled.
 *
 * Returns the new TCNT value.
 */
uint32_t TMU_count( int timer, uint32_t nanosecs )
{
    /* Nanoseconds to account for this call: elapsed time since the last
     * count, plus the remainder carried over from that call. */
    uint32_t run_ns = nanosecs + TMU_timers[timer].timer_remainder -
        TMU_timers[timer].timer_run;
    TMU_timers[timer].timer_remainder =
        run_ns % TMU_timers[timer].timer_period;
    TMU_timers[timer].timer_run = nanosecs;
    uint32_t count = run_ns / TMU_timers[timer].timer_period;  /* whole ticks */
    uint32_t value = MMIO_READ( TMU, TCNT0 + 12*timer );
    uint32_t reset = MMIO_READ( TMU, TCOR0 + 12*timer );
    if( count > value ) {
        /* Counter passed through 0: flag underflow and reload from TCOR.
         * The modulo handles multiple wraps in a single (long) interval. */
        uint32_t tcr = MMIO_READ( TMU, TCR0 + 12*timer );
        tcr |= TCR_UNF;
        count -= value;
        value = reset - (count % reset) + 1;
        MMIO_WRITE( TMU, TCR0 + 12*timer, tcr );
        if( tcr & TCR_UNIE )
            intc_raise_interrupt( INT_TMU_TUNI0 + timer );
        MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
        TMU_schedule_timer(timer);
    } else {
        value -= count;
        MMIO_WRITE( TMU, TCNT0 + 12*timer, value );
    }
    return value;
}
MMIO_REGION_READ_FN( ASIC, reg ) { int32_t val; reg &= 0xFFF; switch( reg ) { case PIRQ0: case PIRQ1: case PIRQ2: case IRQA0: case IRQA1: case IRQA2: case IRQB0: case IRQB1: case IRQB2: case IRQC0: case IRQC1: case IRQC2: case MAPLE_STATE: val = MMIO_READ(ASIC, reg); return val; case G2STATUS: return g2_read_status(); default: val = MMIO_READ(ASIC, reg); return val; } }
/**
 * Dump the state of the given timer channel to stderr for debugging:
 * run state, current count / reload value, tick period and the internal
 * run/remainder bookkeeping.
 *
 * Fix: the original format string used %d for unsigned arguments (the
 * timer index and timer_period), which is undefined behavior per C11
 * 7.21.6.1; use %u for unsigned values.
 */
void TMU_dump(unsigned timer)
{
    fprintf(stderr, "Timer %u: %s %08x/%08x %uns run: %08X - %08X\n",
            timer,
            TMU_IS_RUNNING(timer) ? "running" : "stopped",
            MMIO_READ(TMU, TCNT0 + (timer*12)),
            MMIO_READ(TMU, TCOR0 + (timer*12)),
            TMU_timers[timer].timer_period,
            TMU_timers[timer].timer_run,
            TMU_timers[timer].timer_remainder );
}
/* Probe for a UART by writing two test patterns to the scratch register
 * and reading each one back. Returns 1 if the device responded, 0 if not. */
static int mmio_uart_present( void )
{
    static const unsigned char patterns[] = { 0x5a, 0xa5 };
    int i;

    for (i = 0; i < 2; i++) {
        MMIO_WRITE( SCR, patterns[i] );
        if (MMIO_READ(SCR) != patterns[i])
            return 0;
    }
    return 1;
}
/* Spin until the 3D engine reports idle. The 6326 and newer parts expose
 * the status through different registers and with opposite bit sense. */
void WaitEngIdle (sisContextPtr smesa)
{
    GLuint status;

    if (smesa->is6326) {
        /* XXX right reg? */
        do {
            status = MMIO_READ(REG_3D_EngineFire);
        } while (status & ENG_3DIDLEQE);
    } else {
        do {
            status = MMIO_READ(REG_CommandQueue);
        } while ((status & SiS_EngIdle) != SiS_EngIdle);
    }
}
/* Spin until the 2D engine reports idle. On the 6326 the BitBlt command
 * register carries a busy flag; on newer chips we poll the command queue
 * status for the 2D-idle bits. */
void Wait2DEngIdle (sisContextPtr smesa)
{
    GLuint status;

    if (smesa->is6326) {
        do {
            status = MMIO_READ(REG_6326_BitBlt_Cmd);
        } while (status & BLT_BUSY);
    } else {
        do {
            status = MMIO_READ(REG_CommandQueue);
        } while ((status & SiS_EngIdle2d) != SiS_EngIdle2d);
    }
}
/**
 * Clear the status bit for the given ASIC event. If that empties the whole
 * status word for a second/third-word event, the corresponding cascade bit
 * in word 0 is cleared too. Finally re-evaluates which interrupt lines can
 * be dropped.
 */
void asic_clear_event( int event )
{
    int offset = (event & 0x60) >> 3;     /* status word byte offset: 0/4/8 */
    uint32_t bits = MMIO_READ( ASIC, PIRQ0 + offset );

    bits &= ~(1 << (event & 0x1F));
    MMIO_WRITE( ASIC, PIRQ0 + offset, bits );

    if( bits == 0 && event >= 32 ) {
        /* Word fully cleared - drop its cascade bit in the first word */
        uint32_t cascade_mask = (event >= 64) ? 0x7FFFFFFF : 0xBFFFFFFF;
        MMIO_WRITE( ASIC, PIRQ0, MMIO_READ( ASIC, PIRQ0 ) & cascade_mask );
    }

    asic_check_cleared_events();
}
/* Return nonzero when a valid received byte is waiting. A byte flagged
 * with a framing/parity/overrun error is read and discarded, and 0 is
 * returned so the caller does not consume garbage. */
static int mmio_uart_rr0( void )
{
    const unsigned char lsr = MMIO_READ( LSR );
    const unsigned char err_bits = UART_LSR_FE | UART_LSR_PE | UART_LSR_OE;

    if ((lsr & err_bits) != 0) {
        (void)MMIO_READ( RBR );   /* discard the bad byte */
        return 0;
    }
    return lsr & UART_LSR_DR;
}
/**
 * Update the Maple DMA state register. A 0->1 transition of bit 0 kicks off
 * a new DMA transfer from the configured buffer address; a 1->0 transition
 * cancels an in-flight transfer. The raw value is stored back either way.
 */
void maple_set_dma_state( uint32_t val )
{
    gboolean was_active = (MMIO_READ( ASIC, MAPLE_STATE ) & 1) != 0;
    gboolean now_active = (val & 1) != 0;

    if( now_active && !was_active ) {
        /* Initiate new DMA transfer (address is 32-byte aligned) */
        uint32_t maple_addr = MMIO_READ( ASIC, MAPLE_DMA ) & 0x1FFFFFE0;
        maple_handle_buffer( maple_addr );
    } else if( was_active && !now_active ) {
        /* Cancel current DMA transfer */
        event_cancel( EVENT_MAPLE_DMA );
    }

    MMIO_WRITE( ASIC, MAPLE_STATE, val );
}
/**
 * Read handler for the EXTDMA (external DMA / IDE) register region.
 * IDE registers read as all-ones while the interface is disabled; the IDE
 * task-file registers are served from the cached idereg state, everything
 * else falls through to the backing MMIO store.
 */
MMIO_REGION_READ_FN( EXTDMA, reg )
{
    uint32_t val;
    reg &= 0xFFF;
    if( !idereg.interface_enabled && IS_IDE_REGISTER(reg) ) {
        return 0xFFFFFFFF; /* disabled */
    }
    switch( reg ) {
    case IDEALTSTATUS:
        /* Alternate status: returns the cached status without the side
         * effects of a primary status read */
        val = idereg.status;
        return val;
    case IDEDATA: return ide_read_data_pio( );
    case IDEFEAT: return idereg.error;  /* feature reg reads back as error */
    case IDECOUNT:return idereg.count;
    case IDELBA0: return ide_get_drive_status();
    case IDELBA1: return idereg.lba1;
    case IDELBA2: return idereg.lba2;
    case IDEDEV: return idereg.device;
    case IDECMD:
        /* Primary status read - ide_read_status() presumably performs any
         * interrupt-acknowledge side effects; see the IDE implementation */
        val = ide_read_status();
        return val;
    default:
        val = MMIO_READ( EXTDMA, reg );
        return val;
    }
}
/**
 * TNL pipeline entry point: take the hardware lock, flush GL state changes
 * to the chip, run the software TNL pipeline, and flush any emitted
 * vertices (via AGP when command mode is enabled, otherwise via MMIO).
 */
static void sisRunPipeline( GLcontext *ctx )
{
    sisContextPtr smesa = SIS_CONTEXT( ctx );

    LOCK_HARDWARE();
    sisUpdateHWState( ctx );

    if (smesa->AGPCmdModeEnabled) {
        /* Re-sync the cached AGP buffer pointers: the write pointer from the
         * shared "next" index, the read pointer from the engine's current
         * AGP command base register. */
        AGP_WritePtr = (GLfloat *)smesa->AGPCmdBufBase + *smesa->pAGPCmdBufNext;
        AGP_StartPtr = AGP_WritePtr;
        AGP_ReadPtr = (GLfloat *)((long)MMIO_READ(REG_3D_AGPCmBase) -
            (long)smesa->AGPCmdBufAddr + (long)smesa->AGPCmdBufBase);
        sisUpdateAGP( smesa );
    }

    if (!smesa->Fallback && smesa->NewGLState) {
        /* Revalidate the vertex/render function pointers for the new state */
        if (smesa->NewGLState & _SIS_NEW_VERTEX_STATE)
            sisChooseVertexState( ctx );
        if (smesa->NewGLState & (_SIS_NEW_RENDER_STATE | _NEW_TEXTURE))
            sisChooseRenderState( ctx );
        smesa->NewGLState = 0;
    }

    _tnl_run_pipeline( ctx );

    if (smesa->AGPCmdModeEnabled)
        sisFireVertsAGP( smesa );
    else
        mEndPrimitive();
    UNLOCK_HARDWARE();
}
/**
 * Setup the timers for the 3 FIFO status bits following a write through the G2
 * bus from the SH4 side. The timing is roughly as follows: (times are
 * approximate based on software readings - I wouldn't take this as gospel but
 * it seems to be enough to fool most programs).
 * 0ns: Bit 5 (Input fifo?) goes high immediately on the write
 * 40ns: Bit 5 goes low and bit 4 goes high
 * 120ns: Bit 4 goes low, bit 0 goes high
 * 240ns: Bit 0 goes low.
 *
 * Additional writes while the FIFO is in operation extend the time that the
 * bits remain high as one might expect, without altering the time at which
 * they initially go high.
 */
void asic_g2_write_word()
{
    /* For each timer: if it has already expired (or was never armed), arm it
     * relative to now; otherwise extend the existing deadline. The signed
     * comparison treats the -1 "expired" sentinel as in the past. */
    if( g2_state.bit5_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit5_off_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    } else {
        g2_state.bit5_off_timer += G2_BIT5_TICKS;
    }

    if( g2_state.bit4_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_on_timer = sh4r.slice_cycle + G2_BIT5_TICKS;
    }

    /* Bit 4's off time is chained from its on time */
    if( g2_state.bit4_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit4_off_timer = g2_state.bit4_on_timer + G2_BIT4_TICKS;
    } else {
        g2_state.bit4_off_timer += G2_BIT4_TICKS;
    }

    if( g2_state.bit0_on_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_on_timer = sh4r.slice_cycle + G2_BIT0_ON_TICKS;
    }

    /* Bit 0's off time is chained from its on time */
    if( g2_state.bit0_off_timer < (int32_t)sh4r.slice_cycle ) {
        g2_state.bit0_off_timer = g2_state.bit0_on_timer + G2_BIT0_OFF_TICKS;
    } else {
        g2_state.bit0_off_timer += G2_BIT0_OFF_TICKS;
    }

    /* Bit 5 goes high immediately on the write */
    MMIO_WRITE( ASIC, G2STATUS, MMIO_READ(ASIC, G2STATUS) | 0x20 );
}
/**
 * Bring the G2 FIFO status bits (bits 5, 4 and 0 of G2STATUS) up to date
 * for the given time, firing any timers that have expired, and return the
 * resulting status value. Expired timers are reset to -1 (all-ones), which
 * never compares <= nanos again under the unsigned cast below.
 */
static uint32_t g2_update_fifo_status( uint32_t nanos )
{
    uint32_t val = MMIO_READ( ASIC, G2STATUS );

    if( ((uint32_t)g2_state.bit5_off_timer) <= nanos ) {
        val = val & (~0x20);                 /* bit 5 goes low */
        g2_state.bit5_off_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_on_timer) <= nanos ) {
        val = val | 0x10;                    /* bit 4 goes high */
        g2_state.bit4_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit4_off_timer) <= nanos ) {
        val = val & (~0x10);                 /* bit 4 goes low */
        g2_state.bit4_off_timer = -1;
    }
    if( ((uint32_t)g2_state.bit0_on_timer) <= nanos ) {
        val = val | 0x01;                    /* bit 0 goes high */
        g2_state.bit0_on_timer = -1;
    }
    if( ((uint32_t)g2_state.bit0_off_timer) <= nanos ) {
        val = val & (~0x01);                 /* bit 0 goes low */
        g2_state.bit0_off_timer = -1;
    }

    MMIO_WRITE( ASIC, G2STATUS, val );
    return val;
}
/**
 * Initialize the UART for polled 8N1 operation: interrupts and FIFOs off,
 * modem control lines asserted, input buffer drained. The write order
 * matters on real hardware - do not reorder.
 */
static void mmio_uart_init( void )
{
    /* Disable hardware interrupts */
    MMIO_WRITE( MCR, 0 );
    MMIO_WRITE( IER, 0 );

    /* Disable FIFO's for 16550 devices */
    MMIO_WRITE( FCR, 0 );

    /* Set for 8-bit, no parity, DLAB bit cleared */
    MMIO_WRITE( LCR, UART_LCR_8BITS );

    /* Leave baud rate as set by firmware unless serialbaud boot-arg overrides */
    if (uart_baud_rate != DEFAULT_UART_BAUD_RATE) {
        gPESF->uart_set_baud_rate ( 0, uart_baud_rate );
    }

    /* Assert DTR# and RTS# lines (OUT2?) */
    MMIO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS );

    /* Clear any garbage in the input buffer */
    MMIO_READ( RBR );

    uart_initted = 1;
}
/**
 * Write the timer control register (TCR) for the given timer: handles the
 * write-to-clear semantics of the underflow flag, raises/clears the timer
 * interrupt on UNF+UNIE edges, and recomputes the tick period from the
 * clock-select field, rescheduling a running timer if the period changed.
 */
void TMU_set_timer_control( int timer, int tcr )
{
    uint32_t period = 1;
    uint32_t oldtcr = MMIO_READ( TMU, TCR0 + (12*timer) );

    if( (oldtcr & TCR_UNF) == 0 ) {
        /* UNF can only be cleared by software, never set - mask it off if
         * it wasn't already pending */
        tcr = tcr & (~TCR_UNF);
    } else {
        /* UNF pending: enabling UNIE with UNF set raises the interrupt now;
         * clearing the active combination drops it */
        if( ((oldtcr & TCR_UNIE) == 0) &&
            (tcr & TCR_IRQ_ACTIVE) == TCR_IRQ_ACTIVE ) {
            intc_raise_interrupt( INT_TMU_TUNI0 + timer );
        } else if( (oldtcr & TCR_UNIE) != 0 &&
                   (tcr & TCR_IRQ_ACTIVE) != TCR_IRQ_ACTIVE ) {
            intc_clear_interrupt( INT_TMU_TUNI0 + timer );
        }
    }

    /* Clock select: peripheral clock divided by 4/16/64/256/1024, RTC
     * output, or external clock */
    switch( tcr & 0x07 ) {
    case 0: period = sh4_peripheral_period << 2 ; break;
    case 1: period = sh4_peripheral_period << 4; break;
    case 2: period = sh4_peripheral_period << 6; break;
    case 3: period = sh4_peripheral_period << 8; break;
    case 4: period = sh4_peripheral_period << 10; break;
    case 5: /* Illegal value. */
        ERROR( "TMU %d period set to illegal value (5)", timer );
        period = sh4_peripheral_period << 12; /* for something to do */
        break;
    case 6: period = rtc_output_period; break;
    case 7: /* External clock... Hrm? */
        period = sh4_peripheral_period; /* I dunno... */
        break;
    }

    if( period != TMU_timers[timer].timer_period ) {
        if( TMU_IS_RUNNING(timer) ) {
            /* If we're changing clock speed while counting, sync up and reschedule */
            TMU_count(timer, sh4r.slice_cycle);
            TMU_timers[timer].timer_period = period;
            TMU_schedule_timer(timer);
        } else {
            TMU_timers[timer].timer_period = period;
        }
    }

    MMIO_WRITE( TMU, TCR0 + (12*timer), tcr );
}
void asic_check_cleared_events( ) { int i, setA = 0, setB = 0, setC = 0; uint32_t bits; for( i=0; i<12; i+=4 ) { bits = MMIO_READ( ASIC, PIRQ0 + i ); setA |= (bits & MMIO_READ(ASIC, IRQA0 + i )); setB |= (bits & MMIO_READ(ASIC, IRQB0 + i )); setC |= (bits & MMIO_READ(ASIC, IRQC0 + i )); } if( setA == 0 ) intc_clear_interrupt( INT_IRQ13 ); if( setB == 0 ) intc_clear_interrupt( INT_IRQ11 ); if( setC == 0 ) intc_clear_interrupt( INT_IRQ9 ); }
/* To be called from mWait3DCmdQueue. Separate function for profiling
 * purposes, and speed doesn't matter because we're spinning anyway. */
void WaitingFor3dIdle(sisContextPtr smesa, int wLen)
{
    for (;;) {
        if (*(smesa->CurrentQueueLenPtr) >= wLen)
            break;
        /* Refresh the cached queue length from the hardware register */
        *(smesa->CurrentQueueLenPtr) =
            (MMIO_READ(REG_CommandQueue) & MASK_QueueLen) - 20;
    }
}
/**
 * Raise an ASIC event: set its bit in the appropriate PIRQ status word and
 * raise whichever of the three external interrupt lines the event is routed
 * to (per the IRQA/B/C enable registers). Events in the second or third
 * status word also raise the corresponding cascade event in word 0.
 */
void asic_event( int event )
{
    int offset = ((event&0x60)>>3);   /* status word byte offset: 0/4/8 */
    /* NOTE(review): this relies on MMIO_READ expanding to an lvalue, so the
     * |= updates the backing register store in place as well as yielding
     * the new value - confirm against the MMIO_READ macro definition. */
    int result = (MMIO_READ(ASIC, PIRQ0 + offset)) |= (1<<(event&0x1F));

    if( result & MMIO_READ(ASIC, IRQA0 + offset) )
        intc_raise_interrupt( INT_IRQ13 );
    if( result & MMIO_READ(ASIC, IRQB0 + offset) )
        intc_raise_interrupt( INT_IRQ11 );
    if( result & MMIO_READ(ASIC, IRQC0 + offset) )
        intc_raise_interrupt( INT_IRQ9 );

    if( event >= 64 ) { /* Third word */
        asic_event( EVENT_CASCADE2 );
    } else if( event >= 32 ) { /* Second word */
        asic_event( EVENT_CASCADE1 );
    }
}
/* Spin on the command queue status until the 2D engine's idle bits are
 * all set. (smesa is unused in this variant but kept for a uniform
 * signature with the other wait helpers.) */
void Wait2DEngIdle (sisContextPtr smesa)
{
    GLuint status;

    do {
        status = MMIO_READ(REG_CommandQueue);
    } while ((status & SiS_EngIdle2d) != SiS_EngIdle2d);
}
void asic_ide_dma_transfer( ) { if( MMIO_READ( EXTDMA, IDEDMACTL2 ) == 1 ) { if( MMIO_READ( EXTDMA, IDEDMACTL1 ) == 1 ) { MMIO_WRITE( EXTDMA, IDEDMATXSIZ, 0 ); uint32_t addr = MMIO_READ( EXTDMA, IDEDMASH4 ); uint32_t length = MMIO_READ( EXTDMA, IDEDMASIZ ); // int dir = MMIO_READ( EXTDMA, IDEDMADIR ); uint32_t xfer = ide_read_data_dma( addr, length ); MMIO_WRITE( EXTDMA, IDEDMATXSIZ, xfer ); MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 ); asic_event( EVENT_IDE_DMA ); } else { /* 0 */ MMIO_WRITE( EXTDMA, IDEDMACTL2, 0 ); } } }
/**
 * Execute a Sort-DMA transfer: walk the link table (32-bit or 16-bit
 * entries depending on SORTDMATSIZ), resolve each entry to a polygon
 * context in the data region, and feed the polygons to the TA. Entry value
 * 1 means "advance to the next table entry", 2 means "end of list".
 * Raises EVENT_SORT_DMA on completion, or EVENT_SORT_DMA_ERR when a
 * polygon context cannot be found.
 */
void sort_dma_transfer( )
{
    sh4addr_t table_addr = MMIO_READ( ASIC, SORTDMATBL );
    sh4addr_t data_addr = MMIO_READ( ASIC, SORTDMADATA );
    int table_size = MMIO_READ( ASIC, SORTDMATSIZ );   /* nonzero => 32-bit entries */
    int addr_shift = MMIO_READ( ASIC, SORTDMAASIZ ) ? 5 : 0;  /* entries in 32-byte units? */
    int count = 1;

    /* Only one of the two pointers is ever advanced, since table_size is
     * fixed for the whole transfer */
    uint32_t *table32 = (uint32_t *)mem_get_region( table_addr );
    uint16_t *table16 = (uint16_t *)table32;
    uint32_t next = table_size ? (*table32++) : (uint32_t)(*table16++);

    while(1) {
        next &= 0x07FFFFFF;
        if( next == 1 ) {
            /* End-of-chain marker: continue with the next table entry */
            next = table_size ? (*table32++) : (uint32_t)(*table16++);
            count++;
            continue;
        } else if( next == 2 ) {
            /* End-of-list marker: transfer complete */
            asic_event( EVENT_SORT_DMA );
            break;
        }
        uint32_t *data = (uint32_t *)mem_get_region(data_addr + (next<<addr_shift));
        if( data == NULL ) {
            break;
        }

        uint32_t *poly = pvr2_ta_find_polygon_context(data, 128);
        if( poly == NULL ) {
            asic_event( EVENT_SORT_DMA_ERR );
            break;
        }
        /* Size field is in 32-byte blocks; 0 encodes the maximum (0x100) */
        uint32_t size = poly[6] & 0xFF;
        if( size == 0 ) {
            size = 0x100;
        }
        next = poly[7];   /* link to the next polygon in the chain */
        pvr2_ta_write( (unsigned char *)data, size<<5 );
    }

    MMIO_WRITE( ASIC, SORTDMACNT, count );
    MMIO_WRITE( ASIC, SORTDMACTL, 0 );
}
void TMU_schedule_timer( int timer ) { uint64_t duration = ((uint64_t)((uint32_t)(MMIO_READ( TMU, TCNT0 + 12*timer )))+1) * (uint64_t)TMU_timers[timer].timer_period - TMU_timers[timer].timer_remainder; event_schedule_long( EVENT_TMU0+timer, (uint32_t)(duration / 1000000000), (uint32_t)(duration % 1000000000) ); // if( timer == 2 ) { // WARN( "Schedule timer %d: %lldns", timer, duration ); // TMU_dump(timer); // } }
void g2_dma_transfer( int channel ) { uint32_t offset = channel << 5; if( MMIO_READ( EXTDMA, G2DMA0CTL1 + offset ) == 1 ) { if( MMIO_READ( EXTDMA, G2DMA0CTL2 + offset ) == 1 ) { uint32_t extaddr = MMIO_READ( EXTDMA, G2DMA0EXT + offset ); uint32_t sh4addr = MMIO_READ( EXTDMA, G2DMA0SH4 + offset ); uint32_t length = MMIO_READ( EXTDMA, G2DMA0SIZ + offset ) & 0x1FFFFFFF; uint32_t dir = MMIO_READ( EXTDMA, G2DMA0DIR + offset ); // uint32_t mode = MMIO_READ( EXTDMA, G2DMA0MOD + offset ); unsigned char buf[length]; if( dir == 0 ) { /* SH4 to device */ mem_copy_from_sh4( buf, sh4addr, length ); mem_copy_to_sh4( extaddr, buf, length ); } else { /* Device to SH4 */ mem_copy_from_sh4( buf, extaddr, length ); mem_copy_to_sh4( sh4addr, buf, length ); } MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 ); asic_event( EVENT_G2_DMA0 + channel ); } else { MMIO_WRITE( EXTDMA, G2DMA0CTL2 + offset, 0 ); } } }
/**
 * Bring every running timer up to date at the given time, as indicated by
 * the start bits in TSTR (bit N = timer N running).
 */
void TMU_count_all( uint32_t nanosecs )
{
    int running = MMIO_READ( TMU, TSTR );
    int timer;

    for( timer = 0; timer < 3; timer++ ) {
        if( running & (1 << timer) ) {
            TMU_count( timer, nanosecs );
        }
    }
}
/**
 * Program the UART divisor latch for the requested baud rate (the unit
 * argument is ignored by this MMIO driver). A rate of 0 falls back to
 * 9600. The LCR is restored with DLAB cleared afterwards.
 *
 * Fix: clamp the computed divisor to a minimum of 1 - an absurdly high
 * baud rate could compute a divisor of 0, which is an invalid divisor
 * latch value on 16550-class UARTs.
 */
static void mmio_uart_set_baud_rate( __unused int unit, __unused uint32_t baud_rate )
{
    const unsigned char lcr = MMIO_READ( LCR );
    unsigned long div;

    if (baud_rate == 0)
        baud_rate = 9600;
    div = LEGACY_UART_CLOCK / 16 / baud_rate;
    if (div == 0)
        div = 1;   /* divisor latch of 0 is invalid on 16550-class parts */

    /* Set DLAB to expose the divisor latch, write it, then restore LCR */
    MMIO_WRITE( LCR, lcr | UART_LCR_DLAB );
    MMIO_WRITE( DLM, (unsigned char)(div >> 8) );
    MMIO_WRITE( DLL, (unsigned char) div );
    MMIO_WRITE( LCR, lcr & ~UART_LCR_DLAB);
}
/**
 * Write handler for the TMU register region. TSTR edges start/stop the
 * individual timers; TCR writes go through TMU_set_timer_control() for
 * their flag/interrupt side effects; TCNT writes to a running timer
 * reschedule its underflow event. Everything else (including TSTR itself,
 * via the fall-through after the switch) is stored directly.
 */
MMIO_REGION_WRITE_FN( TMU, reg, val )
{
    uint32_t oldval;
    int i;
    reg &= 0xFFF;
    switch( reg ) {
    case TSTR:
        /* Detect per-timer start/stop edges against the previous value */
        oldval = MMIO_READ( TMU, TSTR );
        for( i=0; i<3; i++ ) {
            uint32_t tmp = 1<<i;
            if( (oldval & tmp) != 0 && (val&tmp) == 0 )
                TMU_stop(i);
            else if( (oldval&tmp) == 0 && (val&tmp) != 0 )
                TMU_start(i);
        }
        break;   /* fall through to the MMIO_WRITE below */
    case TCR0: TMU_set_timer_control( 0, val ); return;
    case TCR1: TMU_set_timer_control( 1, val ); return;
    case TCR2: TMU_set_timer_control( 2, val ); return;
    case TCNT0:
        MMIO_WRITE( TMU, reg, val );
        if( TMU_IS_RUNNING(0) ) { // reschedule
            TMU_timers[0].timer_run = sh4r.slice_cycle;
            TMU_schedule_timer( 0 );
        }
        return;
    case TCNT1:
        MMIO_WRITE( TMU, reg, val );
        if( TMU_IS_RUNNING(1) ) { // reschedule
            TMU_timers[1].timer_run = sh4r.slice_cycle;
            TMU_schedule_timer( 1 );
        }
        return;
    case TCNT2:
        MMIO_WRITE( TMU, reg, val );
        if( TMU_IS_RUNNING(2) ) { // reschedule
            TMU_timers[2].timer_run = sh4r.slice_cycle;
            TMU_schedule_timer( 2 );
        }
        return;
    }
    MMIO_WRITE( TMU, reg, val );
}
/**
 * Read handler for the TMU register region. Reading a count register of a
 * running timer first brings that timer up to date so the stored TCNT
 * value reflects the current time; all reads are then served from the
 * backing store.
 */
MMIO_REGION_READ_FN( TMU, reg )
{
    int timer;

    reg &= 0xFFF;
    switch( reg ) {
    case TCNT0: timer = 0; break;
    case TCNT1: timer = 1; break;
    case TCNT2: timer = 2; break;
    default:    timer = -1; break;
    }
    if( timer >= 0 && TMU_IS_RUNNING(timer) ) {
        TMU_count( timer, sh4r.slice_cycle );
    }
    return MMIO_READ( TMU, reg );
}
void pvr_dma2_transfer() { if( MMIO_READ( EXTDMA, PVRDMA2CTL2 ) == 1 ) { if( MMIO_READ( EXTDMA, PVRDMA2CTL1 ) == 1 ) { sh4addr_t extaddr = MMIO_READ( EXTDMA, PVRDMA2EXT ); sh4addr_t sh4addr = MMIO_READ( EXTDMA, PVRDMA2SH4 ); int dir = MMIO_READ( EXTDMA, PVRDMA2DIR ); uint32_t length = MMIO_READ( EXTDMA, PVRDMA2SIZ ); unsigned char buf[length]; if( dir == 0 ) { /* SH4 to PVR */ mem_copy_from_sh4( buf, sh4addr, length ); mem_copy_to_sh4( extaddr, buf, length ); } else { /* PVR to SH4 */ mem_copy_from_sh4( buf, extaddr, length ); mem_copy_to_sh4( sh4addr, buf, length ); } MMIO_WRITE( EXTDMA, PVRDMA2CTL2, 0 ); asic_event( EVENT_PVR_DMA2 ); } } }