/*
 * Dispatch the highest-numbered pending VIC interrupt.
 *
 * The index of the most-significant set bit in VICIRQStatus selects the
 * vector; after the handler chain returns, a write to VICVectAddr signals
 * end-of-interrupt to the vectored interrupt controller.
 */
void bsp_interrupt_dispatch(void)
{
  /* NOTE(review): assumes at least one request is pending; clz(0) is UB */
  rtems_vector_number active = 31 - __builtin_clz(VICIRQStatus);

  bsp_interrupt_handler_dispatch(active);

  /* End-of-interrupt write to the VIC */
  VICVectAddr = 0;
}
/*
 * Handle the current AIC interrupt request.
 *
 * Reading AIC_IVR returns the active vector (and, per the controller's
 * protocol, marks the request as being serviced); the AIC_EOICR write
 * afterwards signals end-of-interrupt.
 */
void bsp_interrupt_dispatch(void)
{
  rtems_vector_number active = AIC_CTL_REG(AIC_IVR);

  bsp_interrupt_handler_dispatch(active);

  AIC_CTL_REG(AIC_EOICR) = 0;
}
/*
 * TX3904 interrupt decoder: map the pending-and-enabled cause bits to
 * BSP vector numbers and dispatch the installed handlers.
 *
 * NOTE(review): the function's closing brace is not visible in this
 * chunk; the definition appears to continue elsewhere.
 */
void mips_vector_isr_handlers( CPU_Interrupt_frame *frame )
{
  unsigned int sr;
  unsigned int cause;

  mips_get_sr( sr );
  mips_get_cause( cause );

  /* Keep only requests that are both pending (cause) and enabled (sr) */
  cause &= (sr & SR_IMASK);
  cause >>= CAUSE_IPSHIFT;

  if ( cause & 0x80 ) /* IP[5] ==> INT0 */
    bsp_interrupt_handler_dispatch( TX3904_IRQ_INT0 );

  if ( cause & 0x40 ) { /* (IP[4] == 1) ==> IP[0-3] are valid */
    /* IP[0-3] encode a vector number relative to MIPS_INTERRUPT_BASE */
    unsigned int v = (cause >> 2) & 0x0f;
    bsp_interrupt_handler_dispatch( MIPS_INTERRUPT_BASE + v );
  }
/*
 * Dispatch the highest-priority pending GBA interrupt.
 *
 * Only requests that are both flagged (IF) and enabled (IE) are
 * considered; the highest set bit wins.  Writing that same bit back to
 * GBA_REG_IF acknowledges the request.
 */
void bsp_interrupt_dispatch(void)
{
  unsigned enabled = GBA_REG_IE;
  unsigned active  = GBA_REG_IF & enabled;
  /* NOTE(review): assumes at least one enabled request is pending;
   * __builtin_clz(0) is undefined -- confirm dispatcher precondition */
  rtems_vector_number vector = 31 - __builtin_clz(active);

  bsp_interrupt_handler_dispatch(vector);

  /* Acknowledge by writing the serviced bit back to IF */
  GBA_REG_IF = 1 << vector;
}
/*
 * Decode the pending MIPS interrupt and dispatch its handler.
 *
 * NOTE(review): the function's closing brace is not visible in this
 * chunk; the definition appears to continue elsewhere.
 */
void mips_vector_isr_handlers( CPU_Interrupt_frame *frame )
{
  unsigned int sr;
  unsigned int cause;
  unsigned int pending;

  mips_get_sr( sr );
  mips_get_cause( cause );

  /* Only cause bits 8..10 (IP[0-2]) are considered, masked by SR enables */
  pending = (cause & sr & 0x700) >> CAUSE_IPSHIFT;

  if ( pending & 0x4 ) { /* (IP[2] == 1) ==> IP[3-7] are valid */
    /* IP[3-7] (cause bits 11..15) encode the vector number */
    unsigned int v = (cause >> (CAUSE_IPSHIFT + 3)) & 0x1f;
    bsp_interrupt_handler_dispatch( MIPS_INTERRUPT_BASE + v );
  }
/*
 * MPC55XX external interrupt dispatcher.
 *
 * Reads the INTC interrupt-acknowledge register to obtain the active
 * vector, runs the installed handlers with external exceptions enabled
 * (permitting nesting), and finally writes the end-of-interrupt register.
 */
static void mpc55xx_interrupt_dispatch(void)
{
  /* Acknowledge the request and fetch its vector number */
  rtems_vector_number vector = INTC.IACKR.B.INTVEC;

  /* Save machine state and allow external exceptions during dispatch */
  uint32_t msr = ppc_external_exceptions_enable();

  bsp_interrupt_handler_dispatch(vector);

  /* Restore the previous machine state */
  ppc_external_exceptions_disable(msr);

  /* Signal end of interrupt to the controller */
  INTC.EOIR.R = 1;
}
/*
 * QorIQ PIC interrupt dispatcher.
 *
 * Acknowledges the PIC to get the active vector, dispatches the
 * handlers with external exceptions enabled, then signals
 * end-of-interrupt.  The trailing WHOAMI read presumably pushes the
 * EOI write out to the PIC -- TODO confirm against the reference manual.
 */
static void qoriq_interrupt_dispatch(void)
{
  rtems_vector_number vector = qoriq.pic.iack;

  if (vector == SPURIOUS) {
    bsp_interrupt_handler_default(vector);
    return;
  }

  uint32_t msr = ppc_external_exceptions_enable();

  bsp_interrupt_handler_dispatch(vector);
  ppc_external_exceptions_disable(msr);

  qoriq.pic.eoi = 0;
  qoriq.pic.whoami;
}
/*
 * ARM VIC interrupt dispatcher with nesting support.
 *
 * Reads the active vector from the VIC, re-enables IRQs in the CPSR so
 * higher-priority interrupts can nest while the handlers run, restores
 * the previous status, and acknowledges the VIC.
 */
void bsp_interrupt_dispatch(void)
{
  rtems_vector_number active = VICVectAddr;   /* current vector from VIC */

  /* Permit nested IRQs while the handler chain runs */
  uint32_t psr = _ARMV4_Status_irq_enable();
  bsp_interrupt_handler_dispatch(active);
  _ARMV4_Status_restore(psr);

  /* End-of-interrupt write */
  VICVectAddr = 0;
}
/*
 * ARM GIC interrupt dispatcher.
 *
 * Reading ICCIAR acknowledges the interrupt and yields its ID; the
 * handlers run with IRQs re-enabled so higher-priority interrupts may
 * nest, and the full ICCIAR value is written back to ICCEOIR as
 * end-of-interrupt.
 */
void bsp_interrupt_dispatch(void)
{
  volatile gic_cpuif *cpuif = GIC_CPUIF;
  uint32_t icciar = cpuif->icciar;    /* acknowledge: latches active ID */
  rtems_vector_number vector = GIC_CPUIF_ICCIAR_ACKINTID_GET(icciar);

  /* ID 1023 is the GIC's "no pending interrupt" (spurious) indicator */
  if (vector != 1023) {
    uint32_t psr = _ARMV4_Status_irq_enable();
    bsp_interrupt_handler_dispatch(vector);
    _ARMV4_Status_restore(psr);

    /* End of interrupt: write back the full acknowledge value */
    cpuif->icceoir = icciar;
  }
}
/* * Determine the source of the interrupt and dispatch the correct handler. */ void bsp_interrupt_dispatch(void) { unsigned int pend; unsigned int pend_bit; rtems_vector_number vector = 255; #ifdef RTEMS_SMP uint32_t cpu_index_self = _SMP_Get_current_processor(); uint32_t local_source = BCM2835_REG(BCM2836_IRQ_SOURCE_REG(cpu_index_self)); if ( local_source & BCM2836_IRQ_SOURCE_MBOX3 ) { /* reset mailbox 3 contents to zero */ BCM2835_REG(BCM2836_MAILBOX_3_READ_CLEAR_BASE + 0x10 * cpu_index_self) = 0xffffffff; _SMP_Inter_processor_interrupt_handler(); } if ( cpu_index_self != 0 ) return; #endif /* RTEMS_SMP */ pend = BCM2835_REG(BCM2835_IRQ_BASIC); if ( pend & BCM2835_IRQ_BASIC_SPEEDUP_USED_BITS ) { pend_bit = ffs(pend) - 1; vector = bcm2835_irq_speedup_table[pend_bit]; } else { pend = BCM2835_REG(BCM2835_IRQ_PENDING1); if ( pend != 0 ) { pend_bit = ffs(pend) - 1; vector = pend_bit; } else { pend = BCM2835_REG(BCM2835_IRQ_PENDING2); if ( pend != 0 ) { pend_bit = ffs(pend) - 1; vector = pend_bit + 32; } } } if ( vector < 255 ) { bsp_interrupt_handler_dispatch(vector); } }
/*
 * QorIQ external-exception entry: acknowledge the PIC and dispatch.
 *
 * Mirrors qoriq_interrupt_dispatch but is hooked as an exception
 * handler; always reports the exception as handled (returns 0).  The
 * trailing WHOAMI read presumably flushes the EOI write to the PIC --
 * TODO confirm against the reference manual.
 */
static int qoriq_external_exception_handler(
  BSP_Exception_frame *frame,
  unsigned exception_number
)
{
  rtems_vector_number vector = qoriq.pic.iack;

  if (vector == SPURIOUS) {
    bsp_interrupt_handler_default(vector);
  } else {
    uint32_t msr = ppc_external_exceptions_enable();

    bsp_interrupt_handler_dispatch(vector);
    ppc_external_exceptions_disable(msr);

    qoriq.pic.eoi = 0;
    qoriq.pic.whoami;
  }

  return 0;
}
/*
 * Forward an EDB7312 interrupt straight to the generic RTEMS
 * handler-dispatch layer; no controller acknowledgement is done here.
 */
void edb7312_interrupt_dispatch(rtems_vector_number vector)
{
  bsp_interrupt_handler_dispatch(vector);
}
/*
 * Count one occurrence of the given interrupt, then dispatch it.
 */
void C_dispatch_isr(int vector)
{
  ++irq_count[vector];   /* per-vector statistics */
  bsp_interrupt_handler_dispatch(vector);
}
/*
 * Dispatch the interrupt identified by the controller's offset register.
 */
void bsp_interrupt_dispatch(void)
{
  /* The interrupt-offset register holds the active vector number */
  rtems_vector_number active = *((uint32_t *) rINTOFFSET_ADDR);

  bsp_interrupt_handler_dispatch(active);
}
/*
 * Dispatch the highest-numbered pending XScale interrupt.
 */
void bsp_interrupt_dispatch(void)
{
  /* Most-significant set bit of the IRQ pending register is the vector.
   * NOTE(review): assumes a request is pending; clz(0) is undefined. */
  rtems_vector_number active = 31 - __builtin_clz(XSCALE_INT_ICIP);

  bsp_interrupt_handler_dispatch(active);
}
/*
 * IRQ Handler: this is called from the primary exception dispatcher.
 *
 * Resolves the IPIC vector for the given exception class, optionally
 * masks all lower-priority interrupts and enables nesting
 * (GEN83XX_ENABLE_INTERRUPT_NESTING), dispatches the installed
 * handlers, and restores the original mask state.
 *
 * Returns 0 on success, 1 if the exception number is not an IPIC
 * exception class.
 */
static int BSP_irq_handle_at_ipic( unsigned excNum)
{
  int32_t vecnum;
  mpc83xx_ipic_mask_t mask_save;
  const mpc83xx_ipic_mask_t *mask_ptr;
  uint32_t msr = 0;
  rtems_interrupt_level level;

  /* Get vector number from the per-class vector register */
  switch (excNum) {
    case ASM_EXT_VECTOR:
      vecnum = MPC83xx_VCR_TO_VEC( mpc83xx.ipic.sivcr);
      break;
    case ASM_E300_SYSMGMT_VECTOR:
      vecnum = MPC83xx_VCR_TO_VEC( mpc83xx.ipic.smvcr);
      break;
    case ASM_E300_CRIT_VECTOR:
      vecnum = MPC83xx_VCR_TO_VEC( mpc83xx.ipic.scvcr);
      break;
    default:
      /* Not an IPIC exception class */
      return 1;
  }

  /*
   * Check the vector number, mask lower priority interrupts, enable
   * exceptions and dispatch the handler.
   */
  if (MPC83XX_IPIC_IS_VALID_VECTOR( vecnum)) {
#ifdef GEN83XX_ENABLE_INTERRUPT_NESTING
    /* Priority-to-mask table entry for this vector */
    mask_ptr = &mpc83xx_ipic_prio2mask [vecnum];

    /* Mask registers are read-modify-write: do it atomically */
    rtems_interrupt_disable( level);

    /* Save current mask registers */
    mask_save.simsr_mask [0] = mpc83xx.ipic.simsr [0];
    mask_save.simsr_mask [1] = mpc83xx.ipic.simsr [1];
    mask_save.semsr_mask = mpc83xx.ipic.semsr;
    mask_save.sermr_mask = mpc83xx.ipic.sermr;

    /* Mask all lower priority interrupts */
    mpc83xx.ipic.simsr [0] &= mask_ptr->simsr_mask [0];
    mpc83xx.ipic.simsr [1] &= mask_ptr->simsr_mask [1];
    mpc83xx.ipic.semsr &= mask_ptr->semsr_mask;
    mpc83xx.ipic.sermr &= mask_ptr->sermr_mask;

    rtems_interrupt_enable( level);

    /* Enable all interrupts (critical exceptions stay non-nestable) */
    if (excNum != ASM_E300_CRIT_VECTOR) {
      msr = ppc_external_exceptions_enable();
    }
#endif /* GEN83XX_ENABLE_INTERRUPT_NESTING */

    /* Dispatch interrupt handlers */
    bsp_interrupt_handler_dispatch( vecnum + BSP_IPIC_IRQ_LOWEST_OFFSET);

#ifdef GEN83XX_ENABLE_INTERRUPT_NESTING
    /* Restore machine state */
    if (excNum != ASM_E300_CRIT_VECTOR) {
      ppc_external_exceptions_disable( msr);
    }

    /* Restore initial masks */
    rtems_interrupt_disable( level);
    mpc83xx.ipic.simsr [0] = mask_save.simsr_mask [0];
    mpc83xx.ipic.simsr [1] = mask_save.simsr_mask [1];
    mpc83xx.ipic.semsr = mask_save.semsr_mask;
    mpc83xx.ipic.sermr = mask_save.sermr_mask;
    rtems_interrupt_enable( level);
#endif /* GEN83XX_ENABLE_INTERRUPT_NESTING */
  } else {
    /* Vector register did not yield a valid vector */
    bsp_interrupt_handler_default( vecnum);
  }

  return 0;
}
/*
 * Prioritized interrupt dispatcher for the Mongoose-V.
 *
 * This routine enforces an interrupt priority scheme: after servicing
 * each interrupt it re-reads the cause register and, if another (or a
 * higher-priority) request has arrived in the meantime, jumps back to
 * the top so requests are always served in priority order.  To
 * minimize overhead, the cause register is only fetched after an
 * interrupt is serviced.  The routine exits only when all pending
 * interrupts have been serviced; this improves interrupt latency at
 * the cost of scheduling jitter, which should only become apparent
 * under high interrupt load.
 */
void mips_vector_isr_handlers( CPU_Interrupt_frame *frame )
{
  uint32_t cshifted;

  /* mips_get_sr( sr ); */
  _ivsr = frame->c0_sr;

  cshifted = READ_CAUSE();

intvect:

  if ( cshifted & 0x3 ) {
    /*
     * Software interrupts are tested first, making them the highest
     * priority; this mainly simplifies the bit testing below.  They
     * are used infrequently, so the testing overhead is minimal.
     */

    /* unset both software int cause bits */
    mips_set_cause( _ivcause & ~(3 << CAUSE_IPSHIFT) );

    if ( cshifted & 0x01 ) {    /* SW[0] */
      bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_SOFTWARE_1 );
    }
    if ( cshifted & 0x02 ) {    /* SW[1] */
      bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_SOFTWARE_2 );
    }
    cshifted = READ_CAUSE();
  }

  if ( cshifted & 0x04 ) {      /* IP[0] ==> INT0 == TIMER1 */
    SET_ISR_FLAG( 0x4 );
    bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_TIMER1 );
    CLR_ISR_FLAG( 0x4 );
    if ( (cshifted = READ_CAUSE()) & 0x3 )
      goto intvect;
  }

  if ( cshifted & 0x08 ) {      /* IP[1] ==> INT1 == TIMER2 */
    SET_ISR_FLAG( 0x8 );
    bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_TIMER2 );
    CLR_ISR_FLAG( 0x8 );
    if ( (cshifted = READ_CAUSE()) & 0x7 )
      goto intvect;
  }

  if ( cshifted & 0x10 ) {      /* IP[2] ==> INT2 */
    SET_ISR_FLAG( 0x10 );
    bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_INT2 );
    CLR_ISR_FLAG( 0x10 );
    if ( (cshifted = READ_CAUSE()) & 0xf )
      goto intvect;
  }

  if ( cshifted & 0x20 ) {      /* IP[3] ==> INT3 == FPU interrupt */
    SET_ISR_FLAG( 0x20 );
    bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_INT3 );
    CLR_ISR_FLAG( 0x20 );
    if ( (cshifted = READ_CAUSE()) & 0x1f )
      goto intvect;
  }

  if ( cshifted & 0x40 ) {      /* IP[4] ==> INT4, external interrupt */
    SET_ISR_FLAG( 0x40 );
    bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_INT4 );
    CLR_ISR_FLAG( 0x40 );
    if ( (cshifted = READ_CAUSE()) & 0x3f )
      goto intvect;
  }

  if ( cshifted & 0x80 ) {      /* IP[5] ==> INT5, peripheral interrupt */
    uint32_t bit;
    uint32_t pf_icr, pf_mask, pf_reset = 0;
    uint32_t i, m;

    pf_icr = MONGOOSEV_READ( MONGOOSEV_PERIPHERAL_FUNCTION_INTERRUPT_CAUSE_REGISTER );

    /*
     * Iterate thru 32 bits in 4 chunks of 8 bits each.  This lets us
     * quickly get past unasserted interrupts instead of flogging our
     * way thru a full 32 bits.  pf_mask shifts left 8 bits at a time
     * to serve as an interrupt cause test mask.
     */
    for ( bit = 0, pf_mask = 0xff;
          bit < 32 && pf_icr;
          bit += 8, pf_mask <<= 8 ) {
      if ( pf_icr & pf_mask ) {
        /* one or more of the 8 bits we're testing is high */
        m = (uint32_t) 1 << bit;   /* unsigned: 1 << 31 would be UB */

        /* iterate thru the 8 bits, servicing any of the interrupts */
        for ( i = 0; i < 8 && pf_icr; i++, m <<= 1 ) {
          if ( pf_icr & m ) {
            SET_ISR_FLAG( 0x80 + ((bit + i) * 4) );
            bsp_interrupt_handler_dispatch( MONGOOSEV_IRQ_PERIPHERAL_BASE + bit + i );
            CLR_ISR_FLAG( 0x80 + ((bit + i) * 4) );

            /* or each serviced interrupt into our interrupt clear mask */
            pf_reset |= m;

            /*
             * Clear only the serviced bit so the loops can terminate as
             * soon as the last pending request is handled.
             *
             * BUGFIX: this was "pf_icr %= m", which also wiped every
             * higher pending bit and silently skipped those interrupts
             * for this pass, contradicting the stated intent of
             * xor-ing off each serviced interrupt.
             */
            pf_icr ^= m;

            /*
             * If another interrupt has arrived, jump out right away,
             * but be sure to reset all the interrupts we've already
             * serviced.
             */
            if ( READ_CAUSE() & 0xff )
              goto pfexit;
          }
        }
      }
    }

pfexit:
    /* Acknowledge every peripheral interrupt we serviced */
    MONGOOSEV_WRITE( MONGOOSEV_PERIPHERAL_STATUS_REGISTER, pf_reset );
  }

  /*
   * Last-ditch interrupt check: an interrupt arriving after this step
   * will incur the entire exception entry/exit overhead.
   */
  if ( (cshifted = READ_CAUSE()) & 0xff )
    goto intvect;
}