/**
 * Top-level hardware interrupt entry point, called from the low-level
 * interrupt stub with the saved register frame.
 *
 * Computes the IRQ number from the trap vector (hardware IRQs are remapped
 * to vectors starting at 0x20, hence the subtraction), dispatches it to the
 * registered handlers, and signals end-of-interrupt to the controller.
 *
 * @param regs saved CPU state for the interrupted context;
 *             regs->trapno holds the trap vector number
 */
fastcall void irq_handler(pt_regs_t *regs) {
	/* Must not be entered while interrupts are locked out. */
	assert(!critical_inside(CRITICAL_IRQ_LOCK));
	critical_enter(CRITICAL_IRQ_HANDLER);
	{
		int irq = regs->trapno - 0x20; /* vector -> IRQ number */

		ipl_enable(); /* allow nesting while the handler runs */
		irq_dispatch(irq);
		/*
		 * The next two lines must stay in this order.  On EOI the current
		 * IRQ is unmasked and could fire again immediately, right here on
		 * the IRQ stack, repeating until stack exhaustion.  Disabling the
		 * IPL first prevents handling of same- or lower-level IRQs until
		 * we have switched back to a lower critical level.
		 */
		ipl_disable();
		irqctrl_eoi(irq);
	}
	critical_leave(CRITICAL_IRQ_HANDLER);
	critical_dispatch_pending(); /* run work deferred while in the handler */
}
/**
 * Top-level interrupt dispatcher for the BCM2835 (Raspberry Pi) interrupt
 * controller.  Scans the controller's three pending registers and hands
 * every pending source to __raspi__dispatch_bank() with the bank's IRQ
 * number offset (banks are 32 IRQs apart, hence the multiples of 1 << 5).
 *
 * NOTE(review): 'regs' is not a parameter here — presumably a file-scope
 * pointer to the memory-mapped controller registers defined elsewhere in
 * this driver; confirm against the rest of the file.
 */
void interrupt_handle(void) {
	/* Must not be entered while interrupts are locked out. */
	assert(!critical_inside(CRITICAL_IRQ_LOCK));
	critical_enter(CRITICAL_IRQ_HANDLER);

	__raspi__dispatch_bank(regs->irq_pending_1, 0);        /* IRQs  0..31 */
	__raspi__dispatch_bank(regs->irq_pending_2, (1 << 5)); /* IRQs 32..63 */
	/*
	 * 31:21 bits are unused, 20:8 are used for speeding up interrupts
	 * processing by adding a number of 'normal' interrupt status bits there.
	 * It might be used in order to improve this driver later, but for now
	 * we apply a 0xFF mask to distinguish unique interrupt requests.
	 */
	__raspi__dispatch_bank(regs->irq_basic_pending & 0xFF, (2 << 5));

	critical_leave(CRITICAL_IRQ_HANDLER);
	critical_dispatch_pending(); /* run work deferred while in the handler */
}
/**
 * Top-level interrupt dispatcher for an ARM GIC-style controller.
 *
 * Reads the active interrupt ID from the CPU interface acknowledge
 * register (GICC_IAR), bails out on a spurious interrupt, then masks the
 * source and signals EOI *before* dispatching — presumably so the same
 * IRQ cannot re-fire on this stack while its handler runs (it is
 * re-enabled only after dispatch completes).
 */
void interrupt_handle(void) {
	unsigned int irq = REG_LOAD(GICC_IAR); /* read + acknowledge IRQ id */

	if (irq == SPURIOUS_IRQ)
		return;

	/* TODO check if IRQ number is correct */
	assert(!critical_inside(CRITICAL_IRQ_LOCK));

	/* Mask the source, then complete it at the controller. */
	irqctrl_disable(irq);
	irqctrl_eoi(irq);
	critical_enter(CRITICAL_IRQ_HANDLER);
	{
		ipl_enable(); /* allow nesting of other IRQs while dispatching */
		irq_dispatch(irq);
		ipl_disable();
	}
	irqctrl_enable(irq); /* unmask the source again */
	critical_leave(CRITICAL_IRQ_HANDLER);
	critical_dispatch_pending(); /* run work deferred while in the handler */
}