fastcall void irq_handler(pt_regs_t *regs) {
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    critical_enter(CRITICAL_IRQ_HANDLER);
    {
        int irq = regs->trapno - 0x20;

        ipl_enable();

        irq_dispatch(irq);

        /* The next two lines must stay in this order.
         * On EOI the current IRQ is unmasked and may fire again right
         * here, on the IRQ stack; that can repeat until the stack is
         * exhausted. Disabling ipl first prevents handling of same- or
         * lower-level IRQs until we switch back to a lower critical
         * level. */
        ipl_disable();
        irqctrl_eoi(irq);
    }
    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
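/*
 * Why the ordering above matters, as a minimal host-side sketch (this is
 * illustrative C, not project code): model_eoi() models a controller that
 * re-delivers a still-asserted line, but only while the CPU accepts
 * interrupts. With the safe order (ipl off, then EOI) nesting stops at
 * depth 1; with the commented-out order it recurses toward exhaustion.
 */
#include <stdio.h>

static int ipl_on = 1;        /* CPU-level interrupt acceptance   */
static int line_asserted = 1; /* the device keeps its line raised */
static int depth;

static void model_irq_entry(void);

static void model_eoi(void) {
    if (line_asserted && ipl_on && depth < 8) {
        model_irq_entry(); /* immediate re-entry on the same stack */
    }
}

static void model_irq_entry(void) {
    depth++;
    printf("nesting depth %d\n", depth);
    /* WRONG: model_eoi(); ipl_on = 0;  -- recurses until the guard trips */
    ipl_on = 0;  /* matches ipl_disable() above */
    model_eoi(); /* matches irqctrl_eoi() above: re-delivery now held off */
    depth--;
    ipl_on = 1;
}

int main(void) {
    model_irq_entry(); /* prints "nesting depth 1" only */
    return 0;
}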
void rwlock_any_down(rwlock_t *r) {
    assert(r);
    assert(!critical_inside(__CRITICAL_HARDER(CRITICAL_SCHED_LOCK)));

    sched_lock();
    {
        r->count--;
        if (r->count == 0) {
            r->status = RWLOCK_STATUS_NONE;
            waitq_wakeup_all(&r->wq);
        }
    }
    sched_unlock();
}
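/*
 * Hedged usage sketch for the release above. Only rwlock_any_down() is
 * shown in this file; the rwlock_read_up()/rwlock_read_down() and
 * rwlock_write_up()/rwlock_write_down() entry points below are assumed
 * acquire/release wrappers, so treat the names as illustrative.
 */
struct cache {
    rwlock_t lock;
    int      value;
};

static int cache_get(struct cache *c) {
    int v;

    rwlock_read_up(&c->lock);   /* shared: count counts the readers      */
    v = c->value;
    rwlock_read_down(&c->lock); /* the last reader drops count to 0,     *
                                 * resets status and wakes write waiters */
    return v;
}

static void cache_set(struct cache *c, int v) {
    rwlock_write_up(&c->lock);  /* exclusive: count == 1 while writing */
    c->value = v;
    rwlock_write_down(&c->lock);
}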
/**
 * Wrapper for thread start routine.
 * Called from sched_switch() function with interrupts off.
 */
static void _NORETURN thread_trampoline(void) {
    struct thread *current = thread_self();
    void *res;

    assertf(!critical_allows(CRITICAL_SCHED_LOCK), "0x%x",
            (uint32_t) __critical_count);

    thread_ack_switched();

    assert(!critical_inside(CRITICAL_SCHED_LOCK));

    /* execute user function handler */
    res = current->run(current->run_arg);
    thread_exit(res);
    /* NOTREACHED */
}
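/*
 * Sketch of the run routine the trampoline invokes. thread_create() is
 * assumed here with the signature thread_create(flags, run, arg); the
 * trampoline calls current->run(current->run_arg) exactly once after the
 * first switch into the thread completes, and hands the return value to
 * thread_exit().
 */
static void *worker(void *arg) {
    int *input = arg;
    /* runs with the scheduler lock released and interrupts enabled */
    return (void *) (long) (*input * 2); /* becomes the thread exit value */
}

/* usage: struct thread *t = thread_create(0, worker, &num); */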
static int tryenter_sched_lock(rwlock_t *r, int status) {
    assert(r);
    assert(critical_inside(CRITICAL_SCHED_LOCK));

    if (r->status == RWLOCK_STATUS_NONE) {
        r->status = status;
        r->count = 1;
        return 0;
    }
    if (r->status == status && status == RWLOCK_STATUS_READING) {
        r->count++;
        return 0;
    }
    return -EAGAIN;
}
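/*
 * Hedged sketch of a blocking acquire built on the try-acquire above:
 * retry under sched_lock() and sleep on r->wq between attempts.
 * waitq_wait_locked() is a hypothetical helper standing in for whatever
 * wait primitive the codebase provides; it is assumed to release the
 * scheduler lock while sleeping and re-take it before returning, so each
 * retry re-checks the state atomically.
 */
static void rwlock_any_up(rwlock_t *r, int status) {
    assert(r);

    sched_lock();
    {
        while (tryenter_sched_lock(r, status) == -EAGAIN) {
            waitq_wait_locked(&r->wq); /* woken by waitq_wakeup_all() in
                                        * rwlock_any_down() */
        }
    }
    sched_unlock();
}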
void interrupt_handle(void) {
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    critical_enter(CRITICAL_IRQ_HANDLER);

    /* regs is the controller's memory-mapped register block, declared at
     * file scope. */
    __raspi__dispatch_bank(regs->irq_pending_1, 0);
    __raspi__dispatch_bank(regs->irq_pending_2, (1 << 5));
    /*
     * Bits 31:21 of the basic pending register are unused; bits 20:8
     * duplicate a few "normal" interrupt status bits to speed up
     * interrupt processing. They could be used to optimize this driver
     * later, but for now we apply a 0xFF mask so that only the unique
     * interrupt requests in bits 7:0 are dispatched.
     */
    __raspi__dispatch_bank(regs->irq_basic_pending & 0xFF, (2 << 5));

    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
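/*
 * Hypothetical sketch of the per-bank helper used above; the real
 * __raspi__dispatch_bank may differ. It only illustrates the flat IRQ
 * numbering: a bank offset of 0, (1 << 5) = 32 or (2 << 5) = 64, plus
 * the index of each pending bit. __builtin_ctz is a GCC/Clang builtin.
 */
#include <stdint.h>

static void __raspi__dispatch_bank(uint32_t pending, unsigned int offset) {
    while (pending) {
        unsigned int bit = __builtin_ctz(pending); /* lowest set bit */
        irq_dispatch(offset + bit);
        pending &= pending - 1; /* clear that bit, keep scanning */
    }
}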
void interrupt_handle(void) {
    unsigned int irq = REG_LOAD(GICC_IAR);

    if (irq == SPURIOUS_IRQ) {
        return;
    }

    /* TODO check if IRQ number is correct */
    assert(!critical_inside(CRITICAL_IRQ_LOCK));

    irqctrl_disable(irq);
    irqctrl_eoi(irq);

    critical_enter(CRITICAL_IRQ_HANDLER);
    {
        ipl_enable();
        irq_dispatch(irq);
        ipl_disable();
    }
    irqctrl_enable(irq);
    critical_leave(CRITICAL_IRQ_HANDLER);

    critical_dispatch_pending();
}
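/*
 * Note the contrast with the x86 irq_handler() earlier: here the line is
 * masked (irqctrl_disable) and completion is signalled (irqctrl_eoi)
 * before dispatch, so the per-IRQ mask, not a delayed EOI, prevents
 * re-delivery while the handler runs. Below is a hedged sketch of what
 * irqctrl_eoi() might look like for a GICv1/v2-style CPU interface:
 * completion is signalled by writing the acknowledged ID back to GICC_EOIR.
 * GIC_CPU_BASE is a hypothetical address and the helper is renamed to
 * avoid clashing with the real implementation, which may differ.
 */
#define GIC_CPU_BASE 0x2c002000UL          /* hypothetical, board-specific */
#define GICC_EOIR    (GIC_CPU_BASE + 0x10) /* End Of Interrupt register    */

static inline void irqctrl_eoi_sketch(unsigned int irq) {
    REG_STORE(GICC_EOIR, irq); /* drops the active state for this ID */
}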