void xnintr_clock_handler(void)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	xnarch_announce_tick();

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);
	xnstat_exectime_lazy_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account, start);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
	xnstat_exectime_switch(sched, prev);
}
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called
 * with interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.  However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account, start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
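/*
 * Illustrative only: a minimal sketch of the kind of non-shared ISR
 * dispatched by xnintr_irq_handler() above, registered through the
 * nucleus xnintr_init()/xnintr_attach() interface.  The device names
 * (mydev_*, MYDEV_IRQ) are hypothetical and not part of this file;
 * the return-bit convention (XN_ISR_HANDLED, XN_ISR_NONE,
 * XN_ISR_NOENABLE) is the one tested by the dispatcher above.
 */
#if 0	/* usage sketch, not compiled with the nucleus */
static xnintr_t mydev_intr;		/* hypothetical device descriptor */

static int mydev_isr(xnintr_t *intr)
{
	struct mydev *dev = intr->cookie;	/* cookie passed at attach time */

	if (!mydev_irq_pending(dev))		/* hypothetical status check */
		return XN_ISR_NONE;		/* not ours: feeds intr->unhandled */

	mydev_ack_and_service(dev);		/* hypothetical helper */

	/*
	 * XN_ISR_HANDLED alone lets the dispatcher re-enable the line
	 * via xnarch_end_irq(); or-ing in XN_ISR_NOENABLE would defer
	 * that until a task calls xnintr_enable() later.
	 */
	return XN_ISR_HANDLED;
}

static int mydev_setup(struct mydev *dev)
{
	int err;

	err = xnintr_init(&mydev_intr, "mydev", MYDEV_IRQ, mydev_isr,
			  NULL, 0);	/* no IRQ-ack routine, non-shared */
	if (err)
		return err;

	err = xnintr_attach(&mydev_intr, dev);	/* dev becomes intr->cookie */
	if (err)
		return err;

	return xnintr_enable(&mydev_intr);
}
#endif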
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	int s = 0, counter = 0, ret, code;
	struct xnintr *intr, *end = NULL;
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		xnstat_exectime_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account);
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler() : failed to get "
			 "the IRQ%d line free.\n", irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
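/*
 * Illustrative only: how two hypothetical devices could share one
 * edge-triggered line serviced by the loop above.  The loop keeps
 * rescanning shirq->handlers until a full pass yields no
 * XN_ISR_HANDLED (tracked through 'end'), because a new edge may fire
 * on one device while another is being serviced and would otherwise
 * be lost.  The XN_ISR_SHARED/XN_ISR_EDGE creation flags are assumed
 * from this nucleus; deva_*/devb_* are made-up names and error
 * checking is omitted for brevity.
 */
#if 0	/* usage sketch, not compiled with the nucleus */
static xnintr_t deva_intr, devb_intr;

static int deva_isr(xnintr_t *intr)
{
	return deva_service(intr->cookie) ? XN_ISR_HANDLED : XN_ISR_NONE;
}

static int devb_isr(xnintr_t *intr)
{
	return devb_service(intr->cookie) ? XN_ISR_HANDLED : XN_ISR_NONE;
}

static int shared_edge_setup(unsigned irq, void *deva, void *devb)
{
	/* Both descriptors must pass the same sharing flags. */
	xnintr_init(&deva_intr, "deva", irq, deva_isr, NULL,
		    XN_ISR_SHARED | XN_ISR_EDGE);
	xnintr_init(&devb_intr, "devb", irq, devb_isr, NULL,
		    XN_ISR_SHARED | XN_ISR_EDGE);

	xnintr_attach(&deva_intr, deva);
	xnintr_attach(&devb_intr, devb);	/* chains onto the same line */

	return xnintr_enable(&deva_intr);
}
#endif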
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnstat_exectime_t *prev;
	xnticks_t start;
	xnintr_t *intr;
	int s = 0, ret;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
	xnstat_exectime_switch(sched, prev);
}
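/*
 * Illustrative only: a level-triggered shared ISR matching the single
 * pass made by xnintr_shirq_handler() above.  Each attached ISR is
 * visited exactly once per occurrence, so an ISR that cannot silence
 * its device immediately may return XN_ISR_HANDLED | XN_ISR_NOENABLE
 * to withhold xnarch_end_irq() until a task re-enables the line with
 * xnintr_enable().  Since the dispatcher ORs all return values, this
 * defers end-of-interrupt for every sharer of the line as well.
 * devc_* names are hypothetical.
 */
#if 0	/* usage sketch, not compiled with the nucleus */
static int devc_isr(xnintr_t *intr)
{
	struct devc *dev = intr->cookie;

	if (!devc_irq_pending(dev))	/* hypothetical status check */
		return XN_ISR_NONE;	/* let the other sharers claim it */

	devc_mask_irq(dev);		/* hypothetical: quiesce the source */
	devc_kick_worker(dev);		/* defer heavy work to a task */

	/* Keep the line off until the worker calls xnintr_enable(). */
	return XN_ISR_HANDLED | XN_ISR_NOENABLE;
}
#endif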