/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called
 * with interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* for per-CPU stats */
	xnintr_t *intr;
	int s;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
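/*
 * Illustrative sketch, not part of the original code: the contract an
 * ISR plugged into the dispatcher above must honor.  The demo_* device
 * and its helpers are hypothetical; only the XN_ISR_* return codes and
 * the xnintr_t layout come from the nucleus API used above.
 */
static int demo_isr(xnintr_t *intr)
{
	struct demo_dev *dev = intr->cookie;	/* hypothetical device */

	if (!demo_irq_pending(dev))		/* hypothetical test */
		/*
		 * Not ours: XN_ISR_NONE feeds the ->unhandled counter
		 * above, which shuts the line down after
		 * XNINTR_MAX_UNHANDLED spurious hits in a row.
		 */
		return XN_ISR_NONE;

	demo_ack_device(dev);			/* hypothetical ack */

	/*
	 * XN_ISR_HANDLED lets the dispatcher re-enable the line via
	 * xnarch_end_irq(); OR in XN_ISR_NOENABLE to defer that, or
	 * XN_ISR_PROPAGATE to chain the IRQ down to Linux instead.
	 */
	return XN_ISR_HANDLED;
}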
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* for per-CPU stats */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr;
	int s = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		int ret;

		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED)
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called
 * with interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.  However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account, start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
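/*
 * Note, not part of the original code: the statistics calls above
 * follow a fixed pattern -- xnstat_exectime_get_current() samples the
 * account currently being charged, xnstat_exectime_lazy_switch()
 * retroactively bills the time elapsed since 'start' to the handler's
 * per-CPU account, and xnstat_exectime_switch() restores the previous
 * account before any rescheduling.  sched->inesting counts IRQ nesting
 * so that xnpod_schedule() only runs once the outermost handler
 * unwinds, with XNINIRQ in sched->lflags flagging interrupt context
 * in the meantime.
 */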
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	int s = 0, counter = 0, ret, code;
	struct xnintr *intr, *end = NULL;
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		xnstat_exectime_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account);
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler() : failed to get "
			 "the IRQ%d line free.\n", irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
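/*
 * Note, not part of the original code: in the scan above,
 * 'code = ret & ~XN_ISR_BITMASK' strips the modifier bits
 * (XN_ISR_PROPAGATE, XN_ISR_NOENABLE), leaving the bare
 * XN_ISR_NONE/XN_ISR_HANDLED verdict to drive the loop.  'end'
 * remembers the first handler of a pass that claimed nothing; any
 * later XN_ISR_HANDLED clears it, so the list keeps being cycled
 * until one complete pass is quiet.  This matters for edge-triggered
 * lines, where a device may re-raise the edge while another handler
 * runs and no further interrupt would be latched.
 * MAX_EDGEIRQ_COUNTER bounds the rescan against a stuck device.
 */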
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched(); /* for per-CPU stats */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr, *end = NULL;
	int s = 0, counter = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		int ret, code;

		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler() : failed to get "
			 "the IRQ%d line free.\n", irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnstat_exectime_t *prev;
	xnticks_t start;
	xnintr_t *intr;
	int s = 0, ret;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
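/*
 * Illustrative sketch, not part of the original code: how handlers end
 * up on the shirq->handlers list the shared dispatchers walk.
 * xnintr_init()/xnintr_attach() and the XN_ISR_SHARED flag are assumed
 * from the Xenomai 2.x nucleus API; the uart devices and their ISRs
 * are made up.
 */
static int uart0_isr(xnintr_t *intr);	/* hypothetical ISRs */
static int uart1_isr(xnintr_t *intr);

static struct demo_uart { int dummy; } uart0_dev, uart1_dev;

static xnintr_t uart0_intr, uart1_intr;

static int demo_attach_shared(unsigned irq)
{
	int err;

	/* Both descriptors declare the line shareable. */
	err = xnintr_init(&uart0_intr, "uart0", irq, uart0_isr, NULL,
			  XN_ISR_SHARED);
	if (err)
		return err;

	err = xnintr_init(&uart1_intr, "uart1", irq, uart1_isr, NULL,
			  XN_ISR_SHARED);
	if (err)
		return err;

	/* The cookie is handed back to each ISR through intr->cookie. */
	err = xnintr_attach(&uart0_intr, &uart0_dev);
	if (err)
		return err;

	return xnintr_attach(&uart1_intr, &uart1_dev);
}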