/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called
 * with interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	xnsched_t *sched = xnpod_current_sched();	/* needed by the per-CPU stat update below */
	xnintr_t *intr;
	int s;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In the SMP case, we have to reload the cookie under the
	 * per-IRQ lock to avoid racing with xnintr_detach.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* The cookie is always valid: attach/detach happens with IRQs off. */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
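/*
 * For illustration -- a minimal sketch of an ISR this dispatcher would
 * call, assuming the Xenomai-nucleus-style xnintr API (xnintr_init(),
 * xnintr_attach(), the 'cookie' member and the exact signatures are
 * assumptions here, as are the my_dev_* helpers).
 */
#if 0	/* sketch only, not part of the build */
static int my_isr(xnintr_t *intr)
{
	struct my_dev *dev = intr->cookie;	/* cookie passed at attach time */

	if (!my_dev_irq_pending(dev))		/* hypothetical device test */
		return XN_ISR_NONE;		/* feeds the 'unhandled' logic above */

	my_dev_irq_ack(dev);			/* hypothetical device ack */
	return XN_ISR_HANDLED;			/* add XN_ISR_PROPAGATE to also chain to Linux */
}

static xnintr_t my_intr;
/* attach path, assumed API:
 *	xnintr_init(&my_intr, "mydev", MY_IRQ, my_isr, NULL, 0);
 *	xnintr_attach(&my_intr, &my_dev);
 */
#endif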
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	xnsched_t *sched = xnpod_current_sched();	/* needed by the per-CPU stat update below */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr;
	int s = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);

	intr = shirq->handlers;
	while (intr) {
		int ret;

		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
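/*
 * For illustration -- since the dispatcher above OR-s every return
 * value into 's', each ISR sharing a level-triggered line must check
 * whether its own device raised the interrupt and return XN_ISR_NONE
 * otherwise; only then can a truly unhandled line be detected. The
 * dev_a_* helpers are hypothetical.
 */
#if 0	/* sketch only, not part of the build */
static int shared_isr_a(xnintr_t *intr)
{
	struct dev_a *da = intr->cookie;

	if (!dev_a_irq_pending(da))	/* not our device: let the next ISR claim it */
		return XN_ISR_NONE;

	dev_a_irq_ack(da);
	return XN_ISR_HANDLED;
}
#endif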
/*
 * This can serve as a prototype for a handler that pends work to
 * Linux.
 */
int rtai_decr_timer_handler(struct pt_regs *regs)
{
	unsigned long cpuid;
	unsigned long sflags;

	/* cpuid and sflags are assigned as a side effect of HAL_LOCK_LINUX(). */
	HAL_LOCK_LINUX();
	RTAI_SCHED_ISR_LOCK();

	decr_timer_handler();

	RTAI_SCHED_ISR_UNLOCK();
	HAL_UNLOCK_LINUX();

	if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
		rtai_sti();
		hal_fast_flush_pipeline(cpuid);
		return 1;
	}

	return 0;
}
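/*
 * For reference -- a hedged sketch of the locking pair used above; the
 * exact definitions vary across RTAI versions and architectures, so do
 * not take these bodies verbatim. The point is that both macros touch
 * the enclosing function's cpuid/sflags variables, which is why those
 * look unassigned in the handlers.
 */
#if 0	/* sketch only, not part of the build */
#define HAL_LOCK_LINUX() \
	do { sflags = rt_save_switch_to_real_time(cpuid = rtai_cpuid()); } while (0)
#define HAL_UNLOCK_LINUX() \
	do { rtai_cli(); rt_restore_switch_to_linux(sflags, cpuid); } while (0)
#endif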
static void rtai_hirq_dispatcher(int irq)
{
	unsigned long cpuid;

	if (rtai_domain.irqs[irq].handler) {
		unsigned long sflags;

		HAL_LOCK_LINUX();	/* assigns cpuid and sflags */
		RTAI_SCHED_ISR_LOCK();
		rtai_domain.irqs[irq].handler(irq, rtai_domain.irqs[irq].cookie);
		RTAI_SCHED_ISR_UNLOCK();
		HAL_UNLOCK_LINUX();
		if (rtai_realtime_irq[irq].retmode ||
		    test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
			return;
		}
	}
	rtai_sti();
	hal_fast_flush_pipeline();
}
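/*
 * For illustration -- how a handler ends up in the table consulted by
 * the dispatchers above. rt_request_irq() is the usual RTAI entry
 * point; treat the prototype as an assumption, since it varies across
 * RTAI versions. A non-zero retmode makes the dispatcher return right
 * after the handler, skipping rtai_sti() and the pipeline flush.
 */
#if 0	/* sketch only, not part of the build */
static int my_rt_handler(unsigned irq, void *cookie)
{
	/* hard real-time work; runs with interrupts off */
	return 0;
}

static int install_my_handler(void)
{
	/* MY_IRQ and my_dev are hypothetical */
	return rt_request_irq(MY_IRQ, my_rt_handler, &my_dev, 0 /* retmode */);
}
#endif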
static int rtai_hirq_dispatcher(struct pt_regs *regs)
{
	unsigned long cpuid;
	int irq;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)
	if ((irq = ppc_md.get_irq()) >= RTAI_NR_IRQS) {
#else
	if ((irq = ppc_md.get_irq(regs)) >= RTAI_NR_IRQS) {
#endif
		spurious_interrupts++;
		return 0;
	}

	if (rtai_realtime_irq[irq].handler) {
		unsigned long sflags;

		HAL_LOCK_LINUX();	/* assigns cpuid and sflags */
		RTAI_IRQ_ACK(irq);	// rtai_realtime_irq[irq].irq_ack(irq);
		mb();
		RTAI_SCHED_ISR_LOCK();
		rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie);
		RTAI_SCHED_ISR_UNLOCK();
		HAL_UNLOCK_LINUX();
		if (rtai_realtime_irq[irq].retmode ||
		    test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
			return 0;
		}
	} else {
		unsigned long lflags;

		/* Stall the root domain while pending the IRQ to Linux. */
		lflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid = rtai_cpuid()),
			      (1 << IPIPE_STALL_FLAG));
		RTAI_IRQ_ACK(irq);	// rtai_realtime_irq[irq].irq_ack(irq);
		mb();
		hal_pend_uncond(irq, cpuid);
		ROOT_STATUS_VAL(cpuid) = lflags;
		if (test_bit(IPIPE_STALL_FLAG, &lflags)) {
			return 0;
		}
	}

	rtai_sti();
	hal_fast_flush_pipeline(cpuid);
	return 1;
}

/*
 * rt_set_trap_handler
 */
RT_TRAP_HANDLER rt_set_trap_handler(RT_TRAP_HANDLER handler)
{
	return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
}

/*
 * rtai_trap_fault
 */
static int rtai_trap_fault(unsigned event, void *evdata)
{
#ifdef HINT_DIAG_TRAPS
	static unsigned long traps_in_hard_intr = 0;
	do {
		unsigned long flags;
		rtai_save_flags_and_cli(flags);
		if (!test_bit(RTAI_IFLAG, &flags)) {
			if (!test_and_set_bit(event, &traps_in_hard_intr)) {
				HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED "
							"(TRAPS PICTURE %lx).\n",
							event, traps_in_hard_intr););
			}
		}
	} while (0);
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	xnsched_t *sched = xnpod_current_sched();	/* needed by the per-CPU stat update below */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr, *end = NULL;
	int s = 0, counter = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);

	intr = shirq->handlers;
	while (intr != end) {
		int ret, code;

		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			/*
			 * Clear the end marker: keep cycling until a
			 * full pass reports nothing handled, since a
			 * device may latch a new edge while an ISR runs.
			 */
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;
		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler(): failed to get "
			 "the IRQ%d line free.\n", irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
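/*
 * For illustration -- an ISR sharing an edge-triggered line should
 * drain every event its device has latched, because the dispatcher
 * above keeps cycling through the handler list until one full pass
 * reports nothing handled (or MAX_EDGEIRQ_COUNTER trips). The dev_e_*
 * helpers are hypothetical.
 */
#if 0	/* sketch only, not part of the build */
static int edge_isr(xnintr_t *intr)
{
	struct dev_e *de = intr->cookie;
	int handled = 0;

	while (dev_e_event_pending(de)) {	/* drain all latched events */
		dev_e_consume_event(de);
		handled = 1;
	}

	return handled ? XN_ISR_HANDLED : XN_ISR_NONE;
}
#endif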