void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}

	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* needed by the per-CPU stat slot below */
	xnintr_t *intr;
	int s;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
      unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
static inline void xntimer_dequeue_periodic(xntimer_t *timer)
{
	unsigned slot = (xntlholder_date(&timer->plink) & XNTIMER_WHEELMASK);
	unsigned cpu = xnsched_cpu(timer->sched);
	struct percpu_cascade *pc = &base2slave(timer->base)->cascade[cpu];

	xntlist_remove(&pc->wheel[slot], &timer->plink);
	__setbits(timer->status, XNTIMER_DEQUEUED);
}
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* needed by the per-CPU stat slot below */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr;
	int s = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		int ret;

		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED)
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
static inline void xntimer_enqueue_periodic(xntimer_t *timer)
{
	unsigned slot = (xntlholder_date(&timer->plink) & XNTIMER_WHEELMASK);
	unsigned cpu = xnsched_cpu(timer->sched);
	struct percpu_cascade *pc = &base2slave(timer->base)->cascade[cpu];

	/* Just prepend the new timer to the proper slot. */
	xntlist_insert(&pc->wheel[slot], &timer->plink);
	__clrbits(timer->status, XNTIMER_DEQUEUED);
	xnstat_counter_inc(&timer->scheduled);
}
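/*
 * Illustration only -- not part of the nucleus sources. A minimal,
 * standalone sketch of the timing-wheel slot hashing used by
 * xntimer_enqueue_periodic()/xntimer_dequeue_periodic() above: the
 * expiry date (in base jiffies) is masked with XNTIMER_WHEELMASK to
 * pick a slot, so enqueue and dequeue always address the same per-CPU
 * list as long as the timer's date is unchanged. The wheel size below
 * is an assumption chosen for the demo, not the value used by the
 * nucleus.
 */
#include <stdio.h>

#define DEMO_WHEELSIZE 64			/* assumed power of two */
#define DEMO_WHEELMASK (DEMO_WHEELSIZE - 1)	/* plays the role of XNTIMER_WHEELMASK */

int main(void)
{
	unsigned long long dates[] = { 5, 64, 69, 133, 1000000 };
	int i;

	for (i = 0; i < 5; i++) {
		unsigned slot = (unsigned)(dates[i] & DEMO_WHEELMASK);
		printf("expiry date %llu -> slot %u\n", dates[i], slot);
	}
	/*
	 * Timers whose dates differ by a multiple of DEMO_WHEELSIZE hash
	 * to the same slot; the tick handler filters them by comparing
	 * the stored date against the current jiffies (see
	 * xntimer_tick_periodic_inner()).
	 */
	return 0;
}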
/**
 * Migrate a timer.
 *
 * This call migrates a timer to another cpu. In order to avoid
 * pathological cases, it must be called from the CPU to which @a timer
 * is currently attached.
 *
 * @param timer The address of the timer object to be migrated.
 *
 * @param sched The address of the destination CPU xnsched_t structure.
 *
 * @retval -EINVAL if @a timer is queued on a CPU other than the current one;
 * @retval 0 otherwise.
 *
 */
int xntimer_migrate(xntimer_t *timer, xnsched_t *sched)
{
	int err = 0;
	int queued;
	spl_t s;

	trace_mark(xn_nucleus, timer_migrate, "timer %p cpu %d",
		   timer, (int)xnsched_cpu(sched));

	xnlock_get_irqsave(&nklock, s);

	if (sched == timer->sched)
		goto unlock_and_exit;

	queued = !testbits(timer->status, XNTIMER_DEQUEUED);

	/*
	 * Avoid the pathological case where the timer interrupt did
	 * not occur yet for the current date on the timer source CPU,
	 * whereas we are trying to migrate it to a CPU where the timer
	 * interrupt already occurred. This would not be a problem in
	 * aperiodic mode.
	 */
	if (queued) {
		if (timer->sched != xnpod_current_sched()) {
			err = -EINVAL;
			goto unlock_and_exit;
		}
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->stop_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_stop_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
	}

	timer->sched = sched;

	if (queued)
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->move_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_move_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
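/*
 * Illustration only -- a minimal usage sketch for xntimer_migrate()
 * above, under the assumption that the caller runs on the CPU the
 * timer is currently attached to (as the documentation requires).
 * move_timer_to_cpu() is a hypothetical helper written for this
 * example; xnpod_sched_slot() is the per-CPU scheduler slot accessor
 * already used elsewhere in this code.
 */
static int move_timer_to_cpu(xntimer_t *timer, unsigned cpu)
{
	int err = xntimer_migrate(timer, xnpod_sched_slot(cpu));

	if (err == -EINVAL)
		/*
		 * The timer is queued and we are not running on its
		 * source CPU; the caller would have to retry from that
		 * CPU (or stop the timer first).
		 */
		return err;

	return 0;
}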
/**
 * @internal
 * @fn void watchdog_handler(struct xntimer *timer)
 * @brief Process watchdog ticks.
 *
 * This internal routine handles incoming watchdog ticks to detect
 * software lockups. It kills any offending thread which is found to
 * monopolize the CPU so as to starve the Linux kernel for too long.
 *
 * @coretags{coreirq-only, atomic-entry}
 */
static void watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnsched_current();
	struct xnthread *curr = sched->curr;

	if (likely(xnthread_test_state(curr, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

	trace_cobalt_watchdog_signal(curr);

	if (xnthread_test_state(curr, XNUSER)) {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
	} else {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
		/*
		 * On behalf of an IRQ handler, xnthread_cancel() would
		 * only go half way through cancelling the preempted
		 * thread. Therefore we manually raise XNKICKED to
		 * cause the next call to xnthread_suspend() to return
		 * early in XNBREAK condition, and XNCANCELD so that
		 * @thread exits next time it invokes
		 * xnthread_test_cancel().
		 */
		xnthread_set_info(curr, XNKICKED|XNCANCELD);
	}

	xnsched_reset_watchdog(sched);
}
void xntimer_tick_periodic_inner(xntslave_t *slave)
{
	xnsched_t *sched = xnpod_current_sched();
	xntbase_t *base = &slave->base;
	xntlholder_t *holder;
	xnqueue_t *timerq;
	xntimer_t *timer;

	/*
	 * Update the periodic clocks keeping things strictly
	 * monotonic (this routine is run on every cpu, but only CPU
	 * XNTIMER_KEEPER_ID should do this).
	 */
	if (sched == xnpod_sched_slot(XNTIMER_KEEPER_ID))
		++base->jiffies;

	timerq = &slave->cascade[xnsched_cpu(sched)].wheel[base->jiffies & XNTIMER_WHEELMASK];

	while ((holder = xntlist_head(timerq)) != NULL) {
		timer = plink2timer(holder);

		if ((xnsticks_t) (xntlholder_date(&timer->plink)
				  - base->jiffies) > 0)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_periodic(timer);
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			continue;

		__setbits(timer->status, XNTIMER_FIRED);
		xntlholder_date(&timer->plink) = base->jiffies + timer->interval;
		xntimer_enqueue_periodic(timer);
	}

	xnsched_tick(sched->curr, base);	/* Do time-slicing if required. */
}
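/*
 * Illustration only -- a standalone sketch of the signed-difference
 * test used above ("(xnsticks_t)(date - jiffies) > 0") to decide
 * whether a queued timer is still in the future. Casting the unsigned
 * difference to a signed type keeps the comparison correct across
 * counter wraparound, as long as the two dates are less than half the
 * counter range apart. The types below are stand-ins for the demo,
 * not the nucleus definitions.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t demo_ticks_t;		/* stands in for xnticks_t */
typedef int64_t demo_sticks_t;		/* stands in for xnsticks_t */

static int date_is_in_future(demo_ticks_t date, demo_ticks_t now)
{
	return (demo_sticks_t)(date - now) > 0;
}

int main(void)
{
	/* Near the wraparound point of the counter... */
	demo_ticks_t now = UINT64_MAX - 2;
	demo_ticks_t soon = now + 5;	/* wraps around to a small value */

	/* A plain "soon > now" says 0 here; the signed test says 1. */
	printf("naive: %d, wrap-safe: %d\n", soon > now, date_is_in_future(soon, now));
	return 0;
}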
/**
 * Migrate a timer.
 *
 * This call migrates a timer to another cpu. In order to avoid
 * pathological cases, it must be called from the CPU to which @a
 * timer is currently attached.
 *
 * @param timer The address of the timer object to be migrated.
 *
 * @param sched The address of the destination per-CPU scheduler
 * slot.
 *
 * @coretags{unrestricted, atomic-entry}
 */
void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
{				/* nklocked, IRQs off */
	struct xnclock *clock;
	xntimerq_t *q;

	if (sched == timer->sched)
		return;

	trace_cobalt_timer_migrate(timer, xnsched_cpu(sched));

	if (timer->status & XNTIMER_RUNNING) {
		xntimer_stop(timer);
		timer->sched = sched;
		clock = xntimer_clock(timer);
		q = xntimer_percpu_queue(timer);
		xntimer_enqueue(timer, q);
		if (xntimer_heading_p(timer))
			xnclock_remote_shot(clock, sched);
	} else
		timer->sched = sched;
}
static inline void xntimer_next_remote_shot(xnsched_t *sched)
{
	xnarch_send_timer_ipi(xnarch_cpumask_of_cpu(xnsched_cpu(sched)));
}
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach. However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account, start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
      unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
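/*
 * Illustration only -- a minimal sketch of an interrupt service
 * routine following the return-code protocol the dispatchers above
 * rely on: XN_ISR_HANDLED when the device actually raised the
 * interrupt, XN_ISR_NONE otherwise (so the dispatcher can count
 * unhandled occurrences and eventually disable the line), optionally
 * OR'ed with XN_ISR_PROPAGATE to chain the interrupt down to Linux or
 * XN_ISR_NOENABLE to keep the line masked. The device, its register
 * accessors and my_device_from_intr() are hypothetical names made up
 * for this example.
 */
static int my_device_isr(xnintr_t *intr)
{
	struct my_device *dev = my_device_from_intr(intr);	/* hypothetical lookup */

	if (!my_device_irq_pending(dev))	/* hypothetical status check */
		/* Not ours: let the dispatcher track the miss. */
		return XN_ISR_NONE;

	my_device_ack_irq(dev);			/* hypothetical acknowledge */

	if (my_device_wants_linux_side(dev))	/* hypothetical condition */
		/* Handled here, but also propagate it down the pipeline. */
		return XN_ISR_HANDLED | XN_ISR_PROPAGATE;

	return XN_ISR_HANDLED;
}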
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	int s = 0, counter = 0, ret, code;
	struct xnintr *intr, *end = NULL;
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		xnstat_exectime_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account);
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
			 irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched(); /* needed by the per-CPU stat slot below */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr, *end = NULL;
	int s = 0, counter = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		int ret, code;

		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
			 irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnstat_exectime_t *prev;
	xnticks_t start;
	xnintr_t *intr;
	int s = 0, ret;

	prev = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();

	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->status, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	if (--sched->inesting == 0) {
		__clrbits(sched->status, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);

	xnstat_exectime_switch(sched, prev);
}