void xntimer_adjust_all_aperiodic(xnsticks_t delta)
{
	unsigned cpu, nr_cpus;
	xnqueue_t adjq;

	initq(&adjq);
	delta = xnarch_ns_to_tsc(delta);

	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		xnsched_t *sched = xnpod_sched_slot(cpu);
		xntimerq_t *q = &sched->timerqueue;
		xnholder_t *adjholder;
		xntimerh_t *holder;
		xntimerq_it_t it;

		/* First pass: collect all wallclock-based (XNTIMER_REALTIME)
		 * timers on a side queue, so that the timer queue is not
		 * modified while we iterate over it. */
		for (holder = xntimerq_it_begin(q, &it); holder;
		     holder = xntimerq_it_next(q, &it, holder)) {
			xntimer_t *timer = aplink2timer(holder);
			if (testbits(timer->status, XNTIMER_REALTIME)) {
				inith(&timer->adjlink);
				appendq(&adjq, &timer->adjlink);
			}
		}

		/* Second pass: dequeue each collected timer and requeue it
		 * at its date shifted by the wallclock offset change. */
		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_aperiodic(timer);
			xntimer_adjust_aperiodic(timer, delta);
		}

		/* Reprogram the hardware timer for the new head date. */
		if (sched != xnpod_current_sched())
			xntimer_next_remote_shot(sched);
		else
			xntimer_next_local_shot(sched);
	}
}
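/*
 * Illustrative sketch (standalone userspace demo, invented values, not part
 * of the nucleus sources): why the XNTIMER_REALTIME timers above need their
 * queue date shifted when the wallclock offset changes. A wallclock reading
 * is the monotonic clock plus an offset, so when the offset grows by 'delta'
 * an absolute wallclock deadline maps to a monotonic date that is 'delta'
 * earlier.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t offset = 500;                   /* wallclock = monotonic + offset */
	int64_t deadline_wall = 1700;           /* absolute wallclock deadline */
	int64_t date = deadline_wall - offset;  /* monotonic date kept in the queue */
	int64_t delta = 100;                    /* wallclock offset is bumped by 100 */

	offset += delta;
	date -= delta;                          /* fixup applied by the adjust pass */

	printf("fires at wallclock %lld (expected %lld)\n",
	       (long long)(date + offset), (long long)deadline_wall);
	return 0;
}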
void xntimer_freeze(void)
{
	int nr_cpus, cpu;
	spl_t s;

	trace_mark(xn_nucleus, timer_freeze, MARK_NOARGS);

	xnlock_get_irqsave(&nklock, s);

	nr_cpus = xnarch_num_online_cpus();

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		xntimerq_t *timerq = &xnpod_sched_slot(cpu)->timerqueue;
		xntimerh_t *holder;

		while ((holder = xntimerq_head(timerq)) != NULL) {
			__setbits(aplink2timer(holder)->status, XNTIMER_DEQUEUED);
			xntimerq_remove(timerq, holder);
		}

		/* Dequeuing all timers from the master time base
		 * freezes all slave time bases the same way, so there
		 * is no need to handle anything more here. */
	}

	xnlock_put_irqrestore(&nklock, s);
}
int xnintr_query_next(int irq, xnintr_iterator_t *iterator, char *name_buf)
{
	xnintr_t *intr;
	xnticks_t last_switch;
	int cpu_no = iterator->cpu + 1;
	int err = 0;
	spl_t s;

	if (cpu_no == xnarch_num_online_cpus())
		cpu_no = 0;
	iterator->cpu = cpu_no;

	xnlock_get_irqsave(&intrlock, s);

	if (iterator->list_rev != xnintr_list_rev) {
		err = -EAGAIN;
		goto unlock_and_exit;
	}

	if (!iterator->prev) {
		if (irq == XNARCH_TIMER_IRQ)
			intr = &nkclock;
		else
			intr = xnintr_shirq_first(irq);
	} else
		intr = xnintr_shirq_next(iterator->prev);

	if (!intr) {
		cpu_no = -1;
		iterator->prev = NULL;
		err = -ENODEV;
		goto unlock_and_exit;
	}

	snprintf(name_buf, XNOBJECT_NAME_LEN, "IRQ%d: %s", irq, intr->name);

	iterator->hits = xnstat_counter_get(&intr->stat[cpu_no].hits);

	last_switch = xnpod_sched_slot(cpu_no)->last_account_switch;

	iterator->exectime_period = intr->stat[cpu_no].account.total;
	iterator->account_period = last_switch - intr->stat[cpu_no].account.start;

	intr->stat[cpu_no].sum.total += iterator->exectime_period;
	iterator->exectime_total = intr->stat[cpu_no].sum.total;

	intr->stat[cpu_no].account.total = 0;
	intr->stat[cpu_no].account.start = last_switch;

	/* Proceed to next entry in shared IRQ chain when all CPUs
	 * have been visited for this one. */
	if (cpu_no + 1 == xnarch_num_online_cpus())
		iterator->prev = intr;

unlock_and_exit:

	xnlock_put_irqrestore(&intrlock, s);

	return err;
}
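/*
 * Illustrative sketch (standalone, hypothetical sizes, not nucleus code):
 * the traversal order xnintr_query_next() implements above when called
 * repeatedly - advance the CPU index first, and only step to the next
 * handler chained on a shared IRQ line once all online CPUs were visited.
 */
#include <stdio.h>

#define NR_CPUS   2
#define NR_SHARED 3	/* handlers chained on the same IRQ line */

int main(void)
{
	int entry = 0, cpu = -1;

	while (entry < NR_SHARED) {
		cpu = (cpu + 1) % NR_CPUS;
		printf("handler %d, cpu %d\n", entry, cpu);
		if (cpu + 1 == NR_CPUS)	/* last CPU visited: move on */
			entry++;
	}
	return 0;
}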
static inline void xnintr_sync_stat_references(xnintr_t *intr)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct xnsched *sched = xnpod_sched_slot(cpu);

		/* Spin until any dangling reference to this interrupt's
		 * statistics slot has gone away. */
		while (sched->current_account == &intr->stat[cpu].account)
			cpu_relax();
	}
}
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}

	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
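/*
 * Illustrative sketch (standalone, assumed structure layout, not nucleus
 * code): the save/switch/restore pattern that the xnstat_exectime_switch()
 * calls above rely on, so that time spent in the clock tick is charged to
 * the clock IRQ's statistics slot instead of the preempted thread.
 */
#include <stdio.h>
#include <stdint.h>

struct account { uint64_t total; uint64_t start; };

struct cpu_state {
	struct account *current;	/* slot elapsed time is charged to */
	uint64_t now;			/* fake timestamp counter */
};

/* Charge the elapsed time to the current slot, then switch slots;
 * return the previous slot so the caller can restore it afterwards. */
static struct account *exectime_switch(struct cpu_state *cpu, struct account *next)
{
	struct account *prev = cpu->current;

	prev->total += cpu->now - prev->start;
	next->start = cpu->now;
	cpu->current = next;
	return prev;
}

int main(void)
{
	struct account thread = { 0, 0 }, irq = { 0, 0 };
	struct cpu_state cpu = { &thread, 0 };
	struct account *prev;

	cpu.now = 100;
	prev = exectime_switch(&cpu, &irq);	/* IRQ entry: charge the thread */
	cpu.now = 130;
	exectime_switch(&cpu, prev);		/* IRQ exit: charge the IRQ slot */

	printf("thread=%llu irq=%llu\n",
	       (unsigned long long)thread.total, (unsigned long long)irq.total);
	return 0;
}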
void xntslave_init(xntslave_t *slave)
{
	int nr_cpus, cpu, n;

	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {

		struct percpu_cascade *pc = &slave->cascade[cpu];

		for (n = 0; n < XNTIMER_WHEELSIZE; n++)
			xntlist_init(&pc->wheel[n]);

		/* Slave periodic time bases are cascaded from the
		 * master aperiodic time base. */
		xntimer_init(&pc->timer, &nktbase, xntimer_tick_periodic);
		xntimer_set_name(&pc->timer, slave->base.name);
		xntimer_set_priority(&pc->timer, XNTIMER_HIPRIO);
		xntimer_set_sched(&pc->timer, xnpod_sched_slot(cpu));
	}
}
void xntimer_tick_periodic_inner(xntslave_t *slave)
{
	xnsched_t *sched = xnpod_current_sched();
	xntbase_t *base = &slave->base;
	xntlholder_t *holder;
	xnqueue_t *timerq;
	xntimer_t *timer;

	/*
	 * Update the periodic clock, keeping it strictly monotonic
	 * (this routine runs on every CPU, but only CPU
	 * XNTIMER_KEEPER_ID should advance the jiffies count).
	 */
	if (sched == xnpod_sched_slot(XNTIMER_KEEPER_ID))
		++base->jiffies;

	timerq = &slave->cascade[xnsched_cpu(sched)].wheel[base->jiffies & XNTIMER_WHEELMASK];

	while ((holder = xntlist_head(timerq)) != NULL) {
		timer = plink2timer(holder);

		if ((xnsticks_t)(xntlholder_date(&timer->plink) - base->jiffies) > 0)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_periodic(timer);
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			continue;

		__setbits(timer->status, XNTIMER_FIRED);
		xntlholder_date(&timer->plink) = base->jiffies + timer->interval;
		xntimer_enqueue_periodic(timer);
	}

	xnsched_tick(sched->curr, base); /* Do time-slicing if required. */
}
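/*
 * Illustrative sketch (standalone, assumed wheel size and sample values,
 * not nucleus code): how the hashed timer wheel above selects a bucket,
 * and how the signed-difference test tolerates wrap-around of the jiffies
 * counter, exactly like the (xnsticks_t) cast in the tick handler.
 */
#include <stdio.h>
#include <stdint.h>

#define WHEELSIZE 64			/* must be a power of two */
#define WHEELMASK (WHEELSIZE - 1)

/* A timer due at 'date' is pending once 'jiffies' has reached it; casting
 * the unsigned difference to a signed type keeps the comparison correct
 * even after the counter has wrapped. */
static int timer_is_pending(uint64_t date, uint64_t jiffies)
{
	return (int64_t)(date - jiffies) <= 0;
}

int main(void)
{
	uint64_t jiffies = UINT64_MAX - 1;	/* about to wrap */
	uint64_t date = jiffies + 3;		/* wraps past zero */

	printf("bucket=%llu pending=%d\n",
	       (unsigned long long)(date & WHEELMASK),
	       timer_is_pending(date, jiffies));
	return 0;
}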