/*
 * Arm (or re-arm) a timer on the aperiodic (oneshot) time base.
 *
 * @timer: the timer to arm.
 * @value: the timeout value, in nanoseconds. Interpreted according to
 *	   @mode: a delay for XN_RELATIVE, an absolute monotonic date for
 *	   XN_ABSOLUTE, or an absolute wallclock date for XN_REALTIME.
 * @interval: reload period in nanoseconds, or XN_INFINITE for a
 *	      one-shot timer.
 * @mode: timeout interpretation mode (XN_RELATIVE, XN_ABSOLUTE or
 *	  XN_REALTIME).
 *
 * Returns 0 on success, or -ETIMEDOUT if the requested date is already
 * in the past (negative relative delay, or absolute date not strictly
 * in the future).
 *
 * NOTE(review): caller is presumably expected to hold the nucleus lock /
 * run with interrupts off, as usual for nucleus timer services — confirm
 * against call sites.
 */
int xntimer_start_aperiodic(xntimer_t *timer,
			    xnticks_t value, xnticks_t interval,
			    xntmode_t mode)
{
	xnticks_t date, now;

	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u", timer,
		   xntimer_base(timer)->name, value, interval, mode);

	/* If the timer is currently queued, remove it first: we are about
	   to recompute its expiry date and reinsert it. */
	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_aperiodic(timer);

	now = xnarch_get_cpu_tsc();

	/* Reset mode/state bits; they are recomputed below from @mode and
	   @interval. */
	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);

	switch (mode) {
	case XN_RELATIVE:
		/* A negative delay (interpreted as signed) is already
		   elapsed. */
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnarch_ns_to_tsc(value) + now;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		/* Convert the wallclock date into a monotonic one by
		   subtracting the current wallclock offset, then share the
		   absolute-date path below. */
		value -= nktbase.wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnarch_ns_to_tsc(value);
		/* Signed TSC difference handles wraparound; a non-positive
		   delta means the date is not in the future. */
		if ((xnsticks_t)(date - now) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntimerh_date(&timer->aplink) = date;

	timer->interval = XN_INFINITE;
	if (interval != XN_INFINITE) {
		timer->interval = xnarch_ns_to_tsc(interval);
		/* pexpect tracks the ideal (drift-free) expiry date used
		   for overrun accounting. */
		timer->pexpect = date;
		__setbits(timer->status, XNTIMER_PERIODIC);
	}

	xntimer_enqueue_aperiodic(timer);

	/* Only reprogram the hardware if the new timer leads the queue;
	   a timer owned by a remote CPU requires an IPI-based shot. */
	if (xntimer_heading_p(timer)) {
		if (xntimer_sched(timer) != xnpod_current_sched())
			xntimer_next_remote_shot(xntimer_sched(timer));
		else
			xntimer_next_local_shot(xntimer_sched(timer));
	}

	return 0;
}
/*
 * Shift all realtime (wallclock-based) timers by @delta nanoseconds,
 * on every online CPU. Called when the wallclock offset changes, so
 * that XN_REALTIME timers keep firing at the requested wallclock dates.
 *
 * @delta: signed adjustment in nanoseconds (converted to TSC units
 *	   before being applied).
 *
 * Timers cannot be dequeued while the timer queue is being iterated,
 * so this works in two phases per CPU: first collect the affected
 * timers on a private list (adjq, linked through timer->adjlink), then
 * drain that list, dequeuing and adjusting each timer in turn.
 */
void xntimer_adjust_all_aperiodic(xnsticks_t delta)
{
	unsigned cpu, nr_cpus;
	xnqueue_t adjq;

	initq(&adjq);
	delta = xnarch_ns_to_tsc(delta);

	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		xnsched_t *sched = xnpod_sched_slot(cpu);
		xntimerq_t *q = &sched->timerqueue;
		xnholder_t *adjholder;
		xntimerh_t *holder;
		xntimerq_it_t it;

		/* Phase 1: collect all realtime timers of this CPU. */
		for (holder = xntimerq_it_begin(q, &it); holder;
		     holder = xntimerq_it_next(q, &it, holder)) {
			xntimer_t *timer = aplink2timer(holder);
			if (testbits(timer->status, XNTIMER_REALTIME)) {
				inith(&timer->adjlink);
				appendq(&adjq, &timer->adjlink);
			}
		}

		/* Phase 2: dequeue and shift each collected timer.
		   xntimer_adjust_aperiodic is expected to re-enqueue it —
		   confirm against its definition. */
		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_aperiodic(timer);
			xntimer_adjust_aperiodic(timer, delta);
		}

		/* The head of the queue may have changed: reprogram the
		   next shot, remotely if this is not the current CPU. */
		if (sched != xnpod_current_sched())
			xntimer_next_remote_shot(sched);
		else
			xntimer_next_local_shot(sched);
	}
}
/*
 * Aperiodic tick handler: runs on the current CPU from the timer
 * interrupt, fires every timer whose date has elapsed (within the
 * intrinsic latency window), re-enqueues periodic ones, then programs
 * the hardware for the next shot.
 */
void xntimer_tick_aperiodic(void)
{
	xnsched_t *sched = xnpod_current_sched();
	xntimerq_t *timerq = &sched->timerqueue;
	xnticks_t now, interval;
	xntimerh_t *holder;
	xntimer_t *timer;
	xnsticks_t delta;

	/*
	 * Optimisation: any local timer reprogramming triggered by
	 * invoked timer handlers can wait until we leave the tick
	 * handler. Use this status flag as hint to
	 * xntimer_start_aperiodic.
	 */
	__setbits(sched->status, XNINTCK);

	now = xnarch_get_cpu_tsc();
	while ((holder = xntimerq_head(timerq)) != NULL) {
		timer = aplink2timer(holder);
		/*
		 * If the delay to the next shot is greater than the
		 * intrinsic latency value, we may stop scanning the
		 * timer queue there, since timeout dates are ordered
		 * by increasing values.
		 */
		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
		if (delta > (xnsticks_t)(nklatency + nktimerlat))
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_aperiodic(timer);
		xnstat_counter_inc(&timer->fired);

		if (likely(timer != &sched->htimer)) {
			/* Ordinary timer: fire it unless its timebase is
			   locked (debugger single-stepping) and it is not
			   flagged as lock-immune. */
			if (likely(!testbits(nktbase.status, XNTBLCK)
				   || testbits(timer->status, XNTIMER_NOBLCK))) {
				timer->handler(timer);
				/* The handler may have run for a while;
				   refresh `now' before requeuing. */
				now = xnarch_get_cpu_tsc();
				/*
				 * If the elapsed timer has no reload
				 * value, or was re-enqueued or killed
				 * by the timeout handler: do not
				 * re-enqueue it for the next shot.
				 */
				if (!xntimer_reload_p(timer))
					continue;
				__setbits(timer->status, XNTIMER_FIRED);
			} else if (likely(!testbits(timer->status,
						    XNTIMER_PERIODIC))) {
				/*
				 * Make the blocked timer elapse again
				 * at a reasonably close date in the
				 * future, waiting for the timebase to
				 * be unlocked at some point. Timers
				 * are blocked when single-stepping
				 * into an application using a
				 * debugger, so it is fine to wait for
				 * 250 ms for the user to continue
				 * program execution.
				 */
				interval = xnarch_ns_to_tsc(250000000ULL);
				goto requeue;
			}
			/* Blocked periodic timer: falls through and is
			   requeued at its normal period below. */
		} else {
			/*
			 * By postponing the propagation of the
			 * low-priority host tick to the interrupt
			 * epilogue (see xnintr_irq_handler()), we
			 * save some I-cache, which translates into
			 * precious microsecs on low-end hw.
			 */
			__setbits(sched->lflags, XNHTICK);
			__clrbits(sched->lflags, XNHDEFER);
			if (!testbits(timer->status, XNTIMER_PERIODIC))
				continue;
		}

		interval = timer->interval;
	  requeue:
		/* Advance the date by whole periods until it lands far
		   enough in the future to be programmable, skipping any
		   shots already missed. */
		do {
			xntimerh_date(&timer->aplink) += interval;
		} while (xntimerh_date(&timer->aplink) < now + nklatency);
		xntimer_enqueue_aperiodic(timer);
	}

	__clrbits(sched->status, XNINTCK);

	xntimer_next_local_shot(sched);
}