/*
 * Requeue @timer on its (new) scheduler slot's aperiodic queue. When
 * the timer lands at the head of that queue, ask the remote CPU
 * owning the slot to reprogram its next shot accordingly.
 */
static void xntimer_move_aperiodic(xntimer_t *timer)
{
	xntimer_enqueue_aperiodic(timer);

	if (!xntimer_heading_p(timer))
		return;

	xntimer_next_remote_shot(timer->sched);
}
/*
 * Arm @timer in aperiodic (one-shot clock) mode.
 *
 * @param timer The timer to arm. If currently queued, it is dequeued
 * first, i.e. restarting an armed timer is allowed.
 *
 * @param value The expiry date, in nanoseconds. Interpreted according
 * to @a mode: a delay from now (XN_RELATIVE), an absolute monotonic
 * date (XN_ABSOLUTE), or an absolute wallclock date (XN_REALTIME).
 *
 * @param interval Reload value in nanoseconds, or XN_INFINITE for a
 * one-shot timer.
 *
 * @param mode XN_RELATIVE, XN_ABSOLUTE or XN_REALTIME.
 *
 * @return 0 on success, -ETIMEDOUT when the requested date is already
 * in the past (negative relative value, or absolute date <= now).
 */
int xntimer_start_aperiodic(xntimer_t *timer,
			    xnticks_t value, xnticks_t interval,
			    xntmode_t mode)
{
	xnticks_t date, now;

	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u",
		   timer, xntimer_base(timer)->name, value, interval, mode);

	/* Unlink first if already armed, so we can re-queue at the new date. */
	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_aperiodic(timer);

	now = xnarch_get_cpu_tsc();

	/* Start from a clean slate; the relevant bits are re-set below. */
	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);

	switch (mode) {
	case XN_RELATIVE:
		/* A negative delay already elapsed. */
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnarch_ns_to_tsc(value) + now;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		/* Convert the wallclock date to the monotonic timeline. */
		value -= nktbase.wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnarch_ns_to_tsc(value);
		/* Signed comparison copes with TSC wrap. */
		if ((xnsticks_t)(date - now) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntimerh_date(&timer->aplink) = date;
	timer->interval = XN_INFINITE;

	if (interval != XN_INFINITE) {
		timer->interval = xnarch_ns_to_tsc(interval);
		/* pexpect tracks the next "expected" expiry for overrun accounting. */
		timer->pexpect = date;
		__setbits(timer->status, XNTIMER_PERIODIC);
	}

	xntimer_enqueue_aperiodic(timer);

	/*
	 * Only the queue head requires hardware reprogramming; pick
	 * the local or remote path depending on which CPU owns the
	 * timer's scheduler slot.
	 */
	if (xntimer_heading_p(timer)) {
		if (xntimer_sched(timer) != xnpod_current_sched())
			xntimer_next_remote_shot(xntimer_sched(timer));
		else
			xntimer_next_local_shot(xntimer_sched(timer));
	}

	return 0;
}
/*
 * Disarm @timer, removing it from the aperiodic queue. If the timer
 * was heading the queue and belongs to the current CPU, the next
 * local shot is reprogrammed; a timer owned by another CPU is simply
 * left to tick there.
 */
void xntimer_stop_aperiodic(xntimer_t *timer)
{
	xnsched_t *sched;
	int was_heading;

	trace_mark(xn_nucleus, timer_stop, "timer %p", timer);

	was_heading = xntimer_heading_p(timer);
	xntimer_dequeue_aperiodic(timer);

	if (!was_heading)
		return;

	sched = xntimer_sched(timer);
	if (sched == xnpod_current_sched())
		xntimer_next_local_shot(sched);
}
/** * Migrate a timer. * * This call migrates a timer to another cpu. In order to avoid * pathological cases, it must be called from the CPU to which @a * timer is currently attached. * * @param timer The address of the timer object to be migrated. * * @param sched The address of the destination per-CPU scheduler * slot. * * @coretags{unrestricted, atomic-entry} */ void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched) { /* nklocked, IRQs off */ struct xnclock *clock; xntimerq_t *q; if (sched == timer->sched) return; trace_cobalt_timer_migrate(timer, xnsched_cpu(sched)); if (timer->status & XNTIMER_RUNNING) { xntimer_stop(timer); timer->sched = sched; clock = xntimer_clock(timer); q = xntimer_percpu_queue(timer); xntimer_enqueue(timer, q); if (xntimer_heading_p(timer)) xnclock_remote_shot(clock, sched); } else timer->sched = sched; }
/** * @fn int xntimer_stop(struct xntimer *timer) * * @brief Disarm a timer. * * This service deactivates a timer previously armed using * xntimer_start(). Once disarmed, the timer can be subsequently * re-armed using the latter service. * * @param timer The address of a valid timer descriptor. * * @coretags{unrestricted, atomic-entry} */ void __xntimer_stop(struct xntimer *timer) { struct xnclock *clock = xntimer_clock(timer); xntimerq_t *q = xntimer_percpu_queue(timer); struct xnsched *sched; int heading = 1; trace_cobalt_timer_stop(timer); if ((timer->status & XNTIMER_DEQUEUED) == 0) { heading = xntimer_heading_p(timer); xntimer_dequeue(timer, q); } timer->status &= ~(XNTIMER_FIRED|XNTIMER_RUNNING); sched = xntimer_sched(timer); /* * If we removed the heading timer, reprogram the next shot if * any. If the timer was running on another CPU, let it tick. */ if (heading && sched == xnsched_current()) xnclock_program_shot(clock, sched); }
/**
 * Arm a timer.
 *
 * Activates a timer so that the associated timeout handler will be
 * fired after each expiration time. A timer can be either periodic or
 * one-shot, depending on the reload value passed to this routine. The
 * given timer must have been previously initialized.
 *
 * A timer is attached to the clock specified in xntimer_init().
 *
 * @param timer The address of a valid timer descriptor.
 *
 * @param value The date of the initial timer shot, expressed in
 * nanoseconds.
 *
 * @param interval The reload value of the timer. It is a periodic
 * interval value to be used for reprogramming the next timer shot,
 * expressed in nanoseconds. If @a interval is equal to XN_INFINITE,
 * the timer will not be reloaded after it has expired.
 *
 * @param mode The timer mode. It can be XN_RELATIVE if @a value shall
 * be interpreted as a relative date, XN_ABSOLUTE for an absolute date
 * based on the monotonic clock of the related time base (as returned
 * my xnclock_read_monotonic()), or XN_REALTIME if the absolute date
 * is based on the adjustable real-time date for the relevant clock
 * (obtained from xnclock_read_realtime()).
 *
 * @return 0 is returned upon success, or -ETIMEDOUT if an absolute
 * date in the past has been given. In such an event, the timer is
 * nevertheless armed for the next shot in the timeline if @a interval
 * is different from XN_INFINITE.
 *
 * @coretags{unrestricted, atomic-entry}
 */
int xntimer_start(struct xntimer *timer,
		  xnticks_t value, xnticks_t interval,
		  xntmode_t mode)
{
	struct xnclock *clock = xntimer_clock(timer);
	xntimerq_t *q = xntimer_percpu_queue(timer);
	xnticks_t date, now, delay, period;
	unsigned long gravity;
	struct xnsched *sched;
	int ret = 0;

	trace_cobalt_timer_start(timer, value, interval, mode);

	/* Re-arming an already queued timer is allowed: unlink it first. */
	if ((timer->status & XNTIMER_DEQUEUED) == 0)
		xntimer_dequeue(timer, q);

	now = xnclock_read_raw(clock);

	/* Clear mode bits; they are re-set below as appropriate. */
	timer->status &= ~(XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		/* A negative delay has already elapsed. */
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnclock_ns_to_ticks(clock, value) + now;
		break;
	case XN_REALTIME:
		timer->status |= XNTIMER_REALTIME;
		/* Rebase the wallclock date onto the monotonic timeline. */
		value -= xnclock_get_offset(clock);
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnclock_ns_to_ticks(clock, value);
		/* Signed difference so clock wrap is handled correctly. */
		if ((xnsticks_t)(date - now) <= 0) {
			ret = -ETIMEDOUT;
			if (interval == XN_INFINITE)
				return ret;
			/*
			 * We are late on arrival for the first
			 * delivery, wait for the next shot on the
			 * periodic time line.
			 */
			delay = now - date;
			period = xnclock_ns_to_ticks(clock, interval);
			/* Skip the whole number of periods we missed, plus one. */
			date += period * (xnarch_div64(delay, period) + 1);
		}
		break;
	}

	/*
	 * To cope with the basic system latency, we apply a clock
	 * gravity value, which is the amount of time expressed in
	 * clock ticks by which we should anticipate the shot for any
	 * outstanding timer. The gravity value varies with the type
	 * of context the timer wakes up, i.e. irq handler, kernel or
	 * user thread.
	 */
	gravity = xntimer_gravity(timer);
	xntimerh_date(&timer->aplink) = date - gravity;
	/*
	 * If anticipating by the full gravity would place the shot at
	 * or before now, soften the anticipation by half a gravity
	 * instead of firing immediately.
	 */
	if (now >= xntimerh_date(&timer->aplink))
		xntimerh_date(&timer->aplink) += gravity / 2;

	timer->interval_ns = XN_INFINITE;
	timer->interval = XN_INFINITE;
	if (interval != XN_INFINITE) {
		/* Keep both ns and tick representations of the period. */
		timer->interval_ns = interval;
		timer->interval = xnclock_ns_to_ticks(clock, interval);
		/* Reset periodic bookkeeping from the (possibly resynced) date. */
		timer->periodic_ticks = 0;
		timer->start_date = date;
		timer->pexpect_ticks = 0;
		timer->status |= XNTIMER_PERIODIC;
	}

	xntimer_enqueue(timer, q);
	timer->status |= XNTIMER_RUNNING;
	/*
	 * Only a new queue head requires reprogramming the hardware;
	 * route the request to the CPU owning the timer's slot.
	 */
	if (xntimer_heading_p(timer)) {
		sched = xntimer_sched(timer);
		if (sched != xnsched_current())
			xnclock_remote_shot(clock, sched);
		else
			xnclock_program_shot(clock, sched);
	}

	return ret;
}