void xnclock_core_local_shot(struct xnsched *sched)
{
	struct xntimerdata *tmd;
	struct xntimer *timer;
	xnsticks_t delay;
	xntimerq_it_t it;
	xntimerh_t *h;

	/*
	 * Do not reprogram locally when inside the tick handler -
	 * this will be done on exit anyway. Also exit if there is no
	 * pending timer.
	 */
	if (sched->status & XNINTCK)
		return;

	tmd = xnclock_this_timerdata(&nkclock);
	h = xntimerq_it_begin(&tmd->q, &it);
	if (h == NULL)
		return;

	/*
	 * Here we try to defer the host tick heading the timer queue,
	 * so that it does not preempt a real-time activity uselessly,
	 * in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that a real-time thread is about to resume, so we
	 * want to move the host tick out of the way until the host
	 * kernel resumes, unless there are no other outstanding
	 * timers.
	 *
	 * 2) the current thread is running in primary mode, in which
	 * case we may also defer the host tick until the host kernel
	 * resumes.
	 *
	 * The host tick deferral is cleared whenever Xenomai is about
	 * to yield control to the host kernel (see __xnsched_run()),
	 * or a timer with an earlier timeout date is scheduled,
	 * whichever comes first.
	 */
	sched->lflags &= ~XNHDEFER;
	timer = container_of(h, struct xntimer, aplink);
	if (unlikely(timer == &sched->htimer)) {
		if (xnsched_resched_p(sched) ||
		    !xnthread_test_state(sched->curr, XNROOT)) {
			h = xntimerq_it_next(&tmd->q, &it, h);
			if (h) {
				sched->lflags |= XNHDEFER;
				timer = container_of(h, struct xntimer, aplink);
			}
		}
	}

	/*
	 * Arm the device for the (possibly deferred) head timer,
	 * clamping the delay to the programmable range. This tail
	 * follows xntimer_next_local_shot() below; the helper names
	 * xnclock_core_read_raw(), xntrace_tick() and ipipe_timer_set()
	 * are assumed equivalents in this version.
	 */
	delay = xntimerh_date(&timer->aplink) -
		(xnclock_core_read_raw() + nklatency);
	if (delay < 0)
		delay = 0;
	else if (delay > ULONG_MAX)
		delay = ULONG_MAX;

	xntrace_tick((unsigned)delay);

	ipipe_timer_set(delay);
}
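/*
 * Illustrative sketch only: the deferral test above can be read as a
 * standalone predicate. Both cases from the comment collapse into one
 * check - defer the host tick when a rescheduling is pending for this
 * CPU, or when the current thread is not the root thread (i.e. it runs
 * in primary mode). host_tick_deferrable() is a hypothetical name, not
 * part of the Cobalt API.
 */
static inline int host_tick_deferrable(struct xnsched *sched,
				       struct xntimer *timer)
{
	if (timer != &sched->htimer)
		return 0;	/* Head timer is not the host tick. */

	return xnsched_resched_p(sched) ||
		!xnthread_test_state(sched->curr, XNROOT);
}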
/**
 * @fn int xnsched_run(void)
 * @brief The rescheduling procedure.
 *
 * This is the central rescheduling routine which should be called to
 * validate and apply changes which have previously been made to the
 * nucleus scheduling state, such as suspending, resuming or changing
 * the priority of threads. This call performs context switches as
 * needed. xnsched_run() schedules out the current thread if:
 *
 * - the current thread is about to block.
 * - a runnable thread from a higher priority scheduling class is
 * waiting for the CPU.
 * - the current thread does not lead the runnable threads from its
 * own scheduling class (i.e. round-robin).
 *
 * The Cobalt core implements a lazy rescheduling scheme, so most of
 * the services affecting the thread state MUST be followed by a call
 * to the rescheduling procedure for the new scheduling state to be
 * applied.
 *
 * In other words, multiple changes to the scheduler state can be done
 * in a row, waking threads up, blocking others, without being
 * immediately translated into the corresponding context switches.
 * When all changes have been applied, xnsched_run() should be called
 * to consider those changes and possibly switch context.
 *
 * As a notable exception to the previous principle however, every
 * action which ends up suspending the current thread begets an
 * implicit call to the rescheduling procedure on behalf of the
 * blocking service.
 *
 * Typically, self-suspension or sleeping on a synchronization object
 * automatically leads to a call to the rescheduling procedure,
 * therefore the caller does not need to explicitly issue
 * xnsched_run() after such operations.
 *
 * The rescheduling procedure has no effect if it is called on behalf
 * of an interrupt service routine. Any outstanding scheduler lock
 * held by the outgoing thread will be restored when the thread is
 * scheduled back in.
 *
 * Calling this procedure with no applicable context switch pending is
 * harmless and simply has no effect.
 *
 * @return Non-zero is returned if a context switch actually happened,
 * otherwise zero if the current thread was left running.
 *
 * @coretags{unrestricted}
 */
static inline int test_resched(struct xnsched *sched)
{
	int resched = xnsched_resched_p(sched);

#ifdef CONFIG_SMP
	/* Send resched IPI to remote CPU(s). */
	if (unlikely(!cpus_empty(sched->resched))) {
		smp_mb();
		ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
		cpus_clear(sched->resched);
	}
#endif

	sched->status &= ~XNRESCHED;

	return resched;
}
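/*
 * Usage sketch for the lazy rescheduling scheme documented above:
 * several scheduler state changes are batched under nklock, then a
 * single xnsched_run() applies them. wake_producer_and_consumer() is
 * a hypothetical caller, and the xnthread_resume() signature and
 * XNSUSP mask are assumptions about the Cobalt thread API.
 */
static void wake_producer_and_consumer(struct xnthread *producer,
				       struct xnthread *consumer)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Both wakeups only mark the scheduler state as changed... */
	xnthread_resume(producer, XNSUSP);
	xnthread_resume(consumer, XNSUSP);

	xnlock_put_irqrestore(&nklock, s);

	/* ...the context switch, if any, only happens here. */
	xnsched_run();
}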
void xntimer_next_local_shot(xnsched_t *sched)
{
	struct xntimer *timer;
	xnsticks_t delay;
	xntimerq_it_t it;
	xntimerh_t *h;

	/*
	 * Do not reprogram locally when inside the tick handler -
	 * this will be done on exit anyway. Also exit if there is no
	 * pending timer.
	 */
	if (testbits(sched->status, XNINTCK))
		return;

	h = xntimerq_it_begin(&sched->timerqueue, &it);
	if (h == NULL)
		return;

	/*
	 * Here we try to defer the host tick heading the timer queue,
	 * so that it does not preempt a real-time activity uselessly,
	 * in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that a real-time thread is about to resume, so we
	 * want to move the host tick out of the way until the host
	 * kernel resumes, unless there are no other outstanding
	 * timers.
	 *
	 * 2) the current thread is running in primary mode, in which
	 * case we may also defer the host tick until the host kernel
	 * resumes.
	 *
	 * The host tick deferral is cleared whenever Xenomai is about
	 * to yield control to the host kernel (see
	 * __xnpod_schedule()), or a timer with an earlier timeout
	 * date is scheduled, whichever comes first.
	 */
	__clrbits(sched->lflags, XNHDEFER);
	timer = aplink2timer(h);
	if (unlikely(timer == &sched->htimer)) {
		if (xnsched_resched_p(sched) ||
		    !xnthread_test_state(sched->curr, XNROOT)) {
			h = xntimerq_it_next(&sched->timerqueue, &it, h);
			if (h) {
				__setbits(sched->lflags, XNHDEFER);
				timer = aplink2timer(h);
			}
		}
	}

	delay = xntimerh_date(&timer->aplink) -
		(xnarch_get_cpu_tsc() + nklatency);
	if (delay < 0)
		delay = 0;
	else if (delay > ULONG_MAX)
		delay = ULONG_MAX;

	xnarch_trace_tick((unsigned)delay);

	xnarch_program_timer_shot(delay);
}
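/*
 * Sketch of the clamping step shared by both variants above: the
 * signed tick delta to the next shot, corrected for the programming
 * latency, is clamped to the range the timer hardware accepts before
 * being programmed. clamp_shot_delay() is a hypothetical helper, not
 * part of either code base.
 */
static inline unsigned long clamp_shot_delay(xnsticks_t delay)
{
	if (delay < 0)
		return 0;		/* Deadline already passed: fire ASAP. */
	if (delay > ULONG_MAX)
		return ULONG_MAX;	/* Saturate to the device range. */

	return (unsigned long)delay;
}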