/**
 * @fn int xnsched_run(void)
 * @brief The rescheduling procedure.
 *
 * This is the central rescheduling routine which should be called to
 * validate and apply changes which have previously been made to the
 * nucleus scheduling state, such as suspending, resuming or changing
 * the priority of threads. This call performs context switches as
 * needed. xnsched_run() schedules out the current thread if:
 *
 * - the current thread is about to block.
 * - a runnable thread from a higher priority scheduling class is
 * waiting for the CPU.
 * - the current thread does not lead the runnable threads from its
 * own scheduling class (i.e. round-robin).
 *
 * The Cobalt core implements a lazy rescheduling scheme, so most of
 * the services affecting the thread state MUST be followed by a call
 * to the rescheduling procedure for the new scheduling state to be
 * applied.
 *
 * In other words, multiple changes to the scheduler state can be made
 * in a row, waking threads up, blocking others, without being
 * immediately translated into the corresponding context switches.
 * When all changes have been applied, xnsched_run() should be called
 * to take them into account, possibly switching contexts.
 *
 * As a notable exception to this principle, however, every action
 * which ends up suspending the current thread triggers an implicit
 * call to the rescheduling procedure on behalf of the blocking
 * service.
 *
 * Typically, self-suspension or sleeping on a synchronization object
 * automatically leads to a call to the rescheduling procedure,
 * therefore the caller does not need to issue xnsched_run()
 * explicitly after such operations.
 *
 * The rescheduling procedure always leads to a null effect if it is
 * called on behalf of an interrupt service routine. Any outstanding
 * scheduler lock held by the outgoing thread will be restored when
 * the thread is scheduled back in.
 *
 * Calling this procedure with no applicable context switch pending is
 * harmless and simply leads to a null effect.
 *
 * @return Non-zero is returned if a context switch actually happened,
 * otherwise zero if the current thread was left running.
 *
 * @coretags{unrestricted}
 */
static inline int test_resched(struct xnsched *sched)
{
	int resched = xnsched_resched_p(sched);

#ifdef CONFIG_SMP
	/* Send resched IPI to remote CPU(s). */
	if (unlikely(!cpus_empty(sched->resched))) {
		smp_mb();
		ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
		cpus_clear(sched->resched);
	}
#endif
	/* Clear the rescheduling bit for this CPU. */
	sched->status &= ~XNRESCHED;

	return resched;
}
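/*
 * A minimal sketch of the lazy rescheduling pattern documented
 * above: batch several scheduler state changes under nklock, then
 * apply them with a single call to xnsched_run(). This assumes the
 * usual Cobalt helpers (xnlock_get_irqsave(), xnthread_resume());
 * wakeup_pair() itself is a hypothetical illustration, not part of
 * the core API.
 */
static void wakeup_pair(struct xnthread *a, struct xnthread *b)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Two state changes in a row, no context switch yet. */
	xnthread_resume(a, XNDELAY);
	xnthread_resume(b, XNDELAY);

	/*
	 * Apply both changes at once; any context switch happens
	 * here, not in the resume calls above.
	 */
	xnsched_run();

	xnlock_put_irqrestore(&nklock, s);
}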
/*
 * Grab the global inter-CPU lock: hard-disable interrupts locally
 * and hold every other online CPU in a synchronization loop
 * (optionally running syncfn) until the matching
 * ipipe_critical_exit() call releases them.
 */
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP
	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		/*
		 * Contend for the critical lock, backing off for a
		 * CPU-dependent number of iterations with interrupts
		 * enabled, so that contending CPUs retry at staggered
		 * times.
		 */
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();
			do
				cpu_relax();
			while (++n < cpu);
			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);

		loops = IPIPE_CRITICAL_TIMEOUT;
		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI to avoid
			 * running __ipipe_cpu_sync prematurely. This
			 * usually resolves the cause of the deadlock
			 * as well.
			 */
			while (!cpus_equal(cpu_online_map,
					   __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);
#endif	/* CONFIG_SMP */

	return flags;
}
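/*
 * A minimal usage sketch of the critical section protocol above,
 * assuming the standard pairing with ipipe_critical_exit().
 * patch_code() stands for whatever work must not race with the
 * other CPUs; it is a hypothetical name, not part of the I-pipe API.
 */
static void update_under_global_lock(void)
{
	unsigned long flags;

	/* Stall every other online CPU in the sync loop. */
	flags = ipipe_critical_enter(NULL);

	patch_code();	/* runs while the other CPUs are held */

	/* Release the other CPUs and restore the IRQ state. */
	ipipe_critical_exit(flags);
}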