/* Must be called with nklock locked, interrupts off. thread must be * runnable. */ void xnsched_migrate(struct xnthread *thread, struct xnsched *sched) { struct xnsched_class *sched_class = thread->sched_class; if (xnthread_test_state(thread, XNREADY)) { xnsched_dequeue(thread); xnthread_clear_state(thread, XNREADY); } if (sched_class->sched_migrate) sched_class->sched_migrate(thread, sched); /* * WARNING: the scheduling class may have just changed as a * result of calling the per-class migration hook. */ xnsched_set_resched(thread->sched); thread->sched = sched; #ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH /* * Mark the thread in flight, xnsched_finish_unlocked_switch() * will put the thread on the remote runqueue. */ xnthread_set_state(thread, XNMIGRATE); #else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */ /* Move thread to the remote runnable queue. */ xnsched_putback(thread); #endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */ }
/*
 * Migrate @thread to the run queue served by @sched.
 *
 * Must be called with nklock locked, interrupts off. thread must be
 * runnable.
 *
 * The common bookkeeping (dequeue, per-class hook, sched pointer
 * update) is delegated to migrate_thread(); this wrapper only decides
 * how the thread reaches the remote runnable queue, depending on
 * whether context switches run with the nklock released.
 */
void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
{
	migrate_thread(thread, sched);

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	/*
	 * Mark the thread in flight, xnsched_finish_unlocked_switch()
	 * will put the thread on the remote runqueue.
	 */
	xnthread_set_state(thread, XNMIGRATE);
#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
	/* Move thread to the remote runnable queue. */
	xnsched_putback(thread);
#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
}
/*
 * Complete a context switch that was performed with the nklock
 * released.
 *
 * Re-acquires the nklock, then fixes up the run queue in case the
 * outgoing thread migrated to another scheduler slot while the switch
 * was in progress (XNMIGRATE set by xnsched_migrate()).
 *
 * @param sched The scheduler slot the switch was started on.
 * @return The scheduler slot which is current after the switch; on
 * SMP this may differ from @a sched if the caller migrated CPUs while
 * suspended.
 *
 * NOTE(review): the nklock is still held on return — the IRQ state
 * saved in @a s is never restored here; presumably the caller is
 * responsible for dropping the lock. Confirm against callers.
 */
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnsched_current();
#endif /* CONFIG_SMP */

	last = sched->last;
	/* The unlocked switch is over; clear the in-switch flag. */
	sched->status &= ~XNINSW;

	/* Detect a thread which called xnthread_migrate() */
	if (last->sched != sched) {
		/* Queue the in-flight thread on its new run queue. */
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	return sched;
}
/*
 * Complete a context switch that was performed with the nklock
 * released.
 *
 * Re-acquires the nklock, fixes up the run queue in case the outgoing
 * thread migrated while the switch was in progress, and runs the
 * deletion hooks for a thread that was killed during the unlocked
 * window.
 *
 * @param sched The scheduler slot the switch was started on.
 * @return The scheduler slot which is current after the switch; on
 * SMP this may differ from @a sched if the caller migrated CPUs while
 * suspended.
 *
 * NOTE(review): the nklock is still held on return — the IRQ state
 * saved in @a s is never restored here; presumably the caller is
 * responsible for dropping the lock. Confirm against callers.
 */
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnpod_current_sched();
#endif /* CONFIG_SMP */

	last = sched->last;
	/* The unlocked switch is over; clear the switch-lock bit. */
	__clrbits(sched->status, XNSWLOCK);

	/* Detect a thread which called xnpod_migrate_thread */
	if (last->sched != sched) {
		/* Queue the in-flight thread on its new run queue. */
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	if (xnthread_test_state(last, XNZOMBIE)) {
		/*
		 * There are two cases where sched->last has the zombie
		 * bit:
		 * - either it had it before the context switch, the hooks
		 * have been executed and sched->zombie is last;
		 * - or it has been killed while the nklocked was unlocked
		 * during the context switch, in which case we must run the
		 * hooks, and we do it now.
		 */
		if (sched->zombie != last)
			xnsched_zombie_hooks(last);
	}

	return sched;
}