/* * Wake up all threads sleeping on a wait channel. */ void wchan_wakeall(struct wchan* wc, struct spinlock* lk) { assert(spinlock_held(lk)); struct threadlist list = {0}; threadlist_init(&list); /* * Grab all the threads from the channel, moving them to a * private list. */ struct thread* target = NULL; while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) threadlist_addtail(&list, target); /* * We could conceivably sort by cpu first to cause fewer lock * ops and fewer IPIs, but for now at least don't bother. Just * make each thread runnable. */ while ((target = threadlist_remhead(&list)) != NULL) thread_make_runnable(target, false); threadlist_cleanup(&list); }
/**
 * Release a spinlock, leaving the interrupt state untouched.
 *
 * Sanity-checks that the lock is actually held before marking it
 * free; releasing an unheld lock is a fatal programming error.
 *
 * @param lock Spinlock to release.
 */
void spinlock_unlock_noirq(spinlock_t *lock)
{
	if (unlikely(!spinlock_held(lock))) {
		fatal("Release of already unlocked spinlock %p (%s)", lock, lock->name);
	}

	/* Mark the lock as free. */
	atomic_set(&lock->value, 1);
}
/*
 * Return true if no threads are sleeping on the channel.
 * The channel's spinlock LK must be held. Diagnostic use only.
 */
bool wchan_isempty(struct wchan* wc, struct spinlock* lk)
{
	assert(spinlock_held(lk));
	return threadlist_isempty(&wc->wc_threads);
}
/**
 * Release a spinlock and restore the saved interrupt state.
 *
 * The interrupt state is restored to whatever it was when the lock
 * was taken, so this must only pair with spinlock_lock(). The saved
 * state is read out before the lock is marked free, since another
 * CPU may acquire the lock (and overwrite the saved state) the
 * instant the release is visible.
 *
 * @param lock Spinlock to release.
 */
void spinlock_unlock(spinlock_t *lock)
{
	if (unlikely(!spinlock_held(lock))) {
		fatal("Release of already unlocked spinlock %p (%s)", lock, lock->name);
	}

	/* Snapshot the saved interrupt state before giving up the lock. */
	bool state = lock->state;

	atomic_set(&lock->value, 1);
	local_irq_restore(state);
}
/*
 * Yield the cpu to another process, and go to sleep, on the specified
 * wait channel WC, whose associated spinlock is LK. Calling wakeup on
 * the channel will make the thread runnable again. The spinlock must
 * be locked. The call to thread_switch unlocks it; we relock it
 * before returning.
 */
void wchan_sleep(struct wchan* wc, struct spinlock* lk)
{
	/*
	 * NOTE(review): x86-specific full memory fence; presumably here to
	 * ensure prior stores are globally visible before this thread
	 * sleeps. Not portable to non-x86 targets -- confirm whether it is
	 * required or redundant given the spinlock's own ordering.
	 */
	asm volatile ("mfence" ::: "memory");

	/* may not sleep in an interrupt handler */
	assert(!thisthread->in_interrupt);

	/* must hold the spinlock */
	assert(spinlock_held(lk));

	/* must not hold other spinlocks (LK itself is the one allowed) */
	assert(thiscpu->spinlocks == 1);

	/* thread_wait releases LK while we sleep; relock before returning. */
	thread_wait(wc, lk);
	spinlock_acquire(lk);
}
/* * Wake up one thread sleeping on a wait channel. */ void wchan_wakeone(struct wchan* wc, struct spinlock* lk) { struct thread* target; assert(spinlock_held(lk)); /* Grab a thread from the channel */ target = threadlist_remhead(&wc->wc_threads); if (target == NULL) { /* Nobody was sleeping. */ return; } /* * Note that thread_make_runnable acquires a runqueue lock * while we're holding LK. This is ok; all spinlocks * associated with wchans must come before the runqueue locks, * as we also bridge from the wchan lock to the runqueue lock * in thread_wait. */ thread_make_runnable(target, false); }