/* * Create a wait channel. NAME is a symbolic string name for it. * This is what's displayed by ps -alx in Unix. * * NAME should generally be a string constant. If it isn't, alternate * arrangements should be made to free it after the wait channel is * destroyed. */ struct wchan * wchan_create(const char *name) { struct wchan *wc; int result; wc = kmalloc(sizeof(*wc)); if (wc == NULL) { return NULL; } threadlist_init(&wc->wc_threads); wc->wc_name = name; /* add to allwchans[] */ spinlock_acquire(&allwchans_lock); result = wchanarray_add(&allwchans, wc, &wc->wc_index); spinlock_release(&allwchans_lock); if (result) { KASSERT(result == ENOMEM); threadlist_cleanup(&wc->wc_threads); kfree(wc); return NULL; } return wc; }
/* * Wake up all threads sleeping on a wait channel. */ void wchan_wakeall(struct wchan *wc, struct spinlock *lk) { struct thread *target; struct threadlist list; KASSERT(spinlock_do_i_hold(lk)); threadlist_init(&list); /* * Grab all the threads from the channel, moving them to a * private list. */ while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) { threadlist_addtail(&list, target); } /* * We could conceivably sort by cpu first to cause fewer lock * ops and fewer IPIs, but for now at least don't bother. Just * make each thread runnable. */ while ((target = threadlist_remhead(&list)) != NULL) { thread_make_runnable(target, false); } threadlist_cleanup(&list); }
/*
 * Create a wait channel. NAME is a symbolic string name for it.
 * This is what's displayed by ps -alx in Unix.
 *
 * NAME should generally be a string constant. If it isn't, alternate
 * arrangements should be made to free it after the wait channel is
 * destroyed.
 *
 * Returns NULL on allocation failure.
 */
struct wchan *
wchan_create(const char *name)
{
	struct wchan *wc = kmalloc(sizeof(*wc));

	if (wc == NULL) {
		return NULL;
	}
	wc->wc_name = name;
	threadlist_init(&wc->wc_threads);
	return wc;
}
/*
 * Thread migration.
 *
 * This is also called periodically from hardclock(). If the current
 * CPU is busy and other CPUs are idle, or less busy, it should move
 * threads across to those other other CPUs.
 *
 * Migrating threads isn't free because of cache affinity; a thread's
 * working cache set will end up having to be moved to the other CPU,
 * which is fairly slow. The tradeoff between this performance loss
 * and the performance loss due to underutilization of some CPUs is
 * something that needs to be tuned and probably is workload-specific.
 *
 * For here and now, because we know we're running on System/161 and
 * System/161 does not (yet) model such cache effects, we'll be very
 * aggressive.
 */
void
thread_consider_migration(void)
{
	unsigned my_count, total_count, one_share, to_send;
	unsigned i, numcpus;
	struct cpu *c;
	struct threadlist victims;
	struct thread *t;

	/*
	 * Pass 1: count the runnable threads on every CPU, taking and
	 * releasing each run queue lock in turn. The counts are only a
	 * snapshot; they can change as soon as each lock is dropped.
	 */
	my_count = total_count = 0;
	numcpus = cpuarray_num(&allcpus);
	for (i=0; i<numcpus; i++) {
		c = cpuarray_get(&allcpus, i);
		spinlock_acquire(&c->c_runqueue_lock);
		total_count += c->c_runqueue.tl_count;
		if (c == curcpu->c_self) {
			my_count = c->c_runqueue.tl_count;
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	/* The fair share per CPU, rounding up so shares sum to >= total. */
	one_share = DIVROUNDUP(total_count, numcpus);
	if (my_count < one_share) {
		/* We are at or below our share; nothing to give away. */
		return;
	}

	to_send = my_count - one_share;
	threadlist_init(&victims);
	spinlock_acquire(&curcpu->c_runqueue_lock);
	for (i=0; i<to_send; i++) {
		/*
		 * Take victims from the tail (coldest entries) of our
		 * own queue.
		 *
		 * NOTE(review): this assumes the queue still holds at
		 * least to_send threads since the count snapshot above;
		 * if remtail could return NULL here, addhead would be
		 * handed a NULL thread — confirm only this CPU ever
		 * removes from its own run queue.
		 */
		t = threadlist_remtail(&curcpu->c_runqueue);
		threadlist_addhead(&victims, t);
	}
	spinlock_release(&curcpu->c_runqueue_lock);

	/* Pass 2: hand victims to CPUs that are below their share. */
	for (i=0; i < numcpus && to_send > 0; i++) {
		c = cpuarray_get(&allcpus, i);
		if (c == curcpu->c_self) {
			continue;
		}
		spinlock_acquire(&c->c_runqueue_lock);
		while (c->c_runqueue.tl_count < one_share && to_send > 0) {
			t = threadlist_remhead(&victims);
			/*
			 * Ordinarily, curthread will not appear on
			 * the run queue. However, it can under the
			 * following circumstances:
			 *   - it went to sleep;
			 *   - the processor became idle, so it
			 *     remained curthread;
			 *   - it was reawakened, so it was put on the
			 *     run queue;
			 *   - and the processor hasn't fully unidled
			 *     yet, so all these things are still true.
			 *
			 * If the timer interrupt happens at (almost)
			 * exactly the proper moment, we can come here
			 * while things are in this state and see
			 * curthread. However, *migrating* curthread
			 * can cause bad things to happen (Exercise:
			 * Why? And what?) so shuffle it to the end of
			 * the list and decrement to_send in order to
			 * skip it. Then it goes back on our own run
			 * queue below.
			 */
			if (t == curthread) {
				threadlist_addtail(&victims, t);
				to_send--;
				continue;
			}

			/* Reassign ownership before queueing on C. */
			t->t_cpu = c;
			threadlist_addtail(&c->c_runqueue, t);
			DEBUG(DB_THREADS,
			      "Migrated thread %s: cpu %u -> %u",
			      t->t_name, curcpu->c_number, c->c_number);
			to_send--;
			if (c->c_isidle) {
				/*
				 * Other processor is idle; send
				 * interrupt to make sure it unidles.
				 */
				ipi_send(c, IPI_UNIDLE);
			}
		}
		spinlock_release(&c->c_runqueue_lock);
	}

	/*
	 * Because the code above isn't atomic, the thread counts may have
	 * changed while we were working and we may end up with leftovers.
	 * Don't panic; just put them back on our own run queue.
	 */
	if (!threadlist_isempty(&victims)) {
		spinlock_acquire(&curcpu->c_runqueue_lock);
		while ((t = threadlist_remhead(&victims)) != NULL) {
			threadlist_addtail(&curcpu->c_runqueue, t);
		}
		spinlock_release(&curcpu->c_runqueue_lock);
	}

	KASSERT(threadlist_isempty(&victims));
	threadlist_cleanup(&victims);
}
/*
 * Create a CPU structure. This is used for the bootup CPU and
 * also for secondary CPUs.
 *
 * The hardware number (the number assigned by firmware or system
 * board config or whatnot) is tracked separately because it is not
 * necessarily anything sane or meaningful.
 *
 * All failures panic: this runs during boot, before there is any
 * caller that could recover from a NULL return.
 */
struct cpu *
cpu_create(unsigned hardware_number)
{
	struct cpu *c;
	int result;
	char namebuf[16];

	c = kmalloc(sizeof(*c));
	if (c == NULL) {
		panic("cpu_create: Out of memory\n");
	}

	/* Self-pointer lets code holding a cpu compare it to curcpu. */
	c->c_self = c;
	c->c_hardware_number = hardware_number;

	c->c_curthread = NULL;
	threadlist_init(&c->c_zombies);
	c->c_hardclocks = 0;
	c->c_spinlocks = 0;

	c->c_isidle = false;
	threadlist_init(&c->c_runqueue);
	spinlock_init(&c->c_runqueue_lock);

	c->c_ipi_pending = 0;
	c->c_numshootdown = 0;
	spinlock_init(&c->c_ipi_lock);

	/* c_number is the logical index assigned by the allcpus array. */
	result = cpuarray_add(&allcpus, c, &c->c_number);
	if (result != 0) {
		panic("cpu_create: array_add: %s\n", strerror(result));
	}

	/* Create the per-cpu idle/boot thread, named after c_number. */
	snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
	c->c_curthread = thread_create(namebuf);
	if (c->c_curthread == NULL) {
		panic("cpu_create: thread_create failed\n");
	}

	/* NOTE(review): doubled colon in this panic message ("::"). */
	result = proc_addthread(kproc, c->c_curthread);
	if (result) {
		panic("cpu_create: proc_addthread:: %s\n", strerror(result));
	}

	if (c->c_number == 0) {
		/*
		 * Leave c->c_curthread->t_stack NULL for the boot
		 * cpu. This means we're using the boot stack, which
		 * can't be freed. (Exercise: what would it take to
		 * make it possible to free the boot stack?)
		 */
		/*c->c_curthread->t_stack = ... */
	}
	else {
		c->c_curthread->t_stack = kmalloc(STACK_SIZE);
		if (c->c_curthread->t_stack == NULL) {
			panic("cpu_create: couldn't allocate stack");
		}
		thread_checkstack_init(c->c_curthread);
	}
	/*
	 * NOTE(review): t_cpu is assigned only after proc_addthread;
	 * confirm proc_addthread does not rely on t_cpu being set
	 * (the other cpu_create variant in this file sets it earlier).
	 */
	c->c_curthread->t_cpu = c;

	/* Machine-dependent per-cpu setup (e.g. cpustacks, MMU state). */
	cpu_machdep_init(c);

	return c;
}
/*
 * Create a CPU structure. This is used for the bootup CPU and
 * also for secondary CPUs.
 *
 * The hardware number (the number assigned by firmware or system
 * board config or whatnot) is tracked separately because it is not
 * necessarily anything sane or meaningful.
 *
 * All failures panic: this runs during boot, before there is any
 * caller that could recover from a NULL return. The ordering in
 * this function is delicate -- curcpu/curthread must exist before
 * code paths that take spinlocks (e.g. proc_addthread) can run.
 */
struct cpu *
cpu_create(unsigned hardware_number)
{
	struct cpu *c;
	int result;
	char namebuf[16];

	c = kmalloc(sizeof(*c));
	if (c == NULL) {
		panic("cpu_create: Out of memory\n");
	}

	/* Self-pointer lets code holding a cpu compare it to curcpu. */
	c->c_self = c;
	c->c_hardware_number = hardware_number;

	c->c_curthread = NULL;
	threadlist_init(&c->c_zombies);
	c->c_hardclocks = 0;
	c->c_spinlocks = 0;

	c->c_isidle = false;
	threadlist_init(&c->c_runqueue);
	spinlock_init(&c->c_runqueue_lock);

	c->c_ipi_pending = 0;
	c->c_numshootdown = 0;
	spinlock_init(&c->c_ipi_lock);

	/* c_number is the logical index assigned by the allcpus array. */
	result = cpuarray_add(&allcpus, c, &c->c_number);
	if (result != 0) {
		panic("cpu_create: array_add: %s\n", strerror(result));
	}

	/* Create the per-cpu idle/boot thread, named after c_number. */
	snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
	c->c_curthread = thread_create(namebuf);
	if (c->c_curthread == NULL) {
		panic("cpu_create: thread_create failed\n");
	}
	c->c_curthread->t_cpu = c;

	if (c->c_number == 0) {
		/*
		 * Leave c->c_curthread->t_stack NULL for the boot
		 * cpu. This means we're using the boot stack, which
		 * can't be freed. (Exercise: what would it take to
		 * make it possible to free the boot stack?)
		 */
		/*c->c_curthread->t_stack = ... */
	}
	else {
		c->c_curthread->t_stack = kmalloc(STACK_SIZE);
		if (c->c_curthread->t_stack == NULL) {
			panic("cpu_create: couldn't allocate stack");
		}
		thread_checkstack_init(c->c_curthread);
	}

	/*
	 * If there is no curcpu (or curthread) yet, we are creating
	 * the first (boot) cpu. Initialize curcpu and curthread as
	 * early as possible so that other code can take locks without
	 * exploding.
	 */
	if (!CURCPU_EXISTS()) {
		/*
		 * Initializing curcpu and curthread is
		 * machine-dependent because either of curcpu and
		 * curthread might be defined in terms of the other.
		 */
		INIT_CURCPU(c, c->c_curthread);

		/*
		 * Now make sure both t_cpu and c_curthread are
		 * set. This might be partially redundant with
		 * INIT_CURCPU depending on how things are defined.
		 */
		curthread->t_cpu = curcpu;
		curcpu->c_curthread = curthread;
	}

	/*
	 * Safe to call only now that curcpu/curthread exist (it may
	 * take locks internally).
	 * NOTE(review): doubled colon in this panic message ("::").
	 */
	result = proc_addthread(kproc, c->c_curthread);
	if (result) {
		panic("cpu_create: proc_addthread:: %s\n", strerror(result));
	}

	/* Machine-dependent per-cpu setup (e.g. cpustacks, MMU state). */
	cpu_machdep_init(c);

	return c;
}