/*
 * Locks the runqueue itself.
 *
 * Thread must be locked.
 */
static boolean_t
sched_traditional_processor_queue_remove(processor_t processor,
                                         thread_t thread)
{
    processor_set_t pset;
    run_queue_t rq;

    pset = processor->processor_set;
    pset_lock(pset);

    rq = runq_for_processor(processor);

    if (processor == thread->runq) {
        /*
         * Thread is on a run queue and we have a lock on
         * that run queue.
         */
        runq_consider_decr_bound_count(processor, thread);
        run_queue_remove(rq, thread);
    } else {
        /*
         * The thread left the run queue before we could
         * lock the run queue.
         */
        assert(thread->runq == PROCESSOR_NULL);
        processor = PROCESSOR_NULL;
    }

    pset_unlock(pset);

    return (processor != PROCESSOR_NULL);
}
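/*
 * A minimal user-space sketch of the recheck-under-lock pattern used
 * above: the remover takes the container lock, then verifies that the
 * element still believes it is on this container before unlinking it.
 * All names (toy_runq, toy_thread, toy_runq_remove) are hypothetical.
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_runq;

struct toy_thread {
    struct toy_runq *runq;        /* set while enqueued, NULL otherwise */
    struct toy_thread *next;
};

struct toy_runq {
    pthread_mutex_t lock;
    struct toy_thread *head;
};

/* Returns true iff the thread was still enqueued here and was removed. */
static bool
toy_runq_remove(struct toy_runq *rq, struct toy_thread *t)
{
    bool removed = false;

    pthread_mutex_lock(&rq->lock);
    if (t->runq == rq) {
        /* Still ours: unlink under the queue lock. */
        struct toy_thread **pp = &rq->head;
        while (*pp != t)
            pp = &(*pp)->next;
        *pp = t->next;
        t->runq = NULL;
        removed = true;
    }
    /* else: the thread was dequeued before we got the lock. */
    pthread_mutex_unlock(&rq->lock);

    return removed;
}
#endif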
/*
 * processor_up:
 *
 * Flag processor as up and running, and available
 * for scheduling.
 */
void
processor_up(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    init_ast_check(processor);
    pset = processor->processor_set;
    pset_lock(pset);
    ++pset->online_processor_count;
    enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
    processor->state = PROCESSOR_RUNNING;
    (void)hw_atomic_add(&processor_avail_count, 1);
    commpage_update_active_cpus();
    pset_unlock(pset);
    ml_cpu_up();
    splx(s);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
#endif
}
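/*
 * Sketch of the bring-up ordering above in plain C11: all scheduler-visible
 * state (online count, state field, global availability count) is published
 * while the set lock is held, and only the platform-layer enable runs outside
 * the lock. Names (toy_pset, toy_cpu, toy_platform_enable) are hypothetical.
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>
#include <stdatomic.h>

enum toy_cpu_state { TOY_OFF_LINE, TOY_RUNNING };

struct toy_cpu {
    enum toy_cpu_state state;
};

struct toy_pset {
    pthread_mutex_t lock;
    int online_count;
};

static atomic_int toy_avail_count;    /* analogue of processor_avail_count */

static void toy_platform_enable(struct toy_cpu *cpu) { (void)cpu; }

static void
toy_cpu_up(struct toy_pset *pset, struct toy_cpu *cpu)
{
    pthread_mutex_lock(&pset->lock);
    pset->online_count++;
    cpu->state = TOY_RUNNING;                /* visible to the scheduler */
    atomic_fetch_add(&toy_avail_count, 1);   /* global availability count */
    pthread_mutex_unlock(&pset->lock);

    toy_platform_enable(cpu);                /* analogue of ml_cpu_up() */
}
#endif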
/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
    processor_t processor)
{
    thread_t old_thread, self = current_thread();
    processor_t prev;
    processor_set_t pset;

    /*
     * Get onto the processor to shutdown
     */
    prev = thread_bind(processor);
    thread_block(THREAD_CONTINUE_NULL);

    assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

    ml_cpu_down();

#if HIBERNATION
    if (processor_avail_count < 2) {
        hibernate_vm_lock();
        hibernate_vm_unlock();
    }
#endif

    pset = processor->processor_set;
    pset_lock(pset);
    processor->state = PROCESSOR_OFF_LINE;
    --pset->online_processor_count;
    (void)hw_atomic_sub(&processor_avail_count, 1);
    commpage_update_active_cpus();
    SCHED(processor_queue_shutdown)(processor);
    /* pset lock dropped */

    /*
     * Continue processor shutdown in shutdown context.
     *
     * We save the current context in machine_processor_shutdown in such a way
     * that when this thread is next invoked it will return from here instead of
     * from the machine_switch_context() in thread_invoke like a normal context switch.
     *
     * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
     * thread invoked back to this one. (Usually, it's another processor's idle thread.)
     *
     * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
     * with thread_invoke.
     */
    thread_bind(prev);
    old_thread = machine_processor_shutdown(self, processor_offline, processor);

    thread_dispatch(old_thread, self);
}
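/*
 * A user-space analogue of the "get onto the processor" step, assuming a
 * Linux host with pthread_setaffinity_np(): pin the calling thread to the
 * target CPU so the subsequent work provably runs there, then restore the
 * previous mask. This only models the migration trick, not the
 * machine_processor_shutdown() context-saving dance.
 */
#if 0 /* illustrative sketch, not kernel code */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int
run_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
    cpu_set_t old_mask, new_mask;
    pthread_t self = pthread_self();

    if (pthread_getaffinity_np(self, sizeof(old_mask), &old_mask) != 0)
        return -1;

    CPU_ZERO(&new_mask);
    CPU_SET(cpu, &new_mask);
    if (pthread_setaffinity_np(self, sizeof(new_mask), &new_mask) != 0)
        return -1;

    fn(arg);    /* now guaranteed to execute on `cpu` */

    /* Analogue of thread_bind(prev): restore the previous binding. */
    pthread_setaffinity_np(self, sizeof(old_mask), &old_mask);
    return 0;
}
#endif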
/*
 * Initialize the given processor for the cpu
 * indicated by cpu_id, and assign to the
 * specified processor set.
 */
void
processor_init(
    processor_t processor,
    int cpu_id,
    processor_set_t pset)
{
    spl_t s;

    if (processor != master_processor) {
        /* Scheduler state for master_processor initialized in sched_init() */
        SCHED(processor_init)(processor);
    }

    processor->state = PROCESSOR_OFF_LINE;
    processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
    processor->processor_set = pset;
    processor_state_update_idle(processor);
    processor->starting_pri = MINPRI;
    processor->cpu_id = cpu_id;
    timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
    processor->quantum_end = UINT64_MAX;
    processor->deadline = UINT64_MAX;
    processor->first_timeslice = FALSE;
    processor->processor_primary = processor; /* no SMT relationship known at this point */
    processor->processor_secondary = NULL;
    processor->is_SMT = FALSE;
    processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
    processor->processor_self = IP_NULL;
    processor_data_init(processor);
    processor->processor_list = NULL;

    s = splsched();
    pset_lock(pset);
    bit_set(pset->cpu_bitmask, cpu_id);
    if (pset->cpu_set_count++ == 0)
        pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
    else {
        pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
        pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
    }
    pset_unlock(pset);
    splx(s);

    simple_lock(&processor_list_lock);
    if (processor_list == NULL)
        processor_list = processor;
    else
        processor_list_tail->processor_list = processor;
    processor_list_tail = processor;
    processor_count++;
    assert(cpu_id < MAX_SCHED_CPUS);
    processor_array[cpu_id] = processor;
    simple_unlock(&processor_list_lock);
}
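/*
 * Sketch of the tail-append pattern used for processor_list above: a
 * singly linked intrusive list with a tail pointer gives O(1) append
 * under a single lock. Names (toy_node, toy_list_append) are hypothetical.
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>
#include <stddef.h>

struct toy_node {
    struct toy_node *next;    /* analogue of processor->processor_list */
};

static pthread_mutex_t toy_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_node *toy_list_head;
static struct toy_node *toy_list_tail;

static void
toy_list_append(struct toy_node *n)
{
    n->next = NULL;
    pthread_mutex_lock(&toy_list_lock);
    if (toy_list_head == NULL)
        toy_list_head = n;
    else
        toy_list_tail->next = n;
    toy_list_tail = n;
    pthread_mutex_unlock(&toy_list_lock);
}
#endif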
/*
 * ipc_pset_enable:
 *
 * Enable ipc access to a processor set.
 */
void
ipc_pset_enable(
    processor_set_t pset)
{
    pset_lock(pset);
    if (pset->active) {
        ipc_kobject_set(pset->pset_self, (ipc_kobject_t) pset, IKOT_PSET);
        ipc_kobject_set(pset->pset_name_self, (ipc_kobject_t) pset, IKOT_PSET_NAME);
        pset->ref_count += 2;
    }
    pset_unlock(pset);
}
ipc_port_t
convert_pset_name_to_port(
    processor_set_t pset)
{
    ipc_port_t port;

    pset_lock(pset);
    if (pset->active)
        port = ipc_port_make_send(pset->pset_name_self);
    else
        port = IP_NULL;
    pset_unlock(pset);

    pset_deallocate(pset);
    return port;
}
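/*
 * The convert routine above consumes the caller's pset reference (the
 * pset_deallocate at the end) whether or not a port is produced. A
 * minimal single-threaded refcount sketch of that convention, with
 * hypothetical names:
 */
#if 0 /* illustrative sketch, not kernel code */
#include <stdlib.h>

struct toy_obj {
    int ref_count;
    int active;
};

static void
toy_obj_release(struct toy_obj *o)
{
    if (--o->ref_count == 0)
        free(o);
}

/* Consumes the caller's reference on `o` on all paths, mirroring
 * convert_pset_name_to_port. */
static int
toy_convert(struct toy_obj *o)
{
    int result = o->active ? 1 : 0;    /* stand-in for making a send right */
    toy_obj_release(o);                /* reference consumed on all paths */
    return result;
}
#endif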
static void
sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context)
{
    boolean_t restart_needed = FALSE;
    processor_t processor = processor_list;
    processor_set_t pset;
    thread_t thread;
    spl_t s;

    do {
        do {
            /*
             * TODO: in sched_traditional_use_pset_runqueue case,
             * avoid scanning the same runq multiple times
             */
            pset = processor->processor_set;

            s = splsched();
            pset_lock(pset);

            restart_needed = runq_scan(runq_for_processor(processor), scan_context);

            pset_unlock(pset);
            splx(s);

            if (restart_needed)
                break;

            thread = processor->idle_thread;
            if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
                if (thread_update_add_thread(thread) == FALSE) {
                    restart_needed = TRUE;
                    break;
                }
            }
        } while ((processor = processor->processor_list) != NULL);

        /* Ok, we now have a collection of candidates -- fix them. */
        thread_update_process_threads();
    } while (restart_needed);
}
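/*
 * The scan above uses a "collect, then restart if needed" idiom: walk the
 * list, and when the shared batch fills up mid-walk, stop, drain the batch,
 * and resume from the element that did not fit. A compact sketch with
 * hypothetical names (toy_scan, toy_collect, toy_process_batch):
 */
#if 0 /* illustrative sketch, not kernel code */
#include <stdbool.h>
#include <stddef.h>

struct toy_item { struct toy_item *next; };

#define TOY_BATCH_MAX 8
static struct toy_item *toy_batch[TOY_BATCH_MAX];
static size_t toy_batch_count;

/* Returns false when the batch is full and a restart is required. */
static bool
toy_collect(struct toy_item *it)
{
    if (toy_batch_count == TOY_BATCH_MAX)
        return false;
    toy_batch[toy_batch_count++] = it;
    return true;
}

static void
toy_process_batch(void)
{
    toy_batch_count = 0;    /* stand-in for thread_update_process_threads() */
}

static void
toy_scan(struct toy_item *head)
{
    struct toy_item *it = head;
    bool restart;

    do {
        restart = false;
        for (; it != NULL; it = it->next) {
            if (!toy_collect(it)) {
                restart = true;    /* batch full: drain, then resume here */
                break;
            }
        }
        toy_process_batch();
    } while (restart);
}
#endif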
/*
 * Locate and steal a thread, beginning
 * at the pset.
 *
 * The pset must be locked, and is returned
 * unlocked.
 *
 * Returns the stolen thread, or THREAD_NULL on
 * failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
    processor_set_t nset, cset = pset;
    processor_t processor;
    thread_t thread;

    do {
        processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
        while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
            if (runq_for_processor(processor)->count > 0) {
                thread = sched_traditional_steal_processor_thread(processor);
                if (thread != THREAD_NULL) {
                    remqueue((queue_entry_t)processor);
                    enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

                    pset_unlock(cset);

                    return (thread);
                }
            }

            processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
        }

        nset = next_pset(cset);

        if (nset != pset) {
            pset_unlock(cset);

            cset = nset;
            pset_lock(cset);
        }
    } while (nset != pset);

    pset_unlock(cset);

    return (THREAD_NULL);
}
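/*
 * Sketch of the ring traversal above: visit each set in a circular list,
 * hand-over-hand (unlock current, lock next), until the walk returns to
 * the starting set. Hypothetical names throughout.
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>
#include <stddef.h>

struct toy_set {
    pthread_mutex_t lock;
    struct toy_set *next;    /* circular: last set points back to first */
    int work_count;
};

/* Caller holds start->lock; returns with no locks held. */
static struct toy_set *
toy_find_work(struct toy_set *start)
{
    struct toy_set *cur = start, *next;

    do {
        if (cur->work_count > 0) {
            pthread_mutex_unlock(&cur->lock);
            return cur;                      /* found a victim set */
        }
        next = cur->next;
        if (next != start) {
            pthread_mutex_unlock(&cur->lock);  /* hand-over-hand advance */
            cur = next;
            pthread_mutex_lock(&cur->lock);
        }
    } while (next != start);

    pthread_mutex_unlock(&cur->lock);
    return NULL;    /* wrapped around: nothing to steal */
}
#endif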
void
processor_doaction(
    processor_t processor)
{
    thread_t this_thread;
    spl_t s;
    register processor_set_t pset;
#if MACH_HOST
    register processor_set_t new_pset;
    register thread_t thread;
    register thread_t prev_thread = THREAD_NULL;
    thread_act_t thr_act;
    boolean_t have_pset_ref = FALSE;
#endif /* MACH_HOST */

    /*
     * Get onto the processor to shutdown
     */
    this_thread = current_thread();
    thread_bind(this_thread, processor);
    thread_block((void (*)(void)) 0);

    pset = processor->processor_set;
#if MACH_HOST
    /*
     * If this is the last processor in the processor_set,
     * stop all the threads first.
     */
    pset_lock(pset);
    if (pset->processor_count == 1) {
        thread = (thread_t) queue_first(&pset->threads);
        prev_thread = THREAD_NULL;
        pset->ref_count++;
        have_pset_ref = TRUE;
        pset->empty = TRUE;

        /*
         * loop through freezing the processor set assignment
         * and reference counting the threads;
         */
        while (!queue_end(&pset->threads, (queue_entry_t) thread)) {
            thread_reference(thread);
            pset_unlock(pset);

            /*
             * Freeze the thread on the processor set.
             * If it's moved, just release the reference.
             * Get the next thread in the processor set list
             * from the last one which was frozen.
             */
            if (thread_stop_freeze(thread, pset))
                prev_thread = thread;
            else
                thread_deallocate(thread);

            pset_lock(pset);
            if (prev_thread != THREAD_NULL)
                thread = (thread_t)queue_next(&prev_thread->pset_threads);
            else
                thread = (thread_t) queue_first(&pset->threads);
        }

        /*
         * Remove the processor from the set so that when the threads
         * are unstopped below the ones blocked in the kernel don't
         * start running again.
         */
        s = splsched();
        processor_lock(processor);
        pset_remove_processor(pset, processor);

        /*
         * Prevent race with another processor being added to the set
         * See code after Restart_pset:
         *   while(new_pset->empty && new_pset->processor_count > 0)
         *
         * ... it tests for the condition where a new processor is
         * added to the set while the last one is still being removed.
         */
        pset->processor_count++;    /* block new processors being added */
        assert(pset->processor_count == 1);

        /*
         * Release the thread assignment locks, unstop the threads and
         * release the thread references which were taken above.
         */
        thread = (thread_t) queue_first(&pset->threads);
        while (!queue_empty(&pset->threads) && (thread != THREAD_NULL)) {
            prev_thread = thread;
            if (queue_end(&pset->threads, (queue_entry_t) thread))
                thread = THREAD_NULL;
            else
                thread = (thread_t) queue_next(&prev_thread->pset_threads);
            pset_unlock(pset);
            thread_unfreeze(prev_thread);
            thread_unstop(prev_thread);
            thread_deallocate(prev_thread);
            pset_lock(pset);
        }

        /*
         * allow a processor to be added to the empty pset
         */
        pset->processor_count--;
    }
    else {
        /* not last processor in set */
#endif /* MACH_HOST */
        /*
         * At this point, it is ok to rm the processor from the pset.
         */
        s = splsched();
        processor_lock(processor);
        pset_remove_processor(pset, processor);
#if MACH_HOST
    }
    pset_unlock(pset);

    /*
     * Copy the next pset pointer into a local variable and clear
     * it because we are taking over its reference.
     */
    new_pset = processor->processor_set_next;
    processor->processor_set_next = PROCESSOR_SET_NULL;

    if (processor->state == PROCESSOR_ASSIGN) {

Restart_pset:
        /*
         * Nasty problem: we want to lock the target pset, but
         * we have to enable interrupts to do that which requires
         * dropping the processor lock. While the processor
         * is unlocked, it could be reassigned or shutdown.
         */
        processor_unlock(processor);
        splx(s);

        /*
         * Lock target pset and handle remove last / assign first race.
         * Only happens if there is more than one action thread.
         */
        pset_lock(new_pset);
        while (new_pset->empty && new_pset->processor_count > 0) {
            pset_unlock(new_pset);
            while (*(volatile boolean_t *)&new_pset->empty &&
                   *(volatile int *)&new_pset->processor_count > 0)
                /* spin */;
            pset_lock(new_pset);
        }

        /*
         * Finally relock the processor and see if something changed.
         * The only possibilities are assignment to a different pset
         * and shutdown.
         */
        s = splsched();
        processor_lock(processor);

        if (processor->state == PROCESSOR_SHUTDOWN) {
            pset_unlock(new_pset);
            goto shutdown;    /* will release pset reference */
        }

        if (processor->processor_set_next != PROCESSOR_SET_NULL) {
            /*
             * Processor was reassigned. Drop the reference
             * we have on the wrong new_pset, and get the
             * right one. Involves lots of lock juggling.
             */
            processor_unlock(processor);
            splx(s);

            pset_unlock(new_pset);
            pset_deallocate(new_pset);

            s = splsched();
            processor_lock(processor);
            new_pset = processor->processor_set_next;
            processor->processor_set_next = PROCESSOR_SET_NULL;
            goto Restart_pset;
        }

        /*
         * If the pset has been deactivated since the operation
         * was requested, redirect to the default pset.
         */
        if (!(new_pset->active)) {
            pset_unlock(new_pset);
            pset_deallocate(new_pset);
            new_pset = &default_pset;
            pset_lock(new_pset);
            new_pset->ref_count++;
        }

        /*
         * Do assignment, then wakeup anyone waiting for it.
         * Finally context switch to have it take effect.
         */
        pset_add_processor(new_pset, processor);
        if (new_pset->empty) {
            /*
             * Set all the threads loose
             */
            thread = (thread_t) queue_first(&new_pset->threads);
            while (!queue_end(&new_pset->threads, (queue_entry_t)thread)) {
                thr_act = thread_lock_act(thread);
                thread_release(thread->top_act);
                act_unlock_thread(thr_act);
                thread = (thread_t) queue_next(&thread->pset_threads);
            }
            new_pset->empty = FALSE;
        }
        processor->processor_set_next = PROCESSOR_SET_NULL;
        processor->state = PROCESSOR_RUNNING;
        thread_wakeup((event_t)processor);
        processor_unlock(processor);
        splx(s);
        pset_unlock(new_pset);

        /*
         * Clean up dangling references, and release our binding.
         */
        pset_deallocate(new_pset);
        if (have_pset_ref)
            pset_deallocate(pset);
        if (prev_thread != THREAD_NULL)
            thread_deallocate(prev_thread);
        thread_bind(this_thread, PROCESSOR_NULL);
        thread_block((void (*)(void)) 0);
        return;
    }

shutdown:
#endif /* MACH_HOST */

    /*
     * Do shutdown, make sure we live when processor dies.
     */
    if (processor->state != PROCESSOR_SHUTDOWN) {
        printf("state: %d\n", processor->state);
        panic("action_thread -- bad processor state");
    }
    processor_unlock(processor);

    /*
     * Clean up dangling references, and release our binding.
     */
#if MACH_HOST
    if (new_pset != PROCESSOR_SET_NULL)
        pset_deallocate(new_pset);
    if (have_pset_ref)
        pset_deallocate(pset);
    if (prev_thread != THREAD_NULL)
        thread_deallocate(prev_thread);
#endif /* MACH_HOST */

    thread_bind(this_thread, PROCESSOR_NULL);
    switch_to_shutdown_context(this_thread,
                               processor_doshutdown,
                               processor);
    splx(s);
}
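/*
 * The Restart_pset dance above is the classic lock-ordering workaround:
 * to acquire lock B while holding lock A when the ordering forbids it,
 * drop A, take B, retake A, and then revalidate everything A protected.
 * A compact sketch with a generation counter standing in for the
 * revalidation; all names are hypothetical.
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>

struct toy_guarded {
    pthread_mutex_t lock;
    unsigned gen;    /* bumped whenever protected state changes */
};

/* Caller holds a->lock; returns with both locks held and state validated. */
static void
toy_lock_both(struct toy_guarded *a, pthread_mutex_t *b)
{
    unsigned gen;

    for (;;) {
        gen = a->gen;
        pthread_mutex_unlock(&a->lock);   /* drop A ... */
        pthread_mutex_lock(b);            /* ... take B without violating order */
        pthread_mutex_lock(&a->lock);     /* retake A */
        if (a->gen == gen)
            return;                       /* nothing changed while unlocked */
        pthread_mutex_unlock(b);          /* state moved: retry */
    }
}
#endif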
kern_return_t
processor_start(
    processor_t processor)
{
    processor_set_t pset;
    thread_t thread;
    kern_return_t result;
    spl_t s;

    if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (processor == master_processor) {
        processor_t prev;

        prev = thread_bind(processor);
        thread_block(THREAD_CONTINUE_NULL);

        result = cpu_start(processor->cpu_id);

        thread_bind(prev);

        return (result);
    }

    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state != PROCESSOR_OFF_LINE) {
        pset_unlock(pset);
        splx(s);

        return (KERN_FAILURE);
    }

    processor->state = PROCESSOR_START;
    pset_unlock(pset);
    splx(s);

    /*
     * Create the idle processor thread.
     */
    if (processor->idle_thread == THREAD_NULL) {
        result = idle_thread_create(processor);
        if (result != KERN_SUCCESS) {
            s = splsched();
            pset_lock(pset);
            processor->state = PROCESSOR_OFF_LINE;
            pset_unlock(pset);
            splx(s);

            return (result);
        }
    }

    /*
     * If there is no active thread, the processor
     * has never been started. Create a dedicated
     * start up thread.
     */
    if (processor->active_thread == THREAD_NULL &&
        processor->next_thread == THREAD_NULL) {
        result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread);
        if (result != KERN_SUCCESS) {
            s = splsched();
            pset_lock(pset);
            processor->state = PROCESSOR_OFF_LINE;
            pset_unlock(pset);
            splx(s);

            return (result);
        }

        s = splsched();
        thread_lock(thread);
        thread->bound_processor = processor;
        processor->next_thread = thread;
        thread->state = TH_RUN;
        thread_unlock(thread);
        splx(s);

        thread_deallocate(thread);
    }

    if (processor->processor_self == IP_NULL)
        ipc_processor_init(processor);

    result = cpu_start(processor->cpu_id);
    if (result != KERN_SUCCESS) {
        s = splsched();
        pset_lock(pset);
        processor->state = PROCESSOR_OFF_LINE;
        pset_unlock(pset);
        splx(s);

        return (result);
    }

    ipc_processor_enable(processor);

    return (KERN_SUCCESS);
}
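/*
 * Each failure path above re-takes the set lock just to roll the state
 * back to PROCESSOR_OFF_LINE. A common C idiom consolidates that unwind
 * at one goto label; a sketch with hypothetical names:
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>

enum toy_state { TOY_OFF_LINE, TOY_START };

struct toy_cpu {
    pthread_mutex_t lock;
    enum toy_state state;
};

static int toy_make_idle_thread(struct toy_cpu *cpu) { (void)cpu; return 0; }
static int toy_platform_start(struct toy_cpu *cpu) { (void)cpu; return 0; }

static int
toy_cpu_start(struct toy_cpu *cpu)
{
    int err;

    pthread_mutex_lock(&cpu->lock);
    if (cpu->state != TOY_OFF_LINE) {
        pthread_mutex_unlock(&cpu->lock);
        return -1;    /* analogue of the KERN_FAILURE early return */
    }
    cpu->state = TOY_START;
    pthread_mutex_unlock(&cpu->lock);

    if ((err = toy_make_idle_thread(cpu)) != 0)
        goto fail;
    if ((err = toy_platform_start(cpu)) != 0)
        goto fail;
    return 0;

fail:
    /* single unwind point: roll the state machine back */
    pthread_mutex_lock(&cpu->lock);
    cpu->state = TOY_OFF_LINE;
    pthread_mutex_unlock(&cpu->lock);
    return err;
}
#endif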
kern_return_t
processor_shutdown(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state == PROCESSOR_OFF_LINE) {
        /*
         * Success if already shutdown.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    if (processor->state == PROCESSOR_START) {
        /*
         * Failure if currently being started.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_FAILURE);
    }

    /*
     * If the processor is dispatching, let it finish.
     */
    while (processor->state == PROCESSOR_DISPATCHING) {
        pset_unlock(pset);
        splx(s);
        delay(1);
        s = splsched();
        pset_lock(pset);
    }

    /*
     * Success if already being shutdown.
     */
    if (processor->state == PROCESSOR_SHUTDOWN) {
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    if (processor->state == PROCESSOR_IDLE)
        remqueue((queue_entry_t)processor);
    else if (processor->state == PROCESSOR_RUNNING)
        remqueue((queue_entry_t)processor);

    processor->state = PROCESSOR_SHUTDOWN;

    pset_unlock(pset);

    processor_doshutdown(processor);
    splx(s);

    cpu_exit_wait(processor->cpu_id);

    return (KERN_SUCCESS);
}
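/*
 * The PROCESSOR_DISPATCHING wait above drops the lock, pauses briefly,
 * and relocks before re-reading the state, so the dispatcher is never
 * blocked by the shutdown path. A sketch of that polling idiom, with
 * hypothetical names:
 */
#if 0 /* illustrative sketch, not kernel code */
#include <pthread.h>
#include <time.h>

enum toy_state { TOY_RUNNING, TOY_DISPATCHING, TOY_SHUTDOWN };

struct toy_cpu {
    pthread_mutex_t lock;
    enum toy_state state;
};

/* Caller holds cpu->lock; returns with it still held once the state
 * is no longer transient. */
static void
toy_wait_while_dispatching(struct toy_cpu *cpu)
{
    struct timespec ts = { 0, 1000000 };    /* short pause, stand-in for delay(1) */

    while (cpu->state == TOY_DISPATCHING) {
        pthread_mutex_unlock(&cpu->lock);   /* never hold the lock while waiting */
        nanosleep(&ts, NULL);
        pthread_mutex_lock(&cpu->lock);     /* relock, then re-read the state */
    }
}
#endif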