/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER					SLAVE
 * -------					----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *					the slave runs slave_startup and then
 *					sets the proxy
 *					the slave waits for the master to add
 *					slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *					the slave exits the startup thread and
 *					is running
 */
void
start_cpu(int cpuid, void (*flag_func)(int))
{
	extern void cpu_startup(int);
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * deal with the cpu flags in a phase-specific manner
	 * for various reasons, this needs to run after the slave
	 * is checked in but before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}
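/*
 * Illustrative sketch only (not kernel code): a minimal userland model of
 * the proxy_ready_set/cpu_ready_set handshake described above, using a
 * pthread in place of the slave CPU and C11 atomics in place of cpusets.
 * All names here (proxy_ready, cpu_ready, slave_model) are hypothetical and
 * exist only for illustration; it should build with a C11 compiler and
 * -pthread, but it is a sketch, not the implementation.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int proxy_ready;	/* slave sets this once it has checked in */
static atomic_int cpu_ready;	/* master sets this to release the slave */

static void *
slave_model(void *arg)
{
	(void) arg;
	/* ... slave-side initialization would go here ... */
	atomic_store(&proxy_ready, 1);		/* like CPUSET_ADD(proxy_ready_set, id) */
	while (atomic_load(&cpu_ready) == 0)	/* wait for the master's release */
		usleep(1000);
	(void) printf("slave: released, running\n");
	return (NULL);
}

int
main(void)
{
	pthread_t slave;
	int timeout;

	atomic_store(&proxy_ready, 0);		/* like CPUSET_DEL(proxy_ready_set, id) */
	(void) pthread_create(&slave, NULL, slave_model, NULL);

	/* wait, with a grace period, for the slave to check in */
	for (timeout = 1000; timeout != 0; timeout--) {
		if (atomic_load(&proxy_ready) != 0)
			break;
		usleep(1000);
	}
	if (timeout == 0) {
		(void) fprintf(stderr, "slave failed to start\n");
		return (1);
	}

	/* ... master-side per-CPU configuration would go here ... */
	atomic_store(&cpu_ready, 1);		/* like CPUSET_ADD(cpu_ready_set, id) */
	(void) pthread_join(slave, NULL);
	return (0);
}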
static int
kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg)
{
	int rc, i;

	for (rc = 0, i = 0; i < NCPU; i++) {
		if (CPU_IN_SET(cpu_ready_set, i))
			rc += cb(i, arg);
	}

	return (rc);
}
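/*
 * Hedged, standalone sketch of how a caller might use an iterator shaped
 * like kdi_cpu_ready_iter(): the callback's return values are summed across
 * every CPU in the ready set.  The ready[] array, ready_iter() and
 * count_if_ready() below are hypothetical stand-ins for cpu_ready_set and a
 * KMDB per-CPU callback, chosen only so the example compiles on its own.
 */
#include <stdio.h>

#define	MODEL_NCPU	8

static int ready[MODEL_NCPU] = { 1, 1, 0, 1, 0, 0, 1, 1 };

static int
ready_iter(int (*cb)(int, void *), void *arg)
{
	int rc, i;

	for (rc = 0, i = 0; i < MODEL_NCPU; i++) {
		if (ready[i])
			rc += cb(i, arg);
	}
	return (rc);
}

static int
count_if_ready(int cpu, void *arg)
{
	(void) arg;
	(void) printf("visiting ready cpu %d\n", cpu);
	return (1);	/* each visited CPU contributes 1 to the sum */
}

int
main(void)
{
	(void) printf("%d CPUs visited\n", ready_iter(count_if_ready, NULL));
	return (0);
}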
static void
resume_cpus(void)
{
	int i;

	for (i = 1; i < ncpus; i++) {
		if (cpu[i] == NULL)
			continue;

		if (!CPU_IN_SET(cpu_suspend_lost_set, i)) {
			SUSPEND_DEBUG("xen_vcpu_up %d\n", i);
			mach_cpucontext_restore(cpu[i]);
			(void) xen_vcpu_up(i);
		}
	}

	mp_leave_barrier();
}
static void
suspend_cpus(void)
{
	int i;

	SUSPEND_DEBUG("suspend_cpus\n");

	mp_enter_barrier();

	for (i = 1; i < ncpus; i++) {
		if (!CPU_IN_SET(cpu_suspend_lost_set, i)) {
			SUSPEND_DEBUG("xen_vcpu_down %d\n", i);
			(void) xen_vcpu_down(i);
		}
		mach_cpucontext_reset(cpu[i]);
	}
}
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If thread is already running on target CPU then just
	 * make the transition request.  Otherwise, we'll need to
	 * make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		pwrnow_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL,
		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
	}
	kpreempt_enable();
}
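/*
 * Hedged, standalone sketch of the dispatch pattern above: if the calling
 * "CPU" is in the target set, handle it directly and drop it from the set;
 * anything left over is delivered by a (here, simulated) cross-call.  The
 * set_t bitmask, do_transition() and cross_call() are hypothetical model
 * stand-ins for cpuset_t, pwrnow_pstate_transition() and xc_call().
 */
#include <stdio.h>

typedef unsigned int set_t;	/* one bit per model CPU */

static void
do_transition(int cpu, int state)
{
	(void) printf("cpu %d -> P-state %d\n", cpu, state);
}

static void
cross_call(set_t set, int state)
{
	int i;

	/* stand-in for xc_call(): deliver to every remaining CPU in the set */
	for (i = 0; i < 32; i++) {
		if (set & (1u << i))
			do_transition(i, state);
	}
}

static void
power_set(set_t set, int my_id, int state)
{
	if (set & (1u << my_id)) {
		do_transition(my_id, state);	/* local CPU: no cross-call */
		set &= ~(1u << my_id);
	}
	if (set != 0)
		cross_call(set, state);		/* everyone else */
}

int
main(void)
{
	power_set(0x0b, 1, 2);	/* CPUs 0, 1, 3; caller runs on CPU 1 */
	return (0);
}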
/* ARGSUSED */
void
cmp_error_resteer(processorid_t cpuid)
{
#ifndef _CMP_NO_ERROR_STEERING
	cpuset_t mycores;
	cpu_t *cpu;
	chipid_t chipid;
	int i;

	if (!cmp_cpu_is_cmp(cpuid))
		return;

	ASSERT(MUTEX_HELD(&cpu_lock));
	chipid = cpunodes[cpuid].portid;
	mycores = chips[chipid];

	/* Look for an online sibling core */
	for (i = 0; i < NCPU; i++) {
		if (i == cpuid)
			continue;

		if (CPU_IN_SET(mycores, i) &&
		    (cpu = cpu_get(i)) != NULL && cpu_is_active(cpu)) {
			/* Found one, reset error steering */
			xc_one(i, (xcfunc_t *)set_cmp_error_steering, 0, 0);
			break;
		}
	}

	/* No online sibling cores, point to this core. */
	if (i == NCPU) {
		xc_one(cpuid, (xcfunc_t *)set_cmp_error_steering, 0, 0);
	}
#else
	/* Not all CMPs (e.g. Fujitsu Olympus-C) support error steering */
	return;
#endif /* _CMP_NO_ERROR_STEERING */
}
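/*
 * Hedged, standalone sketch of the sibling-selection logic above: scan the
 * set of cores on the same chip for an active core other than ourselves and
 * fall back to ourselves if none is found.  The bitmask siblings set and the
 * active[] array are hypothetical stand-ins for cpuset_t and cpu_get()/
 * cpu_is_active(); this is only a model of the search, not the kernel code.
 */
#include <stdio.h>

#define	MODEL_NCPU	8

static int
pick_steer_target(unsigned int siblings, const int active[], int self)
{
	int i;

	for (i = 0; i < MODEL_NCPU; i++) {
		if (i == self)
			continue;
		if ((siblings & (1u << i)) && active[i])
			return (i);	/* online sibling core found */
	}
	return (self);			/* no online sibling: steer to self */
}

int
main(void)
{
	int active[MODEL_NCPU] = { 1, 0, 1, 1, 0, 0, 0, 0 };

	/* cores 0 and 1 share a chip; core 1 asks where to steer errors */
	(void) printf("steer to core %d\n",
	    pick_steer_target(0x03, active, 1));
	return (0);
}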
/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return;	/* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return;	/* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
		CPUSET_ALL(cpu_bringup_set);
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);
	populate_idstr(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}

	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}

		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		cpu_state_change_notify(cpuid, CPU_SETUP);

		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}
/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu *cp = CPU;
	ushort_t original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * the slave will wait here forever -- assuming that the master
	 * will get back to us.  if it doesn't we've got bigger problems
	 * than a master not replying to this slave.
	 * the small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/*
	 * The CPU is now in cpu_ready_set, safely able to take pokes.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * park the slave thread in a safe/quiet state and wait for the master
	 * to finish configuring this CPU before proceeding to thread_exit().
	 */
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}
void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL | SOFT_PEND)) {
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because it may get
		 * called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to the current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock is dropped before calling
	 * func (callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourselves to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		softcall_tick = lbolt;
		if ((sc = softhead) != NULL) {
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from the current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) there is a CPU already draining the softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}
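/*
 * Hedged, standalone, single-threaded sketch of the queue discipline used by
 * softint() above: entries are popped from the head of a pending list,
 * immediately moved to a free list for reuse, and only then is the callback
 * run, so a callback may safely queue new work.  The call_t type, drain()
 * and hello() are illustrative stand-ins for softcall_t, the drain loop and
 * a queued handler; this model omits the locking and cpuset bookkeeping.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct call {
	void		(*c_func)(void *);
	void		*c_arg;
	struct call	*c_next;
} call_t;

static call_t *pending;		/* head of the work queue ("softhead") */
static call_t *freelist;	/* recycled entries ("softfree") */

static void
drain(void)
{
	call_t *c;

	while ((c = pending) != NULL) {
		void (*func)(void *) = c->c_func;
		void *arg = c->c_arg;

		pending = c->c_next;	/* unlink from the pending queue */
		c->c_next = freelist;	/* recycle the entry before calling */
		freelist = c;
		func(arg);		/* run the callback last */
	}
}

static void
hello(void *arg)
{
	(void) printf("softcall: %s\n", (const char *)arg);
}

int
main(void)
{
	call_t a = { hello, "first", NULL };
	call_t b = { hello, "second", NULL };

	a.c_next = &b;
	pending = &a;
	drain();
	return (0);
}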
/*
 * Gets called when the softcall queue is not moving forward.  We choose
 * a CPU to poke, excluding any that have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if:
		 * - it is in the cpuset already
		 * - it is not accepting interrupts
		 * - it is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu.  We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU can
			 * acknowledge the level1 interrupt.  The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in the cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL.  It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
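/*
 * Hedged, standalone sketch of the selection policy above: prefer an idle
 * CPU; otherwise remember the least-loaded CPU seen so far, and if several
 * CPUs tie at that load, collect them all so every one of them can be poked.
 * A single strictly-best CPU found later wins outright over any earlier
 * ties, matching the CPUSET_ZERO/CPUSET_ADD step at the end of the function.
 * The load[] array and the bitmask poke set are illustrative stand-ins for
 * cpu_intrload and cpuset_t; this is a model of the policy only.
 */
#include <stdio.h>
#include <limits.h>

#define	MODEL_NCPU	6

static unsigned int
choose(const int load[], int *single)
{
	unsigned int poke = 0;
	int best = INT_MAX;
	int pick = -1;
	int i;

	for (i = 0; i < MODEL_NCPU; i++) {
		if (load[i] == 0) {		/* idle CPU: take it and stop */
			pick = i;
			break;
		}
		if (load[i] < best) {		/* new minimum: remember it */
			pick = i;
			best = load[i];
		} else if (load[i] == best) {	/* tie: keep all candidates */
			if (pick != -1) {
				poke |= 1u << pick;
				pick = -1;
			}
			poke |= 1u << i;
		}
	}
	if (pick != -1)		/* a single best CPU overrides any earlier ties */
		poke = 1u << pick;
	*single = pick;
	return (poke);
}

int
main(void)
{
	int load[MODEL_NCPU] = { 40, 25, 25, 90, 30, 25 };
	int single;
	unsigned int poke = choose(load, &single);

	(void) printf("poke mask 0x%x (single winner: %d)\n", poke, single);
	return (0);
}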
void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return;	/* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}
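/*
 * Hedged, standalone model of the ownership protocol used by kern_preprom()
 * and kern_postprom(): a lock acquired with a compare-and-swap on an owner
 * pointer, re-entered by bumping a hold count, and released (with a wakeup
 * of any sleeping waiter) only when the count drops back to zero.  This is
 * a simplified pthread/C11 sketch, not the kernel implementation: it omits
 * the mutex_ready, panic and membar special cases entirely, and every name
 * here (token, model_preprom, model_postprom) is hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static int token[2];			/* distinct addresses stand in for cpu_t * */
static _Atomic(int *) owner;		/* NULL when the lock is free */
static atomic_uint holdcnt;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;

static void
model_preprom(int me)
{
	int *mine = &token[me];

	for (;;) {
		int *expect = NULL;

		/* try to take ownership; re-entry by the current owner also succeeds */
		if (atomic_compare_exchange_strong(&owner, &expect, mine) ||
		    expect == mine)
			break;

		/* lost the race: sleep until the owner signals a release */
		(void) pthread_mutex_lock(&wait_lock);
		if (atomic_load(&owner) != NULL)
			(void) pthread_cond_wait(&wait_cv, &wait_lock);
		(void) pthread_mutex_unlock(&wait_lock);
	}
	atomic_fetch_add(&holdcnt, 1);
}

static void
model_postprom(void)
{
	if (atomic_fetch_sub(&holdcnt, 1) != 1)
		return;				/* still held recursively */

	atomic_store(&owner, NULL);		/* last release: free the lock ... */
	(void) pthread_mutex_lock(&wait_lock);
	(void) pthread_cond_signal(&wait_cv);	/* ... and wake a waiter */
	(void) pthread_mutex_unlock(&wait_lock);
}

static void *
worker(void *arg)
{
	int me = (int)(long)arg;

	model_preprom(me);
	model_preprom(me);		/* recursive hold, like nested prom entry */
	(void) printf("thread %d holds the lock\n", me);
	usleep(1000);
	model_postprom();
	model_postprom();
	return (NULL);
}

int
main(void)
{
	pthread_t t[2];
	int i;

	for (i = 0; i < 2; i++)
		(void) pthread_create(&t[i], NULL, worker, (void *)(long)i);
	for (i = 0; i < 2; i++)
		(void) pthread_join(t[i], NULL);
	return (0);
}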