/* * Start secondary processors in motion. */ void cpu_boot_secondary_processors() { int i, pstate; struct cpu_info *ci; sparc64_ipi_init(); for (ci = cpus; ci != NULL; ci = ci->ci_next) { if (ci->ci_cpuid == CPU_UPAID) continue; cpu_pmap_prepare(ci, false); cpu_args->cb_node = ci->ci_node; cpu_args->cb_cpuinfo = ci->ci_paddr; membar_sync(); /* Disable interrupts and start another CPU. */ pstate = getpstate(); setpstate(PSTATE_KERN); prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0); for (i = 0; i < 2000; i++) { membar_sync(); if (CPUSET_HAS(cpus_active, ci->ci_index)) break; delay(10000); } setpstate(pstate); if (!CPUSET_HAS(cpus_active, ci->ci_index)) printf("cpu%d: startup failed\n", ci->ci_cpuid); } }
/*
 * Start secondary processors in motion.
 *
 * For each discovered CPU (other than the boot CPU) this publishes the
 * bootstrap arguments, starts the CPU via the PROM, waits for it to join
 * cpus_active, and then releases it through sync_tick so the new CPU can
 * synchronize its %tick/%stick counters with ours.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	/* Trampoline spins on this flag; presumably volatile — see its decl. */
	sync_tick = 0;

	sparc64_ipi_init();

	/* RB_MD1: machine-dependent boot flag used here to force uniprocessor. */
	if (boothowto & RB_MD1) {
		cpus[0].ci_next = NULL;
		sparc_ncpus = ncpu = ncpuonline = 1;
		return;
	}

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		/* Skip the CPU we are already running on. */
		if (ci->ci_cpuid == CPU_UPAID)
			continue;

		cpu_pmap_prepare(ci, false);

		/* Publish this CPU's bootstrap arguments for the trampoline. */
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		membar_Sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);

		/* Poll up to 2000 * 10ms (~20s) for the CPU to come up. */
		for (i = 0; i < 2000; i++) {
			membar_Sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}

		/* synchronize %tick ( to some degree at least ) */
		delay(1000);
		sync_tick = 1;
		membar_Sync();
		settick(0);
		/* Only CPUs with a separate system clock have a %stick register. */
		if (ci->ci_system_clockrate[0] != 0)
			setstick(0);

		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}
/*
 * Flush pte on all active processors.
 *
 * Flushes the local TLB entry first, then sends a flush IPI to every
 * other active CPU that currently holds a context for this pmap.
 */
void
smp_tlb_flush_pte(vaddr_t va, struct pmap * pm)
{
	sparc64_cpuset_t targets;
	struct cpu_info *cpu;
	int pmctx;
	const bool is_kernel_pmap = (pm == pmap_kernel());

	/* Take care of our own TLB first. */
	pmctx = pm->pm_ctx[cpu_number()];
	KASSERT(pmctx >= 0);
	if (is_kernel_pmap || pmctx > 0)
		sp_tlb_flush_pte(va, pmctx);

	/* Build the set of remote CPUs that may need a flush. */
	CPUSET_ASSIGN(targets, cpus_active);
	CPUSET_DEL(targets, cpu_number());
	if (CPUSET_EMPTY(targets))
		return;

	/* IPI each remaining CPU that has a live context for this pmap. */
	for (cpu = cpus; cpu != NULL; cpu = cpu->ci_next) {
		if (!CPUSET_HAS(targets, cpu->ci_index))
			continue;
		CPUSET_DEL(targets, cpu->ci_index);
		pmctx = pm->pm_ctx[cpu->ci_index];
		KASSERT(pmctx >= 0);
		if (!is_kernel_pmap && pmctx == 0)
			continue;
		sparc64_send_ipi(cpu->ci_cpuid, smp_tlb_flush_pte_func,
		    va, pmctx);
	}
}
/*
 * Send an IPI to all in the list but ourselves.
 *
 * The caller's cpuset is taken by value; we prune it as we go so each
 * target receives the IPI exactly once.
 */
void
sparc64_multicast_ipi(sparc64_cpuset_t cpuset, ipifunc_t func,
    uint64_t arg1, uint64_t arg2)
{
	struct cpu_info *cpu;

	/* Never signal ourselves. */
	CPUSET_DEL(cpuset, cpu_number());
	if (CPUSET_EMPTY(cpuset))
		return;

	for (cpu = cpus; cpu != NULL; cpu = cpu->ci_next) {
		if (!CPUSET_HAS(cpuset, cpu->ci_index))
			continue;
		CPUSET_DEL(cpuset, cpu->ci_index);
		sparc64_send_ipi(cpu->ci_cpuid, func, arg1, arg2);
	}
}
/*
 * Save (or discard) the FPU state belonging to lwp 'l', wherever it
 * currently lives.  If 'save' is true the state is written back, else
 * it is simply dropped.  On MP, if another CPU owns the state we IPI
 * that CPU and spin until it lets go.
 */
void
fpusave_lwp(struct lwp *l, bool save)
{
#ifdef MULTIPROCESSOR
	/* volatile: ci_fplwp is changed by the remote CPU while we spin. */
	volatile struct cpu_info *ci;

	if (l == fplwp) {
		/* We own the state — handle it locally with interrupts off. */
		int s = intr_disable();
		fpusave_cpu(save);
		intr_restore(s);
		return;
	}

	/* Find the (at most one) other CPU holding this lwp's FPU state. */
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		int spincount;

		if (ci == curcpu() || !CPUSET_HAS(cpus_active, ci->ci_index))
			continue;
		if (ci->ci_fplwp != l)
			continue;
		sparc64_send_ipi(ci->ci_cpuid, save ?
		    sparc64_ipi_save_fpstate :
		    sparc64_ipi_drop_fpstate, (uintptr_t)l, 0);

		/* Spin until the remote CPU clears ci_fplwp; bounded by panic. */
		spincount = 0;
		while (ci->ci_fplwp == l) {
			membar_Sync();
			spincount++;
			if (spincount > 10000000)
				panic("fpusave_lwp ipi didn't");
		}
		break;
	}
#else
	if (l == fplwp)
		fpusave_cpu(save);
#endif
}
void sparc64_do_pause(void) { #if defined(DDB) extern bool ddb_running_on_this_cpu(void); extern void db_resume_others(void); #endif CPUSET_ADD(cpus_paused, cpu_number()); do { membar_Sync(); } while(CPUSET_HAS(cpus_paused, cpu_number())); membar_Sync(); CPUSET_ADD(cpus_resumed, cpu_number()); #if defined(DDB) if (ddb_running_on_this_cpu()) { db_command_loop(); db_resume_others(); } #endif }
/* * Halt all cpus but ourselves. */ void mp_halt_cpus(void) { sparc64_cpuset_t cpumask, cpuset; struct cpu_info *ci; CPUSET_ASSIGN(cpuset, cpus_active); CPUSET_DEL(cpuset, cpu_number()); CPUSET_ASSIGN(cpumask, cpuset); CPUSET_SUB(cpuset, cpus_halted); if (CPUSET_EMPTY(cpuset)) return; CPUSET_CLEAR(cpus_spinning); sparc64_multicast_ipi(cpuset, sparc64_ipi_halt, 0, 0); if (sparc64_ipi_wait(&cpus_halted, cpumask)) sparc64_ipi_error("halt", cpumask, cpus_halted); /* * Depending on available firmware methods, other cpus will * either shut down themselfs, or spin and wait for us to * stop them. */ if (CPUSET_EMPTY(cpus_spinning)) { /* give other cpus a few cycles to actually power down */ delay(10000); return; } /* there are cpus spinning - shut them down if we can */ if (prom_has_stop_other()) { for (ci = cpus; ci != NULL; ci = ci->ci_next) { if (!CPUSET_HAS(cpus_spinning, ci->ci_index)) continue; prom_stop_other(ci->ci_cpuid); } } }
/*
 * Report whether the given CPU is currently in the paused set.
 * Returns non-zero if paused, zero otherwise.
 */
int
mp_cpu_is_paused(sparc64_cpuset_t cpunum)
{

	return CPUSET_HAS(cpus_paused, cpunum);
}