/*
 * Send an interprocessor interrupt - sun4u.
 */
void
sparc64_send_ipi_sun4u(int upaid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
	int i, ik, shift = 0;
	uint64_t intr_func;

	/* Sending an IPI to ourselves is not supported. */
	KASSERT(upaid != curcpu()->ci_cpuid);

	/*
	 * UltraSPARC-IIIi CPUs select the BUSY/NACK pair based on the
	 * lower two bits of the ITID.
	 */
	if (CPU_IS_USIIIi())
		shift = (upaid & 0x3) * 2;

	/* A dispatch must not already be in flight from this CPU. */
	if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
		panic("recursive IPI?");

	intr_func = (uint64_t)(u_long)func;

	/* Schedule an interrupt. */
	for (i = 0; i < 10000; i++) {
		int s = intr_disable();

		/* Load the dispatch data registers: handler and two args. */
		stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_func);
		stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, arg1);
		stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, arg2);
		/* Writing the dispatch command register fires the IPI. */
		stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);
		membar_Sync();
		/* Workaround for SpitFire erratum #54, from FreeBSD */
		if (CPU_IS_SPITFIRE()) {
			(void)ldxa(P_DCR_0, ASI_INTERRUPT_RECEIVE_DATA);
			membar_Sync();
		}

		/* Spin (bounded) until the dispatch's BUSY bit clears. */
		for (ik = 0; ik < 1000000; ik++) {
			if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
				continue;
			else
				break;
		}
		intr_restore(s);

		/* BUSY never cleared: the hardware is wedged, stop retrying. */
		if (ik == 1000000)
			break;

		/* No NACK means the target accepted the interrupt. */
		if ((ldxa(0, ASI_IDSR) & (IDSR_NACK << shift)) == 0)
			return;

		/*
		 * Wait for a while with enabling interrupts to avoid
		 * deadlocks. XXX - random value is better.
		 */
		DELAY(1);
	}

	if (panicstr == NULL)
		panic("cpu%d: ipi_send: couldn't send ipi to UPAID %u" " (tried %d times)", cpu_number(), upaid, i);
}
/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors(void)
{
	int i, pstate;
	struct cpu_info *ci;

	sync_tick = 0;

	sparc64_ipi_init();

	/* RB_MD1: uniprocessor boot requested; keep only the boot CPU. */
	if (boothowto & RB_MD1) {
		cpus[0].ci_next = NULL;
		sparc_ncpus = ncpu = ncpuonline = 1;
		return;
	}

	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		/* Skip the boot CPU - it is already running. */
		if (ci->ci_cpuid == CPU_UPAID)
			continue;

		cpu_pmap_prepare(ci, false);
		/* Pass the new CPU its node and cpu_info through the boot args. */
		cpu_args->cb_node = ci->ci_node;
		cpu_args->cb_cpuinfo = ci->ci_paddr;
		membar_Sync();

		/* Disable interrupts and start another CPU. */
		pstate = getpstate();
		setpstate(PSTATE_KERN);

		prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);

		/* Poll (2000 * 10ms) for the CPU to add itself to cpus_active. */
		for (i = 0; i < 2000; i++) {
			membar_Sync();
			if (CPUSET_HAS(cpus_active, ci->ci_index))
				break;
			delay(10000);
		}

		/* synchronize %tick ( to some degree at least ) */
		delay(1000);
		sync_tick = 1;
		membar_Sync();
		settick(0);
		if (ci->ci_system_clockrate[0] != 0)
			setstick(0);

		setpstate(pstate);

		if (!CPUSET_HAS(cpus_active, ci->ci_index))
			printf("cpu%d: startup failed\n", ci->ci_cpuid);
	}
}
/*
 * Entry point for a freshly started secondary CPU: set up per-CPU
 * state, announce ourselves as active, sync timers with the boot CPU,
 * and enable interrupts.
 */
void
cpu_hatch(void)
{
	char *v = (char*)CPUINFO_VA;
	int i;

	/* Flush the cpu_info pages before using them. */
	for (i = 0; i < 4*PAGE_SIZE; i += sizeof(long))
		flush(v + i);

	cpu_pmap_init(curcpu());
	CPUSET_ADD(cpus_active, cpu_number());
	cpu_reset_fpustate();
	curlwp = curcpu()->ci_data.cpu_idlelwp;
	membar_Sync();

	/* wait for the boot CPU to flip the switch */
	while (sync_tick == 0) {
		/* we do nothing here */
	}
	/* Zero our timers in step with the boot CPU's settick(0)/setstick(0). */
	settick(0);
	if (curcpu()->ci_system_clockrate[0] != 0) {
		setstick(0);
		stickintr_establish(PIL_CLOCK, stickintr);
	} else {
		tickintr_establish(PIL_CLOCK, tickintr);
	}

	spl0();
}
/*
 * Wait for IPI operation to complete.
 * Return 0 on success.
 */
int
sparc64_ipi_wait(sparc64_cpuset_t volatile *cpus_watchset, sparc64_cpuset_t cpus_mask)
{
	/* Poll for roughly one second's worth of %tick cycles. */
	const uint64_t deadline = gettick() + cpu_frequency(curcpu());

	for (;;) {
		if (gettick() >= deadline)
			return 1;	/* timed out */
		membar_Sync();
		if (CPUSET_EQUAL(*cpus_watchset, cpus_mask))
			return 0;	/* all expected CPUs responded */
	}
}
/*
 * Park this CPU in the paused set until another CPU resumes it,
 * then acknowledge via cpus_resumed.
 */
void
sparc64_do_pause(void)
{
#if defined(DDB)
	extern bool ddb_running_on_this_cpu(void);
	extern void db_resume_others(void);
#endif

	CPUSET_ADD(cpus_paused, cpu_number());

	/* Spin until some other CPU removes us from the paused set. */
	do {
		membar_Sync();
	} while(CPUSET_HAS(cpus_paused, cpu_number()));
	membar_Sync();

	/* Tell the resuming CPU that we are running again. */
	CPUSET_ADD(cpus_resumed, cpu_number());

#if defined(DDB)
	/* If DDB wants this CPU, run its command loop before returning. */
	if (ddb_running_on_this_cpu()) {
		db_command_loop();
		db_resume_others();
	}
#endif
}
/*
 * Resume all paused cpus.
 */
void
mp_resume_cpus(void)
{
	sparc64_cpuset_t snapshot;
	int attempt;

	CPUSET_CLEAR(snapshot);	/* XXX: gcc -Wuninitialized */

	/* Make up to three attempts to wake every paused CPU. */
	for (attempt = 0; attempt < 3; attempt++) {
		CPUSET_CLEAR(cpus_resumed);
		/* Remember who was paused, then release them all. */
		CPUSET_ASSIGN(snapshot, cpus_paused);
		membar_Sync();
		CPUSET_CLEAR(cpus_paused);

		/* CPUs awake on cpus_paused clear */
		if (sparc64_ipi_wait(&cpus_resumed, snapshot) == 0)
			return;
	}
	sparc64_ipi_error("resume", cpus_resumed, snapshot);
}
/*
 * Save or drop the FPU state belonging to lwp l, wherever it lives.
 * If another CPU holds the state, IPI it and wait for it to let go.
 */
void
fpusave_lwp(struct lwp *l, bool save)
{
#ifdef MULTIPROCESSOR
	volatile struct cpu_info *ci;

	/* Fast path: the state is on this CPU. */
	if (l == fplwp) {
		int s = intr_disable();
		fpusave_cpu(save);
		intr_restore(s);
		return;
	}

	/* Find the CPU holding the lwp's FPU state and IPI it. */
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		int spincount;

		if (ci == curcpu() || !CPUSET_HAS(cpus_active, ci->ci_index))
			continue;
		if (ci->ci_fplwp != l)
			continue;
		sparc64_send_ipi(ci->ci_cpuid, save ? sparc64_ipi_save_fpstate : sparc64_ipi_drop_fpstate, (uintptr_t)l, 0);

		/* Spin (bounded) until the target CPU has released the state. */
		spincount = 0;
		while (ci->ci_fplwp == l) {
			membar_Sync();
			spincount++;
			if (spincount > 10000000)
				panic("fpusave_lwp ipi didn't");
		}
		break;
	}
#else
	if (l == fplwp)
		fpusave_cpu(save);
#endif
}
/*
 * Resume a single cpu
 */
void
mp_resume_cpu(int cno)
{
	/* Clearing its bit releases the CPU spinning in sparc64_do_pause(). */
	CPUSET_DEL(cpus_paused, cno);
	membar_Sync();
}