/*ARGSUSED*/
static int
xen_uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int ret = PSM_SUCCESS;
	cpuset_t cpus;

	if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
		atomic_add_16(&xen_uppc_irq_shared_table[irqno], 1);

	/*
	 * We are called at splhi() so we can't call anything that might end
	 * up trying to context switch.
	 */
	if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
	    DOMAIN_IS_INITDOMAIN(xen_info)) {
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_setup_pirq(irqno, ipl, &cpus);
	} else {
		/*
		 * Set priority/affinity/enable for non PIRQs
		 */
		ret = ec_set_irq_priority(irqno, ipl);
		ASSERT(ret == 0);
		CPUSET_ZERO(cpus);
		CPUSET_ADD(cpus, 0);
		ec_set_irq_affinity(irqno, cpus);
		ec_enable_irq(irqno);
	}

	return (ret);
}
void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = lbolt;

	if (softcall_delay < 0)
		softcall_delay = 1;

	/*
	 * softcall_delay is expressed in units of 10 milliseconds
	 * (1 == 10 ms), so convert it to clock ticks.
	 */
	softcall_delay = softcall_delay * (hz/100);

	CPUSET_ZERO(*softcall_cpuset);
}
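/*
 * A minimal user-space sketch (not part of softcall_init()) of the unit
 * conversion above, assuming a hypothetical hz value of 1000: softcall_delay
 * counts 10-millisecond units, so multiplying by hz/100 yields clock ticks.
 */
#include <assert.h>

int
main(void)
{
	int hz = 1000;			/* assumed clock-tick rate (ticks/sec) */
	int softcall_delay = 1;		/* 1 == 10 milliseconds */
	int ticks = softcall_delay * (hz / 100);

	assert(ticks == 10);		/* 10 ticks at 1000 Hz is 10 ms */
	return (0);
}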
int
__gnat_set_affinity (int tid, unsigned cpu)
{
  cpuset_t cpuset;

  CPUSET_ZERO(cpuset);
  CPUSET_SET(cpuset, cpu);

  /* Pass the cpuset just built; passing the raw cpu index would leave the
     set unused and hand taskCpuAffinitySet the wrong affinity mask.  */
  return taskCpuAffinitySet (tid, cpuset);
}
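/*
 * For comparison only, a self-contained sketch of the same zero-then-set
 * affinity pattern using the glibc CPU_* macros and pthread_setaffinity_np()
 * instead of the VxWorks CPUSET_* macros and taskCpuAffinitySet(). It is not
 * part of the GNAT runtime, and set_affinity_to_cpu() is a hypothetical name.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static int
set_affinity_to_cpu(pthread_t tid, unsigned cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);			/* clear the set, like CPUSET_ZERO */
	CPU_SET(cpu, &set);		/* add one CPU, like CPUSET_SET */
	return (pthread_setaffinity_np(tid, sizeof (set), &set));
}

int
main(void)
{
	int err = set_affinity_to_cpu(pthread_self(), 0);

	if (err != 0)
		fprintf(stderr, "pthread_setaffinity_np: %s\n", strerror(err));
	return (err);
}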
/*
 * Ensure counters are enabled on the given processor.
 */
void
kcpc_remote_program(cpu_t *cp)
{
	cpuset_t set;

	CPUSET_ZERO(set);
	CPUSET_ADD(set, cp->cpu_id);

	xc_sync(0, 0, 0, CPUSET2BV(set), (xc_func_t)kcpc_remoteprogram_func);
}
long
ACE_OS::num_processors_online (void)
{
  ACE_OS_TRACE ("ACE_OS::num_processors_online");

#if defined (ACE_HAS_PHARLAP)
  return 1;
#elif defined (ACE_WIN32)
  SYSTEM_INFO sys_info;
  ::GetSystemInfo (&sys_info);
  long active_processors = 0;
  DWORD_PTR mask = sys_info.dwActiveProcessorMask;
  while (mask != 0)
    {
      if (mask & 1)
        ++active_processors;
      mask >>= 1;
    }
  return active_processors;
#elif defined (ACE_HAS_VXCPULIB)
  long num_cpu = 0;
  cpuset_t cpuset;
  CPUSET_ZERO (cpuset);
  cpuset = vxCpuEnabledGet ();
  unsigned int const maxcpu = vxCpuConfiguredGet ();
  for (unsigned int i = 0; i < maxcpu; i++)
    {
      if (CPUSET_ISSET (cpuset, i))
        ++num_cpu;
    }
  return num_cpu;
#elif defined (_SC_NPROCESSORS_ONLN)
  return ::sysconf (_SC_NPROCESSORS_ONLN);
#elif defined (ACE_HAS_SYSCTL)
  int num_processors;
  int mib[2] = { CTL_HW, HW_NCPU };
  size_t len = sizeof (num_processors);
  if (::sysctl (mib, 2, &num_processors, &len, 0, 0) != -1)
    return num_processors;
  else
    return -1;
#elif defined (__hpux)
  struct pst_dynamic psd;
  if (::pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) != -1)
    return psd.psd_proc_cnt;
  else
    return -1;
#else
  ACE_NOTSUP_RETURN (-1);
#endif
}
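/*
 * A minimal standalone example of the _SC_NPROCESSORS_ONLN branch above,
 * which is what ACE_OS::num_processors_online() reduces to on platforms
 * where that sysconf name is defined; written as plain C rather than
 * through the ACE wrapper.
 */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long n = sysconf(_SC_NPROCESSORS_ONLN);	/* returns -1 on error */

	if (n < 0)
		perror("sysconf");
	else
		printf("%ld processors online\n", n);
	return (n < 0 ? 1 : 0);
}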
/*
 * Jump to the fast reboot switcher. This function never returns.
 */
void
fast_reboot()
{
	processorid_t bootcpuid = 0;
	extern uintptr_t postbootkernelbase;
	extern char fb_swtch_image[];
	fastboot_file_t *fb;
	int i;

	postbootkernelbase = 0;

	fb = &newkernel.fi_files[FASTBOOT_SWTCH];

	/*
	 * Map the address into both the current proc's address
	 * space and the kernel's address space in case the panic
	 * is forced by kmdb.
	 */
	if (&kas != curproc->p_as) {
		hat_devload(curproc->p_as->a_hat, (caddr_t)fb->fb_va,
		    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
	}

	bcopy((void *)fb_swtch_image, (void *)fb->fb_va, fb->fb_size);

	/*
	 * Set fb_va to fake_va
	 */
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		newkernel.fi_files[i].fb_va = fake_va;
	}

	if (panicstr && CPU->cpu_id != bootcpuid &&
	    CPU_ACTIVE(cpu_get(bootcpuid))) {
		extern void panic_idle(void);
		cpuset_t cpuset;

		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)fastboot_xc_func);

		panic_idle();
	} else
		(void) fastboot_xc_func(&newkernel, 0, 0);
}
/*
 * Gets called when the softcall queue is not moving forward. We choose
 * a CPU and poke it, skipping the ones that have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0)
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in the cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
/*
 * Gets called when the softcall queue is not moving forward. We choose
 * a CPU and poke it, skipping the ones that have already been poked.
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from the current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if:
		 * - it is in the cpuset already
		 * - it is not accepting interrupts
		 * - it is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu. We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has the same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in the cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
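/*
 * A user-space sketch of the selection rule in softcall_choose_cpu() above:
 * take the first idle CPU, otherwise the least interrupt-loaded one, and on
 * a tie mark every tied CPU for poking. The loads[] array and the unsigned
 * poke mask are hypothetical stand-ins for the kernel's cpu_t list and
 * cpuset_t.
 */
#include <limits.h>
#include <stdio.h>

#define	NCPU	4

int
main(void)
{
	int loads[NCPU] = { 30, 10, 10, 25 };	/* pretend cpu_intrload values */
	unsigned poke = 0;			/* stand-in for the poke cpuset */
	int best = -1, min_load = INT_MAX;
	int i;

	for (i = 0; i < NCPU; i++) {
		if (loads[i] == 0) {		/* idle CPU: take it and stop */
			best = i;
			break;
		}
		if (loads[i] < min_load) {	/* new minimum: remember it */
			best = i;
			min_load = loads[i];
		} else if (loads[i] == min_load) {
			if (best != -1) {	/* tie: poke all tied CPUs */
				poke |= 1u << best;
				best = -1;
			}
			poke |= 1u << i;
		}
	}
	if (best != -1)				/* a single winner wins outright */
		poke = 1u << best;

	printf("poke mask: 0x%x\n", poke);	/* prints 0x6: CPUs 1 and 2 */
	return (0);
}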
/*
 * launch slave cpus into kernel text, pause them,
 * and restore the original prom pages
 */
void
i_cpr_mp_setup(void)
{
	extern void restart_other_cpu(int);
	cpu_t *cp;

	uint64_t kctx = kcontextreg;

	/*
	 * Do not allow setting page size codes in MMU primary context
	 * register while using cif wrapper. This is needed to work
	 * around OBP incorrect handling of this MMU register.
	 */
	kcontextreg = 0;

	/*
	 * reset cpu_ready_set so x_calls work properly
	 */
	CPUSET_ZERO(cpu_ready_set);
	CPUSET_ADD(cpu_ready_set, getprocessorid());

	/*
	 * setup cif to use the cookie from the new/tmp prom
	 * and setup tmp handling for calling prom services.
	 */
	i_cpr_cif_setup(CIF_SPLICE);

	/*
	 * at this point, only the nucleus and a few cpr pages are
	 * mapped in. once we switch to the kernel trap table,
	 * we can access the rest of kernel space.
	 */
	prom_set_traptable(&trap_table);

	if (ncpus > 1) {
		sfmmu_init_tsbs();

		mutex_enter(&cpu_lock);
		/*
		 * None of the slave cpus are ready at this time,
		 * yet the cpu structures have various cpu_flags set;
		 * clear cpu_flags and mutex_ready.
		 * Since we are coming up from a CPU suspend, the slave cpus
		 * are frozen.
		 */
		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) {
			cp->cpu_flags = CPU_FROZEN;
			cp->cpu_m.mutex_ready = 0;
		}

		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next)
			restart_other_cpu(cp->cpu_id);

		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);

		i_cpr_xcall(i_cpr_clear_entries);
	} else
		i_cpr_clear_entries(0, 0);

	/*
	 * now unlink the cif wrapper;  WARNING: do not call any
	 * prom_xxx() routines until after prom pages are restored.
	 */
	i_cpr_cif_setup(CIF_UNLINK);

	(void) i_cpr_prom_pages(CPR_PROM_RESTORE);

	/* allow setting page size codes in MMU primary context register */
	kcontextreg = kctx;
}