/*
 * Halt all CPUs other than the current one, using a priority cross-call.
 */
static void
stop_other_cpus(void)
{
	ulong_t s = clear_int_flag(); /* fast way to keep CPU from changing */
	cpuset_t xcset;

	CPUSET_ALL_BUT(xcset, CPU->cpu_id);
	xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)mach_cpu_halt);
	restore_int_flag(s);
}
/*
 * Ensure counters are enabled on the given processor.
 */
void
kcpc_remote_program(cpu_t *cp)
{
	cpuset_t set;

	CPUSET_ZERO(set);
	CPUSET_ADD(set, cp->cpu_id);
	xc_sync(0, 0, 0, CPUSET2BV(set), (xc_func_t)kcpc_remoteprogram_func);
}
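/*
 * A minimal caller sketch (hypothetical, not from this code): reprogram
 * counters on every active CPU by walking the online-cpu ring under
 * cpu_lock:
 *
 *	cpu_t *cp;
 *
 *	mutex_enter(&cpu_lock);
 *	cp = cpu_active;
 *	do {
 *		kcpc_remote_program(cp);
 *	} while ((cp = cp->cpu_next_onln) != cpu_active);
 *	mutex_exit(&cpu_lock);
 */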
/*
 * Jump to the fast reboot switcher. This function never returns.
 */
void
fast_reboot()
{
	processorid_t bootcpuid = 0;
	extern uintptr_t postbootkernelbase;
	extern char fb_swtch_image[];
	fastboot_file_t *fb;
	int i;

	postbootkernelbase = 0;

	fb = &newkernel.fi_files[FASTBOOT_SWTCH];

	/*
	 * Map the address into both the current proc's address
	 * space and the kernel's address space in case the panic
	 * is forced by kmdb.
	 */
	if (&kas != curproc->p_as) {
		hat_devload(curproc->p_as->a_hat, (caddr_t)fb->fb_va,
		    MMU_PAGESIZE, mmu_btop(fb->fb_dest_pa),
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
	}

	bcopy((void *)fb_swtch_image, (void *)fb->fb_va, fb->fb_size);

	/*
	 * Set fb_va to fake_va
	 */
	for (i = 0; i < FASTBOOT_MAX_FILES_MAP; i++) {
		newkernel.fi_files[i].fb_va = fake_va;
	}

	if (panicstr && CPU->cpu_id != bootcpuid &&
	    CPU_ACTIVE(cpu_get(bootcpuid))) {
		extern void panic_idle(void);
		cpuset_t cpuset;

		CPUSET_ZERO(cpuset);
		CPUSET_ADD(cpuset, bootcpuid);
		xc_priority((xc_arg_t)&newkernel, 0, 0, CPUSET2BV(cpuset),
		    (xc_func_t)fastboot_xc_func);

		panic_idle();
	} else
		(void) fastboot_xc_func(&newkernel, 0, 0);
}
/*ARGSUSED*/
int
cmci_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
{
	cpuset_t cpu_set;

	CPUSET_ONLY(cpu_set, cpuid);

	switch (what) {
	case CPU_ON:
		xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
		    (xc_func_t)apic_cmci_enable);
		break;
	case CPU_OFF:
		xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
		    (xc_func_t)apic_cmci_disable);
		break;
	default:
		break;
	}

	return (0);
}
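/*
 * A usage sketch (assumed, not shown in this excerpt): CPU setup callbacks
 * such as cmci_cpu_setup() are registered with the generic CPU setup hook
 * machinery while holding cpu_lock, so CMCI gets toggled as CPUs come and
 * go:
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(cmci_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);
 */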
/*
 * Wrapper for kmdb to capture other CPUs, causing them to enter the debugger.
 */
void
kdi_xc_others(int this_cpu, void (*func)(void))
{
	extern int IGNORE_KERNEL_PREEMPTION;
	int save_kernel_preemption;
	cpuset_t set;

	if (!xc_initialized)
		return;

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;

	CPUSET_ALL_BUT(set, this_cpu);

	xc_priority_common((xc_func_t)func, 0, 0, 0, CPUSET2BV(set));

	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}
/*
 * Request a P-state transition on each CPU in the given set.
 */
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If the thread is already running on a target CPU then just
	 * make the transition request locally. Otherwise, we'll need to
	 * make a cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		pwrnow_pstate_transition(req_state);
		CPUSET_DEL(set, CPU->cpu_id);
	}
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, NULL, NULL,
		    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
	}
	kpreempt_enable();
}
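/*
 * A hypothetical caller sketch: request a P-state transition for CPUs 0
 * and 1. pwrnow_power() performs the transition locally when the calling
 * CPU is in the set and cross-calls only the remaining CPUs:
 *
 *	cpuset_t set;
 *
 *	CPUSET_ZERO(set);
 *	CPUSET_ADD(set, 0);
 *	CPUSET_ADD(set, 1);
 *	pwrnow_power(set, req_state);
 *
 * where req_state is the assumed target P-state value.
 */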
 * handling. However, since nothing important uses ASYNC, I've not bothered.
 */
#define	XC_MSG_FREE	(0)	/* msg in xc_free queue */
#define	XC_MSG_ASYNC	(1)	/* msg in slave xc_msgbox */
#define	XC_MSG_CALL	(2)	/* msg in slave xc_msgbox */
#define	XC_MSG_SYNC	(3)	/* msg in slave xc_msgbox */
#define	XC_MSG_WAITING	(4)	/* msg in master xc_msgbox or xc_waiters */
#define	XC_MSG_RELEASED	(5)	/* msg in slave xc_msgbox */
#define	XC_MSG_DONE	(6)	/* msg in master xc_msgbox */

/*
 * We allow for one high priority message at a time to happen in the system.
 * This is used for panic, kmdb, etc., so no locking is done.
 */
static volatile cpuset_t xc_priority_set_store;
static volatile ulong_t *xc_priority_set = CPUSET2BV(xc_priority_set_store);
static xc_data_t xc_priority_data;

/*
 * Wrappers to avoid C compiler warnings due to volatile. The atomic bit
 * operations don't accept volatile bit vectors - which is a bit silly.
 */
#define	XC_BT_SET(vector, b)	BT_ATOMIC_SET((ulong_t *)(vector), (b))
#define	XC_BT_CLEAR(vector, b)	BT_ATOMIC_CLEAR((ulong_t *)(vector), (b))

/*
 * Decrement a CPU's work count
 */
static void
xc_decrement(struct machcpu *mcpu)
{