/* * Halt all running cpus, excluding current cpu. */ void cpu_halt_others(void) { kcpuset_t *kcp; // If we are the only CPU running, there's nothing to do. if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset)) return; // Get all running CPUs kcpuset_clone(&kcp, cpus_running); // Remove ourself kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset); // Remove any halted CPUs kcpuset_remove(kcp, cpus_halted); // If there are CPUs left, send the IPIs if (!kcpuset_iszero(kcp)) { cpu_multicast_ipi(kcp, IPI_HALT); cpu_ipi_wait("halt", cpus_halted, kcp); } kcpuset_destroy(kcp); /* * TBD * Depending on available firmware methods, other cpus will * either shut down themselves, or spin and wait for us to * stop them. */ }
/*
 * Resume a single cpu
 */
void
cpu_resume(int index)
{
	/*
	 * Reset the resume acknowledgements before releasing the target,
	 * then let it go by clearing its bit in cpus_paused; paused CPUs
	 * wake when their cpus_paused bit clears (see cpu_resume_others).
	 */
	CPUSET_CLEAR(cpus_resumed);
	CPUSET_DEL(cpus_paused, index);

	/* Wait for the CPU to report in via cpus_resumed; complain if not. */
	if (cpu_ipi_wait(&cpus_resumed, CPUSET_SINGLE(index)))
		cpu_ipi_error("resume", cpus_resumed, CPUSET_SINGLE(index));
}
/*
 * Resume all paused cpus.
 */
void
cpu_resume_others(void)
{
	__cpuset_t wakeset;

	/* Reset acknowledgements before releasing anyone. */
	CPUSET_CLEAR(cpus_resumed);

	/* Snapshot who is paused, then release them all at once. */
	CPUSET_ASSIGN(wakeset, cpus_paused);
	CPUSET_CLEAR(cpus_paused);	/* CPUs awake on cpus_paused clear */

	/* Wait until every released CPU reports in via cpus_resumed. */
	if (cpu_ipi_wait(&cpus_resumed, wakeset))
		cpu_ipi_error("resume", cpus_resumed, wakeset);
}
/*
 * Resume a single cpu
 */
void
cpu_resume(cpuid_t cii)
{
	/* Nothing can be paused via IPIs before the system is up. */
	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	/* Per-CPU scratch set; NOTE(review): presumably ddb-owned — confirm. */
	kcpuset_t *kcp = ci->ci_ddbcpus;

	/* Build the set of CPUs we expect to acknowledge (just cii here). */
	kcpuset_set(kcp, cii);
	/* Clear all resume acknowledgements (remove the set from itself). */
	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	/* Release the target; paused CPUs wake when their bit clears. */
	kcpuset_atomic_clear(cpus_paused, cii);

	cpu_ipi_wait("resume", cpus_resumed, kcp);
}
/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	__cpuset_t targets;

	/* Everyone currently running, except ourself. */
	CPUSET_ASSIGN(targets, cpus_running);
	CPUSET_DEL(targets, cpu_index(curcpu()));
	if (CPUSET_EMPTY_P(targets))
		return;

	/* Ask them to suspend, then wait for each to show up in cpus_paused. */
	cpu_multicast_ipi(targets, IPI_SUSPEND);
	if (cpu_ipi_wait(&cpus_paused, targets))
		cpu_ipi_error("pause", cpus_paused, targets);
}
/*
 * Resume all paused cpus.
 */
void
cpu_resume_others(void)
{
	/* No other CPUs are paused before the system is up. */
	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	/* Per-CPU scratch set; NOTE(review): presumably ddb-owned — confirm. */
	kcpuset_t *kcp = ci->ci_ddbcpus;

	/*
	 * Reset acknowledgements, snapshot who is paused, then release
	 * them all by emptying cpus_paused.
	 */
	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	kcpuset_copy(kcp, cpus_paused);
	kcpuset_atomicly_remove(cpus_paused, cpus_paused);

	/* CPUs awake on cpus_paused clear */
	cpu_ipi_wait("resume", cpus_resumed, kcp);
}
/* * Pause all running cpus, excluding current cpu. */ void cpu_pause_others(void) { struct cpu_info * const ci = curcpu(); if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset)) return; kcpuset_t *kcp = ci->ci_ddbcpus; kcpuset_copy(kcp, cpus_running); kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset); kcpuset_remove(kcp, cpus_paused); cpu_broadcast_ipi(IPI_SUSPEND); cpu_ipi_wait("pause", cpus_paused, kcp); }
/* * Halt all running cpus, excluding current cpu. */ void cpu_halt_others(void) { __cpuset_t cpumask, cpuset; CPUSET_ASSIGN(cpuset, cpus_running); CPUSET_DEL(cpuset, cpu_index(curcpu())); CPUSET_ASSIGN(cpumask, cpuset); CPUSET_SUB(cpuset, cpus_halted); if (CPUSET_EMPTY_P(cpuset)) return; cpu_multicast_ipi(cpuset, IPI_HALT); if (cpu_ipi_wait(&cpus_halted, cpumask)) cpu_ipi_error("halt", cpumask, cpus_halted); /* * TBD * Depending on available firmware methods, other cpus will * either shut down themselfs, or spin and wait for us to * stop them. */ }