/* * Halt all running cpus, excluding current cpu. */ void cpu_halt_others(void) { kcpuset_t *kcp; // If we are the only CPU running, there's nothing to do. if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset)) return; // Get all running CPUs kcpuset_clone(&kcp, cpus_running); // Remove ourself kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset); // Remove any halted CPUs kcpuset_remove(kcp, cpus_halted); // If there are CPUs left, send the IPIs if (!kcpuset_iszero(kcp)) { cpu_multicast_ipi(kcp, IPI_HALT); cpu_ipi_wait("halt", cpus_halted, kcp); } kcpuset_destroy(kcp); /* * TBD * Depending on available firmware methods, other cpus will * either shut down themselves, or spin and wait for us to * stop them. */ }
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 *
 *	Must not be called from interrupt or soft-interrupt context, and
 *	is a no-op once the system has panicked.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/* After a panic, cross-calls cannot be serviced; just bail. */
	if (__predict_false(panicstr != NULL)) {
		return;
	}
	/* The object must not already be in use by another writer. */
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
		/* Loop until every CPU has been cleared from the target set. */
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	/* Release ownership; the object may be reused for another pass. */
	psz->psz_owner = NULL;
}
struct intrids_handler * interrupt_construct_intrids(const kcpuset_t *cpuset) { struct intr_source *is; struct intrids_handler *ii_handler; intrid_t *ids; int i, irq, count; if (kcpuset_iszero(cpuset)) return NULL; if (!kcpuset_isset(cpuset, 0)) /* XXX */ return NULL; count = 0; for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) { if (is->is_hand != NULL) count++; } ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, KM_SLEEP); if (ii_handler == NULL) return NULL; ii_handler->iih_nids = count; if (count == 0) return ii_handler; ids = ii_handler->iih_intrids; i = 0; for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) { /* Ignore devices attached after counting "count". */ if (i >= count) break; if (is->is_hand == NULL) continue; strncpy(ids[i], is->is_source, sizeof(intrid_t)); i++; } return ii_handler; }