static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			uvm_lwp_hold(curlwp);	/* don't swap out pcc */
			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
			uvm_lwp_rele(curlwp);
		}
		KASSERT(pcc.pcc_size < size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}
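For context, percpu_cpu_enlarge() runs on behalf of percpu_alloc() when each CPU's per-CPU storage area must grow, and the cross-call ensures every CPU swaps in its own enlarged block. A minimal, hypothetical consumer of the percpu(9) API that could exercise this path might look like the following sketch (the foo_* names are invented for illustration):

#include <sys/percpu.h>

static percpu_t *foo_evcnt;	/* hypothetical per-CPU counter */

void
foo_init(void)
{
	/* Allocating per-CPU storage may grow each CPU's area,
	 * which is what percpu_cpu_enlarge() above implements. */
	foo_evcnt = percpu_alloc(sizeof(uint64_t));
}

void
foo_count(void)
{
	uint64_t *cnt;

	/* percpu_getref() pins us to this CPU while the pointer is held. */
	cnt = percpu_getref(foo_evcnt);
	(*cnt)++;
	percpu_putref(foo_evcnt);
}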
static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}
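cpufreq_set_raw() shows the canonical low-priority unicast pattern: queue a function on one CPU's cross-call thread and block until it has run there. A stripped-down, hypothetical version of the same pattern (foo_handler() and foo_on_cpu() are invented names) could look like this:

#include <sys/xcall.h>
#include <sys/cpu.h>

/* Handler runs on the target CPU; the signature matches xcfunc_t. */
static void
foo_handler(void *arg1, void *arg2)
{
	/* ... act on the target CPU using arg1/arg2 ... */
}

static void
foo_on_cpu(struct cpu_info *ci, void *arg)
{
	uint64_t where;

	where = xc_unicast(0, foo_handler, arg, NULL, ci);
	xc_wait(where);		/* sleep until foo_handler() ran on ci */
}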
/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci;
	uint64_t where;
	int s;

	/*
	 * Caller should have re-checked if there is any state to manage.
	 * Block the interrupts and inspect again, since cross-call sent
	 * by remote CPU could have changed the state.
	 */
	s = splsoftclock();
	ci = l->l_pcu_cpu[id];
	if (ci == curcpu()) {
		/*
		 * State is on the current CPU - just perform the operations.
		 */
		KASSERT((flags & PCU_CLAIM) == 0);
		KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
		    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
		    __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
		pcu_do_op(pcu, l, flags);
		splx(s);
		return;
	}

	if (__predict_false(ci == NULL)) {
		if (flags & PCU_CLAIM) {
			pcu_do_op(pcu, l, flags);
		}
		/* Cross-call has won the race - no state to manage. */
		splx(s);
		return;
	}
	splx(s);

	/*
	 * State is on the remote CPU - perform the operations there.
	 * Note: there is a race condition; see description in the top.
	 */
	where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
	    __UNCONST(pcu), (void *)(uintptr_t)flags, ci);
	xc_wait(where);

	KASSERT((flags & PCU_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}
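pcu_lwp_op() is the backend for the public pcu(9) entry points, which reduce to a single call on it. The following sketch of a discard-style wrapper is illustrative only and assumes the surrounding kern_pcu.c context; the real wrapper's details may differ:

/*
 * pcu_discard: discard the PCU state of the current LWP (a sketch,
 * e.g. for use on exec).  Drives pcu_lwp_op() above.
 */
void
pcu_discard(const pcu_ops_t *pcu)
{
	const u_int id = pcu->pcu_id;
	lwp_t * const l = curlwp;

	if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
		return;		/* no state anywhere - nothing to do */
	}
	pcu_lwp_op(pcu, l, PCU_RELEASE);
}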
/*
 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
 */
void
pcu_load(const pcu_ops_t *pcu)
{
	const u_int id = pcu->pcu_id;
	struct cpu_info *ci, *curci;
	lwp_t * const l = curlwp;
	uint64_t where;
	int s;

	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	s = splsoftclock();
	curci = curcpu();
	ci = l->l_pcu_cpu[id];

	/* Does this CPU already have our PCU state loaded? */
	if (ci == curci) {
		KASSERT(curci->ci_pcu_curlwp[id] == l);
		pcu->pcu_state_load(l, PCU_ENABLE);	/* Re-enable */
		splx(s);
		return;
	}

	/* If PCU state of this LWP is on the remote CPU - save it there. */
	if (ci) {
		splx(s);
		/* Note: there is a race; see description in the top. */
		where = xc_unicast(XC_HIGHPRI, (xcfunc_t)pcu_cpu_op,
		    __UNCONST(pcu), (void *)(PCU_SAVE | PCU_RELEASE), ci);
		xc_wait(where);

		/* Enter IPL_SOFTCLOCK and re-fetch the current CPU. */
		s = splsoftclock();
		curci = curcpu();
	}
	KASSERT(l->l_pcu_cpu[id] == NULL);

	/* Save the PCU state on the current CPU, if there is any. */
	pcu_cpu_op(pcu, PCU_SAVE | PCU_RELEASE);
	KASSERT(curci->ci_pcu_curlwp[id] == NULL);

	/*
	 * Finally, load the state for this LWP on this CPU.  Indicate to
	 * load function whether PCU was used before.  Note the usage.
	 */
	pcu_do_op(pcu, l, PCU_CLAIM | PCU_ENABLE | PCU_RELOAD);
	splx(s);
}
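pcu_load() is what a machine-dependent "coprocessor disabled" trap handler calls to make the faulting LWP's state resident on the current CPU. A hypothetical MD handler (fpu_ops and fpu_trap() are illustrative names, not taken from any particular port) can be as simple as:

#include <sys/pcu.h>

/*
 * The fpu_ops pcu_ops_t (pcu_id plus the state save/load/release
 * callbacks) is assumed to be defined by machine-dependent code.
 */
extern const pcu_ops_t fpu_ops;

/* Hypothetical MD trap handler for an "FPU disabled" fault. */
void
fpu_trap(void)
{
	KASSERT(!cpu_intr_p());
	pcu_load(&fpu_ops);
	/* On return from the trap, the FPU is enabled for curlwp. */
}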