void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	ci->ci_want_resched |= flags;

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}

	l->l_md.md_astpending = 1;		/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}
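A minimal caller sketch, for context only (not part of the original source): the KASSERT above requires kernel preemption to be disabled across the call. The helper name example_kick_cpu() and its urgent parameter are hypothetical; RESCHED_IMMED is the flag the function itself checks before sending IPI_AST to a remote CPU.

static void
example_kick_cpu(struct cpu_info *ci, bool urgent)
{
	/* Hypothetical policy: RESCHED_IMMED forces IPI_AST if `ci' is remote. */
	const int flags = urgent ? RESCHED_IMMED : 0;

	kpreempt_disable();		/* satisfy the KASSERT above */
	cpu_need_resched(ci, flags);
	kpreempt_enable();
}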
void
cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
{
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp2 = ci->ci_multicastcpus;

	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_copy(kcp2, kcp);
	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
	for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
		kcpuset_clear(kcp2, --cii);
		(void)cpu_send_ipi(cpu_lookup(cii), tag);
	}
}
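A usage sketch under stated assumptions: the choice of target CPUs and the reuse of the IPI_NOP tag (seen in cpu_need_resched() above) are illustrative. kcpuset_create(), kcpuset_set(), and kcpuset_destroy() are the standard kcpuset(9) operations that pair with the kcpuset_copy()/kcpuset_ffs() calls in the function.

static void
example_multicast(void)
{
	kcpuset_t *targets;

	kcpuset_create(&targets, true);		/* allocate a zeroed set */
	kcpuset_set(targets, 1);		/* name cpu1 and cpu2 ... */
	kcpuset_set(targets, 2);		/* ... as the targets */
	cpu_multicast_ipi(targets, IPI_NOP);	/* the sender is skipped inside */
	kcpuset_destroy(targets);
}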
void
cpu_multicast_ipi(__cpuset_t cpuset, int tag)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPUSET_DEL(cpuset, cpu_index(curcpu()));
	if (CPUSET_EMPTY_P(cpuset))
		return;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPUSET_HAS_P(cpuset, cpu_index(ci))) {
			CPUSET_DEL(cpuset, cpu_index(ci));
			(void)cpu_send_ipi(ci, tag);
		}
	}
}
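Both variants implement the same operation over different CPU-set representations. The kcpuset_t version above works on a dynamically sized set: it returns early when cpus_running equals the calling CPU's own set (no other CPU is up), copies the request into the per-CPU scratch set ci_multicastcpus so the caller's set is never mutated, strips the local CPU, and walks the remaining bits with kcpuset_ffs(), whose 1-based result explains the --cii. The legacy __cpuset_t version receives a fixed-width bitmask by value, so it can delete bits from its own copy directly and iterates with CPU_INFO_FOREACH() instead. In both, the sending CPU never IPIs itself.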