bool
cpu_kpreempt_enter(uintptr_t where, int s)
{

	KASSERT(kpreempt_disabled());

#if 0
	if (where == (uintptr_t)-2) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		/*
		 * We must be called via kern_intr (which already checks
		 * for IPL_NONE, so of course we can be preempted).
		 */
		return true;
	}

	/*
	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
	 * of course we can be preempted.  If we aren't, ask for a
	 * softint so that kern_intr can call kpreempt().
	 */
	if (s == IPL_NONE) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		return true;
	}
	softint_trigger(SOFTINT_KPREEMPT);
#endif

	return false;
}
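For context, here is a minimal sketch of how the MI side might consume cpu_kpreempt_enter()'s return value. The caller and its name are hypothetical (this is not NetBSD's actual kpreempt() from kern_synch.c): a true result means it is safe to preempt right here; false means the MD layer has deferred the preemption, e.g. by triggering SOFTINT_KPREEMPT.

/*
 * Hypothetical MI caller sketch (not the real kpreempt()): ask the
 * MD layer whether preemption is safe right now, and back out if it
 * has arranged for the preemption to happen later instead.
 */
static void
example_try_preempt(uintptr_t where, int s)
{
	if (!cpu_kpreempt_enter(where, s)) {
		/* MD layer deferred the preemption (softint pending). */
		return;
	}
	/* ... switch to the higher-priority LWP here ... */
	cpu_kpreempt_exit(where);	/* assumed MD unwind hook */
}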
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	/*
	 * Record the request.  On a multiprocessor this word can be
	 * updated from other CPUs, so OR the flags in atomically.
	 */
#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for
		 * an interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}

	l->l_md.md_astpending = 1;		/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}
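A minimal usage sketch follows. The wrapper and its name are hypothetical (the real MI callers live in the scheduler); it shows the two contract points visible above: cpu_need_resched() must run with kernel preemption disabled, and the flags select between posting an AST and requesting an in-kernel preemption.

/*
 * Hypothetical caller sketch: ask CPU `ci' to reschedule.  With
 * RESCHED_KPREEMPT the kernel itself is preempted (softint locally,
 * IPI remotely); otherwise an AST is posted and, with RESCHED_IMMED,
 * a remote CPU is interrupted so it notices promptly.
 */
static void
example_request_resched(struct cpu_info *ci, bool kernel_preempt)
{
	int flags = RESCHED_IMMED;

	if (kernel_preempt)
		flags |= RESCHED_KPREEMPT;

	kpreempt_disable();		/* cpu_need_resched() asserts this */
	cpu_need_resched(ci, flags);
	kpreempt_enable();
}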
void
i386_ipi_kpreempt(struct cpu_info *ci)
{

	/* Convert the cross-CPU preemption request into a local softint. */
	softint_trigger(1 << SIR_PREEMPT);
}
static inline void
ipi_kpreempt(struct cpu_info *ci)
{

	/* As above: the IPI becomes the local SOFTINT_KPREEMPT softint. */
	softint_trigger(SOFTINT_KPREEMPT);
}
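Both handlers do the same thing: they turn a cross-CPU kernel-preemption request into the local soft interrupt, where the actual preemption then runs at a safe IPL. A wiring sketch follows; the table name and dispatch function are illustrative assumptions, not taken from any actual port.

/*
 * Illustrative only: an MD IPI dispatcher might index a table of
 * handlers by IPI number, so IPI_KPREEMPT lands in ipi_kpreempt()
 * and becomes a local SOFTINT_KPREEMPT soft interrupt.
 */
typedef void (*ipi_handler_t)(struct cpu_info *);

static const ipi_handler_t example_ipi_handlers[] = {
	[IPI_KPREEMPT] = ipi_kpreempt,
	/* ... other IPI handlers ... */
};

static void
example_ipi_dispatch(struct cpu_info *ci, int ipi)
{
	(*example_ipi_handlers[ipi])(ci);
}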