void _kernel_lock(int nlocks) { struct lwp *l = curlwp; while (nlocks) { if (giantowner == l) { giantcnt += nlocks; nlocks = 0; ev_biglock_recurse.ev_count++; } else { if (rumpuser_mutex_tryenter(rump_giantlock) != 0) { rump_unschedule_cpu1(l, NULL); rumpuser_mutex_enter_nowrap(rump_giantlock); rump_schedule_cpu(l); ev_biglock_slow.ev_count++; } else { ev_biglock_fast.ev_count++; } giantowner = l; giantcnt = 1; nlocks--; } } }
/*
 * Release the current virtual CPU, passing an optional interlock down
 * to the low-level unschedule routine.  Before letting go of the CPU,
 * drain pending soft interrupts -- unless this lwp is itself an
 * interrupt thread (LP_INTR), which must not run softints recursively.
 */
void
rump_unschedule_cpu_interlock(struct lwp *l, void *interlock)
{
	const int is_intr_lwp = (l->l_pflag & LP_INTR) != 0;

	if (!is_intr_lwp)
		rump_softint_run(l->l_cpu);

	rump_unschedule_cpu1(l, interlock);
}