void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{
        int nlocks;

        /* fast path: uncontended, keep the rump kernel virtual CPU */
        if (rumpuser_mutex_tryenter(mtx) != 0) {
                /* contended: release the virtual CPU before sleeping */
                rumpkern_unsched(&nlocks, NULL);
                while (rumpuser_mutex_tryenter(mtx) != 0)
                        wait(&mtx->waiters, BMK_SCHED_BLOCK_INFTIME);
                rumpkern_sched(nlocks, NULL);
        }
}
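The blocking path above only works if the release side wakes the threads sleeping on mtx->waiters. A minimal sketch of the matching exit routine, assuming a wakeup() helper that is the counterpart of the wait() used above; the raw-release helper mtx_exit() is hypothetical, named here only for illustration:

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

        /* illustrative sketch, not the verbatim implementation */
        mtx_exit(mtx);                  /* hypothetical raw-release helper */
        wakeup(&mtx->waiters);          /* counterpart of wait() above */
}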
void
_kernel_lock(int nlocks)
{
        struct lwp *l = curlwp;

        while (nlocks) {
                if (giantowner == l) {
                        /* recursive acquisition: just bump the count */
                        giantcnt += nlocks;
                        nlocks = 0;
                        ev_biglock_recurse.ev_count++;
                } else {
                        if (rumpuser_mutex_tryenter(rump_giantlock) != 0) {
                                /* contended: drop the virtual CPU,
                                 * block, then reschedule */
                                rump_unschedule_cpu1(l, NULL);
                                rumpuser_mutex_enter_nowrap(rump_giantlock);
                                rump_schedule_cpu(l);
                                ev_biglock_slow.ev_count++;
                        } else {
                                ev_biglock_fast.ev_count++;
                        }
                        giantowner = l;
                        giantcnt = 1;
                        nlocks--;
                }
        }
}
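For the recursion count to stay balanced, the release side must decrement giantcnt and give up the hypervisor mutex only when the count reaches zero. A sketch of that logic, using the giantowner/giantcnt variables from the listing (the real _kernel_unlock carries additional bookkeeping; this body is illustrative):

void
_kernel_unlock_sketch(int nlocks)
{

        /* illustrative only: release nlocks levels of recursion */
        if (giantowner != curlwp)
                return;                 /* not the owner, nothing to drop */
        giantcnt -= nlocks;
        if (giantcnt == 0) {
                giantowner = NULL;
                rumpuser_mutex_exit(rump_giantlock);
        }
}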
void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{
        int rv;

        rv = rumpuser_mutex_tryenter(mtx);
        /* one VCPU supported, no preemption => must succeed */
        if (rv != 0) {
                bmk_platform_halt("rumpuser mutex error");
        }
}
void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{
        int rv;

        rv = rumpuser_mutex_tryenter(mtx);
        /* one VCPU supported, no preemption => must succeed */
        if (rv != 0) {
                panic("spin mutex error");
        }
}
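Both variants encode the same invariant and differ only in how they die: with a single virtual CPU and no in-kernel preemption, no other thread can hold the mutex while this code runs, so the try must succeed and a failure is fatal. Under those assumptions the try operation itself needs no atomic instructions; a minimal sketch, with the struct and field names hypothetical:

struct mtx_sketch {
        int locked;
};

/* sketch of a tryenter for a single-VCPU, non-preemptive platform */
int
tryenter_sketch(struct mtx_sketch *mtx)
{

        if (mtx->locked)
                return 1;       /* errno-style: nonzero = would block */
        mtx->locked = 1;        /* plain store: one VCPU, no preemption */
        return 0;
}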
int
mutex_tryenter(kmutex_t *mtx)
{
        int error;

        error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
        if (error == 0) {
                WANTLOCK(mtx, 0);
                LOCKED(mtx, false);
        }
        /* hypercall returns an errno: 0 means the lock was taken */
        return error == 0;
}
int
mutex_tryenter(kmutex_t *mtx)
{

        /*
         * note: passes the hypercall result straight through, so this
         * relies on tryenter returning nonzero on success (the older,
         * pre-errno hypercall convention).
         */
        return rumpuser_mutex_tryenter(mtx->kmtx_mtx);
}
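The two versions thus differ in their return contract: the first maps the hypercall's errno-style result to a boolean, while the second only works when the hypercall itself returns nonzero on success. Callers see the same boolean idiom either way. A typical illustrative caller, with the softc struct and its field entirely hypothetical:

struct softc_sketch {
        kmutex_t sc_lock;
};

/* illustrative fast path: take the lock only if it is free right now */
static int
try_fast_path(struct softc_sketch *sc)
{

        if (!mutex_tryenter(&sc->sc_lock))
                return 0;               /* contended: caller falls back */
        /* ... fast-path work under the lock ... */
        mutex_exit(&sc->sc_lock);
        return 1;
}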