static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}
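
docvwait() is static, so it is reached through the public condvar entry points. The following is a minimal sketch, not the surrounding source, of how a ticks-based front end might build the struct timespec that docvwait() expects; the wrapper name and the hz-based conversion are assumptions for illustration.

static int
example_cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	extern int hz;
	struct timespec ts;

	/* ticks == 0 conventionally means "no timeout" */
	if (ticks == 0)
		return docvwait(cv, mtx, NULL);

	/* convert scheduler ticks to seconds + nanoseconds */
	ts.tv_sec = ticks / hz;
	ts.tv_nsec = (ticks % hz) * (1000000000L / hz);
	return docvwait(cv, mtx, &ts);
}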
Example #2
int
rump_schedlock_cv_timedwait(struct rumpuser_cv *cv, const struct timespec *ts)
{
	struct lwp *l = curlwp;
	struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);

	/* mutex will be taken and released in cpu schedule/unschedule */
	return rumpuser_cv_timedwait(cv, rcpu->rcpu_mtx,
	    ts->tv_sec, ts->tv_nsec);
}
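
A hypothetical caller sketch for the routine above. It assumes that a non-zero return from rumpuser_cv_timedwait() means the time limit expired, mirroring how docvwait() in the first example maps that result to EWOULDBLOCK; the function name is illustrative only.

static int
example_schedlock_sleep(struct rumpuser_cv *cv, const struct timespec *ts)
{

	/* scheduler mutex handling happens inside schedule/unschedule */
	if (rump_schedlock_cv_timedwait(cv, ts) != 0)
		return EWOULDBLOCK;	/* time limit expired */
	return 0;			/* woken up before the deadline */
}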
Example #3
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
#ifdef DIAGNOSTIC
	extern int hz;
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		/* ticks go to the hypercall unconverted; pin the expected hz */
		KASSERT(hz == 100);
		return rumpuser_cv_timedwait(RUMPCV(cv), mtx->kmtx_mtx, ticks);
	}
}
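
The KASSERT above exists because ticks are handed to the hypercall unconverted, so this path only behaves as intended when hz is 100. Purely as an illustration, and assuming the four-argument sec/nsec rumpuser_cv_timedwait() shown in the first example, a conversion that removes the fixed-hz assumption could look like this:

int
example_cv_timedwait_any_hz(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	extern int hz;
	struct timespec ts;

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	}

	/* convert ticks to the sec/nsec pair the hypercall variant takes */
	ts.tv_sec = ticks / hz;
	ts.tv_nsec = (ticks % hz) * (1000000000L / hz);
	if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
	    ts.tv_sec, ts.tv_nsec))
		return EWOULDBLOCK;
	return 0;
}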