Example 1
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	int swapin;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		swapin = sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		if (swapin)
			uvm_kick_scheduler();
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		(void)lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}
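
The fast path above wakes an LWP that is already asleep on the park queue; the slow path instead leaves LW_UNPARKED set so the target consumes the wakeup on its next park attempt. Below is a minimal sketch of that consumer side, assuming the same lwp_lock()/lwp_unlock() and LW_UNPARKED names as the example; it is illustrative only, and the real sys__lwp_park() differs in detail:

/*
 * Hypothetical park-side fast path: if an unpark raced ahead of us,
 * consume the pending token and return without sleeping.
 */
static int
park_fastpath_sketch(lwp_t *l)
{

	lwp_lock(l);
	if ((l->l_flag & LW_UNPARKED) != 0) {
		l->l_flag &= ~LW_UNPARKED;	/* eat the wakeup token */
		lwp_unlock(l);
		return EALREADY;		/* caller skips the sleep */
	}
	lwp_unlock(l);
	return 0;				/* caller proceeds to park */
}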
Example 2
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int swapin, error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	swapin = 0;
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			swapin |= sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			(void)lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);
	if (swapin)
		uvm_kick_scheduler();

	return 0;
}
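
The NULL-targets convention above lets userland discover the per-call limit (LWP_UNPARK_MAX) and split large sets into blocks. A hedged userland sketch using the _lwp_unpark_all(2) wrapper, which per its man page returns the batch limit when the target array is NULL:

#include <lwp.h>
#include <stddef.h>

/*
 * Unpark an arbitrarily large set of LWPs in kernel-sized blocks.
 * Sketch only: unpark_many() is a hypothetical helper, not NetBSD API.
 */
static int
unpark_many(const lwpid_t *ids, size_t n, const void *hint)
{
	int batch;

	batch = _lwp_unpark_all(NULL, 0, NULL);	/* query LWP_UNPARK_MAX */
	if (batch <= 0)
		return -1;
	while (n > 0) {
		size_t chunk = n < (size_t)batch ? n : (size_t)batch;

		if (_lwp_unpark_all(ids, chunk, hint) == -1)
			return -1;
		ids += chunk;
		n -= chunk;
	}
	return 0;
}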
Example 3
void
sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj)
{
	struct lwp *l = curlwp;

#ifndef T2EX
	if (__predict_false(sobj != &sleep_syncobj ||
	    strcmp(wmesg, "callout") != 0)) {
#else
	if (__predict_false(sobj != &sleep_syncobj ||
	    (strcmp(wmesg, "callout") != 0 && strcmp(wmesg, "select") != 0 &&
	    strcmp(wmesg, "pollsock") != 0))) {
#endif
		panic("sleepq: unsupported enqueue");
	}

	/*
	 * Remove an LWP from a sleep queue if the LWP was deleted while in
	 * the waiting state.
	 */
	if (l->l_sleepq != NULL && (l->l_stat & LSSLEEP) != 0) {
		sleepq_remove(l->l_sleepq, l);
	}

#ifndef T2EX
	l->l_syncobj = sobj;
#endif
	l->l_wchan = wchan;
	l->l_sleepq = sq;
#ifndef T2EX
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
#endif
	l->l_stat = LSSLEEP;
#ifndef T2EX
	l->l_sleeperr = 0;
#endif

	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

int
sleepq_block(int timo, bool hatch)
{
	struct lwp *l = curlwp;
	int error = 0;

	//KASSERT(timo == 0 && !hatch);

	if (timo != 0) {
		callout_schedule(&l->l_timeout_ch, timo);
	}

#ifdef T2EX
	if (l->l_mutex != NULL) {
		mutex_exit(l->l_mutex);
	}
#endif

	mutex_enter(&sq_mtx);
	while (l->l_wchan) {
		if (hatch)
			error = cv_timedwait_sig(&sq_cv, &sq_mtx, timo);
		else
			error = cv_timedwait(&sq_cv, &sq_mtx, timo);

		if (error == EINTR) {
			if (l->l_wchan) {
				TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_sleepq = NULL;
			}
		}
	}
	mutex_exit(&sq_mtx);

#ifdef T2EX
	l->l_mutex = &spc_lock;
#endif

	if (timo != 0) {
		/*
		 * Even if the callout appears to have fired, we need to
		 * stop it in order to synchronise with other CPUs.
		 */
		if (callout_halt(&l->l_timeout_ch, NULL)) {
			error = EWOULDBLOCK;
		}
	}

	return error;
}
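
sleepq_enqueue() only publishes the LWP on the queue; sleepq_block() performs the actual wait, returning EWOULDBLOCK when the callout timeout fired and EINTR when an interruptible wait was broken by a signal. A caller-side sketch of the pairing, under the same assumptions as the example (timed_sleep_sketch is hypothetical; the "callout" wmesg passes the guard in sleepq_enqueue() in both configurations):

/*
 * Sketch of a complete timed sleep: enqueue, then block.  The wake-up
 * arrives when sleepq_wake() clears l_wchan and broadcasts sq_cv.
 */
static int
timed_sleep_sketch(sleepq_t *sq, wchan_t wchan, int ticks)
{

	sleepq_enqueue(sq, wchan, "callout", &sleep_syncobj);
	return sleepq_block(ticks, false);	/* false: not interruptible */
}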

#ifdef T2EX
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l;
	bool found = false;

	TAILQ_FOREACH(l, sq, l_sleepchain) {
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}
#else
/*
 * sleepq_wake:
 *
 *	Wake zero or more LWPs blocked on a single wait channel.
 */
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	lwp_t *l, *next;
	int swapin = 0;

	KASSERT(mutex_owned(mp));

	for (l = TAILQ_FIRST(sq); l != NULL; l = next) {
		KASSERT(l->l_sleepq == sq);
		KASSERT(l->l_mutex == mp);
		next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}

	mutex_spin_exit(mp);

#if 0
	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		uvm_kick_scheduler();
#endif

	return l;
}
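
In both variants sleepq_wake() is entered with the queue's spin lock held and returns with it released. A hedged sketch of a caller (wake_channel_sketch is hypothetical; sleep_tab is assumed to be NetBSD's global sleep table): an expected count of ~0u, i.e. at least the number of waiters, wakes everyone, while 1 wakes a single LWP.

/*
 * Wake one waiter or every waiter on a channel.  sleeptab_lookup()
 * returns the queue with *mp held; sleepq_wake() releases it.
 */
static void
wake_channel_sketch(wchan_t wchan, bool all)
{
	kmutex_t *mp;
	sleepq_t *sq;

	sq = sleeptab_lookup(&sleep_tab, wchan, &mp);
	(void)sleepq_wake(sq, wchan, all ? ~0u : 1, mp);
}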