Example #1
/*
 *	Routine:	wait_queue_wakeup64_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
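
A minimal caller sketch for the routine above, assuming a producer that wants to wake at most one waiter; the queue, event, and wrapper names are illustrative rather than taken from the original source:

static void
producer_post_one(wait_queue_t my_wq, event64_t my_event)
{
	kern_return_t kr;

	/* wake the most eligible waiter, if any, with a normal wakeup result */
	kr = wait_queue_wakeup64_one(my_wq, my_event, THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* nobody was blocked on <my_wq, my_event>; nothing to do */
	}
}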
Example #2
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		KERN_SUCCESS - a waiting thread was found, removed, and set running
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;
		
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
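
A hedged sketch of driving the locked variant, assuming the caller is already at splsched with the queue locked and wants the routine to drop that lock for it (the wrapper name is illustrative):

static kern_return_t
post_one_already_locked(wait_queue_t wq, event64_t event)
{
	/* caller guarantees: splsched() in effect and wait_queue_lock(wq) held */
	return wait_queue_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
					      TRUE);	/* drop the queue lock for us */
}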
Example #3
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			
			/*
		 * Otherwise, it's a thread.  If it is waiting on
		 * the event we are posting to this queue, pull
		 * it off the queue and return it still locked.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
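
The "a locked thread" return value implies a contract for callers: set the selected thread running and drop its lock themselves. Below is a sketch of that contract, using only routines shown above; the wrapper name is illustrative and the caller is assumed to be at splsched with wq locked:

static kern_return_t
wake_selected_one(wait_queue_t wq, event64_t event, wait_result_t result)
{
	thread_t thread;
	kern_return_t res;

	thread = _wait_queue_select64_one(wq, event);
	if (thread == THREAD_NULL)
		return KERN_NOT_WAITING;

	/* the selector hands the thread back locked */
	res = thread_go(thread, result);
	thread_unlock(thread);
	return res;
}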
Example #4
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
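
A hedged caller sketch for the priority-floor variant; the wrapper and floor value are illustrative. Per the comparison in the code, the boost only fires when the waiter's sched_pri is below the passed priority (and the priority is at most MAXPRI), so a caller that wants no boost can pass a value below any valid priority, such as -1 (an assumption about the calling convention):

static void
post_one_with_floor(wait_queue_t wq, event_t event, int floor_pri)
{
	kern_return_t kr;

	/* wake one waiter and lift it to at least floor_pri if it is lower */
	kr = wait_queue_wakeup_one(wq, event, THREAD_AWAKENED, floor_pri);
	if (kr == KERN_NOT_WAITING) {
		/* no waiter for <wq, event>; nothing was promoted */
	}
}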
Example #5
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running, but return the thread locked.
 *
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not NULL */
}
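
A sketch of when the identity variant is useful, assuming a caller that needs to look at the woken thread before releasing it; the wrapper name is illustrative:

static thread_t
post_and_claim(wait_queue_t wq, event64_t event)
{
	thread_t thread;

	/* caller: at splsched with wq locked; TRUE asks the routine to unlock wq */
	thread = wait_queue_wakeup64_identity_locked(wq, event,
						     THREAD_AWAKENED, TRUE);
	if (thread != THREAD_NULL) {
		/* the thread comes back locked; inspect it, then release it */
		thread_unlock(thread);
	}
	return thread;
}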
Example #6
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue. If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set.  The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;
			}
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);

		} else {
			
			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it still locked.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
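
A hedged sketch of the pre-post behavior described in the comment above: posting NO_EVENT64 through a member queue of a prepost-capable set either wakes a waiter or leaves a prepost mark for a later waiter on the set to find. The wrapper below is illustrative, simply reuses the locked wakeup routine from Example #2, and assumes the caller is at splsched with wq locked:

static kern_return_t
post_no_event_locked(wait_queue_t wq)
{
	/*
	 * If no thread is found, the selector records a prepost on any
	 * linked set that supports it, instead of losing the wakeup.
	 */
	return wait_queue_wakeup64_one_locked(wq, NO_EVENT64,
					      THREAD_AWAKENED, TRUE);
}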