Example No. 1
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (cur_thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
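A minimal usage sketch (not part of the source): how an in-kernel caller might park itself with the three-argument variant above and then actually block. The wait queue and event arguments are placeholders; thread_block() and the THREAD_* constants are the standard Mach primitives assumed here.

static void
example_wait_for_event(wait_queue_t wq, event64_t event)
{
	wait_result_t wr;

	/* Declare the intent to wait; the thread is not blocked yet. */
	wr = wait_queue_assert_wait64(wq, event, THREAD_UNINT);
	if (wr == THREAD_WAITING) {
		/* Give up the CPU until a wakeup is posted to <wq, event>. */
		wr = thread_block(THREAD_CONTINUE_NULL);
	}
	/* wr now reports THREAD_AWAKENED, THREAD_RESTART, and so on. */
}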
Example No. 2
/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
											interruptible, deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return(ret);
}
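A hedged sketch of a bounded wait using the deadline parameter of this variant. It assumes clock_interval_to_deadline() is available to convert a relative interval into the absolute deadline the call expects; the 100 ms figure is arbitrary.

static wait_result_t
example_wait_with_timeout(wait_queue_t wq, event_t event)
{
	uint64_t	deadline;
	wait_result_t	wr;

	/* Roughly 100 ms from now, expressed as an absolute deadline. */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);

	wr = wait_queue_assert_wait(wq, event, THREAD_ABORTSAFE, deadline);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);

	return wr;	/* THREAD_AWAKENED, THREAD_TIMED_OUT, ... */
}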
Example No. 3
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
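An illustrative sketch (assumption, not from the source): selectively interrupting one known thread, tolerating the race where it has already moved on to another wait.

static void
example_interrupt_waiter(wait_queue_t wq, event64_t event, thread_t waiter)
{
	kern_return_t kr;

	kr = wait_queue_wakeup64_thread(wq, event, waiter, THREAD_INTERRUPTED);
	if (kr == KERN_NOT_WAITING) {
		/* The thread already left this <wq, event> wait; nothing to undo. */
	}
}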
Example No. 4
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			
			/*
		 * Otherwise, it's a thread.  If it is waiting on
		 * the event we are posting to this queue, pull
		 * it off the queue and return it (still locked).
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
Example No. 5
/*
 *	Routine:		wait_queue_wakeup_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(
				wq, CAST_DOWN(event64_t,event),
				result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
Example No. 6
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
Example No. 7
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static 
kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
  		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}	
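For context, a hedged sketch of how this linkage might be driven from outside: it assumes an allocating wrapper named wait_queue_link() exists alongside the wait_queue_unlink() shown later in this collection, which is how callers are presumed to reach this internal routine.

static kern_return_t
example_join_and_leave_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	/* Assumed allocating wrapper around wait_queue_link_internal(). */
	kr = wait_queue_link(wq, wq_set);
	if (kr != KERN_SUCCESS && kr != KERN_ALREADY_IN_SET)
		return kr;

	/* ... events posted to wq are now visible to waiters on wq_set ... */

	/* Tear the linkage down again, freeing the allocated link. */
	return wait_queue_unlink(wq, wq_set);
}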
Example No. 8
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}	
Example No. 9
/*
 *	Routine:	wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);
	
	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
Example No. 10
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return(KERN_SUCCESS);
}	
Example No. 11
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

		   	wql = (wait_queue_link_t)wq_element;
			
			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}	
Example No. 12
/*
 *	Routine:	wait_queue_member
 *	Purpose:
 *		Indicate whether the given wait queue is a member of the set queue
 *	Conditions:
 *		The set queue is just that, a set queue
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t ret;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return ret;
}
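A small illustrative sketch: using the membership test to avoid a redundant link. The check-then-link pair is not atomic, which is exactly why wait_queue_link_internal() re-checks under the wait queue lock; the wrapper name wait_queue_link() is again an assumption.

static void
example_link_if_absent(wait_queue_t wq, wait_queue_set_t wq_set)
{
	/* Cheap pre-check; the authoritative check happens under the lock. */
	if (!wait_queue_member(wq, wq_set))
		(void) wait_queue_link(wq, wq_set);
}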
Example No. 13
/*
 *	Routine:		wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);
	return ret;
}
Example No. 14
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
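A hedged usage sketch of the priority-aware variant: an unlock-style path that wakes exactly one waiter and floors its priority at a caller-chosen level. MINPRI_KERNEL is used here only as an example floor; a priority the woken thread already meets leaves it untouched.

static void
example_unlock_and_wake(wait_queue_t wq, event_t event)
{
	/* Wake one waiter; boost it to at least MINPRI_KERNEL if it is lower. */
	(void) wait_queue_wakeup_one(wq, event, THREAD_AWAKENED, MINPRI_KERNEL);
}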
Example No. 15
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must be a member of the set queue
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
		   	wql = (wait_queue_link_t)wq_element;
			
			if (wql->wql_setqueue == wq_set) {
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
				queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}	
Example No. 16
/*
 *	Routine:	wait_queue_unlink_one
 *	Purpose:
 *		Find and unlink one set wait queue
 *	Conditions:
 *		Nothing of interest locked.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {

		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;
			
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}

		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
Example No. 17
/*
 *	Routine:	wait_queue_peek64_locked
 *	Purpose:
 *		Select the best thread from a wait queue that meets the
 *		supplied criteria, but leave it on the queue it was
 *		found on.  The thread, and the actual wait_queue the
 *		thread was found on are identified.
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a locked thread - if one found
 *		a locked waitq - the one the thread was found on
 *	Note:
 *		Both the waitq the thread was actually found on, and
 *		the supplied wait queue, are locked after this.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;  /* thread and its waitq locked */
			}

			wait_queue_unlock(set_queue);
		} else {
			
			/*
		 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, return
			 * it locked, but leave it on the queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
Example No. 18
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
Example No. 19
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;
				
				/*
				 * Preposting applies only to sets, and the wait
				 * queue is the first element of the set.
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue)) 
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {
			
			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
Example No. 20
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
Example No. 21
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}
	
	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					   wq,
					   LOCK_SET_HANDOFF,
					   THREAD_AWAKENED,
					   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/* 
			 * The thread we are transferring to will try
			 * to take the lock on the ulock, and therefore
			 * will wait for us to complete the handoff even
			 * though we set the thread running.
			 */
			thread_unlock(thread);
			splx(s);
			
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted
			 * and is racing back to clear the flag that says it is
			 * waiting for an accept.  It will clear it when we
			 * release the lock, so just fall through and wait for
			 * the next accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * we take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}