Example #1
File: sync_lock.c Project: CptFrazz/xnu
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
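The locking dance above — take the set lock, check active, lock the individual ulock, then drop the set lock — is the hand-over-hand idiom every routine in this file repeats: the coarse lock pins the ulock against lock_set_destroy just long enough to take the fine lock. A minimal user-space sketch of the same idiom with pthreads (all names here are illustrative, not XNU API):

#include <pthread.h>
#include <stdbool.h>

struct my_lock_set {
    pthread_mutex_t set_lock;   /* guards 'active' and the ulock table */
    bool            active;     /* cleared by destroy                  */
    pthread_mutex_t ulock;      /* one fine-grained lock, for brevity  */
};

/* Returns false if the set was already destroyed. */
bool with_ulock(struct my_lock_set *s)
{
    pthread_mutex_lock(&s->set_lock);
    if (!s->active) {                       /* mirrors !lock_set->active */
        pthread_mutex_unlock(&s->set_lock);
        return false;                       /* KERN_LOCK_SET_DESTROYED   */
    }
    pthread_mutex_lock(&s->ulock);          /* take the fine lock first  */
    pthread_mutex_unlock(&s->set_lock);     /* then drop the coarse lock */

    /* ... operate on per-lock state here ... */

    pthread_mutex_unlock(&s->ulock);
    return true;
}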
Example #2
File: sync_lock.c Project: CptFrazz/xnu
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
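Together, lock_make_unstable and lock_make_stable anticipate what POSIX later standardized as robust mutexes: when a holder dies, the next acquirer is warned that the protected data may be inconsistent (EOWNERDEAD plays the role of KERN_LOCK_UNSTABLE), and after repairing the data it can mark the lock healthy again (pthread_mutex_consistent plays the role of lock_make_stable). A sketch for comparison (POSIX robust mutexes, not XNU code; not all platforms support them):

#include <pthread.h>
#include <errno.h>

void robust_init(pthread_mutex_t *m)
{
    pthread_mutexattr_t a;

    pthread_mutexattr_init(&a);
    pthread_mutexattr_setrobust(&a, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_init(m, &a);
    pthread_mutexattr_destroy(&a);
}

int robust_acquire(pthread_mutex_t *m)
{
    int err = pthread_mutex_lock(m);

    if (err == EOWNERDEAD) {
        /* Previous holder died: protected data may be inconsistent,
           the analogue of KERN_LOCK_UNSTABLE. */
        /* ... repair the protected state here ... */
        pthread_mutex_consistent(m);   /* analogue of lock_make_stable */
        return 1;                      /* acquired, after repair */
    }
    return err == 0 ? 0 : -1;
}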
Example #3
File: sync_lock.c Project: CptFrazz/xnu
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}
		
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
 	}

	/*
	 *  Add the ulock to the holding thread's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
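Note the three-way result: lock_try never blocks, and when the lock is busy it tells the caller whether the owner is itself (KERN_LOCK_OWNED_SELF) or another thread (KERN_LOCK_OWNED). A plain mutex trylock cannot make that distinction, but tracking the holder explicitly can; a hedged pthread sketch (illustrative names only, not XNU API):

#include <pthread.h>

enum try_result { TRY_ACQUIRED, TRY_OWNED_SELF, TRY_OWNED_OTHER };

struct my_ulock {
    pthread_mutex_t lock;     /* protects 'held' and 'holder' */
    pthread_t       holder;
    int             held;     /* 0 = free */
};

enum try_result my_lock_try(struct my_ulock *u)
{
    enum try_result r;

    pthread_mutex_lock(&u->lock);
    if (u->held) {
        /* Already owned: report by whom, like OWNED_SELF vs. OWNED. */
        r = pthread_equal(u->holder, pthread_self())
              ? TRY_OWNED_SELF : TRY_OWNED_OTHER;
    } else {
        u->held   = 1;
        u->holder = pthread_self();   /* like ulock_ownership_set() */
        r = TRY_ACQUIRED;
    }
    pthread_mutex_unlock(&u->lock);
    return r;
}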
Example #4
/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count++;
	lock_set_unlock(lock_set);
}
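Note the distinction the comment draws: a reference keeps the memory alive, while active says whether the set is still usable, which is why every entry point rechecks active after taking the set lock. The reference/dereference pair reduces to a counter guarded by the object's own lock; a minimal sketch (illustrative, not XNU API):

#include <pthread.h>
#include <stdlib.h>

struct obj {
    pthread_mutex_t lock;
    int             ref_count;
};

void obj_reference(struct obj *o)
{
    pthread_mutex_lock(&o->lock);
    o->ref_count++;                 /* keeps the memory alive...        */
    pthread_mutex_unlock(&o->lock); /* ...even once 'o' is deactivated  */
}

void obj_dereference(struct obj *o)
{
    int refs;

    pthread_mutex_lock(&o->lock);
    refs = --o->ref_count;          /* snapshot the count under the lock */
    pthread_mutex_unlock(&o->lock);

    if (refs == 0)                  /* last reference frees the object */
        free(o);
}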
Example #5
/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	ref_count;
	int 	size;

	lock_set_lock(lock_set);
	ref_count = --(lock_set->ref_count);
	lock_set_unlock(lock_set);

	if (ref_count == 0) {
		size =	sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
		kfree(lock_set, size);
	}
}
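The size computed here mirrors the allocation: struct lock_set evidently ends in a one-element array (struct ulock ulock_list[1]), so a set with n_ulocks entries occupies sizeof(struct lock_set) plus n_ulocks - 1 extra ulock slots. That is the classic pre-C99 trailing-array idiom; a self-contained sketch under that assumption (modern C would use a flexible array member instead):

#include <stdlib.h>

struct ulock_s   { int state; };
struct lockset_s {
    int            n_ulocks;
    struct ulock_s ulock_list[1];   /* trailing-array idiom */
};

struct lockset_s *lockset_alloc(int n)
{
    /* sizeof(struct lockset_s) already includes ulock_list[0],
       so only n - 1 additional slots are needed. */
    size_t size = sizeof(struct lockset_s)
                + sizeof(struct ulock_s) * (size_t)(n - 1);
    struct lockset_s *s = malloc(size);

    if (s)
        s->n_ulocks = n;
    return s;
}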
Example #6
File: sync_lock.c Project: CptFrazz/xnu
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					    LOCK_SET_HANDOFF,
					    ulock->holder,
					    THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}
			
		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * He will clear his own flag when he gets back.
		 * In the meantime, we will wait as if we didn't
		 * even see his flag (by falling through).
		 */
	}		

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * Take the lock to synchronize with the thread handing
		 * off the lock to us.  We don't want to continue until
		 * they complete the handoff.
		 */
		ulock_lock(ulock);
		assert(ulock->accept_wait == FALSE);
		assert(ulock->holder == current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}
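The shape to notice is the switch after thread_block: THREAD_AWAKENED means the sender completed the handoff (we already own the ulock), THREAD_INTERRUPTED means we were aborted and must clear our own accept_wait flag, and THREAD_RESTART loops back so the active check can report KERN_LOCK_SET_DESTROYED. Condition variables have no abort-safe wait, so this pthread sketch (illustrative only, not XNU API) folds the wake reason into shared state and uses a timeout as a stand-in for interruption:

#include <pthread.h>
#include <errno.h>
#include <time.h>

enum wake { WAKE_NONE, WAKE_HANDOFF, WAKE_RESTART };

struct accept_state {
    pthread_mutex_t lock;
    pthread_cond_t  cv;
    int             accept_wait;   /* mirrors ulock->accept_wait        */
    enum wake       reason;        /* set by the waker before signaling */
};

/* Returns 0 on handoff, EAGAIN on restart, EINTR on "interruption". */
int accept_block(struct accept_state *h, const struct timespec *deadline)
{
    int err = 0;
    enum wake r;

    pthread_mutex_lock(&h->lock);
    h->accept_wait = 1;
    h->reason = WAKE_NONE;
    while (h->reason == WAKE_NONE && err != ETIMEDOUT)
        err = pthread_cond_timedwait(&h->cv, &h->lock, deadline);

    r = h->reason;
    if (r == WAKE_NONE)            /* timed out: like THREAD_INTERRUPTED, */
        h->accept_wait = 0;        /* clear our own flag before leaving   */
    pthread_mutex_unlock(&h->lock);

    return (r == WAKE_HANDOFF) ? 0 : (r == WAKE_RESTART ? EAGAIN : EINTR);
}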
Example #7
File: sync_lock.c Project: CptFrazz/xnu
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}
	
	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if he is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					   wq,
					   LOCK_SET_HANDOFF,
					   THREAD_AWAKENED,
					   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * The thread we are transferring to will try
			 * to take the lock on the ulock, and therefore
			 * will wait for us to complete the handoff even
			 * though we set the thread running.
			 */
			thread_unlock(thread);
			splx(s);
			
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been
			 * aborted, and is racing back to clear the flag
			 * that says it is waiting for an accept.  He will
			 * clear it when we release the lock, so just fall
			 * through and wait for the next accepting thread
			 * (that's the way it is specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {


	case THREAD_AWAKENED:
		/*
		 * We take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}
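lock_handoff is the mirror image of lock_handoff_accept: if a receiver is already parked on accept_wait, the sender transfers ownership to exactly that thread before letting it run; otherwise it raises ho_wait and parks itself. The essential property is that ownership changes hands inside the critical section, so the receiver never races to reacquire. A condvar sketch of the sender's fast path (illustrative names, not XNU API; the receiver would record itself in 'waiter' before blocking):

#include <pthread.h>

struct handoff_state {
    pthread_mutex_t lock;
    pthread_cond_t  cv;
    int             accept_wait;   /* is a receiver parked?              */
    pthread_t       waiter;        /* set by the receiver before waiting */
    pthread_t       holder;
    int             handed_off;    /* reason code checked on wakeup      */
};

/* Fast path only: succeeds iff a receiver is already waiting. */
int handoff_fast(struct handoff_state *h)
{
    int done = 0;

    pthread_mutex_lock(&h->lock);
    if (h->accept_wait) {
        h->holder      = h->waiter;     /* transfer inside the lock... */
        h->accept_wait = 0;
        h->handed_off  = 1;
        pthread_cond_broadcast(&h->cv); /* ...then wake the receiver   */
        done = 1;
    }
    pthread_mutex_unlock(&h->lock);
    return done;   /* 0: caller should set its ho_wait analogue and block */
}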
Example #8
File: sync_lock.c Project: CptFrazz/xnu
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
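The release path never simply drops a contended lock: when the blocked hint is set and a waiter is found, ownership is reassigned to that waiter before it resumes, so the THREAD_AWAKENED arm of lock_acquire can return immediately ("he already made us owner"). With condition variables the same direct-transfer semantics can be expressed with a grant flag that keeps the lock held across the wakeup; a hedged sketch (illustrative, not XNU API):

#include <pthread.h>

struct xfer_lock {
    pthread_mutex_t lock;
    pthread_cond_t  cv;
    int             held;      /* lock state; never drops to 0 on handoff */
    int             granted;   /* releaser handed ownership to a waiter   */
    int             waiters;   /* plays the role of the 'blocked' hint    */
};

void xfer_acquire(struct xfer_lock *l)
{
    pthread_mutex_lock(&l->lock);
    if (l->held) {
        l->waiters++;
        do
            pthread_cond_wait(&l->cv, &l->lock);
        while (!l->granted);       /* ignore spurious wakeups          */
        l->granted = 0;            /* consume the grant: we own it now */
        l->waiters--;
        /* 'held' never went to 0: ownership was transferred directly. */
    } else {
        l->held = 1;
    }
    pthread_mutex_unlock(&l->lock);
}

void xfer_release(struct xfer_lock *l)
{
    pthread_mutex_lock(&l->lock);
    if (l->waiters > 0) {
        l->granted = 1;            /* hand ownership straight over...      */
        pthread_cond_signal(&l->cv);
    } else {
        l->held = 0;               /* ...or really drop it if nobody waits */
    }
    pthread_mutex_unlock(&l->lock);
}

Unlike wait_queue_wakeup64_identity_locked, a condition variable cannot name the thread it wakes, so in this sketch any one waiter consumes the grant; the invariant preserved is the same, though: the lock never becomes free while waiters exist, so no third thread can barge in between release and wakeup.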
Example #9
File: sync_lock.c Project: CptFrazz/xnu
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
				       LOCK_SET_EVENT,
				       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check why the thread was woken up.  In all cases,
		 *  we have already been removed from the wait queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* he already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
				                   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
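The holder == current_thread() check turns a recursive acquire into KERN_LOCK_OWNED_SELF rather than a self-deadlock. POSIX offers the same guard off the shelf: an error-checking mutex fails a relock by the owner with EDEADLK instead of hanging. For comparison:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
    pthread_mutex_t m;
    pthread_mutexattr_t a;

    pthread_mutexattr_init(&a);
    pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &a);

    pthread_mutex_lock(&m);
    /* Second acquire by the same thread: EDEADLK, the POSIX
       analogue of KERN_LOCK_OWNED_SELF. */
    if (pthread_mutex_lock(&m) == EDEADLK)
        printf("already owned by self\n");

    pthread_mutex_unlock(&m);
    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&a);
    return 0;
}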
Example #10
File: sync_lock.c Project: CptFrazz/xnu
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *	
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that was specified at the
 *	lock set's creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wake up all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock has its
	 *     ownership revoked.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
					      LOCK_SET_HANDOFF,
					      THREAD_RESTART);
		}
					  
		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
						      LOCK_SET_EVENT,
						      THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
						      LOCK_SET_HANDOFF,
						      THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}
		
		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference given to the containing task,
	 *  which in turn destroys the lock set structure if the reference
	 *  count goes to zero.
	 */
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
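The shutdown pattern here is: deactivate first (active = FALSE under the set lock), then sweep every ulock and wake each class of waiter with THREAD_RESTART, so they loop back to their retry label, observe the inactive set, and return KERN_LOCK_SET_DESTROYED. With condition variables the same shape is a flag plus a broadcast; a minimal sketch (illustrative, not XNU API):

#include <pthread.h>

struct set {
    pthread_mutex_t lock;
    pthread_cond_t  cv;
    int             active;
    int             held;
};

/* Waiters loop; on wake they recheck 'active' first, like the
   retry label at the top of lock_acquire. */
int set_acquire(struct set *s)
{
    pthread_mutex_lock(&s->lock);
    while (s->active && s->held)
        pthread_cond_wait(&s->cv, &s->lock);
    if (!s->active) {                    /* KERN_LOCK_SET_DESTROYED */
        pthread_mutex_unlock(&s->lock);
        return -1;
    }
    s->held = 1;                         /* acquired */
    pthread_mutex_unlock(&s->lock);
    return 0;
}

void set_destroy(struct set *s)
{
    pthread_mutex_lock(&s->lock);
    s->active = 0;                       /* deactivate first...       */
    pthread_cond_broadcast(&s->cv);      /* ...then wake every waiter */
    pthread_mutex_unlock(&s->lock);
}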
Example #11
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}