Example #1
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t 	mqueue,
	register ipc_kmsg_t		kmsg)
{
	spl_t s;

	/*
	 *	While the msg queue is locked, we have control of the
	 *	kmsg, so the ref in it for the port is still good.
	 *
	 *	Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/* 
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}
	
		/*
		 * If the receiver waited with a facility not directly
		 * related to Mach messaging, then it isn't prepared to get
		 * handed the message directly.  Just set it running, and
		 * go look for another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			continue;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too
		 * small, the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_msize <
				(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for the special
		 * too-large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);
	
	current_task()->messages_sent++;
	return;
}
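
The header comment's precondition ("our space in the message queue is reserved") is worth making concrete. Below is a minimal, hypothetical caller sketch, not the real XNU send path: it reserves a slot under the mqueue lock before posting, so ipc_mqueue_post can either consume the reservation by enqueueing or hand it back via ipc_mqueue_release_msgcount() when a waiting receiver takes the message directly. The wrapper name and the imq_full()/KERN_NO_SPACE handling are assumptions for illustration, and the sketch is assumed to live alongside the excerpted kernel code above.

/*
 * Hypothetical caller sketch (NOT the real ipc_mqueue_send):
 * reserve a queue slot, then post.  If a receiver was waiting,
 * ipc_mqueue_post returns the reservation itself.
 */
static kern_return_t
mqueue_reserve_and_post(ipc_mqueue_t mqueue, ipc_kmsg_t kmsg)
{
	spl_t s;

	s = splsched();
	imq_lock(mqueue);
	if (imq_full(mqueue)) {			/* assumed capacity predicate */
		imq_unlock(mqueue);
		splx(s);
		return KERN_NO_SPACE;		/* a real sender would block or retry */
	}
	mqueue->imq_msgcount++;			/* the "reserved space" precondition */
	imq_unlock(mqueue);
	splx(s);

	ipc_mqueue_post(mqueue, kmsg);		/* consumes the reservation */
	return KERN_SUCCESS;
}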
Example #2
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wake up
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	 port_mqueue,
	ipc_mqueue_t	 set_mqueue,
	wait_queue_link_t wql)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link_noalloc(port_waitq, set_waitq, wql);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Let's get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;

			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				thread_unlock(th);
				continue;
			}

			/*
			 * Found a receiver.  See if it can handle the message
			 * correctly (the message is not too large for it, or
			 * it didn't care to be informed that the message was
			 * too large).  If it can't handle it, take it off the
			 * list, let it go back and figure things out, and
			 * just move on to the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_msize <
					(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let it go without the message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so hand it over.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}
			
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
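
For context, here is a hedged sketch of the kind of caller that sits above ipc_mqueue_add when a port is inserted into a port set. Because the routine passes a preallocated wait_queue_link_t to the no-alloc link variant, the caller must allocate the link first; that allocation is what justifies the "May block" condition. The wrapper name, the wait_queue_link_allocate()/wait_queue_link_free() pairing, and the field spellings are assumptions modeled on the code above, not the verified XNU ipc_pset_add.

/*
 * Hypothetical caller sketch: link a port's mqueue into a
 * set's mqueue.  The link is allocated up front (may block)
 * so ipc_mqueue_add never allocates with queues locked.
 */
static kern_return_t
pset_add_port_sketch(ipc_pset_t pset, ipc_port_t port)
{
	wait_queue_link_t	wql;
	kern_return_t		kr;

	wql = wait_queue_link_allocate();	/* assumed allocator */
	if (wql == NULL)
		return KERN_RESOURCE_SHORTAGE;

	kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages, wql);
	if (kr != KERN_SUCCESS)
		wait_queue_link_free(wql);	/* undo on failure */
	return kr;
}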
Example #3
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquiring thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}
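
For context, here is a hedged sketch of the kind of public wrapper that would sit above ulock_release_internal: it validates the lock set and lock id exactly the way lock_handoff does below, then releases on behalf of the calling thread. The wrapper name and shape are assumptions modeled on lock_handoff's preamble, not a verified copy of the real lock_release.

/*
 * Hypothetical wrapper sketch, modeled on lock_handoff's
 * validation preamble: look up the ulock, then release it
 * on behalf of the calling thread.
 */
kern_return_t
lock_release_sketch(lock_set_t lock_set, int lock_id)
{
	ulock_t ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	/* ulock_release_internal revalidates lock_set->active itself */
	return ulock_release_internal(ulock, current_thread());
}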
Example #4
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}
	
	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if it is still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					   wq,
					   LOCK_SET_HANDOFF,
					   THREAD_AWAKENED,
					   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * The thread we are transferring to will try
			 * to take the lock on the ulock, and therefore
			 * will wait for us to complete the handoff even
			 * though we set the thread running.
			 */
			thread_unlock(thread);
			splx(s);
			
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been aborted,
			 * and is racing back to clear the flag that says it is
			 * waiting for an accept.  It will clear the flag when
			 * we release the lock, so just fall through and wait
			 * for the next accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken-up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * We take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}
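
lock_handoff is one half of a rendezvous; its counterpart in the Mach lock-set interface, lock_handoff_accept, performs the mirror-image dance on ulock->accept_wait. The sketch below shows the pairing from user level under stated assumptions: it relies on the public lock-set calls (lock_set_create, lock_acquire, lock_handoff, lock_handoff_accept, lock_release) being reachable through <mach/mach.h> or a MIG-generated header, and it reduces error handling to a bail-out helper. Treat it as an illustration of the protocol, not a verified test program.

/*
 * Illustrative user-level pairing: one thread hands the ulock
 * off, the other accepts it.  Either thread may reach its call
 * first; the kernel-side ho_wait/accept_wait flags cover both
 * orders.
 */
#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static lock_set_t ls;

static void
check(kern_return_t kr, const char *what)
{
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "%s failed: %d\n", what, kr);
		exit(1);
	}
}

static void *
sender(void *arg)
{
	check(lock_acquire(ls, 0), "lock_acquire");	/* become the holder */
	check(lock_handoff(ls, 0), "lock_handoff");	/* blocks until accepted */
	return NULL;
}

static void *
receiver(void *arg)
{
	/* Blocks until the sender hands the ulock over; then we own it. */
	check(lock_handoff_accept(ls, 0), "lock_handoff_accept");
	check(lock_release(ls, 0), "lock_release");
	return NULL;
}

int
main(void)
{
	pthread_t s, r;

	check(lock_set_create(mach_task_self(), &ls, 1, SYNC_POLICY_FIFO),
	      "lock_set_create");
	pthread_create(&s, NULL, sender, NULL);
	pthread_create(&r, NULL, receiver, NULL);
	pthread_join(s, NULL);
	pthread_join(r, NULL);
	return 0;
}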
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up.
 *
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);		

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

 	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			/*
			 * JMM - These ownership transfer macros have a
			 * locking/race problem.  To keep the thread from
			 * changing states on us (nullifying the ownership
			 * assignment) we need to keep the thread locked
			 * during the assignment.  But we can't because the
			 * macros take an activation lock, which is a mutex.
			 * Since this code was already broken before I got
			 * here, I will leave it for now.
			 */
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquiring thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);
			
			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}