Example #1
/*
 *	Routine:	wait_queue_wakeup64_thread_locked
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		at splsched
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;

	assert(wait_queue_held(wq));

	/*
	 * See if the thread was still waiting there.  If so, it got
	 * dequeued and returned locked.
	 */
	res = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (res != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	res = thread_go(thread, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(thread);
	return res;
}
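A minimal caller sketch for the routine above, assuming the splsched()/wait_queue_lock() primitives shown in Example #3; the helper name and the THREAD_AWAKENED wait result are illustrative, not taken from this source:

/* Hypothetical caller: wake `thread` only if it is still waiting on
 * `event` in `wq`.  Passing unlock == TRUE lets the routine drop the
 * queue lock on our behalf. */
static kern_return_t
wake_thread_if_waiting(wait_queue_t wq, event64_t event, thread_t thread)
{
	kern_return_t kr;
	spl_t s;

	s = splsched();			/* Conditions: at splsched */
	wait_queue_lock(wq);		/* Conditions: queue locked */
	kr = wait_queue_wakeup64_thread_locked(wq, event, thread,
					       THREAD_AWAKENED, TRUE);
	splx(s);
	return kr;			/* KERN_SUCCESS or KERN_NOT_WAITING */
}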
Example #2
void
ipc_port_set_qlimit(
	ipc_port_t		port,
	mach_port_msgcount_t	qlimit)
{
	assert(ip_active(port));

	/* wake up senders allowed by the new qlimit */

	if (qlimit > port->ip_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */

		wakeup = qlimit - port->ip_qlimit;

		for (i = 0; i < wakeup; i++) {
			ipc_thread_t th;

			th = ipc_thread_dequeue(&port->ip_blocked);
			if (th == ITH_NULL)
				break;

			th->ith_state = MACH_MSG_SUCCESS;
			thread_go(th);
		}
	}

	port->ip_qlimit = qlimit;
}
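A hedged sketch of how a caller might grow a port's queue limit; the ip_lock()/ip_unlock() discipline is inferred from Examples #15 and #17, and the helper name is hypothetical:

/* Raise the limit (never shrink it) and let ipc_port_set_qlimit wake
 * however many blocked senders the new limit admits. */
static void
port_grow_qlimit(ipc_port_t port, mach_port_msgcount_t new_limit)
{
	ip_lock(port);
	if (ip_active(port) && new_limit > port->ip_qlimit)
		ipc_port_set_qlimit(port, new_limit);
	ip_unlock(port);
}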
Example #3
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		spl is assumed not raised; this routine raises it
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
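Unlike the _locked variant in Example #1, this routine does all of its own spl and lock management, so a caller sketch is a single call; the helper name, event value, and THREAD_AWAKENED result are purely illustrative:

/* Hypothetical caller: no locks held, no spl raised on entry. */
static void
interrupt_waiter(wait_queue_t wq, thread_t thread)
{
	kern_return_t kr;

	kr = wait_queue_wakeup64_thread(wq, (event64_t)0, thread,
					THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* thread already left the queue; nothing to undo */
	}
}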
Example #4
/*
 *	Routine:	wait_queue_wakeup64_one_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.
 *
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		KERN_SUCCESS: It was, and is, now removed.
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;
		
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		return res;
	}

	return KERN_NOT_WAITING;
}
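A sketch of the locked one-thread wakeup, again borrowing the spl and lock setup from Example #3; the helper name and wait result are illustrative:

static kern_return_t
wake_one_waiter(wait_queue_t wq, event64_t event)
{
	kern_return_t kr;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	/* unlock == TRUE: the queue lock is dropped before the thread runs */
	kr = wait_queue_wakeup64_one_locked(wq, event, THREAD_AWAKENED, TRUE);
	splx(s);
	return kr;
}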
Example #5
int	event_go_left(t_core *core)
{
  core->curr_cam->pos.y += 100;
  thread_go(core);
  myx_flip(core->myx);
  return (0);
}
Example #6
int	event_prev_cam(t_core *core)
{
  if (!core->curr_cam->prev)
    return (-1);
  core->curr_cam = core->curr_cam->prev;
  printf("Switching to previous camera: %s\n",
	 core->curr_cam->name);
  thread_go(core);
  myx_flip(core->myx);
  return (0);
}
Example #7
int	event_next_cam(t_core *core)
{
  if (!core->curr_cam->next)
    return (-1);
  core->curr_cam = core->curr_cam->next;
  printf("Switching to next camera: %s\n",
	 core->curr_cam->name);
  thread_go(core);
  myx_flip(core->myx);
  return (0);
}
Example #8
static void
ipc_port_changed(
	ipc_port_t		port,
	mach_msg_return_t	mr)
{
	ipc_thread_t th;

	while ((th = thread_pool_get_act((ipc_object_t)port, 0)) != ITH_NULL) {
		th->ith_state = mr;
		thread_go(th);
	}
}
Example #9
void
ipc_mqueue_move(
	ipc_mqueue_t		dest,
	ipc_mqueue_t		source,
	const ipc_port_t	port)
{
	ipc_kmsg_queue_t oldq, newq;
	ipc_thread_queue_t blockedq;
	ipc_kmsg_t kmsg, next;
	ipc_thread_t th;

	oldq = &source->imq_messages;
	newq = &dest->imq_messages;
	blockedq = &dest->imq_threads;

	for (kmsg = ipc_kmsg_queue_first(oldq);
	     kmsg != IKM_NULL; kmsg = next) {
		next = ipc_kmsg_queue_next(oldq, kmsg);

		/* only move messages sent to port */

		if (kmsg->ikm_header.msgh_remote_port != (mach_port_t) port)
			continue;

		ipc_kmsg_rmqueue(oldq, kmsg);

		/* before adding kmsg to newq, check for a blocked receiver */

		while ((th = ipc_thread_dequeue(blockedq)) != ITH_NULL) {
			assert(ipc_kmsg_queue_empty(newq));

			/* check if the receiver can handle the message */

			if (kmsg->ikm_header.msgh_size <= th->ith_msize) {
				th->ith_state = MACH_MSG_SUCCESS;
				th->ith_kmsg = kmsg;
				th->ith_seqno = port->ip_seqno++;

				/* set up state before resuming the thread */
				thread_go(th);

				goto next_kmsg;
			}

			th->ith_state = MACH_RCV_TOO_LARGE;
			th->ith_msize = kmsg->ikm_header.msgh_size;
			thread_go(th);
		}

		/* didn't find a receiver to handle the message */

		ipc_kmsg_enqueue(newq, kmsg);
	    next_kmsg:;
	}
}
Example #10
void
ipc_mqueue_changed(
	ipc_mqueue_t		mqueue,
	mach_msg_return_t	mr)
{
	ipc_thread_t th;

	while ((th = ipc_thread_dequeue(&mqueue->imq_threads)) != ITH_NULL) {
		th->ith_state = mr;
		thread_go(th);
	}
}
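Examples #8 and #10 share one pattern: drain every blocked thread, stamp a completion code into ith_state, then resume it with thread_go. A hedged caller sketch, assuming the imq_lock()/imq_unlock() pair from Example #15 protects the queue:

/* Hypothetical: tell all blocked receivers the port has changed.
 * MACH_RCV_PORT_CHANGED matches the receive-side handling in
 * Example #15. */
static void
notify_receivers_port_changed(ipc_mqueue_t mqueue)
{
	imq_lock(mqueue);
	ipc_mqueue_changed(mqueue, MACH_RCV_PORT_CHANGED);
	imq_unlock(mqueue);
}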
Example #11
void
ipc_mqueue_move(
	ipc_mqueue_t	dest,
	ipc_mqueue_t	source,
	ipc_port_t	port)
{
	ipc_kmsg_queue_t oldq, newq;
	ipc_thread_queue_t blockedq;
	ipc_kmsg_t kmsg, next;
	ipc_thread_t th;

	oldq = &source->imq_messages;
	newq = &dest->imq_messages;
	blockedq = &dest->imq_threads;

	for (kmsg = ipc_kmsg_queue_first(oldq); kmsg != IKM_NULL; kmsg = next) {
		next = ipc_kmsg_queue_next(oldq, kmsg);

		/* only move messages sent to port */

		if (kmsg->ikm_header.msgh_remote_port != (mach_port_t) port)
			continue;

		ipc_kmsg_rmqueue(oldq, kmsg);

		/* before adding kmsg to newq, check for a blocked receiver */

		if ((th = ipc_thread_dequeue(blockedq)) != ITH_NULL) {
			assert(ipc_kmsg_queue_empty(newq));

			/*
			 * Got a receiver.  Hand the receiver the message
			 * and process the next one.
			 */
			th->ith_state = MACH_MSG_SUCCESS;
			th->ith_kmsg = kmsg;
			th->ith_seqno = port->ip_seqno++;
			thread_go(th);
		} else {
			/* didn't find a receiver; enqueue the message */
			ipc_kmsg_enqueue(newq, kmsg);
		}
	}
}
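Note the design difference from Example #9: there, a dequeued receiver too small for the message is resumed with MACH_RCV_TOO_LARGE and the loop retries with the next blocked thread; here, any dequeued receiver is assumed able to take the message, so the size check and the retry loop disappear.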
Example #12
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
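A sketch showing the priority-promotion knob: if the selected thread's sched_pri is below the priority argument (and that argument is at most MAXPRI), the routine promotes the thread before setting it running. Helper name and wait result are illustrative:

static kern_return_t
wake_one_with_boost(wait_queue_t wq, event_t event, int boost_pri)
{
	/* boost_pri <= MAXPRI requests promotion; a value at or below
	 * the waiter's current sched_pri leaves it untouched. */
	return wait_queue_wakeup_one(wq, event, THREAD_AWAKENED, boost_pri);
}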
Example #13
/*
 *	Routine:	wait_queue_wakeup64_identity_locked
 *	Purpose:
 *		Select a single thread that is most-eligible to run and
 *		set it running.  But return the thread locked.
 *
 * 	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 * 	Returns:
 *		a pointer to the locked thread that was awakened
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not NULL */
}
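Because the awakened thread comes back locked, the caller must drop the thread lock itself; a hedged sketch (the helper name and THREAD_NULL check are illustrative):

static void
wake_and_inspect(wait_queue_t wq, event64_t event)
{
	thread_t thread;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	thread = wait_queue_wakeup64_identity_locked(wq, event,
						     THREAD_AWAKENED, TRUE);
	/* queue already unlocked here (unlock == TRUE) */
	if (thread != THREAD_NULL) {
		/* ... examine the still-locked thread ... */
		thread_unlock(thread);
	}
	splx(s);
}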
Example #14
/*
 *	Routine:        wait_queue_wakeup64_all_locked
 *	Purpose:
 *		Wakeup some number of threads that are in the specified
 *		wait queue and waiting on the specified event.
 *	Conditions:
 *		wait queue already locked (may be released).
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));

	queue_init(q);

	/*
	 * Select the threads that we will wake up.	 The threads
	 * are returned to us locked and cleanly removed from the
	 * wait queue.
	 */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/*
	 * For each thread, set it running.
	 */
	res = KERN_NOT_WAITING;
	while (!queue_empty(q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
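A broadcast sketch in the same style; as above, the spl and lock setup is borrowed from Example #3 and the helper name is hypothetical:

static kern_return_t
wake_all_waiters(wait_queue_t wq, event64_t event)
{
	kern_return_t kr;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	kr = wait_queue_wakeup64_all_locked(wq, event, THREAD_AWAKENED, TRUE);
	splx(s);
	return kr;	/* KERN_NOT_WAITING if the queue was empty */
}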
Example #15
mach_msg_return_t
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	time_out,
	boolean_t		resume,
	void			(*continuation)(void),
	ipc_kmsg_t		*kmsgp,
	mach_port_seqno_t	*seqnop)
{
	ipc_port_t port;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;

    {
	ipc_kmsg_queue_t kmsgs = &mqueue->imq_messages;
	ipc_thread_t self = current_thread();

	if (resume)
		goto after_thread_block;

	for (;;) {
		kmsg = ipc_kmsg_queue_first(kmsgs);
		if (kmsg != IKM_NULL) {
			/* check space requirements */

			if (kmsg->ikm_header.msgh_size > max_size) {
				* (mach_msg_size_t *) kmsgp =
					kmsg->ikm_header.msgh_size;
				imq_unlock(mqueue);
				return MACH_RCV_TOO_LARGE;
			}

			ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
			seqno = port->ip_seqno++;
			break;
		}

		/* must block waiting for a message */

		if (option & MACH_RCV_TIMEOUT) {
			if (time_out == 0) {
				imq_unlock(mqueue);
				return MACH_RCV_TIMED_OUT;
			}

			thread_will_wait_with_timeout(self, time_out);
		} else
			thread_will_wait(self);

		ipc_thread_enqueue_macro(&mqueue->imq_threads, self);
		self->ith_state = MACH_RCV_IN_PROGRESS;
		self->ith_msize = max_size;

		imq_unlock(mqueue);
		if (continuation != (void (*)(void)) 0) {
			counter(c_ipc_mqueue_receive_block_user++);
		} else {
			counter(c_ipc_mqueue_receive_block_kernel++);
		}
		thread_block(continuation);
	after_thread_block:
		imq_lock(mqueue);

		/* why did we wake up? */

		if (self->ith_state == MACH_MSG_SUCCESS) {
			/* pick up the message that was handed to us */

			kmsg = self->ith_kmsg;
			seqno = self->ith_seqno;
			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
			break;
		}

		switch (self->ith_state) {
		    case MACH_RCV_TOO_LARGE:
			/* pick up size of the too-large message */

			* (mach_msg_size_t *) kmsgp = self->ith_msize;
			/* fall-through */

		    case MACH_RCV_PORT_DIED:
		    case MACH_RCV_PORT_CHANGED:
			/* something bad happened to the port/set */

			imq_unlock(mqueue);
			return self->ith_state;

		    case MACH_RCV_IN_PROGRESS:
			/*
			 *	Awakened for other than IPC completion.
			 *	Remove ourselves from the waiting queue,
			 *	then check the wakeup cause.
			 */

			ipc_thread_rmqueue(&mqueue->imq_threads, self);

			switch (self->ith_wait_result) {
			    case THREAD_INTERRUPTED:
				/* receive was interrupted - give up */

				imq_unlock(mqueue);
				return MACH_RCV_INTERRUPTED;

			    case THREAD_TIMED_OUT:
				/* timeout expired */

				assert(option & MACH_RCV_TIMEOUT);
				time_out = 0;
				break;

			    case THREAD_RESTART:
			    default:
#if MACH_ASSERT
				assert(!"ipc_mqueue_receive");
#else
				panic("ipc_mqueue_receive");
#endif
			}
			break;

		    default:
#if MACH_ASSERT
			assert(!"ipc_mqueue_receive: strange ith_state");
#else
			panic("ipc_mqueue_receive: strange ith_state");
#endif
		}
	}

	/* we have a kmsg; unlock the msg queue */

	imq_unlock(mqueue);
	assert(kmsg->ikm_header.msgh_size <= max_size);
    }

    {
	ipc_marequest_t marequest;

	marequest = kmsg->ikm_marequest;
	if (marequest != IMAR_NULL) {
		ipc_marequest_destroy(marequest);
		kmsg->ikm_marequest = IMAR_NULL;
	}
	assert((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);

	assert(port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
	ip_lock(port);

	if (ip_active(port)) {
		ipc_thread_queue_t senders;
		ipc_thread_t sender;

		assert(port->ip_msgcount > 0);
		port->ip_msgcount--;

		senders = &port->ip_blocked;
		sender = ipc_thread_queue_first(senders);

		if ((sender != ITH_NULL) &&
		    (port->ip_msgcount < port->ip_qlimit)) {
			ipc_thread_rmqueue(senders, sender);
			sender->ith_state = MACH_MSG_SUCCESS;
			thread_go(sender);
		}
	}

	ip_unlock(port);
    }

	current_task()->messages_received++;

	*kmsgp = kmsg;
	*seqnop = seqno;
	return MACH_MSG_SUCCESS;
}
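A minimal synchronous caller sketch: no timeout, no continuation, first call (resume == FALSE). The imq_lock() on entry is inferred from the imq_unlock() on every exit path above; the helper name is hypothetical:

static mach_msg_return_t
receive_one(ipc_mqueue_t mq, mach_msg_size_t max_size,
	    ipc_kmsg_t *kmsgp, mach_port_seqno_t *seqnop)
{
	imq_lock(mq);		/* routine returns with the queue unlocked */
	return ipc_mqueue_receive(mq, MACH_MSG_OPTION_NONE, max_size,
				  MACH_MSG_TIMEOUT_NONE, FALSE,
				  (void (*)(void)) 0, kmsgp, seqnop);
}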
Example #16
void
ipc_port_destroy(
	ipc_port_t	port)
{
	ipc_port_t pdrequest, nsrequest;
	ipc_mqueue_t mqueue;
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	ipc_thread_t sender;
	ipc_port_request_t dnrequests;
	thread_pool_t thread_pool;

	assert(ip_active(port));
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */
	assert(io_otype((ipc_object_t)port) == IOT_PORT);
	assert(port->ip_pset == IPS_NULL);
	assert(port->ip_mscount == 0);
	assert(port->ip_seqno == 0);

	/* first check for a backup port */

	pdrequest = port->ip_pdrequest;
	if (pdrequest != IP_NULL) {
		/* we assume the ref for pdrequest */
		port->ip_pdrequest = IP_NULL;

		/* make port be in limbo */
		port->ip_receiver_name = MACH_PORT_NAME_NULL;
		port->ip_destination = IP_NULL;
		ip_unlock(port);

		if (!ipc_port_check_circularity(port, pdrequest)) {
			/* consumes our refs for port and pdrequest */
			ipc_notify_port_destroyed(pdrequest, port);
			return;
		} else {
			/* consume pdrequest and destroy port */
			ipc_port_release_sonce(pdrequest);
		}

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_pset == IPS_NULL);
		assert(port->ip_mscount == 0);
		assert(port->ip_seqno == 0);
		assert(port->ip_pdrequest == IP_NULL);
		assert(port->ip_receiver_name == MACH_PORT_NAME_NULL);
		assert(port->ip_destination == IP_NULL);

		/* fall through and destroy the port */
	}

	/*
	 *	rouse all blocked senders
	 *
	 *	This must be done with the port locked, because
	 *	ipc_mqueue_send can play with the ip_blocked queue
	 *	of a dead port.
	 */

	while ((sender = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
		sender->ith_state = MACH_MSG_SUCCESS;
		thread_go(sender);
	}

	/* once port is dead, we don't need to keep it locked */

	port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
	port->ip_timestamp = ipc_port_timestamp();

	/* save for later */
	dnrequests = port->ip_dnrequests;
	port->ip_dnrequests = IPR_NULL;
	ip_unlock(port);
	/* wakeup any threads waiting on this pool port for an activation */
	if ((thread_pool = &port->ip_thread_pool) != THREAD_POOL_NULL)
		thread_pool_wakeup(thread_pool);

	/* throw away no-senders request */

	nsrequest = port->ip_nsrequest;
	if (nsrequest != IP_NULL)
		ipc_notify_send_once(nsrequest); /* consumes ref */

	/* destroy any queued messages */

	mqueue = &port->ip_messages;
	kmqueue = &mqueue->imq_messages;

	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		assert(kmsg->ikm_header->msgh_remote_port ==
						(mach_port_t) port);

		port->ip_msgcount--;
		ipc_port_release(port);
		kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(kmsg);

	}

	/* generate dead-name notifications */
	if (dnrequests != IPR_NULL) {
		ipc_port_dnnotify(port, dnrequests);
	}

	if (ip_kotype(port) != IKOT_NONE)
		ipc_kobject_destroy(port);

	/* XXXX Perhaps should verify that ip_thread_pool is empty! */

	ipc_port_release(port); /* consume caller's ref */
}
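The "rouse all blocked senders" loop above is the same idiom Examples #2, #8, and #10 use: dequeue, stamp ith_state, thread_go. Distilled into a standalone sketch (assumes the port lock is held, as in the routine above; the helper name is hypothetical):

static void
rouse_blocked_senders(ipc_port_t port, mach_msg_return_t state)
{
	ipc_thread_t th;

	while ((th = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
		th->ith_state = state;	/* state first, then resume */
		thread_go(th);
	}
}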
Example #17
mach_msg_return_t
ipc_mqueue_send(
	ipc_kmsg_t 		kmsg,
	mach_msg_option_t 	option,
	mach_msg_timeout_t 	time_out)
{
	ipc_port_t port;

	port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
	assert(IP_VALID(port));

	ip_lock(port);

	if (port->ip_receiver == ipc_space_kernel) {
		ipc_kmsg_t reply;

		/*
		 *	We can check ip_receiver == ipc_space_kernel
		 *	before checking that the port is active because
		 *	ipc_port_dealloc_kernel clears ip_receiver
		 *	before destroying a kernel port.
		 */

		assert(ip_active(port));
		ip_unlock(port);

		reply = ipc_kobject_server(kmsg);
		if (reply != IKM_NULL)
			ipc_mqueue_send_always(reply);

		return MACH_MSG_SUCCESS;
	}

	for (;;) {
		ipc_thread_t self;

		/*
		 *	Can't deliver to a dead port.
		 *	However, we can pretend it got sent
		 *	and was then immediately destroyed.
		 */

		if (!ip_active(port)) {
			/*
			 *	We can't let ipc_kmsg_destroy deallocate
			 *	the port right, because we might end up
			 *	in an infinite loop trying to deliver
			 *	a send-once notification.
			 */

			ip_release(port);
			ip_check_unlock(port);
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
			ipc_kmsg_destroy(kmsg);
			return MACH_MSG_SUCCESS;
		}

		/*
		 *  Don't block if:
		 *	1) We're under the queue limit.
		 *	2) Caller used the MACH_SEND_ALWAYS internal option.
		 *	3) Message is sent to a send-once right.
		 */

		if ((port->ip_msgcount < port->ip_qlimit) ||
		    (option & MACH_SEND_ALWAYS) ||
		    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
						MACH_MSG_TYPE_PORT_SEND_ONCE))
			break;

		/* must block waiting for queue to clear */

		self = current_thread();

		if (option & MACH_SEND_TIMEOUT) {
			if (time_out == 0) {
				ip_unlock(port);
				return MACH_SEND_TIMED_OUT;
			}

			thread_will_wait_with_timeout(self, time_out);
		} else
			thread_will_wait(self);

		ipc_thread_enqueue(&port->ip_blocked, self);
		self->ith_state = MACH_SEND_IN_PROGRESS;

	 	ip_unlock(port);
		counter(c_ipc_mqueue_send_block++);
		thread_block((void (*)(void)) 0);
		ip_lock(port);

		/* why did we wake up? */

		if (self->ith_state == MACH_MSG_SUCCESS)
			continue;
		assert(self->ith_state == MACH_SEND_IN_PROGRESS);

		/* take ourselves off blocked queue */

		ipc_thread_rmqueue(&port->ip_blocked, self);

		/*
		 *	Thread wakeup-reason field tells us why
		 *	the wait was interrupted.
		 */

		switch (self->ith_wait_result) {
		    case THREAD_INTERRUPTED:
			/* send was interrupted - give up */

			ip_unlock(port);
			return MACH_SEND_INTERRUPTED;

		    case THREAD_TIMED_OUT:
			/* timeout expired */

			assert(option & MACH_SEND_TIMEOUT);
			time_out = 0;
			break;

		    case THREAD_RESTART:
		    default:
#if MACH_ASSERT
			assert(!"ipc_mqueue_send");
#else
			panic("ipc_mqueue_send");
#endif
		}
	}

	if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
		ip_unlock(port);

		/* don't allow the creation of a circular loop */

		ipc_kmsg_destroy(kmsg);
		return MACH_MSG_SUCCESS;
	}

    {
	ipc_mqueue_t mqueue;
	ipc_pset_t pset;
	ipc_thread_t receiver;
	ipc_thread_queue_t receivers;

	port->ip_msgcount++;
	assert(port->ip_msgcount > 0);

	pset = port->ip_pset;
	if (pset == IPS_NULL)
		mqueue = &port->ip_messages;
	else
		mqueue = &pset->ips_messages;

	imq_lock(mqueue);
	receivers = &mqueue->imq_threads;

	/*
	 *	Can unlock the port now that the msg queue is locked
	 *	and we know the port is active.  While the msg queue
	 *	is locked, we have control of the kmsg, so the ref in
	 *	it for the port is still good.  If the msg queue is in
	 *	a set (dead or alive), then we're OK because the port
	 *	is still a member of the set and the set won't go away
	 *	until the port is taken out, which tries to lock the
	 *	set's msg queue to remove the port's msgs.
	 */

	ip_unlock(port);

	/* check for a receiver for the message */

	for (;;) {
		receiver = ipc_thread_queue_first(receivers);
		if (receiver == ITH_NULL) {
			/* no receivers; queue kmsg */

			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			imq_unlock(mqueue);
			break;
		}

		ipc_thread_rmqueue_first_macro(receivers, receiver);
		assert(ipc_kmsg_queue_empty(&mqueue->imq_messages));

		if (kmsg->ikm_header.msgh_size <= receiver->ith_msize) {
			/* got a successful receiver */

			receiver->ith_state = MACH_MSG_SUCCESS;
			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = port->ip_seqno++;
			imq_unlock(mqueue);

			thread_go(receiver);
			break;
		}

		receiver->ith_state = MACH_RCV_TOO_LARGE;
		receiver->ith_msize = kmsg->ikm_header.msgh_size;
		thread_go(receiver);
	}
    }

	current_task()->messages_sent++;

	return MACH_MSG_SUCCESS;
}
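A sketch of the non-blocking path the code above carves out for MACH_SEND_ALWAYS: the queue-limit check is skipped, so the caller can never block. The helper name is illustrative; this mirrors the ipc_mqueue_send_always() call used for kernel replies above:

static mach_msg_return_t
send_never_block(ipc_kmsg_t kmsg)
{
	/* MACH_SEND_ALWAYS bypasses the ip_qlimit check, so the
	 * timeout argument is never consulted. */
	return ipc_mqueue_send(kmsg, MACH_SEND_ALWAYS, MACH_MSG_TIMEOUT_NONE);
}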