Example 1
boolean_t
ipc_right_check(
	ipc_space_t 	space,
	ipc_port_t 	port,
	mach_port_t 	name,
	ipc_entry_t 	entry)
{
	ipc_entry_bits_t bits;

	assert(space->is_active);
	assert(port == (ipc_port_t) entry->ie_object);

	ip_lock(port);
	if (ip_active(port))
		return FALSE;	/* port remains locked for the caller */
	ip_unlock(port);

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

		/* clean up msg-accepted request */

		if (bits & IE_BITS_MAREQUEST) {
			bits &= ~IE_BITS_MAREQUEST;

			ipc_marequest_cancel(space, name);
		}

		ipc_reverse_remove(space, (ipc_object_t) port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert((bits & IE_BITS_MAREQUEST) == 0);
	}

	ipc_port_release(port);

	/* convert entry to dead name */

	bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	if (entry->ie_request != 0) {
		assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);

		entry->ie_request = 0;
		bits++;		/* increment urefs */
	}

	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	return TRUE;
}
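The dead-name conversion above happens lazily: the entry is rewritten only when the kernel next examines it. The effect is visible from user space. A minimal sketch, assuming a Mach host (e.g. macOS) and the standard mach_port_* user API, with error checks elided:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	mach_port_t port;
	mach_port_type_t type;

	/* a receive right, plus a send right under the same name */
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_insert_right(mach_task_self(), port, port,
			       MACH_MSG_TYPE_MAKE_SEND);

	/* destroying the receive right kills the port... */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);

	/* ...and the surviving send right is converted to a dead name
	 * the next time the entry is checked, e.g. by this lookup */
	mach_port_type(mach_task_self(), port, &type);
	printf("dead name: %s\n",
	       (type & MACH_PORT_TYPE_DEAD_NAME) ? "yes" : "no");
	return 0;
}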
Example 2
/*
 *	Routine:	thread_release_and_exception_return
 *	Purpose:
 *		Continue after thread was halted.
 *	Conditions:
 *		Nothing locked.  We are running on a new kernel stack and
 *		control goes back to thread_exception_return.
 *	Returns:
 *		Doesn't return.
 */
static void
thread_release_and_exception_return(void)
{
	ipc_thread_t self = current_thread();
	/* reply port must be released */
	ipc_port_release(self->ith_port);
	thread_exception_return();
	/*NOTREACHED*/
}
Example 3
void
ipc_port_release_receive(
	ipc_port_t	port)
{
	ipc_port_t dest;

	assert(IP_VALID(port));

	ip_lock(port);
	assert(ip_active(port));
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	dest = port->ip_destination;

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL)
		ipc_port_release(dest);
}
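ipc_port_release_receive handles a naked receive right held outside any space (note the assert that ip_receiver_name is null, i.e. the port is in limbo or in transit). The everyday way a receive right dies is a user-space deallocation; a hedged sketch of that trigger (macOS API):

#include <mach/mach.h>

int main(void)
{
	mach_port_t port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return 1;

	/* dropping the only receive right destroys the port; the kernel
	 * ends up in ipc_port_destroy (see Examples 6 and 8) */
	kr = mach_port_mod_refs(mach_task_self(), port,
				MACH_PORT_RIGHT_RECEIVE, -1);
	return (kr == KERN_SUCCESS) ? 0 : 1;
}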
Example 4
EXTERN void
iokit_release_port( ipc_port_t port )
{
    ipc_port_release( port );
}
Example 5
static void*
commpage_allocate(
	vm_map_t	submap,			// commpage32_map or commpage_map64
	size_t		area_used,		// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	vm_prot_t	uperm)
{
	vm_offset_t	kernel_addr = 0;	// address of commpage in kernel map
	vm_offset_t	zero = 0;
	vm_size_t	size = area_used;	// size actually populated
	vm_map_entry_t	entry;
	ipc_port_t	handle;
	kern_return_t	kr;

	if (submap == NULL)
		panic("commpage submap is null");

	if ((kr = vm_map(kernel_map,
			 &kernel_addr,
			 area_used,
			 0,
			 VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK),
			 NULL,
			 0,
			 FALSE,
			 VM_PROT_ALL,
			 VM_PROT_ALL,
			 VM_INHERIT_NONE)))
		panic("cannot allocate commpage %d", kr);

	if ((kr = vm_map_wire(kernel_map,
			      kernel_addr,
			      kernel_addr+area_used,
			      VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
			      FALSE)))
		panic("cannot wire commpage: %d", kr);

	/* 
	 * Now that the object is created and wired into the kernel map, mark it so that no delay
	 * copy-on-write will ever be performed on it as a result of mapping it into user-space.
	 * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and
	 * that would be a real disaster.
	 *
	 * JMM - What we really need is a way to create it like this in the first place.
	 */
	if (!vm_map_lookup_entry( kernel_map,
				  vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)),
				  &entry) ||
	    entry->is_sub_map)
		panic("cannot find commpage entry");
	VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE;

	if ((kr = mach_make_memory_entry( kernel_map,		// target map
				    &size,		// size 
				    kernel_addr,	// offset (address in kernel map)
				    uperm,	// protections as specified
				    &handle,		// this is the object handle we get
				    NULL )))		// parent_entry (what is this?)
		panic("cannot make entry for commpage %d", kr);

	if ((kr = vm_map_64(	submap,				// target map (shared submap)
			&zero,				// address (map into 1st page in submap)
			area_used,			// size
			0,				// mask
			VM_FLAGS_FIXED,			// flags (it must be 1st page in submap)
			handle,				// port is the memory entry we just made
			0,                              // offset (map 1st page in memory entry)
			FALSE,                          // copy
			uperm,   // cur_protection (R-only in user map)
			uperm,   // max_protection
		        VM_INHERIT_SHARE )))             // inheritance
		panic("cannot map commpage %d", kr);

	ipc_port_release(handle);
	/* Make the kernel mapping non-executable. This cannot be done
	 * at the time of map entry creation as mach_make_memory_entry
	 * cannot handle disjoint permissions at this time.
	 */
	kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	assert (kr == KERN_SUCCESS);

	return (void*)(intptr_t)kernel_addr;                     // return address in kernel map
}
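The same make-entry / map / release-handle sequence commpage_allocate performs in the kernel is available to user programs through the mach_vm interface. A rough user-space analog, assuming macOS (variable names are illustrative, error checks elided):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

int main(void)
{
	mach_vm_address_t src = 0, dst = 0;
	memory_object_size_t size = vm_page_size;
	mach_port_t handle = MACH_PORT_NULL;

	/* backing pages (the kernel code used vm_map + vm_map_wire) */
	mach_vm_allocate(mach_task_self(), &src, size, VM_FLAGS_ANYWHERE);

	/* a named entry for the region, like mach_make_memory_entry above */
	mach_make_memory_entry_64(mach_task_self(), &size, src,
				  VM_PROT_READ | VM_PROT_WRITE,
				  &handle, MACH_PORT_NULL);

	/* a second mapping of the same pages, like the vm_map_64 call */
	mach_vm_map(mach_task_self(), &dst, size, 0, VM_FLAGS_ANYWHERE,
		    handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_SHARE);

	/* once mapped, the entry port is no longer needed -- the
	 * user-space analog of the ipc_port_release(handle) above */
	mach_port_deallocate(mach_task_self(), handle);

	printf("aliased mapping at 0x%llx\n", (unsigned long long)dst);
	return 0;
}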
Example 6
void
ipc_port_destroy(
	ipc_port_t	port)
{
	ipc_port_t pdrequest, nsrequest;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	ipc_port_request_t dnrequests;

	assert(ip_active(port));
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */
	assert(port->ip_pset_count == 0);
	assert(port->ip_mscount == 0);

	/* first check for a backup port */

	pdrequest = port->ip_pdrequest;
	if (pdrequest != IP_NULL) {
		/* we assume the ref for pdrequest */
		port->ip_pdrequest = IP_NULL;

		/* make port be in limbo */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
		ip_unlock(port);

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
		return;
	}

	/* once port is dead, we don't need to keep it locked */

	port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
	port->ip_timestamp = ipc_port_timestamp();

	/* save for later */
	dnrequests = port->ip_dnrequests;
	port->ip_dnrequests = IPR_NULL;

	/*
	 * If the port has a preallocated message buffer and that buffer
	 * is not in use, free it.  If the buffer is in use, the kmsg
	 * free will detect that we freed the association and free the
	 * buffer like a normal one.
	 */
	if (IP_PREALLOC(port)) {
		kmsg = port->ip_premsg;
		assert(kmsg != IKM_NULL);
		IP_CLEAR_PREALLOC(port, kmsg);
		if (!ikm_prealloc_inuse(kmsg))
			ipc_kmsg_free(kmsg);
	}
	ip_unlock(port);

	/* throw away no-senders request */

	nsrequest = port->ip_nsrequest;
	if (nsrequest != IP_NULL)
		ipc_notify_send_once(nsrequest); /* consumes ref */

	/* destroy any queued messages */
	mqueue = &port->ip_messages;
	ipc_mqueue_destroy(mqueue);

	/* generate dead-name notifications */
	if (dnrequests != IPR_NULL) {
		ipc_port_dnnotify(port, dnrequests);
	}

	ipc_kobject_destroy(port);

	ipc_port_release(port); /* consume caller's ref */
}
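The pdrequest branch above is what services MACH_NOTIFY_PORT_DESTROYED: when a backup port is registered, the dying receive right is handed to it instead of being destroyed. A sketch of arming such a backup from user space (macOS API; recent kernels restrict who may register this notification, so treat it as illustrative):

#include <mach/mach.h>

int main(void)
{
	mach_port_t port, backup, previous;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &backup);

	/* sets port->ip_pdrequest on the kernel side */
	mach_port_request_notification(mach_task_self(), port,
				       MACH_NOTIFY_PORT_DESTROYED, 0, backup,
				       MACH_MSG_TYPE_MAKE_SEND_ONCE,
				       &previous);

	/* destroying the receive right now delivers a notification
	 * carrying that right to backup instead of destroying it */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	return 0;
}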
Example 7
kern_return_t
ipc_port_dngrow(
	ipc_port_t		port,
	ipc_table_elems_t 	target_size)
{
	ipc_table_size_t its;
	ipc_port_request_t otable, ntable;

	assert(ip_active(port));

	otable = port->ip_dnrequests;
	if (otable == IPR_NULL)
		its = &ipc_table_dnrequests[0];
	else
		its = otable->ipr_size + 1;

	if (target_size != ITS_SIZE_NONE) {
		if ((otable != IPR_NULL) &&
		    (target_size <= otable->ipr_size->its_size)) {
			ip_unlock(port);
			return KERN_SUCCESS;
		}
		while ((its->its_size) && (its->its_size < target_size)) {
			its++;
		}
		if (its->its_size == 0) {
			ip_unlock(port);
			return KERN_NO_SPACE;
		}
	}

	ip_reference(port);
	ip_unlock(port);

	if ((its->its_size == 0) ||
	    ((ntable = it_dnrequests_alloc(its)) == IPR_NULL)) {
		ipc_port_release(port);
		return KERN_RESOURCE_SHORTAGE;
	}

	ip_lock(port);
	ip_release(port);

	/*
	 *	Check that port is still active and that nobody else
	 *	has slipped in and grown the table on us.  Note that
	 *	just checking port->ip_dnrequests == otable isn't
	 *	sufficient; must check ipr_size.
	 */

	if (ip_active(port) &&
	    (port->ip_dnrequests == otable) &&
	    ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
		ipc_table_size_t oits;
		ipc_table_elems_t osize, nsize;
		ipc_port_request_index_t free, i;

		/* copy old table to new table */

		if (otable != IPR_NULL) {
			oits = otable->ipr_size;
			osize = oits->its_size;
			free = otable->ipr_next;

			(void) memcpy((void *)(ntable + 1),
			      (const void *)(otable + 1),
			      (osize - 1) * sizeof(struct ipc_port_request));
		} else {
			osize = 1;
			oits = 0;
			free = 0;
		}

		nsize = its->its_size;
		assert(nsize > osize);

		/* add new elements to the new table's free list */

		for (i = osize; i < nsize; i++) {
			ipc_port_request_t ipr = &ntable[i];

			ipr->ipr_name = MACH_PORT_NULL;
			ipr->ipr_next = free;
			free = i;
		}

		ntable->ipr_next = free;
		ntable->ipr_size = its;
		port->ip_dnrequests = ntable;
		ip_unlock(port);

		if (otable != IPR_NULL) {
			it_dnrequests_free(oits, otable);
		}
	} else {
		ip_check_unlock(port);
		it_dnrequests_free(its, ntable);
	}

	return KERN_SUCCESS;
}
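The shape of ipc_port_dngrow is the classic unlock/allocate/relock/revalidate dance: allocation may block, so the lock is dropped (with a port reference held to keep the object alive), and after relocking the code must tolerate a competitor having grown the table first. A generic pthreads sketch of the same pattern (the table type and sizes are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table {
	pthread_mutex_t lock;
	size_t size;
	int *slots;
};

/* grow t to at least want slots; returns 0 on success */
int table_grow(struct table *t, size_t want)
{
	pthread_mutex_lock(&t->lock);
	for (;;) {
		if (t->size >= want) {	/* someone else grew it for us */
			pthread_mutex_unlock(&t->lock);
			return 0;
		}
		size_t osize = t->size;

		/* drop the lock across the blocking allocation */
		pthread_mutex_unlock(&t->lock);
		int *nslots = calloc(want, sizeof *nslots);
		if (nslots == NULL)
			return -1;	/* cf. KERN_RESOURCE_SHORTAGE */

		pthread_mutex_lock(&t->lock);
		if (t->size != osize) {	/* lost the race: retry */
			free(nslots);
			continue;
		}
		if (osize != 0)
			memcpy(nslots, t->slots, osize * sizeof *nslots);
		free(t->slots);
		t->slots = nslots;
		t->size = want;
		pthread_mutex_unlock(&t->lock);
		return 0;
	}
}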
Example 8
void
ipc_port_destroy(
	ipc_port_t	port)
{
	ipc_port_t pdrequest, nsrequest;
	ipc_mqueue_t mqueue;
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	ipc_thread_t sender;
	ipc_port_request_t dnrequests;
	thread_pool_t thread_pool;

	assert(ip_active(port));
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */
	assert(io_otype((ipc_object_t)port) == IOT_PORT);
	assert(port->ip_pset == IPS_NULL);
	assert(port->ip_mscount == 0);
	assert(port->ip_seqno == 0);

	/* first check for a backup port */

	pdrequest = port->ip_pdrequest;
	if (pdrequest != IP_NULL) {
		/* we assume the ref for pdrequest */
		port->ip_pdrequest = IP_NULL;

		/* make port be in limbo */
		port->ip_receiver_name = MACH_PORT_NAME_NULL;
		port->ip_destination = IP_NULL;
		ip_unlock(port);

		if (!ipc_port_check_circularity(port, pdrequest)) {
			/* consumes our refs for port and pdrequest */
			ipc_notify_port_destroyed(pdrequest, port);
			return;
		} else {
			/* consume pdrequest and destroy port */
			ipc_port_release_sonce(pdrequest);
		}

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_pset == IPS_NULL);
		assert(port->ip_mscount == 0);
		assert(port->ip_seqno == 0);
		assert(port->ip_pdrequest == IP_NULL);
		assert(port->ip_receiver_name == MACH_PORT_NAME_NULL);
		assert(port->ip_destination == IP_NULL);

		/* fall through and destroy the port */
	}

	/*
	 *	rouse all blocked senders
	 *
	 *	This must be done with the port locked, because
	 *	ipc_mqueue_send can play with the ip_blocked queue
	 *	of a dead port.
	 */

	while ((sender = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
		sender->ith_state = MACH_MSG_SUCCESS;
		thread_go(sender);
	}

	/* once port is dead, we don't need to keep it locked */

	port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
	port->ip_timestamp = ipc_port_timestamp();

	/* save for later */
	dnrequests = port->ip_dnrequests;
	port->ip_dnrequests = IPR_NULL;
	ip_unlock(port);
	/* wakeup any threads waiting on this pool port for an activation */
	if ((thread_pool = &port->ip_thread_pool) != THREAD_POOL_NULL)
		thread_pool_wakeup(thread_pool);

	/* throw away no-senders request */

	nsrequest = port->ip_nsrequest;
	if (nsrequest != IP_NULL)
		ipc_notify_send_once(nsrequest); /* consumes ref */

	/* destroy any queued messages */

	mqueue = &port->ip_messages;
	kmqueue = &mqueue->imq_messages;

	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		assert(kmsg->ikm_header->msgh_remote_port ==
						(mach_port_t) port);

		port->ip_msgcount--;
		ipc_port_release(port);
		kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(kmsg);

	}

	/* generate dead-name notifications */
	if (dnrequests != IPR_NULL) {
		ipc_port_dnnotify(port, dnrequests);
	}

	if (ip_kotype(port) != IKOT_NONE)
		ipc_kobject_destroy(port);

	/* XXXX Perhaps should verify that ip_thread_pool is empty! */

	ipc_port_release(port); /* consume caller's ref */
}
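The dnrequests table drained by ipc_port_dnnotify holds dead-name notification registrations. From user space they are armed like this (macOS API, error checks elided):

#include <mach/mach.h>

int main(void)
{
	mach_port_t port, notify, previous;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &notify);

	/* a send right whose death we want to hear about */
	mach_port_insert_right(mach_task_self(), port, port,
			       MACH_MSG_TYPE_MAKE_SEND);

	/* adds an entry to the port's dnrequests table */
	mach_port_request_notification(mach_task_self(), port,
				       MACH_NOTIFY_DEAD_NAME, 0, notify,
				       MACH_MSG_TYPE_MAKE_SEND_ONCE,
				       &previous);

	/* killing the receive right makes the send right a dead name;
	 * ipc_port_dnnotify queues MACH_NOTIFY_DEAD_NAME on notify */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	return 0;
}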
Example 9
kern_return_t
catch_mach_exception_raise(
        __unused mach_port_t exception_port,
        mach_port_t thread,
        mach_port_t task,
        exception_type_t exception,
        mach_exception_data_t code,
        __unused mach_msg_type_number_t codeCnt
)
{
	task_t			self = current_task();
	thread_t		th_act;
	ipc_port_t 		thread_port;
	struct proc		*p;
	kern_return_t		result = MACH_MSG_SUCCESS;
	int			ux_signal = 0;
	mach_exception_code_t 	ucode = 0;
	struct uthread 		*ut;
	mach_port_name_t thread_name = CAST_MACH_PORT_TO_NAME(thread);
	mach_port_name_t task_name = CAST_MACH_PORT_TO_NAME(task);

	/*
	 *	Convert local thread name to global port.
	 */
	if (MACH_PORT_VALID(thread_name) &&
	    (ipc_object_copyin(get_task_ipcspace(self), thread_name,
			       MACH_MSG_TYPE_PORT_SEND,
			       (void *) &thread_port) == MACH_MSG_SUCCESS)) {
	    if (IPC_PORT_VALID(thread_port)) {
		    th_act = convert_port_to_thread(thread_port);
		    ipc_port_release(thread_port);
	    } else {
		    th_act = THREAD_NULL;
	    }

	/*
	 *	Catch bogus ports
	 */
	if (th_act != THREAD_NULL) {

	    /*
	     *	Convert exception to unix signal and code.
	     */
	    ux_exception(exception, code[0], code[1], &ux_signal, &ucode);

	    ut = get_bsdthread_info(th_act);
	    p = proc_findthread(th_act);

	    /* Can't deliver a signal without a bsd process reference */
	    if (p == NULL) {
		    ux_signal = 0;
		    result = KERN_FAILURE;
	    }

	    /*
	     * Stack overflow should result in a SIGSEGV signal
	     * on the alternate stack.
	     * but we have one or more guard pages after the
	     * stack top, so we would get a KERN_PROTECTION_FAILURE
	     * exception instead of KERN_INVALID_ADDRESS, resulting in
	     * a SIGBUS signal.
	     * Detect that situation and select the correct signal.
	     */
	    if (code[0] == KERN_PROTECTION_FAILURE &&
		ux_signal == SIGBUS) {
		    user_addr_t		sp, stack_min, stack_max;
		    int			mask;
		    struct sigacts	*ps;

		    sp = code[1];

		    stack_max = p->user_stack;
		    stack_min = p->user_stack - MAXSSIZ;
		    if (sp >= stack_min &&
			sp < stack_max) {
			    /*
			     * This is indeed a stack overflow.  Deliver a
			     * SIGSEGV signal.
			     */
			    ux_signal = SIGSEGV;

			    /*
			     * If the thread/process is not ready to handle
			     * SIGSEGV on an alternate stack, force-deliver
			     * SIGSEGV with a SIG_DFL handler.
			     */
			    mask = sigmask(ux_signal);
			    ps = p->p_sigacts;
			    if ((p->p_sigignore & mask) ||
				(ut->uu_sigwait & mask) ||
				(ut->uu_sigmask & mask) ||
				(ps->ps_sigact[SIGSEGV] == SIG_IGN) ||
				(! (ps->ps_sigonstack & mask))) {
				    p->p_sigignore &= ~mask;
				    p->p_sigcatch &= ~mask;
				    ps->ps_sigact[SIGSEGV] = SIG_DFL;
				    ut->uu_sigwait &= ~mask;
				    ut->uu_sigmask &= ~mask;
			    }
		    }
	    }
	    /*
	     *	Send signal.
	     */
	    if (ux_signal != 0) {
			ut->uu_exception = exception;
			//ut->uu_code = code[0]; // filled in by threadsignal
			ut->uu_subcode = code[1];			
			threadsignal(th_act, ux_signal, code[0]);
	    }
	    if (p != NULL) 
		    proc_rele(p);
	    thread_deallocate(th_act);
	}
	else
	    result = KERN_INVALID_ARGUMENT;
    }
    else
    	result = KERN_INVALID_ARGUMENT;

    /*
     *	Delete our send rights to the task port.
     */
    (void)mach_port_deallocate(get_task_ipcspace(ux_handler_self), task_name);

    return (result);
}
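catch_mach_exception_raise is the server half of the exception RPC. The client side arms it by registering an exception port on the task; a hedged sketch of that registration (macOS API; the receive loop that would dispatch incoming messages through mach_exc_server is omitted):

#include <mach/mach.h>

int main(void)
{
	mach_port_t exc_port;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
			   &exc_port);
	mach_port_insert_right(mach_task_self(), exc_port, exc_port,
			       MACH_MSG_TYPE_MAKE_SEND);

	/* EXCEPTION_DEFAULT delivers thread/task ports in the message;
	 * MACH_EXCEPTION_CODES selects 64-bit codes, matching the
	 * mach_exception_data_t parameter of the handler above */
	task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
				 exc_port,
				 (exception_behavior_t)
				 (EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES),
				 THREAD_STATE_NONE);
	return 0;
}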
Example 10
kern_return_t
ipc_right_copyout(
	ipc_space_t		space,
	mach_port_t		name,
	ipc_entry_t		entry,
	mach_msg_type_name_t	msgt_name,
	boolean_t		overflow,
	ipc_object_t		object)
{
	ipc_entry_bits_t bits = entry->ie_bits;
	ipc_port_t port;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = (ipc_port_t) object;

	switch (msgt_name) {
	    case MACH_MSG_TYPE_PORT_SEND_ONCE:
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(port->ip_sorights > 0);

		/* transfer send-once right and ref to entry */
		ip_unlock(port);

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1);
		break;

	    case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs < MACH_PORT_UREFS_MAX);

			if (urefs+1 == MACH_PORT_UREFS_MAX) {
				if (overflow) {
					/* leave urefs pegged to maximum */

					port->ip_srights--;
					ip_release(port);
					ip_unlock(port);
					return KERN_SUCCESS;
				}

				ip_unlock(port);
				return KERN_UREFS_OVERFLOW;
			}

			port->ip_srights--;
			ip_release(port);
			ip_unlock(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry */
			ip_release(port);
			ip_unlock(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_unlock(port);

			/* entry is locked holding ref, so can use port */

			entry->ie_name = name;
			ipc_reverse_insert(space, (ipc_object_t) port, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
		break;

	    case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;

		assert(port->ip_mscount == 0);
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		dest = port->ip_destination;

		port->ip_receiver_name = name;
		port->ip_receiver = space;

		/*
		 *	Clear the protected payload field to retain
		 *	the behavior of mach_msg.
		 */
		ipc_port_flag_protected_payload_clear(port);

		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			ip_release(port);
			ip_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_reverse_remove(space, (ipc_object_t) port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer ref to entry */
			ip_unlock(port);
		}

		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;

		if (dest != IP_NULL)
			ipc_port_release(dest);
		break;
	    }

	    default:
#if MACH_ASSERT
		assert(!"ipc_right_copyout: strange rights");
#else
		panic("ipc_right_copyout: strange rights");
#endif
	}

	return KERN_SUCCESS;
}
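The urefs arithmetic above works because the user-reference count lives in the low bits of ie_bits, so (bits | MACH_PORT_TYPE_SEND) + 1 both sets the type and bumps the count (compare the bare bits++ in Example 1). The count is observable from user space through mach_port_get_refs; a small sketch (macOS API, error checks elided):

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	mach_port_t port;
	mach_port_urefs_t refs = 0;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_insert_right(mach_task_self(), port, port,
			       MACH_MSG_TYPE_MAKE_SEND);	/* urefs = 1 */

	/* each copyout of a send right to an existing name bumps urefs;
	 * mod_refs does the same thing explicitly */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 2);

	mach_port_get_refs(mach_task_self(), port,
			   MACH_PORT_RIGHT_SEND, &refs);
	printf("send urefs: %u\n", refs);	/* expect 3 */
	return 0;
}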
Example 11
void
exception_raise_continue_slow(
	mach_msg_return_t mr,
	ipc_kmsg_t 	  kmsg,
	mach_port_seqno_t seqno)
{
	ipc_thread_t self = current_thread();
	ipc_port_t reply_port = self->ith_port;
	ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;

	while (mr == MACH_RCV_INTERRUPTED) {
		/*
		 *	Somebody is trying to force this thread
		 *	to a clean point.  We must cooperate
		 *	and then resume the receive.
		 */

		while (thread_should_halt(self)) {
			/*
			 *	If the thread is about to terminate, the
			 *	continuation will never run, so release the
			 *	reply port reference here.  Otherwise the
			 *	continuation releases it when the thread
			 *	resumes.
			 */
			if (self->ast & AST_TERMINATE)
				ipc_port_release(reply_port);
			thread_halt_self(thread_release_and_exception_return);
		}

		ip_lock(reply_port);
		if (!ip_active(reply_port)) {
			ip_unlock(reply_port);
			mr = MACH_RCV_PORT_DIED;
			break;
		}

		imq_lock(reply_mqueue);
		ip_unlock(reply_port);

		mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, exception_raise_continue,
					&kmsg, &seqno);
		/* reply_mqueue is unlocked */
	}
	ipc_port_release(reply_port);

	assert((mr == MACH_MSG_SUCCESS) ||
	       (mr == MACH_RCV_PORT_DIED));

	if (mr == MACH_MSG_SUCCESS) {
		/*
		 *	Consume the reply message.
		 */

		ipc_port_release_sonce(reply_port);
		mr = exception_parse_reply(kmsg);
	}

	if ((mr == KERN_SUCCESS) ||
	    (mr == MACH_RCV_PORT_DIED)) {
		thread_exception_return();
		/*NOTREACHED*/
	}

	if (self->ith_exc != KERN_SUCCESS) {
		exception_try_task(self->ith_exc,
				   self->ith_exc_code,
				   self->ith_exc_subcode);
		/*NOTREACHED*/
	}

	exception_no_server();
	/*NOTREACHED*/
}
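The retry-on-MACH_RCV_INTERRUPTED loop has a direct user-space analog: mach_msg can return MACH_RCV_INTERRUPTED when the thread is disturbed mid-receive, and callers typically just retry. A sketch, assuming the caller holds a receive right in rcv_port:

#include <mach/mach.h>

/* receive one message on rcv_port, retrying interrupted receives */
mach_msg_return_t
receive_retry(mach_port_t rcv_port, mach_msg_header_t *hdr,
	      mach_msg_size_t size)
{
	mach_msg_return_t mr;

	do {
		mr = mach_msg(hdr, MACH_RCV_MSG, 0, size, rcv_port,
			      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	} while (mr == MACH_RCV_INTERRUPTED);

	return mr;
}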