Example 1: mach_msg_rpc_from_kernel_body(), the kernel-internal RPC primitive from the XNU IPC layer (send a request, block on the thread's reply port, copy the reply back to the kernel caller).
mach_msg_return_t
mach_msg_rpc_from_kernel_body(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
#if !IKM_SUPPORT_LEGACY
	__unused
#endif
    boolean_t           legacy)
{
	thread_t self = current_thread();
	ipc_port_t reply;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	assert(msg->msgh_local_port == MACH_PORT_NULL);

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	reply = self->ith_rpc_reply;
	if (reply == IP_NULL) {
		reply = ipc_port_alloc_reply();
		if ((reply == IP_NULL) ||
		    (self->ith_rpc_reply != IP_NULL))
			panic("mach_msg_rpc_from_kernel");
		self->ith_rpc_reply = reply;
	}

	/* insert send-once right for the reply port */
	kmsg->ikm_header->msgh_local_port = reply;
	kmsg->ikm_header->msgh_bits |=
		MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE);

#if IKM_SUPPORT_LEGACY
	if (legacy)
		mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg);
	else
		mr = ipc_kmsg_copyin_from_kernel(kmsg);
#else
	mr = ipc_kmsg_copyin_from_kernel(kmsg);
#endif
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}
	mr = ipc_kmsg_send(kmsg, 
			   MACH_SEND_KERNEL_DEFAULT,
			   MACH_MSG_TIMEOUT_NONE);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
		return mr;
	}

	/*
	 * Wait for the reply.  The receive is interruptible; loop and
	 * retry unless the thread is terminating or has pending handlers.
	 */
	for (;;) {
		ipc_mqueue_t mqueue;

		assert(reply->ip_pset_count == 0);
		assert(ip_active(reply));

		/* JMM - why this check? */
		if (!self->active) {
			ipc_port_dealloc_reply(reply);
			self->ith_rpc_reply = IP_NULL;
			return MACH_RCV_INTERRUPTED;
		}

		self->ith_continuation = (void (*)(mach_msg_return_t))0;

		mqueue = &reply->ip_messages;
		ipc_mqueue_receive(mqueue,
				   MACH_MSG_OPTION_NONE,
				   MACH_MSG_SIZE_MAX,
				   MACH_MSG_TIMEOUT_NONE,
				   THREAD_INTERRUPTIBLE);

		mr = self->ith_state;
		kmsg = self->ith_kmsg;
		seqno = self->ith_seqno;

		if (mr == MACH_MSG_SUCCESS) {
			break;
		}

		assert(mr == MACH_RCV_INTERRUPTED);

		assert(reply == self->ith_rpc_reply);

		if (self->handlers) {
			ipc_port_dealloc_reply(reply);
			self->ith_rpc_reply = IP_NULL;
			return(mr);
		}
	}

	/* 
	 * Check to see how much of the message/trailer can be received.
	 * We chose the maximum trailer that will fit, since we don't
	 * have options telling us which trailer elements the caller needed.
	 */
	if (rcv_size >= kmsg->ikm_header->msgh_size) {
		mach_msg_format_0_trailer_t *trailer =  (mach_msg_format_0_trailer_t *)
			((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);

		if (rcv_size >= kmsg->ikm_header->msgh_size + MAX_TRAILER_SIZE) {
			/* Enough room for a maximum trailer */
			trailer->msgh_trailer_size = MAX_TRAILER_SIZE;
		} 
		else if (rcv_size < kmsg->ikm_header->msgh_size + 
			   trailer->msgh_trailer_size) {
			/* no room for even the basic (default) trailer */
			trailer->msgh_trailer_size = 0;
		}
		assert(trailer->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0);
		rcv_size = kmsg->ikm_header->msgh_size + trailer->msgh_trailer_size;
		mr = MACH_MSG_SUCCESS;
	} else {
		mr = MACH_RCV_TOO_LARGE;
	}


	/*
	 *	We want to preserve rights and memory in reply!
	 *	We don't have to put them anywhere; just leave them
	 *	as they are.
	 */
#if IKM_SUPPORT_LEGACY
	if (legacy)
		ipc_kmsg_copyout_to_kernel_legacy(kmsg, ipc_space_reply);
	else
		ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
#else
	ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
#endif
	ipc_kmsg_put_to_kernel(msg, kmsg, rcv_size);
	return mr;
}
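
The routine above stamps the current thread's reply port into msgh_local_port as a make-send-once right, sends the request, blocks on the reply port until a reply (or an interruption) arrives, and then copies the reply plus the largest trailer that fits back into the caller's buffer. A minimal sketch of a kernel-resident caller follows; the request/reply layout (struct my_request, struct my_reply), MY_REQUEST_ID, and server_port are hypothetical placeholders for illustration, not definitions from the source above.

/* Hypothetical kernel-side caller.  struct my_request, struct my_reply,
 * MY_REQUEST_ID and server_port are illustrative placeholders, not XNU
 * definitions. */
#define MY_REQUEST_ID	1234		/* hypothetical message id */

struct my_request {
	mach_msg_header_t	Head;
	int			value;
};

struct my_reply {
	mach_msg_header_t	Head;
	int			result;
	mach_msg_trailer_t	trailer;	/* room for the kernel-appended trailer */
};

static mach_msg_return_t
my_rpc(ipc_port_t server_port, int value, int *result)
{
	union {
		struct my_request	req;
		struct my_reply		rep;
	} buf;
	mach_msg_return_t mr;

	buf.req.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	buf.req.Head.msgh_size = sizeof buf.req;
	buf.req.Head.msgh_remote_port = (mach_port_t) server_port;
	buf.req.Head.msgh_local_port = MACH_PORT_NULL;	/* asserted above; the reply port is filled in */
	buf.req.Head.msgh_id = MY_REQUEST_ID;
	buf.req.value = value;

	mr = mach_msg_rpc_from_kernel_body(&buf.req.Head,
					   sizeof buf.req,
					   sizeof buf.rep,
					   FALSE);	/* non-legacy copyin/copyout */
	if (mr == MACH_MSG_SUCCESS)
		*result = buf.rep.result;
	return mr;
}

On success the reply message, including whatever trailer fit in the buffer, has overwritten the request in place, which is why the sketch unions the two layouts over one buffer.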
Example 2: exception_raise(), the hand-optimized exception RPC fast path from classic Mach 3-era IPC, with a slow_exception_raise fallback that goes through the general message queues.
void
exception_raise(
	ipc_port_t 	dest_port,
	ipc_port_t 	thread_port,
	ipc_port_t 	task_port,
	integer_t 	_exception, 
	integer_t 	code, 
	integer_t 	subcode)
{
	ipc_thread_t self = current_thread();
	ipc_thread_t receiver;
	ipc_port_t reply_port;
	ipc_mqueue_t dest_mqueue;
	ipc_mqueue_t reply_mqueue;
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	assert(IP_VALID(dest_port));

	/*
	 *	We will eventually need a message buffer.
	 *	Grab the buffer now, while nothing is locked.
	 *	This buffer will get handed to the exception server,
	 *	and it will give the buffer back with its reply.
	 */

	kmsg = ikm_cache();
	if (kmsg != IKM_NULL) {
		ikm_cache() = IKM_NULL;
		ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
	} else {
		kmsg = ikm_alloc(IKM_SAVED_MSG_SIZE);
		if (kmsg == IKM_NULL)
			panic("exception_raise");
		ikm_init(kmsg, IKM_SAVED_MSG_SIZE);
	}

	/*
	 *	We need a reply port for the RPC.
	 *	Check first for a cached port.
	 */

	ith_lock(self);
	assert(self->ith_self != IP_NULL);

	reply_port = self->ith_rpc_reply;
	if (reply_port == IP_NULL) {
		ith_unlock(self);
		reply_port = ipc_port_alloc_reply();
		ith_lock(self);
		if ((reply_port == IP_NULL) ||
		    (self->ith_rpc_reply != IP_NULL))
			panic("exception_raise");
		self->ith_rpc_reply = reply_port;
	}

	ip_lock(reply_port);
	assert(ip_active(reply_port));
	ith_unlock(self);

	/*
	 *	Make a naked send-once right for the reply port,
	 *	to hand to the exception server.
	 *	Make an extra reference for the reply port,
	 *	to receive on.  This protects us against
	 *	mach_msg_abort_rpc.
	 */

	reply_port->ip_sorights++;
	ip_reference(reply_port);

	ip_reference(reply_port);
	self->ith_port = reply_port;

	reply_mqueue = &reply_port->ip_messages;
	imq_lock(reply_mqueue);
	assert(ipc_kmsg_queue_empty(&reply_mqueue->imq_messages));
	ip_unlock(reply_port);

	/*
	 *	Make sure we can queue to the destination port.
	 */

	if (!ip_lock_try(dest_port)) {
		imq_unlock(reply_mqueue);
		goto slow_exception_raise;
	}

	if (!ip_active(dest_port) ||
	    (dest_port->ip_receiver == ipc_space_kernel)) {
		imq_unlock(reply_mqueue);
		ip_unlock(dest_port);
		goto slow_exception_raise;
	}

	/*
	 *	Find the destination message queue.
	 */

    {
	ipc_pset_t dest_pset;

	dest_pset = dest_port->ip_pset;
	if (dest_pset == IPS_NULL)
		dest_mqueue = &dest_port->ip_messages;
	else
		dest_mqueue = &dest_pset->ips_messages;
    }

	if (!imq_lock_try(dest_mqueue)) {
		imq_unlock(reply_mqueue);
		ip_unlock(dest_port);
		goto slow_exception_raise;
	}

	/*
	 *	Safe to unlock dest_port, because we hold
	 *	dest_mqueue locked.  We never bother changing
	 *	dest_port->ip_msgcount.
	 */

	ip_unlock(dest_port);

	/*
	 * The fast path needs a receiver that is already blocked in a
	 * plain receive (mach_msg_continue, or mach_msg_receive_continue
	 * with a large enough buffer and no MACH_RCV_NOTIFY) and to which
	 * we can hand off directly; otherwise take the slow path.
	 */
	receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
	if ((receiver == ITH_NULL) ||
	    !((receiver->swap_func == (void (*)()) mach_msg_continue) ||
	      ((receiver->swap_func ==
				(void (*)()) mach_msg_receive_continue) &&
	       (sizeof(struct mach_exception) <= receiver->ith_msize) &&
	       ((receiver->ith_option & MACH_RCV_NOTIFY) == 0))) ||
	    !thread_handoff(self, exception_raise_continue, receiver)) {
		imq_unlock(reply_mqueue);
		imq_unlock(dest_mqueue);
		goto slow_exception_raise;
	}
	counter(c_exception_raise_block++);

	assert(current_thread() == receiver);

	/*
	 *	We need to finish preparing self for its
	 *	time asleep in reply_mqueue.  self is left
	 *	holding the extra ref for reply_port.
	 */

	ipc_thread_enqueue_macro(&reply_mqueue->imq_threads, self);
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_msize = MACH_MSG_SIZE_MAX;
	imq_unlock(reply_mqueue);

	/*
	 *	Finish extracting receiver from dest_mqueue.
	 */

	ipc_thread_rmqueue_first_macro(
		&dest_mqueue->imq_threads, receiver);
	imq_unlock(dest_mqueue);

	/*
	 *	Release the receiver's reference for his object.
	 */
    {
	ipc_object_t object = receiver->ith_object;

	io_lock(object);
	io_release(object);
	io_check_unlock(object);
    }

    {
	struct mach_exception *exc =
			(struct mach_exception *) &kmsg->ikm_header;
	ipc_space_t space = receiver->task->itk_space;

	/*
	 *	We are running as the receiver now.  We hold
	 *	the following resources, which must be consumed:
	 *		kmsg, send-once right for reply_port
	 *		send rights for dest_port, thread_port, task_port
	 *	Synthesize a kmsg for copyout to the receiver.
	 */

	exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
					      MACH_MSG_TYPE_PORT_SEND) |
			       MACH_MSGH_BITS_COMPLEX);
	exc->Head.msgh_size = sizeof *exc;
     /* exc->Head.msgh_remote_port later */
     /* exc->Head.msgh_local_port later */
	exc->Head.msgh_seqno = 0;
	exc->Head.msgh_id = MACH_EXCEPTION_ID;
	exc->threadType = exc_port_proto;
     /* exc->thread later */
	exc->taskType = exc_port_proto;
     /* exc->task later */
	exc->exceptionType = exc_code_proto;
	exc->exception = _exception;
	exc->codeType = exc_code_proto;
	exc->code = code;
	exc->subcodeType = exc_code_proto;
	exc->subcode = subcode;

	/*
	 *	Check that the receiver can handle the message.
	 */

	if (receiver->ith_rcv_size < sizeof(struct mach_exception)) {
		/*
		 *	ipc_kmsg_destroy is a handy way to consume
		 *	the resources we hold, but it requires setup.
		 */

		exc->Head.msgh_bits =
			(MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					MACH_MSG_TYPE_PORT_SEND_ONCE) |
			 MACH_MSGH_BITS_COMPLEX);
		exc->Head.msgh_remote_port = (mach_port_t) dest_port;
		exc->Head.msgh_local_port = (mach_port_t) reply_port;
		exc->thread = (mach_port_t) thread_port;
		exc->task = (mach_port_t) task_port;

		ipc_kmsg_destroy(kmsg);
		thread_syscall_return(MACH_RCV_TOO_LARGE);
		/*NOTREACHED*/
	}

	is_write_lock(space);
	assert(space->is_active);

	/*
	 *	To do an atomic copyout, need simultaneous
	 *	locks on both ports and the space.
	 */

	ip_lock(dest_port);
	if (!ip_active(dest_port) ||
	    !ip_lock_try(reply_port)) {
	    abort_copyout:
		ip_unlock(dest_port);
		is_write_unlock(space);

		/*
		 *	Oh well, we have to do the header the slow way.
		 *	First make it look like it's in-transit.
		 */

		exc->Head.msgh_bits =
			(MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					MACH_MSG_TYPE_PORT_SEND_ONCE) |
			 MACH_MSGH_BITS_COMPLEX);
		exc->Head.msgh_remote_port = (mach_port_t) dest_port;
		exc->Head.msgh_local_port = (mach_port_t) reply_port;

		mr = ipc_kmsg_copyout_header(&exc->Head, space,
					     MACH_PORT_NULL);
		if (mr == MACH_MSG_SUCCESS)
			goto copyout_body;

		/*
		 *	Ack!  Prepare for ipc_kmsg_copyout_dest.
		 *	It will consume thread_port and task_port.
		 */

		exc->thread = (mach_port_t) thread_port;
		exc->task = (mach_port_t) task_port;

		ipc_kmsg_copyout_dest(kmsg, space);
		(void) ipc_kmsg_put(receiver->ith_msg, kmsg,
				    sizeof(mach_msg_header_t));
		thread_syscall_return(mr);
		/*NOTREACHED*/
	}

	if (!ip_active(reply_port)) {
		ip_unlock(reply_port);
		goto abort_copyout;
	}

	assert(reply_port->ip_sorights > 0);
	ip_unlock(reply_port);

    {
	kern_return_t kr;
	ipc_entry_t entry;

	kr = ipc_entry_get (space, &exc->Head.msgh_remote_port, &entry);
	if (kr)
		goto abort_copyout;
    {
	mach_port_gen_t gen;

	assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
	gen = entry->ie_bits + IE_BITS_GEN_ONE;

	/* optimized ipc_right_copyout */

	entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
    }

	entry->ie_object = (ipc_object_t) reply_port;
	is_write_unlock(space);
    }

	/* optimized ipc_object_copyout_dest */

	assert(dest_port->ip_srights > 0);
	ip_release(dest_port);

	exc->Head.msgh_local_port =
		((dest_port->ip_receiver == space) ?
		 dest_port->ip_receiver_name : MACH_PORT_NULL);

	if ((--dest_port->ip_srights == 0) &&
	    (dest_port->ip_nsrequest != IP_NULL)) {
		ipc_port_t nsrequest;
		mach_port_mscount_t mscount;

		/* a rather rare case */

		nsrequest = dest_port->ip_nsrequest;
		mscount = dest_port->ip_mscount;
		dest_port->ip_nsrequest = IP_NULL;
		ip_unlock(dest_port);

		ipc_notify_no_senders(nsrequest, mscount);
	} else
		ip_unlock(dest_port);

    copyout_body:
	/*
	 *	Optimized version of ipc_kmsg_copyout_body,
	 *	to handle the two ports in the body.
	 */

	mr = (ipc_kmsg_copyout_object(space, (ipc_object_t) thread_port,
				      MACH_MSG_TYPE_PORT_SEND, &exc->thread) |
	      ipc_kmsg_copyout_object(space, (ipc_object_t) task_port,
				      MACH_MSG_TYPE_PORT_SEND, &exc->task));
	if (mr != MACH_MSG_SUCCESS) {
		(void) ipc_kmsg_put(receiver->ith_msg, kmsg,
				    kmsg->ikm_header.msgh_size);
		thread_syscall_return(mr | MACH_RCV_BODY_ERROR);
		/*NOTREACHED*/
	}
    }

	/*
	 *	Optimized version of ipc_kmsg_put.
	 *	We must check ikm_cache after copyoutmsg.
	 */

	ikm_check_initialized(kmsg, kmsg->ikm_size);
	assert(kmsg->ikm_size == IKM_SAVED_KMSG_SIZE);

	if (copyoutmsg(&kmsg->ikm_header, receiver->ith_msg,
		       sizeof(struct mach_exception)) ||
	    (ikm_cache() != IKM_NULL)) {
		mr = ipc_kmsg_put(receiver->ith_msg, kmsg,
				  kmsg->ikm_header.msgh_size);
		thread_syscall_return(mr);
		/*NOTREACHED*/
	}

	ikm_cache() = kmsg;
	thread_syscall_return(MACH_MSG_SUCCESS);
	/*NOTREACHED*/
#ifndef	__GNUC__
	return; /* help for the compiler */
#endif

    slow_exception_raise: {
	struct mach_exception *exc =
			(struct mach_exception *) &kmsg->ikm_header;
	ipc_kmsg_t reply_kmsg;
	mach_port_seqno_t reply_seqno;

	exception_raise_misses++;

	/*
	 *	We hold the following resources, which must be consumed:
	 *		kmsg, send-once right and ref for reply_port
	 *		send rights for dest_port, thread_port, task_port
	 *	Synthesize a kmsg to send.
	 */

	exc->Head.msgh_bits = (MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					      MACH_MSG_TYPE_PORT_SEND_ONCE) |
			       MACH_MSGH_BITS_COMPLEX);
	exc->Head.msgh_size = sizeof *exc;
	exc->Head.msgh_remote_port = (mach_port_t) dest_port;
	exc->Head.msgh_local_port = (mach_port_t) reply_port;
	exc->Head.msgh_seqno = 0;
	exc->Head.msgh_id = MACH_EXCEPTION_ID;
	exc->threadType = exc_port_proto;
	exc->thread = (mach_port_t) thread_port;
	exc->taskType = exc_port_proto;
	exc->task = (mach_port_t) task_port;
	exc->exceptionType = exc_code_proto;
	exc->exception = _exception;
	exc->codeType = exc_code_proto;
	exc->code = code;
	exc->subcodeType = exc_code_proto;
	exc->subcode = subcode;

	ipc_mqueue_send_always(kmsg);

	/*
	 *	We are left with a ref for reply_port,
	 *	which we use to receive the reply message.
	 */

	ip_lock(reply_port);
	if (!ip_active(reply_port)) {
		ip_unlock(reply_port);
		exception_raise_continue_slow(MACH_RCV_PORT_DIED, IKM_NULL, /*dummy*/0);
		/*NOTREACHED*/
	}

	imq_lock(reply_mqueue);
	ip_unlock(reply_port);

	mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
				MACH_MSG_SIZE_MAX,
				MACH_MSG_TIMEOUT_NONE,
				FALSE, exception_raise_continue,
				&reply_kmsg, &reply_seqno);
	/* reply_mqueue is unlocked */

	exception_raise_continue_slow(mr, reply_kmsg, reply_seqno);
	/*NOTREACHED*/
    }
}
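
For reference, the request that both the fast path and slow_exception_raise assemble in place over kmsg->ikm_header is the classic exc_raise message. The layout below is reconstructed from the field accesses in exception_raise() above; the authoritative declaration lives in the Mach exception headers and may differ in detail (the *Type fields are the classic mach_msg_type_t descriptors assigned from exc_port_proto and exc_code_proto).

/* Layout inferred from the accesses in exception_raise() above; see the
 * Mach exception/MIG headers for the authoritative declaration. */
struct mach_exception {
	mach_msg_header_t	Head;
	mach_msg_type_t		threadType;	/* exc_port_proto */
	mach_port_t		thread;		/* victim thread's port */
	mach_msg_type_t		taskType;	/* exc_port_proto */
	mach_port_t		task;		/* victim task's port */
	mach_msg_type_t		exceptionType;	/* exc_code_proto */
	integer_t		exception;	/* which exception (EXC_*) */
	mach_msg_type_t		codeType;	/* exc_code_proto */
	integer_t		code;
	mach_msg_type_t		subcodeType;	/* exc_code_proto */
	integer_t		subcode;
};

The fast path hand-copies the port rights (dest_port, reply_port, thread_port, task_port) directly into the receiver's space, bypassing the general copyout machinery; everything after the slow_exception_raise label rebuilds the same message and pushes it through ipc_mqueue_send_always() followed by an ordinary ipc_mqueue_receive().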