Example #1
mach_msg_return_t
mach_msg_send_from_kernel_proper(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	mr = ipc_kmsg_copyin_from_kernel(kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}

	mr = ipc_kmsg_send(kmsg, 
			   MACH_SEND_KERNEL_DEFAULT,
			   MACH_MSG_TIMEOUT_NONE);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
	}

	return mr;
}
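The routine above is the common one-way case: pull the message into a kmsg, copy in kernel rights, and send with the kernel-default options. Below is a minimal caller sketch (not part of the source above), assuming the usual osfmk includes (e.g. <mach/message.h> and <kern/ipc_mig.h>) and that the kernel already holds a send right on the destination port; the message layout, msgh_id value, and example_send() helper are illustrative assumptions.

/* Hypothetical in-kernel caller of mach_msg_send_from_kernel_proper(). */
typedef struct {
	mach_msg_header_t	header;
	uint32_t		payload;	/* inline data only, no descriptors */
} example_msg_t;

static mach_msg_return_t
example_send(mach_port_t dest_port)	/* assumed: a send right the kernel holds */
{
	example_msg_t m;

	bzero(&m, sizeof(m));
	m.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	m.header.msgh_size        = sizeof(m);
	m.header.msgh_remote_port = dest_port;
	m.header.msgh_local_port  = MACH_PORT_NULL;	/* one-way: no reply port */
	m.header.msgh_id          = 0x12345;		/* hypothetical message id */
	m.payload                 = 42;

	/* On failure the send path above has already freed or destroyed the kmsg. */
	return mach_msg_send_from_kernel_proper(&m.header, sizeof(m));
}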
Example #2
/* lifted from ipc_mig.c:mach_msg_send_from_kernel_proper() */
static mach_msg_return_t
mach_msg_send_from_remote_kernel(mach_msg_header_t	*msg,
                                 mach_msg_size_t	send_size,
                                 mach_node_t		node)
{
    ipc_kmsg_t kmsg;
    mach_msg_return_t mr;

    mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
    if (mr != MACH_MSG_SUCCESS)
        return mr;

    mr = ipc_kmsg_copyin_from_kernel(kmsg);
    if (mr != MACH_MSG_SUCCESS) {
        ipc_kmsg_free(kmsg);
        return mr;
    }

    kmsg->ikm_node = node;	// node that needs to receive message ack
    mr = ipc_kmsg_send(kmsg,
                       MACH_SEND_KERNEL_DEFAULT,
                       MACH_MSG_TIMEOUT_NONE);
    if (mr != MACH_MSG_SUCCESS) {
        ipc_kmsg_destroy(kmsg);
    }

    return mr;
}
Example #3
mach_msg_return_t
mach_msg_send_from_kernel_with_options(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size,
	mach_msg_option_t	option,
	mach_msg_timeout_t	timeout_val)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START);

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
		return mr;
	}

	mr = ipc_kmsg_copyin_from_kernel(kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
		return mr;
	}

	/*
	 * Until we are sure of its effects, we are disabling
	 * importance donation from the kernel-side of user
	 * threads in importance-donating tasks - unless the
	 * option to force importance donation is passed in,
	 * or the thread's SEND_IMPORTANCE option has been set.
	 * (11938665 & 23925818)
	 */
	if (current_thread()->options & TH_OPT_SEND_IMPORTANCE)
		option &= ~MACH_SEND_NOIMPORTANCE;
	else if ((option & MACH_SEND_IMPORTANCE) == 0)
		option |= MACH_SEND_NOIMPORTANCE;

	mr = ipc_kmsg_send(kmsg, option, timeout_val);

	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
	}
	
	return mr;
}
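The same flow as Example #1, but the caller supplies the send options and timeout. A hedged sketch of a send bounded by MACH_SEND_TIMEOUT follows; the 100 ms value and the example_send_bounded() helper are assumptions for illustration, and msg is assumed to be a header-led message already filled in as in the sketch after Example #1.

/* Hypothetical caller of the options/timeout variant. */
static mach_msg_return_t
example_send_bounded(mach_msg_header_t *msg, mach_msg_size_t size)
{
	/*
	 * MACH_SEND_TIMEOUT bounds the wait if the destination queue is full;
	 * the call then returns MACH_SEND_TIMED_OUT instead of blocking.
	 */
	return mach_msg_send_from_kernel_with_options(msg, size,
						      MACH_SEND_TIMEOUT,
						      100 /* ms, arbitrary */);
}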
Example #4
mach_msg_return_t
mach_msg_send_from_kernel_proper(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START);

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
		return mr;
	}

	mr = ipc_kmsg_copyin_from_kernel(kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
		return mr;
	}

	/*
	 * respect the thread's SEND_IMPORTANCE option to force importance
	 * donation from the kernel-side of user threads
	 * (11938665 & 23925818)
	 */
	mach_msg_option_t option = MACH_SEND_KERNEL_DEFAULT;
	if (current_thread()->options & TH_OPT_SEND_IMPORTANCE)
		option &= ~MACH_SEND_NOIMPORTANCE;

	mr = ipc_kmsg_send(kmsg, option, MACH_MSG_TIMEOUT_NONE);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
		KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr);
	}

	return mr;
}
Example #5
mach_msg_return_t
mach_msg_send_from_kernel_with_options(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size,
	mach_msg_option_t	option,
	mach_msg_timeout_t	timeout_val)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	mr = ipc_kmsg_copyin_from_kernel(kmsg);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}

#if 11938665
	/*
	 * Until we are sure of its effects, we are disabling
	 * importance donation from the kernel-side of user
	 * threads in importance-donating tasks - unless the
	 * option to force importance donation is passed in.
	 */
	if ((option & MACH_SEND_IMPORTANCE) == 0)
		option |= MACH_SEND_NOIMPORTANCE;
#endif
	mr = ipc_kmsg_send(kmsg, option, timeout_val);

	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
	}
	
	return mr;
}
Example #6
mach_msg_return_t
mach_msg_overwrite(
	mach_msg_header_t		*msg,
	mach_msg_option_t		option,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
	mach_port_name_t		rcv_name,
	__unused mach_msg_timeout_t	msg_timeout,
	__unused mach_port_name_t	notify,
	__unused mach_msg_header_t	*rcv_msg,
	__unused mach_msg_size_t	rcv_msg_size)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	mach_msg_trailer_size_t trailer_size;

	if (option & MACH_SEND_MSG) {
		mach_msg_size_t	msg_and_trailer_size;
		mach_msg_max_trailer_t	*max_trailer;

		if ((send_size < sizeof(mach_msg_header_t)) || (send_size & 3))
			return MACH_SEND_MSG_TOO_SMALL;

		if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE)
			return MACH_SEND_TOO_LARGE;

		msg_and_trailer_size = send_size + MAX_TRAILER_SIZE;
		kmsg = ipc_kmsg_alloc(msg_and_trailer_size);

		if (kmsg == IKM_NULL)
			return MACH_SEND_NO_BUFFER;

		(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, send_size);

		kmsg->ikm_header->msgh_size = send_size;

		/* 
		 * Reserve for the trailer the largest space (MAX_TRAILER_SIZE).
		 * However, the internal size field of the trailer (msgh_trailer_size)
		 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
		 * the cases where no implicit data is requested.
		 */
		max_trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + send_size);
		max_trailer->msgh_sender = current_thread()->task->sec_token;
		max_trailer->msgh_audit = current_thread()->task->audit_token;
		max_trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
		max_trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

		mr = ipc_kmsg_copyin(kmsg, space, map, &option);

		if (mr != MACH_MSG_SUCCESS) {
			ipc_kmsg_free(kmsg);
			return mr;
		}

		do {
			mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE, MACH_MSG_TIMEOUT_NONE);
		} while (mr == MACH_SEND_INTERRUPTED);

		assert(mr == MACH_MSG_SUCCESS);
	}

	if (option & MACH_RCV_MSG) {
		thread_t self = current_thread();

		do {
			ipc_object_t object;
			ipc_mqueue_t mqueue;

			mr = ipc_mqueue_copyin(space, rcv_name,
					       &mqueue, &object);
			if (mr != MACH_MSG_SUCCESS)
				return mr;
			/* hold ref for object */

			self->ith_continuation = (void (*)(mach_msg_return_t))0;
			ipc_mqueue_receive(mqueue,
					   MACH_MSG_OPTION_NONE,
					   MACH_MSG_SIZE_MAX,
					   MACH_MSG_TIMEOUT_NONE,
					   THREAD_ABORTSAFE);
			mr = self->ith_state;
			kmsg = self->ith_kmsg;
			seqno = self->ith_seqno;

			io_release(object);

		} while (mr == MACH_RCV_INTERRUPTED);
		if (mr != MACH_MSG_SUCCESS)
			return mr;


		trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, current_thread(), seqno, TRUE,
				kmsg->ikm_header->msgh_remote_port->ip_context);

		if (rcv_size < (kmsg->ikm_header->msgh_size + trailer_size)) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) memcpy((void *) msg, (const void *) kmsg->ikm_header, sizeof *msg);
			ipc_kmsg_free(kmsg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				ipc_kmsg_put_to_kernel(msg, kmsg,
						kmsg->ikm_header->msgh_size + trailer_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) memcpy((void *) msg, (const void *) kmsg->ikm_header, sizeof *msg);
				ipc_kmsg_free(kmsg);
			}

			return mr;
		}

		(void) memcpy((void *) msg, (const void *) kmsg->ikm_header,
			      kmsg->ikm_header->msgh_size + trailer_size);
		ipc_kmsg_free(kmsg);
	}

	return MACH_MSG_SUCCESS;
}
Example #7
mach_msg_return_t
mach_msg_rpc_from_kernel_body(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
#if !IKM_SUPPORT_LEGACY
	__unused
#endif
	boolean_t		legacy)
{
	thread_t self = current_thread();
	ipc_port_t reply;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	assert(msg->msgh_local_port == MACH_PORT_NULL);

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	reply = self->ith_rpc_reply;
	if (reply == IP_NULL) {
		reply = ipc_port_alloc_reply();
		if ((reply == IP_NULL) ||
		    (self->ith_rpc_reply != IP_NULL))
			panic("mach_msg_rpc_from_kernel");
		self->ith_rpc_reply = reply;
	}

	/* insert send-once right for the reply port */
	kmsg->ikm_header->msgh_local_port = reply;
	kmsg->ikm_header->msgh_bits |=
		MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE);

#if IKM_SUPPORT_LEGACY
	if (legacy)
		mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg);
	else
		mr = ipc_kmsg_copyin_from_kernel(kmsg);
#else
	mr = ipc_kmsg_copyin_from_kernel(kmsg);
#endif
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}
	mr = ipc_kmsg_send(kmsg, 
			   MACH_SEND_KERNEL_DEFAULT,
			   MACH_MSG_TIMEOUT_NONE);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_destroy(kmsg);
		return mr;
	}

	for (;;) {
		ipc_mqueue_t mqueue;

		assert(reply->ip_pset_count == 0);
		assert(ip_active(reply));

		/* JMM - why this check? */
		if (!self->active) {
			ipc_port_dealloc_reply(reply);
			self->ith_rpc_reply = IP_NULL;
			return MACH_RCV_INTERRUPTED;
		}

		self->ith_continuation = (void (*)(mach_msg_return_t))0;

		mqueue = &reply->ip_messages;
		ipc_mqueue_receive(mqueue,
				   MACH_MSG_OPTION_NONE,
				   MACH_MSG_SIZE_MAX,
				   MACH_MSG_TIMEOUT_NONE,
				   THREAD_INTERRUPTIBLE);

		mr = self->ith_state;
		kmsg = self->ith_kmsg;
		seqno = self->ith_seqno;

		if (mr == MACH_MSG_SUCCESS) {
			break;
		}

		assert(mr == MACH_RCV_INTERRUPTED);

		assert(reply == self->ith_rpc_reply);

		if (self->handlers) {
			ipc_port_dealloc_reply(reply);
			self->ith_rpc_reply = IP_NULL;
			return mr;
		}
	}

	/* 
	 * Check to see how much of the message/trailer can be received.
	 * We choose the maximum trailer that will fit, since we don't
	 * have options telling us which trailer elements the caller needs.
	 */
	if (rcv_size >= kmsg->ikm_header->msgh_size) {
		mach_msg_format_0_trailer_t *trailer =  (mach_msg_format_0_trailer_t *)
			((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);

		if (rcv_size >= kmsg->ikm_header->msgh_size + MAX_TRAILER_SIZE) {
			/* Enough room for a maximum trailer */
			trailer->msgh_trailer_size = MAX_TRAILER_SIZE;
		} 
		else if (rcv_size < kmsg->ikm_header->msgh_size + 
			   trailer->msgh_trailer_size) {
			/* no room for even the basic (default) trailer */
			trailer->msgh_trailer_size = 0;
		}
		assert(trailer->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0);
		rcv_size = kmsg->ikm_header->msgh_size + trailer->msgh_trailer_size;
		mr = MACH_MSG_SUCCESS;
	} else {
		mr = MACH_RCV_TOO_LARGE;
	}


	/*
	 *	We want to preserve rights and memory in reply!
	 *	We don't have to put them anywhere; just leave them
	 *	as they are.
	 */
#if IKM_SUPPORT_LEGACY
	if (legacy)
		ipc_kmsg_copyout_to_kernel_legacy(kmsg, ipc_space_reply);
	else
		ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
#else
	ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
#endif
	ipc_kmsg_put_to_kernel(msg, kmsg, rcv_size);
	return mr;
}
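For request/reply traffic this routine supplies the reply port itself (the per-thread ith_rpc_reply port) and inserts the send-once right for it, which is why callers must leave msgh_local_port set to MACH_PORT_NULL. A hedged caller sketch follows; the request/reply layouts, the msgh_id, and the example_rpc() helper are assumptions, and real callers normally reach this body through a thin wrapper (e.g. mach_msg_rpc_from_kernel_proper()) rather than passing the legacy flag directly.

/* Hypothetical in-kernel RPC over a single request/reply buffer. */
typedef struct {
	mach_msg_header_t	header;
	uint32_t		arg;
} example_request_t;

typedef struct {
	mach_msg_header_t	header;
	uint32_t		result;
	mach_msg_max_trailer_t	trailer;	/* leave room for the received trailer */
} example_reply_t;

typedef union {
	example_request_t	request;
	example_reply_t		reply;
} example_rpc_buf_t;

static mach_msg_return_t
example_rpc(mach_port_t dest_port, uint32_t arg, uint32_t *result_out)
{
	example_rpc_buf_t buf;
	mach_msg_return_t mr;

	bzero(&buf, sizeof(buf));
	buf.request.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	buf.request.header.msgh_size        = sizeof(buf.request);
	buf.request.header.msgh_remote_port = dest_port;
	buf.request.header.msgh_local_port  = MACH_PORT_NULL;	/* required: the reply port is added by the routine above */
	buf.request.header.msgh_id          = 0x12346;		/* hypothetical request id */
	buf.request.arg                     = arg;

	mr = mach_msg_rpc_from_kernel_body(&buf.request.header,
					   sizeof(buf.request),
					   sizeof(buf),		/* rcv_size: reply + trailer */
					   FALSE);		/* legacy == FALSE */
	if (mr == MACH_MSG_SUCCESS)
		*result_out = buf.reply.result;
	return mr;
}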
Example #8
mach_msg_return_t
mach_msg_overwrite_trap(
	struct mach_msg_overwrite_trap_args *args)
{
	mach_vm_address_t	msg_addr = args->msg;
	mach_msg_option_t	option = args->option;
	mach_msg_size_t		send_size = args->send_size;
	mach_msg_size_t		rcv_size = args->rcv_size;
	mach_port_name_t	rcv_name = args->rcv_name;
	mach_msg_timeout_t	msg_timeout = args->timeout;
	__unused mach_port_name_t notify = args->notify;
	mach_vm_address_t	rcv_msg_addr = args->rcv_msg;
	__unused mach_port_seqno_t temp_seqno = 0;

	mach_msg_return_t  mr = MACH_MSG_SUCCESS;
	vm_map_t map = current_map();

	/* Only accept options allowed by the user */
	option &= MACH_MSG_OPTION_USER;

	if (option & MACH_SEND_MSG) {
		ipc_space_t space = current_space();
		ipc_kmsg_t kmsg;

		mr = ipc_kmsg_get(msg_addr, send_size, &kmsg);

		if (mr != MACH_MSG_SUCCESS)
			return mr;

		mr = ipc_kmsg_copyin(kmsg, space, map, &option);

		if (mr != MACH_MSG_SUCCESS) {
			ipc_kmsg_free(kmsg);
			return mr;
		}

		mr = ipc_kmsg_send(kmsg, option, msg_timeout);

		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL);
			(void) ipc_kmsg_put(msg_addr, kmsg, kmsg->ikm_header->msgh_size);
			return mr;
		}

	}

	if (option & MACH_RCV_MSG) {
		thread_t self = current_thread();
		ipc_space_t space = current_space();
		ipc_object_t object;
		ipc_mqueue_t mqueue;

		mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
		if (mr != MACH_MSG_SUCCESS) {
			return mr;
		}
		/* hold ref for object */

		if (rcv_msg_addr != (mach_vm_address_t)0)
			self->ith_msg_addr = rcv_msg_addr;
		else
			self->ith_msg_addr = msg_addr;
		self->ith_object = object;
		self->ith_msize = rcv_size;
		self->ith_option = option;
		self->ith_receiver_name = MACH_PORT_NULL;
		self->ith_continuation = thread_syscall_return;

		ipc_mqueue_receive(mqueue, option, rcv_size, msg_timeout, THREAD_ABORTSAFE);
		if ((option & MACH_RCV_TIMEOUT) && msg_timeout == 0)
			thread_poll_yield(self);
		return mach_msg_receive_results();
	}

	return MACH_MSG_SUCCESS;
}
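This trap is the kernel end of the user-space mach_msg() / mach_msg_overwrite() calls. Below is a user-space sketch of a combined send-and-receive that would exercise both halves of the trap; the port arguments, message layout, msgh_id, and 1000 ms timeout are placeholders, and reply_port is assumed to be a receive right owned by the caller.

/* User-space sketch (hypothetical): one send plus a bounded receive. */
#include <stddef.h>
#include <string.h>
#include <mach/mach.h>

typedef struct {
	mach_msg_header_t	header;
	uint32_t		payload;
	mach_msg_trailer_t	trailer;	/* filled in by the kernel on receive */
} user_msg_t;

mach_msg_return_t
example_user_round_trip(mach_port_t server_port, mach_port_t reply_port)
{
	user_msg_t msg;

	memset(&msg, 0, sizeof(msg));
	msg.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
						     MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.header.msgh_size        = offsetof(user_msg_t, trailer);	/* trailer is not sent */
	msg.header.msgh_remote_port = server_port;
	msg.header.msgh_local_port  = reply_port;
	msg.header.msgh_id          = 0x1000;	/* arbitrary */
	msg.payload                 = 7;

	/* MACH_RCV_TIMEOUT bounds the wait for the reply; the send itself is untimed here. */
	return mach_msg(&msg.header,
			MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_TIMEOUT,
			msg.header.msgh_size,	/* send_size */
			sizeof(msg),		/* rcv_size: reply + trailer */
			reply_port,		/* rcv_name */
			1000,			/* timeout, ms */
			MACH_PORT_NULL);	/* notify (unused) */
}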
Example #9
mach_msg_return_t
mach_msg_send(
	mach_msg_header_t	*msg,
	mach_msg_option_t	option,
	mach_msg_size_t		send_size,
	mach_msg_timeout_t	send_timeout,
	__unused mach_port_name_t	notify)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;
	mach_msg_size_t	msg_and_trailer_size;
	mach_msg_max_trailer_t	*trailer;

	if ((send_size < sizeof(mach_msg_header_t)) || (send_size & 3))
		return MACH_SEND_MSG_TOO_SMALL;

	if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE)
		return MACH_SEND_TOO_LARGE;
	
	msg_and_trailer_size = send_size + MAX_TRAILER_SIZE;

	kmsg = ipc_kmsg_alloc(msg_and_trailer_size);

	if (kmsg == IKM_NULL)
		return MACH_SEND_NO_BUFFER;

	(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, send_size);

	kmsg->ikm_header->msgh_size = send_size;

	/* 
	 * Reserve for the trailer the largest space (MAX_TRAILER_SIZE).
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
	 * the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + send_size);
	trailer->msgh_sender = current_thread()->task->sec_token;
	trailer->msgh_audit = current_thread()->task->audit_token;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	mr = ipc_kmsg_copyin(kmsg, space, map, &option);

	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}

	mr = ipc_kmsg_send(kmsg, option, send_timeout);

	if (mr != MACH_MSG_SUCCESS) {
	    mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL);
	    (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, 
			  kmsg->ikm_header->msgh_size);
	    ipc_kmsg_free(kmsg);
	}

	return mr;
}