Example #1
kern_return_t
mach_port_space_basic_info(
	ipc_space_t			space,
	ipc_info_space_basic_t		*infop)
{
	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return KERN_INVALID_TASK;
	}

	/* get the basic space info */
	infop->iisb_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iisb_table_size = space->is_table_size;
	infop->iisb_table_next = space->is_table_next->its_size;
	infop->iisb_table_inuse = space->is_table_size - space->is_table_free - 1;
	infop->iisb_reserved[0] = 0;
	infop->iisb_reserved[1] = 0;

	is_read_unlock(space);

	return KERN_SUCCESS;
}
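For orientation, a minimal user-space sketch of how this routine is reached. It assumes the kernel exports the matching MIG routine in the mach_port subsystem and that ipc_info_space_basic_t comes from mach_debug/ipc_info.h; a task can inspect its own space through mach_task_self().

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach_debug/ipc_info.h>

int main(void)
{
	ipc_info_space_basic_t info;
	kern_return_t kr;

	/* inspect our own IPC space */
	kr = mach_port_space_basic_info(mach_task_self(), &info);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_space_basic_info: %s\n",
		        mach_error_string(kr));
		return 1;
	}
	printf("table size %u, next %u, in use %u\n",
	       info.iisb_table_size, info.iisb_table_next,
	       info.iisb_table_inuse);
	return 0;
}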
Example #2
kern_return_t
ipc_object_translate(
	ipc_space_t		space,
	mach_port_name_t	name,
	mach_port_right_t	right,
	ipc_object_t		*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	object = entry->ie_object;
	assert(object != IO_NULL);

	io_lock(object);
	is_read_unlock(space);

	*objectp = object;
	return KERN_SUCCESS;
}
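Note the contract: on success the translated object comes back locked and active, the space lock has already been dropped, and no reference has been taken on the caller's behalf. A hypothetical kernel-side caller (a sketch, not taken from the source) would use it like this:

	ipc_object_t object;
	kern_return_t kr;

	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
	if (kr == KERN_SUCCESS) {
		/* object is locked; no reference was donated to us */
		/* ... inspect the object while it cannot die ... */
		io_unlock(object);
	}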
Example #3
kern_return_t
mach_port_kobject(
	ipc_space_t			space,
	mach_port_name_t		name,
	natural_t			*typep,
	mach_vm_address_t		*addrp)
{
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;
	mach_vm_address_t kaddr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	__IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
	assert(port != IP_NULL);

	ip_lock(port);
	is_read_unlock(space);

	if (!ip_active(port)) {
		ip_unlock(port);
		return KERN_INVALID_RIGHT;
	}

	*typep = (unsigned int) ip_kotype(port);
	kaddr = (mach_vm_address_t)port->ip_kobject;
	ip_unlock(port);

#if (DEVELOPMENT || DEBUG)
	if (0 != kaddr && is_ipc_kobject(*typep))
		*addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr);
	else
#endif
		*addrp = 0;

	return KERN_SUCCESS;
}
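From user space this is the MIG routine behind port-inspection tools. A sketch, assuming the MIG-generated prototype is visible via <mach/mach.h>; as the #if above shows, the address comes back as 0 unless the kernel is a DEVELOPMENT or DEBUG build.

#include <stdio.h>
#include <mach/mach.h>

int main(void)
{
	mach_port_t name = mach_task_self();	/* any valid port name */
	natural_t kotype;
	mach_vm_address_t kaddr;

	if (mach_port_kobject(mach_task_self(), name,
	                      &kotype, &kaddr) != KERN_SUCCESS)
		return 1;
	printf("kotype %u, addr 0x%llx\n",
	       kotype, (unsigned long long)kaddr);
	return 0;
}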
Example #4
kern_return_t
ipc_object_translate_two(
	ipc_space_t		space,
	mach_port_name_t	name1,
	mach_port_right_t	right1,
	ipc_object_t		*objectp1,
	mach_port_name_t	name2,
	mach_port_right_t	right2,
	ipc_object_t		*objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object;
	kern_return_t kr;

	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	object = entry1->ie_object;
	assert(object != IO_NULL);
	io_lock(object);
	*objectp1 = object;

	object = entry2->ie_object;
	assert(object != IO_NULL);
	io_lock(object);
	*objectp2 = object;

	is_read_unlock(space);
	return KERN_SUCCESS;
}
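Both entries are validated under a single space read lock, and both objects are returned locked; the caller unlocks each one. A hypothetical kernel-side sketch (not from the source):

	ipc_object_t send_obj, rcv_obj;
	kern_return_t kr;

	kr = ipc_object_translate_two(space,
		send_name, MACH_PORT_RIGHT_SEND, &send_obj,
		rcv_name, MACH_PORT_RIGHT_RECEIVE, &rcv_obj);
	if (kr == KERN_SUCCESS) {
		/* both objects are locked; release in either order */
		io_unlock(send_obj);
		io_unlock(rcv_obj);
	}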
Example #5
kern_return_t
mach_port_kernel_object(
	ipc_space_t			space,
	mach_port_name_t		name,
	unsigned int			*typep,
	vm_offset_t			*addrp)
{
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	ip_lock(port);
	is_read_unlock(space);

	if (!ip_active(port)) {
		ip_unlock(port);
		return KERN_INVALID_RIGHT;
	}

	*typep = (unsigned int) ip_kotype(port);
	*addrp = (vm_offset_t) port->ip_kobject;
	ip_unlock(port);
	return KERN_SUCCESS;
}
Example #6
mach_msg_return_t
mach_msg_trap(
	mach_msg_header_t 	*msg,
	mach_msg_option_t 	option,
	mach_msg_size_t 	send_size,
	mach_msg_size_t 	rcv_size,
	mach_port_t 		rcv_name,
	mach_msg_timeout_t 	time_out,
	mach_port_t 		notify)
{
	mach_msg_return_t mr;

	/* first check for common cases */

	if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = self->task->itk_space;
		ipc_kmsg_t kmsg;
		ipc_port_t dest_port;
		ipc_object_t rcv_object;
		ipc_mqueue_t rcv_mqueue;
		mach_msg_size_t reply_size;

		/*
		 *	This case is divided into ten sections, each
		 *	with a label.  There are five optimized
		 *	sections and six unoptimized sections, which
		 *	do the same thing but handle all possible
		 *	cases and are slower.
		 *
		 *	The five sections for an RPC are
		 *	    1) Get request message into a buffer.
		 *		(fast_get or slow_get)
		 *	    2) Copyin request message and rcv_name.
		 *		(fast_copyin or slow_copyin)
		 *	    3) Enqueue request and dequeue reply.
		 *		(fast_send_receive or
		 *		 slow_send and slow_receive)
		 *	    4) Copyout reply message.
		 *		(fast_copyout or slow_copyout)
		 *	    5) Put reply message to user's buffer.
		 *		(fast_put or slow_put)
		 *
		 *	Keep the locking hierarchy firmly in mind.
		 *	(First spaces, then ports, then port sets,
		 *	then message queues.)  Only a non-blocking
		 *	attempt can be made to acquire locks out of
		 *	order, or acquire two locks on the same level.
		 *	Acquiring two locks on the same level will
		 *	fail if the objects are really the same,
		 *	unless simple locking is disabled.  This is OK,
		 *	because then the extra unlock does nothing.
		 *
		 *	There are two major reasons these RPCs can't use
		 *	ipc_thread_switch, and use slow_send/slow_receive:
		 *		1) Kernel RPCs.
		 *		2) Servers fall behind clients, so
		 *		client doesn't find a blocked server thread and
		 *		server finds waiting messages and can't block.
		 */

	/*
	    fast_get:
	*/
		/*
		 *	optimized ipc_kmsg_get
		 *
		 *	No locks, references, or messages held.
		 *	We must clear ikm_cache before copyinmsg.
		 */

		if ((send_size > IKM_SAVED_MSG_SIZE) ||
		    (send_size < sizeof(mach_msg_header_t)) ||
		    (send_size & 3) ||
		    ((kmsg = ikm_cache()) == IKM_NULL))
			goto slow_get;

		ikm_cache() = IKM_NULL;
		ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);

		if (copyinmsg(msg, &kmsg->ikm_header,
			      send_size)) {
			ikm_free(kmsg);
			goto slow_get;
		}

		kmsg->ikm_header.msgh_size = send_size;

	    fast_copyin:
		/*
		 *	optimized ipc_kmsg_copyin/ipc_mqueue_copyin
		 *
		 *	We have the request message data in kmsg.
		 *	Must still do copyin, send, receive, etc.
		 *
		 *	If the message isn't simple, we can't combine
		 *	ipc_kmsg_copyin_header and ipc_mqueue_copyin,
		 *	because copyin of the message body might
		 *	affect rcv_name.
		 */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
					MACH_MSG_TYPE_MAKE_SEND_ONCE): {
			ipc_entry_t table;
			ipc_entry_num_t size;
			ipc_port_t reply_port;

			/* sending a request message */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != rcv_name)
				goto slow_copyin;

			/* optimized ipc_entry_lookup of reply_name */

			index = MACH_PORT_INDEX(reply_name);
			gen = MACH_PORT_GEN(reply_name);
		    }

			is_read_lock(space);
			assert(space->is_active);

			size = space->is_table_size;
			table = space->is_table;

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|
				     MACH_PORT_TYPE_RECEIVE)) !=
			    (gen | MACH_PORT_TYPE_RECEIVE))
				goto abort_request_copyin;

			reply_port = (ipc_port_t) entry->ie_object;
			assert(reply_port != IP_NULL);
		    }
		    }

			/* optimized ipc_entry_lookup of dest_name */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
			    (gen | MACH_PORT_TYPE_SEND))
				goto abort_request_copyin;

			assert(IE_BITS_UREFS(bits) > 0);

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);
		    }
		    }

			/*
			 *	To do an atomic copyin, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port)) {
				ip_unlock(dest_port);
				goto abort_request_copyin;
			}
			is_read_unlock(space);

			assert(dest_port->ip_srights > 0);
			dest_port->ip_srights++;
			ip_reference(dest_port);

			assert(ip_active(reply_port));
			assert(reply_port->ip_receiver_name ==
			       kmsg->ikm_header.msgh_local_port);
			assert(reply_port->ip_receiver == space);

			reply_port->ip_sorights++;
			ip_reference(reply_port);

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					       MACH_MSG_TYPE_PORT_SEND_ONCE);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;
			kmsg->ikm_header.msgh_local_port =
					(mach_port_t) reply_port;

			/* make sure we can queue to the destination */

			if (dest_port->ip_receiver == ipc_space_kernel) {
				/*
				 * The kernel server has a reference to
				 * the reply port, which it hands back
				 * to us in the reply message.  We do
				 * not need to keep another reference to
				 * it.
				 */
				ip_unlock(reply_port);

				assert(ip_active(dest_port));
				ip_unlock(dest_port);
				goto kernel_send;
			}

			if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
				goto abort_request_send_receive;

			/* optimized ipc_mqueue_copyin */

			if (reply_port->ip_pset != IPS_NULL)
				goto abort_request_send_receive;

			rcv_object = (ipc_object_t) reply_port;
			io_reference(rcv_object);
			rcv_mqueue = &reply_port->ip_messages;
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_request_copyin:
			is_read_unlock(space);
			goto slow_copyin;

		    abort_request_send_receive:
			ip_unlock(dest_port);
			ip_unlock(reply_port);
			goto slow_send;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
			ipc_entry_num_t size;
			ipc_entry_t table;

			/* sending a reply message */

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != MACH_PORT_NULL)
				goto slow_copyin;
		    }

			is_write_lock(space);
			assert(space->is_active);

			/* optimized ipc_entry_lookup */

			size = space->is_table_size;
			table = space->is_table;

		    {
			ipc_entry_t entry;
			mach_port_gen_t gen;
			mach_port_index_t index;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_reply_dest_copyin;

			entry = &table[index];

			/* check generation, collision bit, and type bit */

			if ((entry->ie_bits & (IE_BITS_GEN_MASK|
					       IE_BITS_COLLISION|
					       MACH_PORT_TYPE_SEND_ONCE)) !=
			    (gen | MACH_PORT_TYPE_SEND_ONCE))
				goto abort_reply_dest_copyin;

			/* optimized ipc_right_copyin */

			assert(IE_BITS_TYPE(entry->ie_bits) ==
						MACH_PORT_TYPE_SEND_ONCE);
			assert(IE_BITS_UREFS(entry->ie_bits) == 1);
			assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);

			if (entry->ie_request != 0)
				goto abort_reply_dest_copyin;

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);

			ip_lock(dest_port);
			if (!ip_active(dest_port)) {
				ip_unlock(dest_port);
				goto abort_reply_dest_copyin;
			}

			assert(dest_port->ip_sorights > 0);

			/* optimized ipc_entry_dealloc */

			entry->ie_next = table->ie_next;
			table->ie_next = index;
			entry->ie_bits = gen;
			entry->ie_object = IO_NULL;
		    }

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
					       0);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;

			/* make sure we can queue to the destination */

			assert(dest_port->ip_receiver != ipc_space_kernel);

			/* optimized ipc_entry_lookup/ipc_mqueue_copyin */

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

			index = MACH_PORT_INDEX(rcv_name);
			gen = MACH_PORT_GEN(rcv_name);

			if (index >= size)
				goto abort_reply_rcv_copyin;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number */

			if ((bits & IE_BITS_GEN_MASK) != gen)
				goto abort_reply_rcv_copyin;
		    }

			/* check type bits; looking for receive or set */

			if (bits & MACH_PORT_TYPE_PORT_SET) {
				ipc_pset_t rcv_pset;

				rcv_pset = (ipc_pset_t) entry->ie_object;
				assert(rcv_pset != IPS_NULL);

				ips_lock(rcv_pset);
				assert(ips_active(rcv_pset));

				rcv_object = (ipc_object_t) rcv_pset;
				rcv_mqueue = &rcv_pset->ips_messages;
			} else if (bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t rcv_port;

				rcv_port = (ipc_port_t) entry->ie_object;
				assert(rcv_port != IP_NULL);

				if (!ip_lock_try(rcv_port))
					goto abort_reply_rcv_copyin;
				assert(ip_active(rcv_port));

				if (rcv_port->ip_pset != IPS_NULL) {
					ip_unlock(rcv_port);
					goto abort_reply_rcv_copyin;
				}

				rcv_object = (ipc_object_t) rcv_port;
				rcv_mqueue = &rcv_port->ip_messages;
			} else
				goto abort_reply_rcv_copyin;
		    }

			is_write_unlock(space);
			io_reference(rcv_object);
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_reply_dest_copyin:
			is_write_unlock(space);
			goto slow_copyin;

		    abort_reply_rcv_copyin:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_send;
		    }

		    default:
			goto slow_copyin;
		}
		/*NOTREACHED*/

	    fast_send_receive:
		/*
		 *	optimized ipc_mqueue_send/ipc_mqueue_receive
		 *
		 *	Finished get/copyin of kmsg and copyin of rcv_name.
		 *	space is unlocked, dest_port is locked,
		 *	we can queue kmsg to dest_port,
		 *	rcv_mqueue is locked, rcv_object holds a ref,
		 *	if rcv_object is a port it isn't in a port set
		 *
		 *	Note that if simple locking is turned off,
		 *	then we could have dest_mqueue == rcv_mqueue
		 *	and not abort when we try to lock dest_mqueue.
		 */

		assert(ip_active(dest_port));
		assert(dest_port->ip_receiver != ipc_space_kernel);
		assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		       (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
						MACH_MSG_TYPE_PORT_SEND_ONCE));
		assert((kmsg->ikm_header.msgh_bits &
						MACH_MSGH_BITS_CIRCULAR) == 0);

	    {
		ipc_mqueue_t dest_mqueue;
		ipc_thread_t receiver;

	    {
		ipc_pset_t dest_pset;

		dest_pset = dest_port->ip_pset;
		if (dest_pset == IPS_NULL)
			dest_mqueue = &dest_port->ip_messages;
		else
			dest_mqueue = &dest_pset->ips_messages;
	    }

		if (!imq_lock_try(dest_mqueue)) {
		    abort_send_receive:
			ip_unlock(dest_port);
			imq_unlock(rcv_mqueue);
			ipc_object_release(rcv_object);
			goto slow_send;
		}

		receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
		if ((receiver == ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
								!= IKM_NULL)) {
			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}

		/*
		 *	There is a receiver thread waiting, and
		 *	there is no reply message for us to pick up.
		 *	We have hope of hand-off, so save state.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
		    thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			/*
			 *	We can use the optimized receive code,
			 *	because the receiver is using no options.
			 */
		} else if ((receiver->swap_func ==
				(void (*)()) exception_raise_continue) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			counter(c_mach_msg_trap_block_exc++);
			assert(current_thread() == receiver);

			/*
			 *	We are a reply message coming back through
			 *	the optimized exception-handling path.
			 *	Finish with rcv_mqueue and dest_mqueue,
			 *	and then jump to exception code with
			 *	dest_port still locked.  We don't bother
			 *	with a sequence number in this case.
			 */

			ipc_thread_enqueue_macro(
				&rcv_mqueue->imq_threads, self);
			self->ith_state = MACH_RCV_IN_PROGRESS;
			self->ith_msize = MACH_MSG_SIZE_MAX;
			imq_unlock(rcv_mqueue);

			ipc_thread_rmqueue_first_macro(
				&dest_mqueue->imq_threads, receiver);
			imq_unlock(dest_mqueue);

			exception_raise_continue_fast(dest_port, kmsg);
			/*NOTREACHED*/
			return MACH_MSG_SUCCESS;
		} else if ((send_size <= receiver->ith_msize) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			if ((receiver->swap_func ==
				(void (*)()) mach_msg_receive_continue) &&
			    ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
				/*
				 *	We can still use the optimized code.
				 */
			} else {
				counter(c_mach_msg_trap_block_slow++);
				/*
				 *	We are running as the receiver,
				 *	but we can't use the optimized code.
				 *	Finish send/receive processing.
				 */

				dest_port->ip_msgcount++;
				ip_unlock(dest_port);

				ipc_thread_enqueue_macro(
					&rcv_mqueue->imq_threads, self);
				self->ith_state = MACH_RCV_IN_PROGRESS;
				self->ith_msize = MACH_MSG_SIZE_MAX;
				imq_unlock(rcv_mqueue);

				ipc_thread_rmqueue_first_macro(
					&dest_mqueue->imq_threads, receiver);
				receiver->ith_state = MACH_MSG_SUCCESS;
				receiver->ith_kmsg = kmsg;
				receiver->ith_seqno = dest_port->ip_seqno++;
				imq_unlock(dest_mqueue);

				/*
				 *	Call the receiver's continuation.
				 */

				receiver->wait_result = THREAD_AWAKENED;
				(*receiver->swap_func)();
				/*NOTREACHED*/
				return MACH_MSG_SUCCESS;
			}
		} else {
			/*
			 *	The receiver can't accept the message,
			 *	or we can't switch to the receiver.
			 */

			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}
		counter(c_mach_msg_trap_block_fast++);

		/*
		 *	Safe to unlock dest_port now that we are
		 *	committed to this path, because we hold
		 *	dest_mqueue locked.  We never bother changing
		 *	dest_port->ip_msgcount.
		 */

		ip_unlock(dest_port);

		/*
		 *	We need to finish preparing self for its
		 *	time asleep in rcv_mqueue.
		 */

		ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
		self->ith_state = MACH_RCV_IN_PROGRESS;
		self->ith_msize = MACH_MSG_SIZE_MAX;
		imq_unlock(rcv_mqueue);

		/*
		 *	Finish extracting receiver from dest_mqueue.
		 */

		ipc_thread_rmqueue_first_macro(
			&dest_mqueue->imq_threads, receiver);
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(dest_mqueue);

		/*
		 *	We don't have to do any post-dequeue processing of
		 *	the message.  We never incremented ip_msgcount, we
		 *	know it has no msg-accepted request, and blocked
		 *	senders aren't a worry because we found the port
		 *	with a receiver waiting.
		 */

		self = receiver;
		space = self->task->itk_space;

		msg = self->ith_msg;
		rcv_size = self->ith_rcv_size;
		rcv_object = self->ith_object;

		/* inline ipc_object_release */
		io_lock(rcv_object);
		io_release(rcv_object);
		io_check_unlock(rcv_object);
	    }

	    fast_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
						== dest_port);

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size)
			goto slow_copyout;

		/* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					MACH_MSG_TYPE_PORT_SEND_ONCE): {
			ipc_port_t reply_port =
				(ipc_port_t) kmsg->ikm_header.msgh_local_port;
			mach_port_t dest_name, reply_name;

			/* receiving a request message */

			if (!IP_VALID(reply_port))
				goto slow_copyout;

			is_write_lock(space);
			assert(space->is_active);

			/*
			 *	To do an atomic copyout, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port))
				goto abort_request_copyout;

			if (!ip_active(reply_port)) {
				ip_unlock(reply_port);
				goto abort_request_copyout;
			}

			assert(reply_port->ip_sorights > 0);
			ip_unlock(reply_port);

		    {
			ipc_entry_t table;
			ipc_entry_t entry;
			mach_port_index_t index;

			/* optimized ipc_entry_get */

			table = space->is_table;
			index = table->ie_next;

			if (index == 0)
				goto abort_request_copyout;

			entry = &table[index];
			table->ie_next = entry->ie_next;
			entry->ie_request = 0;

		    {
			mach_port_gen_t gen;

			assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
			gen = entry->ie_bits + IE_BITS_GEN_ONE;

			reply_name = MACH_PORT_MAKE(index, gen);

			/* optimized ipc_right_copyout */

			entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
		    }

			assert(MACH_PORT_VALID(reply_name));
			entry->ie_object = (ipc_object_t) reply_port;
			is_write_unlock(space);
		    }

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_srights > 0);
			ip_release(dest_port);

			if (dest_port->ip_receiver == space)
				dest_name = dest_port->ip_receiver_name;
			else
				dest_name = MACH_PORT_NULL;

			if ((--dest_port->ip_srights == 0) &&
			    (dest_port->ip_nsrequest != IP_NULL)) {
				ipc_port_t nsrequest;
				mach_port_mscount_t mscount;

				/* a rather rare case */

				nsrequest = dest_port->ip_nsrequest;
				mscount = dest_port->ip_mscount;
				dest_port->ip_nsrequest = IP_NULL;
				ip_unlock(dest_port);

				ipc_notify_no_senders(nsrequest, mscount);
			} else
				ip_unlock(dest_port);

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PORT_SEND);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = reply_name;
			goto fast_put;

		    abort_request_copyout:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_copyout;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
			goto fast_put;
		    }

		    case MACH_MSGH_BITS_COMPLEX|
			 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a complex reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
						0,
						MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
					    0,
					    MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;

			mr = ipc_kmsg_copyout_body(
				(vm_offset_t) (&kmsg->ikm_header + 1),
				(vm_offset_t) &kmsg->ikm_header
					+ kmsg->ikm_header.msgh_size,
				space,
				current_map());

			if (mr != MACH_MSG_SUCCESS) {
				(void) ipc_kmsg_put(msg, kmsg,
					kmsg->ikm_header.msgh_size);
				return mr | MACH_RCV_BODY_ERROR;
			}
			goto fast_put;
		    }

		    default:
			goto slow_copyout;
		}
		/*NOTREACHED*/

	    fast_put:
		/*
		 *	We have the reply message data in kmsg,
		 *	and the reply message size in reply_size.
		 *	Just need to copy it out to the user and free kmsg.
		 *	We must check ikm_cache after copyoutmsg.
		 */

		ikm_check_initialized(kmsg, kmsg->ikm_size);

		if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
		    copyoutmsg(&kmsg->ikm_header, msg,
			       reply_size) ||
		    (ikm_cache() != IKM_NULL))
			goto slow_put;

		ikm_cache() = kmsg;
		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
		return MACH_MSG_SUCCESS; /* help for the compiler */

		/*
		 *	The slow path has a few non-register temporary
		 *	variables used only for call-by-reference.
		 */

	    {
		ipc_kmsg_t temp_kmsg;
		mach_port_seqno_t temp_seqno;
		ipc_object_t temp_rcv_object;
		ipc_mqueue_t temp_rcv_mqueue;

	    slow_get:
		/*
		 *	No locks, references, or messages held.
		 *	Still have to get the request, send it,
		 *	receive reply, etc.
		 */

		mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		kmsg = temp_kmsg;

		/* try to get back on optimized path */
		goto fast_copyin;

	    slow_copyin:
		/*
		 *	We have the message data in kmsg, but
		 *	we still need to copyin, send it,
		 *	receive a reply, and do copyout.
		 */

		mr = ipc_kmsg_copyin(kmsg, space, current_map(),
				     MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */

		if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
			goto slow_send;

		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		assert(IP_VALID(dest_port));

		ip_lock(dest_port);
		if (dest_port->ip_receiver == ipc_space_kernel) {
			assert(ip_active(dest_port));
			ip_unlock(dest_port);
			goto kernel_send;
		}

		if (ip_active(dest_port) &&
		    ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		     (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
					MACH_MSG_TYPE_PORT_SEND_ONCE)))
		{
		    /*
		     *	Try an optimized ipc_mqueue_copyin.
		     *	It will work if this is a request message.
		     */

		    ipc_port_t reply_port;

		    reply_port = (ipc_port_t)
					kmsg->ikm_header.msgh_local_port;
		    if (IP_VALID(reply_port)) {
			if (ip_lock_try(reply_port)) {
			    if (ip_active(reply_port) &&
				reply_port->ip_receiver == space &&
				reply_port->ip_receiver_name == rcv_name &&
				reply_port->ip_pset == IPS_NULL)
			    {
				/* Grab a reference to the reply port. */
				rcv_object = (ipc_object_t) reply_port;
				io_reference(rcv_object);
				rcv_mqueue = &reply_port->ip_messages;
				imq_lock(rcv_mqueue);
				io_unlock(rcv_object);
				goto fast_send_receive;
			    }
			    ip_unlock(reply_port);
			}
		    }
		}

		ip_unlock(dest_port);
		goto slow_send;

	    kernel_send:
		/*
		 *	Special case: send message to kernel services.
		 *	The request message has been copied into the
		 *	kmsg.  Nothing is locked.
		 */

	    {
		ipc_port_t	reply_port;

		/*
		 * Perform the kernel function.
		 */

		kmsg = ipc_kobject_server(kmsg);
		if (kmsg == IKM_NULL) {
			/*
			 * No reply.  Take the
			 * slow receive path.
			 */
			goto slow_get_rcv_port;
		}

		/*
		 * Check that:
		 *	the reply port is alive
		 *	we hold the receive right
		 *	the name has not changed.
		 *	the port is not in a set
		 * If any of these are not true,
		 * we cannot directly receive the reply
		 * message.
		 */
		reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		ip_lock(reply_port);

		if ((!ip_active(reply_port)) ||
		    (reply_port->ip_receiver != space) ||
		    (reply_port->ip_receiver_name != rcv_name) ||
		    (reply_port->ip_pset != IPS_NULL))
		{
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		rcv_mqueue = &reply_port->ip_messages;
		imq_lock(rcv_mqueue);
		/* keep port locked, and don`t change ref count yet */

		/*
		 * If there are messages on the port
		 * or other threads waiting for a message,
		 * we cannot directly receive the reply.
		 */
		if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
			!= ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
			!= IKM_NULL))
		{
			imq_unlock(rcv_mqueue);
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		/*
		 * We can directly receive this reply.
		 * Since the kernel reply never blocks,
		 * it holds no message_accepted request.
		 * Since there were no messages queued
		 * on the reply port, there should be
		 * no threads blocked waiting to send.
		 */

		assert(kmsg->ikm_marequest == IMAR_NULL);
		assert(ipc_thread_queue_first(&reply_port->ip_blocked)
				== ITH_NULL);

		dest_port = reply_port;
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(rcv_mqueue);

		/*
		 * inline ipc_object_release.
		 * Port is still locked.
		 * Reference count was not incremented.
		 */
		ip_check_unlock(reply_port);

		/* copy out the kernel reply */
		goto fast_copyout;
	    }

	    slow_send:
		/*
		 *	Nothing is locked.  We have acquired kmsg, but
		 *	we still need to send it and receive a reply.
		 */

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
						      current_map());

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

	    slow_get_rcv_port:
		/*
		 * We have sent the message.  Copy in the receive port.
		 */
		mr = ipc_mqueue_copyin(space, rcv_name,
				       &temp_rcv_mqueue, &temp_rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		rcv_mqueue = temp_rcv_mqueue;
		rcv_object = temp_rcv_object;
		/* hold ref for rcv_object; rcv_mqueue is locked */

	/*
	    slow_receive:
	*/
		/*
		 *	Now we have sent the request and copied in rcv_name,
		 *	so rcv_mqueue is locked and hold ref for rcv_object.
		 *	Just receive a reply and try to get back to fast path.
		 *
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		mr = ipc_mqueue_receive(rcv_mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
		       			&temp_kmsg, &temp_seqno);
		/* rcv_mqueue is unlocked */
		ipc_object_release(rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		(kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		goto fast_copyout;

	    slow_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			thread_syscall_return(MACH_RCV_TOO_LARGE);
			/*NOTREACHED*/
		}

		mr = ipc_kmsg_copyout(kmsg, space, current_map(),
				      MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */

		goto fast_put;

	    slow_put:
		mr = ipc_kmsg_put(msg, kmsg, reply_size);
		thread_syscall_return(mr);
		/*NOTREACHED*/
	    }
	} else if (option == MACH_SEND_MSG) {
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_kmsg_t kmsg;

		mr = ipc_kmsg_get(msg, send_size, &kmsg);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			return mr;
		}

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
		}

		return mr;
	} else if (option == MACH_RCV_MSG) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_object_t object;
		ipc_mqueue_t mqueue;
		ipc_kmsg_t kmsg;
		mach_port_seqno_t seqno;

		mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
		/* hold ref for object; mqueue is locked */

		/*
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = object;
		self->ith_mqueue = mqueue;

		mr = ipc_mqueue_receive(mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		kmsg->ikm_header.msgh_seqno = seqno;
		if (rcv_size < kmsg->ikm_header.msgh_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			return mr;
		}

		return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	} else if (option == MACH_MSG_OPTION_NONE) {
		/*
		 *	We can measure the "null mach_msg_trap"
		 *	(syscall entry and thread_syscall_return exit)
		 *	with this path.
		 */

		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
	}

	if (option & MACH_SEND_MSG) {
		mr = mach_msg_send(msg, option, send_size,
				   time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	if (option & MACH_RCV_MSG) {
		mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
				      time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	return MACH_MSG_SUCCESS;
}
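Everything above is reached through the ordinary mach_msg() library stub, which traps with both MACH_SEND_MSG and MACH_RCV_MSG set for an RPC. A minimal user-space sketch of such a round trip follows; server_port is a placeholder for a send right obtained elsewhere, and the request is header-only. Note how it sets up exactly the pattern the fast path optimizes: a copied send right to the destination, a make-send-once reply right, and a reply port equal to rcv_name.

#include <mach/mach.h>

/* placeholder: a send right obtained elsewhere (e.g. a name lookup) */
extern mach_port_t server_port;

mach_msg_return_t
rpc_roundtrip(void)
{
	mach_port_t reply_port;
	struct {
		mach_msg_header_t header;
		char body[64];		/* room for the reply */
	} msg;

	/* a receive right for the reply */
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
	                   &reply_port);

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
	                                      MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.header.msgh_remote_port = server_port;
	msg.header.msgh_local_port = reply_port;	/* == rcv_name below */
	msg.header.msgh_size = sizeof msg.header;
	msg.header.msgh_id = 1000;	/* arbitrary request id */

	return mach_msg(&msg.header, MACH_SEND_MSG | MACH_RCV_MSG,
	                sizeof msg.header, sizeof msg, reply_port,
	                MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}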
Example #7
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	ipc_info_tree_name_array_t	*treep,
	mach_msg_type_number_t 		*treeCntp)
{
	ipc_info_name_t *table_info;
	unsigned int table_potential, table_actual;
	vm_offset_t table_addr;
	vm_size_t table_size = 0;	/* Suppress gcc warning */
	ipc_info_tree_name_t *tree_info;
	unsigned int tree_potential, tree_actual;
	vm_offset_t tree_addr;
	vm_size_t tree_size = 0;	/* Suppress gcc warning */
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	/* start with in-line memory */

	table_info = *tablep;
	table_potential = *tableCntp;
	tree_info = *treep;
	tree_potential = *treeCntp;

	for (;;) {
		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (table_info != *tablep)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			if (tree_info != *treep)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);
			return KERN_INVALID_TASK;
		}

		table_actual = space->is_table_size;
		tree_actual = space->is_tree_total;

		if ((table_actual <= table_potential) &&
		    (tree_actual <= tree_potential))
			break;

		is_read_unlock(space);

		if (table_actual > table_potential) {
			if (table_info != *tablep)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);

			table_size = round_page(table_actual *
						sizeof *table_info);
			kr = kmem_alloc(ipc_kernel_map,
					&table_addr, table_size);
			if (kr != KERN_SUCCESS) {
				if (tree_info != *treep)
					kmem_free(ipc_kernel_map,
						  tree_addr, tree_size);

				return KERN_RESOURCE_SHORTAGE;
			}

			table_info = (ipc_info_name_t *) table_addr;
			table_potential = table_size/sizeof *table_info;
		}

		if (tree_actual > tree_potential) {
			if (tree_info != *treep)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);

			tree_size = round_page(tree_actual *
					       sizeof *tree_info);
			kr = kmem_alloc(ipc_kernel_map,
					&tree_addr, tree_size);
			if (kr != KERN_SUCCESS) {
				if (table_info != *tablep)
					kmem_free(ipc_kernel_map,
						  table_addr, table_size);

				return KERN_RESOURCE_SHORTAGE;
			}

			tree_info = (ipc_info_tree_name_t *) tree_addr;
			tree_potential = tree_size/sizeof *tree_info;
		}
	}
	/* space is read-locked and active; we have enough wired memory */

	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;
	infop->iis_tree_size = space->is_tree_total;
	infop->iis_tree_small = space->is_tree_small;
	infop->iis_tree_hash = space->is_tree_hash;

	table = space->is_table;
	tsize = space->is_table_size;

	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits = entry->ie_bits;

		iin->iin_name = MACH_PORT_MAKEB(index, bits);
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_compat = FALSE;
		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_info_tree_name_t *iitn = &tree_info[index++];
		ipc_info_name_t *iin = &iitn->iitn_name;
		ipc_entry_t entry = &tentry->ite_entry;
		ipc_entry_bits_t bits = entry->ie_bits;

		assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

		iin->iin_name = tentry->ite_name;
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_compat = FALSE;
		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;

		if (tentry->ite_lchild == ITE_NULL)
			iitn->iitn_lchild = MACH_PORT_NULL;
		else
			iitn->iitn_lchild = tentry->ite_lchild->ite_name;

		if (tentry->ite_rchild == ITE_NULL)
			iitn->iitn_rchild = MACH_PORT_NULL;
		else
			iitn->iitn_rchild = tentry->ite_rchild->ite_name;

	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	if (table_info == *tablep) {
		/* data fit in-line; nothing to deallocate */

		*tableCntp = table_actual;
	} else if (table_actual == 0) {
		kmem_free(ipc_kernel_map, table_addr, table_size);

		*tableCntp = 0;
	} else {
		vm_size_t size_used, rsize_used;
		vm_map_copy_t copy;

		/* kmem_alloc doesn't zero memory */

		size_used = table_actual * sizeof *table_info;
		rsize_used = round_page(size_used);

		if (rsize_used != table_size)
			kmem_free(ipc_kernel_map,
				  table_addr + rsize_used,
				  table_size - rsize_used);

		if (size_used != rsize_used)
			memset((void *) (table_addr + size_used), 0,
			      rsize_used - size_used);

		kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
				   TRUE, &copy);

		assert(kr == KERN_SUCCESS);

		*tablep = (ipc_info_name_t *) copy;
		*tableCntp = table_actual;
	}

	if (tree_info == *treep) {
		/* data fit in-line; nothing to deallocate */

		*treeCntp = tree_actual;
	} else if (tree_actual == 0) {
		kmem_free(ipc_kernel_map, tree_addr, tree_size);

		*treeCntp = 0;
	} else {
		vm_size_t size_used, rsize_used;
		vm_map_copy_t copy;

		/* kmem_alloc doesn't zero memory */

		size_used = tree_actual * sizeof *tree_info;
		rsize_used = round_page(size_used);

		if (rsize_used != tree_size)
			kmem_free(ipc_kernel_map,
				  tree_addr + rsize_used,
				  tree_size - rsize_used);

		if (size_used != rsize_used)
			memset((void *) (tree_addr + size_used), 0,
			      rsize_used - size_used);

		kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
				   TRUE, &copy);

		assert(kr == KERN_SUCCESS);

		*treep = (ipc_info_tree_name_t *) copy;
		*treeCntp = tree_actual;
	}

	return KERN_SUCCESS;
}
Example #8
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	__unused ipc_info_tree_name_array_t	*treep,
	__unused mach_msg_type_number_t         *treeCntp)
{
	ipc_info_name_t *table_info;
	vm_offset_t table_addr;
	vm_size_t table_size, table_size_needed;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;
	vm_map_copy_t copy;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

#if !(DEVELOPMENT | DEBUG)
	const boolean_t dbg_ok = (mac_task_check_expose_task(kernel_task) == 0);
#else
	const boolean_t dbg_ok = TRUE;
#endif

	/* start with in-line memory */

	table_size = 0;

	for (;;) {
		is_read_lock(space);
		if (!is_active(space)) {
			is_read_unlock(space);
			if (table_size != 0)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			return KERN_INVALID_TASK;
		}

		table_size_needed =
			vm_map_round_page((space->is_table_size
					   * sizeof(ipc_info_name_t)),
					  VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (table_size_needed == table_size)
			break;

		is_read_unlock(space);

		if (table_size != table_size_needed) {
			if (table_size != 0)
				kmem_free(ipc_kernel_map, table_addr, table_size);
			kr = kmem_alloc(ipc_kernel_map,	&table_addr, table_size_needed, VM_KERN_MEMORY_IPC);
			if (kr != KERN_SUCCESS) {
				return KERN_RESOURCE_SHORTAGE;
			}
			table_size = table_size_needed;
		}

	}
	/* space is read-locked and active; we have enough wired memory */

	/* get the overall space info */
	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;

	/* walk the table for this space */
	table = space->is_table;
	tsize = space->is_table_size;
	table_info = (ipc_info_name_array_t)table_addr;
	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits;

		bits = entry->ie_bits;
		iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
		iin->iin_collision = 0;
		iin->iin_type = IE_BITS_TYPE(bits);
		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) != MACH_PORT_TYPE_NONE &&
		    entry->ie_request != IE_REQ_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t) entry->ie_object);

			assert(IP_VALID(port));
			ip_lock(port);
			iin->iin_type |= ipc_port_request_type(port, iin->iin_name, entry->ie_request);
			ip_unlock(port);
		}

		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (dbg_ok) ? (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object) : 0;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	is_read_unlock(space);

	/* prepare the table out-of-line data for return */
	if (table_size > 0) {
		vm_size_t used_table_size;

		used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
		if (table_size > used_table_size)
			bzero((char *)&table_info[infop->iis_table_size],
			      table_size - used_table_size);

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(table_addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(table_addr + table_size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, 
				   (vm_map_size_t)used_table_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*tablep = (ipc_info_name_t *)copy;
		*tableCntp = infop->iis_table_size;
	} else {
		*tablep = (ipc_info_name_t *)0;
		*tableCntp = 0;
	}

	/* splay tree is obsolete, no work to do... */
	*treep = (ipc_info_tree_name_t *)0;
	*treeCntp = 0;
	return KERN_SUCCESS;
}
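The user-space view of the same routine, via MIG. A sketch, assuming the types from mach_debug/ipc_info.h; on this kernel the tree array always comes back empty (the splay tree is gone), and the out-of-line table is the caller's to vm_deallocate.

#include <stdio.h>
#include <mach/mach.h>
#include <mach_debug/ipc_info.h>

int main(void)
{
	ipc_info_space_t sinfo;
	ipc_info_name_array_t table;
	ipc_info_tree_name_array_t tree;
	mach_msg_type_number_t tableCnt, treeCnt;

	if (mach_port_space_info(mach_task_self(), &sinfo, &table,
	                         &tableCnt, &tree, &treeCnt) != KERN_SUCCESS)
		return 1;

	printf("%u entries (tree count %u, always 0 here)\n",
	       tableCnt, treeCnt);

	/* the table arrives as out-of-line VM; give it back */
	if (tableCnt > 0)
		vm_deallocate(mach_task_self(), (vm_address_t)table,
		              tableCnt * sizeof(*table));
	return 0;
}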
Example #9
File: ipc_space.c  Project: ctos/bpi
void
ipc_space_destroy(
	ipc_space_t	space)
{
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;
	boolean_t active;

	assert(space != IS_NULL);

	is_write_lock(space);
	active = space->is_active;
	space->is_active = FALSE;
	is_write_unlock(space);

	if (!active)
		return;

	/*
	 *	If somebody is trying to grow the table,
	 *	we must wait until they finish and figure
	 *	out the space died.
	 */

	is_read_lock(space);
	while (space->is_growing) {
		assert_wait((event_t) space, FALSE);
		is_read_unlock(space);
		thread_block((void (*)(void)) 0);
		is_read_lock(space);
	}
	is_read_unlock(space);

	/*
	 *	Now we can futz with it	without having it locked.
	 */

	table = space->is_table;
	size = space->is_table_size;

	for (index = 0; index < size; index++) {
		ipc_entry_t entry = &table[index];
		mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);

		if (type != MACH_PORT_TYPE_NONE) {
			mach_port_t name =
				MACH_PORT_MAKEB(index, entry->ie_bits);

			ipc_right_clean(space, name, entry);
		}
	}

	it_entries_free(space->is_table_next-1, table);

	for (tentry = ipc_splay_traverse_start(&space->is_tree);
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
		mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
		mach_port_t name = tentry->ite_name;

		assert(type != MACH_PORT_TYPE_NONE);

		/* use object before ipc_right_clean releases ref */

		if (type == MACH_PORT_TYPE_SEND)
			ipc_hash_global_delete(space, tentry->ite_object,
					       name, tentry);

		ipc_right_clean(space, name, &tentry->ite_entry);
	}
	ipc_splay_traverse_finish(&space->is_tree);

	/*
	 *	Because the space is now dead,
	 *	we must release the "active" reference for it.
	 *	Our caller still has his reference.
	 */

	is_release(space);
}
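A hypothetical sketch of the calling convention (not taken from this project's source): a teardown path marks the space dead via ipc_space_destroy(), which releases the "active" reference, and then drops its own reference separately.

	/* hypothetical task-teardown fragment */
	ipc_space_t space = task->itk_space;

	ipc_space_destroy(space);	/* drops the "active" reference */
	is_release(space);		/* drops the caller's reference */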
Example #10
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	ipc_info_tree_name_array_t	*treep,
	mach_msg_type_number_t 		*treeCntp)
{
	ipc_info_name_t *table_info;
	vm_offset_t table_addr;
	vm_size_t table_size, table_size_needed;
	ipc_info_tree_name_t *tree_info;
	vm_offset_t tree_addr;
	vm_size_t tree_size, tree_size_needed;
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;
	vm_map_copy_t copy;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	/* start with in-line memory */

	table_size = 0;
	tree_size = 0;

	for (;;) {
		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (table_size != 0)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			if (tree_size != 0)
				kmem_free(ipc_kernel_map,
					  tree_addr, tree_size);
			return KERN_INVALID_TASK;
		}

		table_size_needed = round_page(space->is_table_size
					       * sizeof(ipc_info_name_t));
		tree_size_needed = round_page(space->is_tree_total
					      * sizeof(ipc_info_tree_name_t));

		if ((table_size_needed == table_size) &&
		    (tree_size_needed == tree_size))
			break;

		is_read_unlock(space);

		if (table_size != table_size_needed) {
			if (table_size != 0)
				kmem_free(ipc_kernel_map, table_addr, table_size);
			kr = kmem_alloc(ipc_kernel_map,	&table_addr, table_size_needed);
			if (kr != KERN_SUCCESS) {
				if (tree_size != 0)
					kmem_free(ipc_kernel_map, tree_addr, tree_size);
				return KERN_RESOURCE_SHORTAGE;
			}
			table_size = table_size_needed;
		}
		if (tree_size != tree_size_needed) {
			if (tree_size != 0)
				kmem_free(ipc_kernel_map, tree_addr, tree_size);
			kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size_needed);
			if (kr != KERN_SUCCESS) {
				if (table_size != 0)
					kmem_free(ipc_kernel_map, table_addr, table_size);
				return KERN_RESOURCE_SHORTAGE;
			}
			tree_size = tree_size_needed;
		}
	}
	/* space is read-locked and active; we have enough wired memory */

	/* get the overall space info */
	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;
	infop->iis_tree_size = space->is_tree_total;
	infop->iis_tree_small = space->is_tree_small;
	infop->iis_tree_hash = space->is_tree_hash;

	/* walk the table for this space */
	table = space->is_table;
	tsize = space->is_table_size;
	table_info = (ipc_info_name_array_t)table_addr;
	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits;

		bits = entry->ie_bits;
		iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		if (entry->ie_request)
			iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	/* walk the splay tree for this space */
	tree_info = (ipc_info_tree_name_array_t)tree_addr;
	for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_info_tree_name_t *iitn = &tree_info[index++];
		ipc_info_name_t *iin = &iitn->iitn_name;
		ipc_entry_t entry = &tentry->ite_entry;
		ipc_entry_bits_t bits = entry->ie_bits;

		assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

		iin->iin_name = tentry->ite_name;
		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
		iin->iin_type = IE_BITS_TYPE(bits);
		if (entry->ie_request)
			iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (vm_offset_t) entry->ie_object;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;

		if (tentry->ite_lchild == ITE_NULL)
			iitn->iitn_lchild = MACH_PORT_NULL;
		else
			iitn->iitn_lchild = tentry->ite_lchild->ite_name;

		if (tentry->ite_rchild == ITE_NULL)
			iitn->iitn_rchild = MACH_PORT_NULL;
		else
			iitn->iitn_rchild = tentry->ite_rchild->ite_name;

	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	/* prepare the table out-of-line data for return */
	if (table_size > 0) {
		if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t))
			bzero((char *)&table_info[infop->iis_table_size],
			      table_size - infop->iis_table_size * sizeof(ipc_info_name_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(table_addr),
				   vm_map_round_page(table_addr + table_size), FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, 
				   (vm_map_size_t)table_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*tablep = (ipc_info_name_t *)copy;
		*tableCntp = infop->iis_table_size;
	} else {
		*tablep = (ipc_info_name_t *)0;
		*tableCntp = 0;
	}

	/* prepare the tree out-of-line data for return */
	if (tree_size > 0) {
		if (tree_size > infop->iis_tree_size * sizeof(ipc_info_tree_name_t))
			bzero((char *)&tree_info[infop->iis_tree_size],
			      tree_size - infop->iis_tree_size * sizeof(ipc_info_tree_name_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(tree_addr),
				   vm_map_round_page(tree_addr + tree_size), FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)tree_addr, 
				   (vm_map_size_t)tree_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*treep = (ipc_info_tree_name_t *)copy;
		*treeCntp = infop->iis_tree_size;
	} else {
		*treep = (ipc_info_tree_name_t *)0;
		*treeCntp = 0;
	}
	return KERN_SUCCESS;
}
Example #11
kern_return_t
mach_port_names(
	ipc_space_t		space,
	mach_port_name_t	**namesp,
	mach_msg_type_number_t	*namesCnt,
	mach_port_type_t	**typesp,
	mach_msg_type_number_t	*typesCnt)
{
	ipc_entry_bits_t *capability;
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	ipc_entry_num_t actual;	/* this many names */
	ipc_port_timestamp_t timestamp;	/* logical time of this operation */
	mach_port_name_t *names;
	mach_port_type_t *types;
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr1;	/* allocated memory, for names */
	vm_offset_t addr2;	/* allocated memory, for types */
	vm_map_copy_t memory1;	/* copied-in memory, for names */
	vm_map_copy_t memory2;	/* copied-in memory, for types */

	/* safe simplifying assumption */
	assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	size = 0;

	for (;;) {
		ipc_entry_num_t bound;
		vm_size_t size_needed;

		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (size != 0) {
				kmem_free(ipc_kernel_map, addr1, size);
				kmem_free(ipc_kernel_map, addr2, size);
			}
			return KERN_INVALID_TASK;
		}

		/* upper bound on number of names in the space */

		bound = space->is_table_size + space->is_tree_total;
		size_needed = round_page_32(bound * sizeof(mach_port_name_t));

		if (size_needed <= size)
			break;

		is_read_unlock(space);

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
		size = size_needed;

		kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr1, addr1 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		kr = vm_map_wire(ipc_kernel_map, addr2, addr2 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}
	}
	/* space is read-locked and active */

	names = (mach_port_name_t *) addr1;
	types = (mach_port_type_t *) addr2;
	actual = 0;

	timestamp = ipc_port_timestamp();

	table = space->is_table;
	tsize = space->is_table_size;

	for (index = 0; index < tsize; index++) {
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits = entry->ie_bits;

		if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
			mach_port_name_t name;

			name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
			mach_port_names_helper(timestamp, entry, name, names,
					       types, &actual, space);
		}
	}

	for (tentry = ipc_splay_traverse_start(&space->is_tree);
	    tentry != ITE_NULL;
	    tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_entry_t entry = &tentry->ite_entry;
		mach_port_name_t name = tentry->ite_name;

		assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
		mach_port_names_helper(timestamp, entry, name, names,
				       types, &actual, space);
	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	if (actual == 0) {
		memory1 = VM_MAP_COPY_NULL;
		memory2 = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr1, addr1 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_unwire(ipc_kernel_map,
				     addr2, addr2 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
				   TRUE, &memory1);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
				   TRUE, &memory2);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size) {
			kmem_free(ipc_kernel_map,
				  addr1 + vm_size_used, size - vm_size_used);
			kmem_free(ipc_kernel_map,
				  addr2 + vm_size_used, size - vm_size_used);
		}
	}

	*namesp = (mach_port_name_t *) memory1;
	*namesCnt = actual;
	*typesp = (mach_port_type_t *) memory2;
	*typesCnt = actual;
	return KERN_SUCCESS;
}
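
The wire/copyin dance above is what the user-space caller sees as two out-of-line arrays; after a successful call the caller owns that memory and must free it. A minimal sketch (list_port_names is an illustrative name):

#include <mach/mach.h>
#include <stdio.h>

void
list_port_names(void)
{
	mach_port_name_array_t names;
	mach_port_type_array_t types;
	mach_msg_type_number_t ncnt, tcnt, i;
	kern_return_t kr;

	kr = mach_port_names(mach_task_self(), &names, &ncnt, &types, &tcnt);
	if (kr != KERN_SUCCESS)
		return;

	for (i = 0; i < ncnt; i++)
		printf("name 0x%x type 0x%x\n", names[i], types[i]);

	/* the arrays were copied out of ipc_kernel_map; give them back */
	vm_deallocate(mach_task_self(), (vm_address_t)names,
		      ncnt * sizeof(*names));
	vm_deallocate(mach_task_self(), (vm_address_t)types,
		      tcnt * sizeof(*types));
}
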
Example #12
kern_return_t
mach_port_move_member(
	ipc_space_t		space,
	mach_port_name_t	member,
	mach_port_name_t	after)
{
	ipc_entry_t entry;
	ipc_port_t port;
	ipc_pset_t nset;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(member))
		return KERN_INVALID_RIGHT;

	/* "after" may be MACH_PORT_NULL (remove from all sets), but not dead */
	if (after == MACH_PORT_DEAD)
		return KERN_INVALID_RIGHT;

	kr = ipc_right_lookup_read(space, member, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	if (after == MACH_PORT_NULL)
		nset = IPS_NULL;
	else {
		entry = ipc_entry_lookup(space, after);
		if (entry == IE_NULL) {
			is_read_unlock(space);
			return KERN_INVALID_NAME;
		}

		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			is_read_unlock(space);
			return KERN_INVALID_RIGHT;
		}

		nset = (ipc_pset_t) entry->ie_object;
		assert(nset != IPS_NULL);
	}
	ip_lock(port);
	ipc_pset_remove_from_all(port);

	if (nset != IPS_NULL) {
		ips_lock(nset);
		kr = ipc_pset_add(nset, port);
		ips_unlock(nset);
	}
	ip_unlock(port);
	is_read_unlock(space);
	return kr;
}
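
A receive right belongs to at most one port set, which is why the routine removes the port from every set before inserting it into the new one; passing MACH_PORT_NULL as "after" turns the call into a pure removal. Typical user-space usage (add_to_set is an illustrative name):

#include <mach/mach.h>

kern_return_t
add_to_set(mach_port_t *portp, mach_port_t *psetp)
{
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, portp);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_PORT_SET, psetp);
	if (kr != KERN_SUCCESS)
		return kr;

	/* member = the receive right, after = the destination port set */
	return mach_port_move_member(mach_task_self(), *portp, *psetp);
}
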
Example #13
kern_return_t
mach_port_get_set_status(
	ipc_space_t			space,
	mach_port_name_t		name,
	mach_port_name_t		**members,
	mach_msg_type_number_t		*membersCnt)
{
	ipc_entry_num_t actual;		/* this many members */
	ipc_entry_num_t maxnames;	/* space for this many members */
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr;	/* allocated memory */
	vm_map_copy_t memory;	/* copied-in memory */

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(name))
		return KERN_INVALID_RIGHT;

	size = PAGE_SIZE;	/* initial guess */

	for (;;) {
		ipc_tree_entry_t tentry;
		ipc_entry_t entry, table;
		ipc_entry_num_t tsize;
		mach_port_index_t index;
		mach_port_name_t *names;
		ipc_pset_t pset;

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = ipc_right_lookup_read(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return kr;
		}
		/* space is read-locked and active */

		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
			is_read_unlock(space);
			kmem_free(ipc_kernel_map, addr, size);
			return KERN_INVALID_RIGHT;
		}

		pset = (ipc_pset_t) entry->ie_object;
		assert(pset != IPS_NULL);
		/* the port set must be active */

		names = (mach_port_name_t *) addr;
		maxnames = size / sizeof(mach_port_name_t);
		actual = 0;

		table = space->is_table;
		tsize = space->is_table_size;

		for (index = 0; index < tsize; index++) {
			ipc_entry_t ientry = &table[index];

			if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t port =
					(ipc_port_t) ientry->ie_object;

				mach_port_gst_helper(pset, port,
						     maxnames, names, &actual);
			}
		}

		for (tentry = ipc_splay_traverse_start(&space->is_tree);
		    tentry != ITE_NULL;
		    tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
			ipc_entry_bits_t bits = tentry->ite_bits;

			assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

			if (bits & MACH_PORT_TYPE_RECEIVE) {
			    ipc_port_t port = (ipc_port_t) tentry->ite_object;

			    mach_port_gst_helper(pset, port, maxnames,
						 names, &actual);
			}
		}
		ipc_splay_traverse_finish(&space->is_tree);
		is_read_unlock(space);

		if (actual <= maxnames)
			break;

		/* didn't have enough memory; allocate more */

		kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(actual * sizeof(mach_port_name_t)) + PAGE_SIZE;
	}

	if (actual == 0) {
		memory = VM_MAP_COPY_NULL;

		kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr, addr + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &memory);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size)
			kmem_free(ipc_kernel_map,
				  addr + vm_size_used, size - vm_size_used);
	}

	*members = (mach_port_name_t *) memory;
	*membersCnt = actual;
	return KERN_SUCCESS;
}
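
The members array is returned exactly like the arrays in mach_port_names: out-of-line and caller-owned. A short sketch (print_set_members is an illustrative name):

#include <mach/mach.h>
#include <stdio.h>

void
print_set_members(mach_port_t pset)
{
	mach_port_name_array_t members;
	mach_msg_type_number_t count, i;
	kern_return_t kr;

	kr = mach_port_get_set_status(mach_task_self(), pset,
				      &members, &count);
	if (kr != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		printf("member 0x%x\n", members[i]);

	if (count > 0)
		vm_deallocate(mach_task_self(), (vm_address_t)members,
			      count * sizeof(*members));
}
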
Example #14
File: ipc_pset.c Project: ctos/bpi
kern_return_t
ipc_pset_move(
	ipc_space_t	space,
	ipc_port_t	port,
	ipc_pset_t	nset)
{
	ipc_pset_t oset;

	/*
	 *	While we've got the space locked, it holds refs for
	 *	the port and nset (because of the entries).  Also,
	 *	they must be alive.  While we've got port locked, it
	 *	holds a ref for oset, which might not be alive.
	 */

	ip_lock(port);
	assert(ip_active(port));

	oset = port->ip_pset;

	if (oset == nset) {
		/* the port is already in the new set:  a noop */

		is_read_unlock(space);
	} else if (oset == IPS_NULL) {
		/* just add port to the new set */

		ips_lock(nset);
		assert(ips_active(nset));
		is_read_unlock(space);

		ipc_pset_add(nset, port);

		ips_unlock(nset);
	} else if (nset == IPS_NULL) {
		/* just remove port from the old set */

		is_read_unlock(space);
		ips_lock(oset);

		ipc_pset_remove(oset, port);

		if (ips_active(oset))
			ips_unlock(oset);
		else {
			ips_check_unlock(oset);
			oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
		}
	} else {
		/* atomically move port from oset to nset */

		/* lock both sets in address order to avoid ABBA deadlock */
		if (oset < nset) {
			ips_lock(oset);
			ips_lock(nset);
		} else {
			ips_lock(nset);
			ips_lock(oset);
		}

		is_read_unlock(space);
		assert(ips_active(nset));

		ipc_pset_remove(oset, port);
		ipc_pset_add(nset, port);

		ips_unlock(nset);
		ips_check_unlock(oset);	/* KERN_NOT_IN_SET not a possibility */
	}

	ip_unlock(port);

	return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
		KERN_NOT_IN_SET : KERN_SUCCESS);
}
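
The two-set branch avoids an ABBA deadlock by always taking the lower-addressed lock first, so concurrent moves between the same pair of sets acquire the locks in one global order. The same discipline in portable user-space code (an illustrative pthread sketch of the pattern, not a kernel API; lock_pair is a hypothetical helper):

#include <pthread.h>
#include <stdint.h>

/* acquire two mutexes in a global (address) order to avoid deadlock */
static void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}
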
Example #15
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t	space,
	mach_port_t	name,
	ipc_mqueue_t	*mqueuep,
	ipc_object_t	*objectp)
{
	ipc_entry_t entry;
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;
		ipc_pset_t pset;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);

		pset = port->ip_pset;
		if (pset != IPS_NULL) {
			ips_lock(pset);
			if (ips_active(pset)) {
				ips_unlock(pset);
				ip_unlock(port);
				return MACH_RCV_IN_SET;
			}

			ipc_pset_remove(pset, port);
			ips_check_unlock(pset);
			assert(port->ip_pset == IPS_NULL);
		}

		mqueue = &port->ip_messages;
	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 *	At this point, the object is locked and active,
	 *	the space is unlocked, and mqueue is initialized.
	 */

	io_reference(object);
	imq_lock(mqueue);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
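
ipc_mqueue_copyin is what lets a single mach_msg receive name either a port or a whole port set: the name is resolved to one message queue, locked and referenced, before the receive proper begins. Its user-visible effect, sketched with the standard APIs (roundtrip_via_set is an illustrative name):

#include <mach/mach.h>
#include <string.h>

/* send one empty message to a port, then receive it via its set */
kern_return_t
roundtrip_via_set(void)
{
	mach_port_t port, pset;
	mach_msg_header_t msg;
	struct {
		mach_msg_header_t header;
		mach_msg_trailer_t trailer;
	} rcv;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return kr;
	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_PORT_SET, &pset);
	if (kr != KERN_SUCCESS)
		return kr;
	kr = mach_port_move_member(mach_task_self(), port, pset);
	if (kr != KERN_SUCCESS)
		return kr;

	/* make a send right from our receive right as the message is sent */
	memset(&msg, 0, sizeof(msg));
	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
	msg.msgh_size = sizeof(msg);
	msg.msgh_remote_port = port;
	msg.msgh_local_port = MACH_PORT_NULL;
	msg.msgh_id = 1;

	kr = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0,
		      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	if (kr != MACH_MSG_SUCCESS)
		return kr;

	/* receiving on the set drains the member port's queue */
	return mach_msg(&rcv.header, MACH_RCV_MSG, 0, sizeof(rcv),
			pset, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
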