Example #1
void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int			i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
			if (IP_VALID(thread->exc_actions[i].port))
				ipc_port_release_send(thread->exc_actions[i].port);
		}

		ipc_port_dealloc_kernel(kport);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
Example #2
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */
	
	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;
	
	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
Example #3
/*
 *	Routine:	host_set_exception_ports [kernel call]
 *	Purpose:
 *			Sets the host exception port, flavor and
 *			behavior for the exception types specified by the mask.
 *			There will be one send right per exception per valid
 *			port.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The host_priv is not valid,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 */
kern_return_t
host_set_exception_ports(
	host_priv_t				host_priv,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor)
{
	register int	i;
	ipc_port_t	old_port[EXC_TYPES_COUNT];

	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(host_priv == &realhost);

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}
	}
	/* Cannot easily check "new_flavor", but that just means that
	 * the flavor in the generated exception message might be garbage:
	 * GIGO
	 */
	host_lock(host_priv);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (exception_mask & (1 << i)) {
			old_port[i] = host_priv->exc_actions[i].port;
			host_priv->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			host_priv->exc_actions[i].behavior = new_behavior;
			host_priv->exc_actions[i].flavor = new_flavor;
		} else
			old_port[i] = IP_NULL;
	}/* for */

	/*
	 * Consume send rights without any lock held.
	 */
	host_unlock(host_priv);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return KERN_SUCCESS;
}
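For context, user space reaches this routine through the MIG-generated host_set_exception_ports() call on the privileged host port. A minimal, hypothetical caller sketch (how host_priv and exc_port are obtained is assumed to happen elsewhere; error handling is elided):

#include <mach/mach.h>

/* Hypothetical: route bad-access exceptions to exc_port. One send right
 * is copied per selected exception type, matching the ipc_port_copy_send()
 * loop in the kernel routine above. */
kern_return_t
install_host_exception_port(mach_port_t host_priv, mach_port_t exc_port)
{
	return host_set_exception_ports(host_priv,
					EXC_MASK_BAD_ACCESS,
					exc_port,
					EXCEPTION_DEFAULT,
					THREAD_STATE_NONE);
}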
Example #4
void
ipc_thread_terminate(thread_t thread)
{
	ipc_port_t kport;

	ith_lock(thread);
	kport = thread->ith_self;

	if (kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		ith_unlock(thread);
		return;
	}

	thread->ith_self = IP_NULL;
	ith_unlock(thread);

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	/* release the naked send rights */

	if (IP_VALID(thread->ith_sself))
		ipc_port_release_send(thread->ith_sself);
	if (IP_VALID(thread->ith_exception))
		ipc_port_release_send(thread->ith_exception);

	/* destroy the kernel port */

	ipc_port_dealloc_kernel(kport);
}
Example #5
/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.  
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate(task);

	return port;
}
Example #6
/*
 *	Routine:	ipc_port_request_type
 *	Purpose:
 *		Determine the type(s) of port requests enabled for a name.
 *	Conditions:
 *		The port must be locked or inactive (to avoid table growth).
 *		The index must not be IE_REQ_NONE and must correspond to the given name.
 */
mach_port_type_t
ipc_port_request_type(
	ipc_port_t			port,
	__assert_only mach_port_name_t	name,
	ipc_port_request_index_t	index)
{
	ipc_port_request_t ipr, table;
	mach_port_type_t type = 0;

	table = port->ip_requests;
	assert (table != IPR_NULL);

	assert(index != IE_REQ_NONE);
	ipr = &table[index];
	assert(ipr->ipr_name == name);

	if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
		type |= MACH_PORT_TYPE_DNREQUEST;

		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			type |= MACH_PORT_TYPE_SPREQUEST;

			if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
				type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
			}
		}
	}
	return type;
}
Example #7
void
ipc_port_release_send(
	ipc_port_t	port)
{
	ipc_port_t nsrequest = IP_NULL;
	mach_port_mscount_t mscount;

	if (!IP_VALID(port))
		return;

	ip_lock(port);

	if (!ip_active(port)) {
		ip_unlock(port);
		ip_release(port);
		return;
	}

	assert(port->ip_srights > 0);

	if (--port->ip_srights == 0 &&
	    port->ip_nsrequest != IP_NULL) {
		nsrequest = port->ip_nsrequest;
		port->ip_nsrequest = IP_NULL;
		mscount = port->ip_mscount;
		ip_unlock(port);
		ip_release(port);
		ipc_notify_no_senders(nsrequest, mscount);
	} else {
		ip_unlock(port);
		ip_release(port);
	}
}
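This routine is the teardown half of ipc_port_make_send(): every naked send right a kernel subsystem stashes must eventually be handed back here so ip_srights (and any armed no-senders request) stays balanced. A minimal illustrative sketch of that pairing, using only routines that appear in these examples and assuming the usual ipc port declarations:

/* Illustrative kernel-side pattern, not a complete subsystem. */
static ipc_port_t stashed_right = IP_NULL;

void
subsystem_retain_port(ipc_port_t kport)
{
	stashed_right = ipc_port_make_send(kport);	/* ip_srights++ */
}

void
subsystem_release_port(void)
{
	if (IP_VALID(stashed_right))
		ipc_port_release_send(stashed_right);	/* may post no-senders */
	stashed_right = IP_NULL;
}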
Example #8
/*
 *	Routine: convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
Example #9
EXTERN io_object_t
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
{
	io_object_t obj = NULL;

	if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
		ipc_port_t port;
		kern_return_t kr;

		kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);

		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));

			ip_reference(port);
			ip_unlock(port);

			iokit_lock_port(port);
			if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_connect_reference(obj);
			}
			iokit_unlock_port(port);

			ip_release(port);
		}
	}

	return obj;
}
Example #10
File: net.c Project: ctos/bpi
/* Free all sk_buffs on the done list.
   This routine is called by the iodone thread in ds_routines.c.  */
void
free_skbuffs ()
{
  struct sk_buff *skb;

  while (1)
    {
      skb = skb_dequeue (&skb_done_list);
      if (skb)
	{
	  if (skb->copy)
	    {
	      vm_map_copy_discard (skb->copy);
	      skb->copy = NULL;
	    }
	  if (IP_VALID (skb->reply))
	    {
	      ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
	      skb->reply = IP_NULL;
	    }
	  dev_kfree_skb (skb, FREE_WRITE);
	}
      else
	break;
    }
}
Example #11
/*  flipc_port_create() is called to convert a regular mach port into a
 *  flipc port (i.e., the port has one or more rights off-node).
 *  <lport> must be locked on entry and is not unlocked on return.
 */
static kern_return_t
flipc_port_create(ipc_port_t lport, mach_node_t node, mnl_name_t name)
{
    /* Ensure parameters are valid and not already linked */
    assert(IP_VALID(lport));
    assert(MACH_NODE_VALID(node));
    assert(MNL_NAME_VALID(name));
    assert(!FPORT_VALID(lport->ip_messages.imq_fport));

    /* Allocate and initialize a flipc port */
    flipc_port_t fport = (flipc_port_t) zalloc(flipc_port_zone);
    if (!FPORT_VALID(fport))
        return KERN_RESOURCE_SHORTAGE;
    bzero(fport, sizeof(struct flipc_port));
    fport->obj.name = name;
    fport->hostnode = node;
    if (node == localnode)
        fport->state = FPORT_STATE_PRINCIPAL;
    else
        fport->state = FPORT_STATE_PROXY;

    /* Link co-structures (lport is locked) */
    fport->lport = lport;
    lport->ip_messages.imq_fport = fport;

    /* Add fport to the name hash table; revert link if insert fails */
    kern_return_t kr =  mnl_obj_insert((mnl_obj_t)fport);
    if (kr != KERN_SUCCESS) {
        lport->ip_messages.imq_fport = FPORT_NULL;
        fport->lport = IP_NULL;
        zfree(flipc_port_zone, fport);
    }

    return kr;
}
Example #12
/*
 * Retry wait for output queue emptied, for write.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t char_write_done(
	register io_req_t	ior)
{
	register struct tty *tp = (struct tty *)ior->io_dev_ptr;
	register spl_t s = spltty();

	simple_lock(&tp->t_lock);
	if (tp->t_outq.c_cc > TTHIWAT(tp) ||
	    (tp->t_state & TS_CARR_ON) == 0) {

	    queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
	    simple_unlock(&tp->t_lock);
	    splx(s);
	    return FALSE;
	}
	simple_unlock(&tp->t_lock);
	splx(s);

	if (IP_VALID(ior->io_reply_port)) {
	  (void) (*((ior->io_op & IO_INBAND) ?
		    ds_device_write_reply_inband :
		    ds_device_write_reply))(ior->io_reply_port,
					    ior->io_reply_port_type,
					    ior->io_error,
					    (int) (ior->io_total -
						   ior->io_residual));
	}
	mach_device_deallocate(ior->io_device);
	return TRUE;
}
Example #13
/*
 * fileport_notify
 *
 * Description: Handle a no-senders notification for a fileport.  Unless
 * 		the message is spoofed, destroys the port and releases
 * 		its reference on the fileglob.
 *
 * Parameters: msg		A Mach no-senders notification message.
 */
void
fileport_notify(mach_msg_header_t *msg)
{
	mach_no_senders_notification_t *notification = (void *)msg;
	ipc_port_t port = notification->not_header.msgh_remote_port;
	struct fileglob *fg = NULL;

	if (!IP_VALID(port))
		panic("Invalid port passed to fileport_notify()\n");

	ip_lock(port);

	fg = (struct fileglob *)port->ip_kobject;

	if (!ip_active(port)) 
		panic("Inactive port passed to fileport_notify()\n");
	if (ip_kotype(port) != IKOT_FILEPORT) 
		panic("Port of type other than IKOT_FILEPORT passed to fileport_notify()\n");
	if (fg == NULL) 
		panic("fileport without an assocated fileglob\n");

	if (port->ip_srights == 0) {
		ip_unlock(port);

		fileport_releasefg(fg);
		ipc_port_dealloc_kernel(port);
	} else {
		ip_unlock(port);
	}

	return;
}
Example #14
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		port;
    ipc_port_t		sendPort;
    mach_port_name_t	name = 0;

    if( obj == NULL)
        return MACH_PORT_NULL;

    port = iokit_port_for_object( obj, type );
    if( port) {
	sendPort = ipc_port_make_send( port);
	iokit_release_port( port );
    } else
	sendPort = IP_NULL;

    if (IP_VALID( sendPort )) {
    	kern_return_t	kr;
    	kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
				MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
	if ( kr != KERN_SUCCESS) {
	    ipc_port_release_send( sendPort );
	    name = MACH_PORT_NULL;
	}
    } else if ( sendPort == IP_NULL)
        name = MACH_PORT_NULL;
    else if ( sendPort == IP_DEAD)
    	name = MACH_PORT_DEAD;

    iokit_remove_reference( obj );

    return( name );
}
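On the user side, the send right copied out here is what shows up as an io_connect_t (or other io_object_t handle) in the IOKit framework. A hypothetical sketch of the counterpart call:

#include <IOKit/IOKitLib.h>

/* Hypothetical: the io_connect_t returned by IOServiceOpen() is a send
 * right minted in the kernel (as in iokit_make_send_right() above) and
 * copied out into the caller's IPC space. */
kern_return_t
open_connection(io_service_t service, io_connect_t *conn)
{
	return IOServiceOpen(service, mach_task_self(), 0, conn);
}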
Example #15
/*
 * fileport_alloc
 *
 * Description: Obtain a send right for the given fileglob, which must be
 *		referenced.
 *
 * Parameters: 	fg		A fileglob.
 *
 * Returns: 	Port of type IKOT_FILEPORT with fileglob set as its kobject. 
 * 		Port is returned with a send right.
 */
ipc_port_t
fileport_alloc(struct fileglob *fg)
{
	ipc_port_t fileport;
	ipc_port_t sendport;
	ipc_port_t notifyport;

	fileport = ipc_port_alloc_kernel();
	if (fileport == IP_NULL) {
		goto out;
	}

	ipc_kobject_set(fileport, (ipc_kobject_t)fg, IKOT_FILEPORT);
	notifyport = ipc_port_make_sonce(fileport);
	ip_lock(fileport); /* unlocked by ipc_port_nsrequest */
	ipc_port_nsrequest(fileport, 1, notifyport, &notifyport);

	sendport = ipc_port_make_send(fileport);
	if (!IP_VALID(sendport)) {
		panic("Couldn't allocate send right for fileport!\n");
	}

out:
	return fileport;
}
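User space drives this allocator through the fileport_makeport() and fileport_makefd() system calls, which let a file descriptor travel inside a Mach message. A hypothetical sketch of both ends (the message plumbing itself is assumed):

#include <sys/fileport.h>
#include <mach/mach.h>

/* Sender: wrap a descriptor in a fileport send right (the IKOT_FILEPORT
 * port allocated above), suitable for inclusion in a Mach message. */
mach_port_t
fd_to_fileport(int fd)
{
	mach_port_t port = MACH_PORT_NULL;

	if (fileport_makeport(fd, &port) != 0)
		return MACH_PORT_NULL;
	return port;
}

/* Receiver: turn the fileport back into a fresh descriptor. */
int
fileport_to_fd(mach_port_t port)
{
	return fileport_makefd(port);	/* -1 on failure, errno set */
}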
Example #16
/*
 *	Routine:	convert_port_to_mig_object [interface]
 *	Purpose:
 *		Base implementation of MIG intrans routine to convert from
 *		an incoming port reference to a new reference on the
 *		underlying object. A new reference must be created, because
 *		the port's reference could go away asynchronously.
 *	Returns:
 *		NULL - Not an active MIG object port or iid not supported
 *		Otherwise, a reference to the underlying MIG interface
 *	Conditions:
 *		Nothing locked.
 */
mig_object_t
convert_port_to_mig_object(
	ipc_port_t	port,
	const MIGIID	*iid)
{
	mig_object_t	mig_object;
	void 		*ppv;

	if (!IP_VALID(port))
		return NULL;

	ip_lock(port);
	if (!ip_active(port) || (ip_kotype(port) != IKOT_MIG)) {
		ip_unlock(port);
		return NULL;
	}

	/*
	 * Our port points to some MIG object interface.  Now
	 * query it to get a reference to the desired interface.
	 */
	ppv = NULL;
	mig_object = (mig_object_t)port->ip_kobject;
	mig_object->pVtbl->QueryInterface((IMIGObject *)mig_object, iid, &ppv);
	ip_unlock(port);
	return (mig_object_t)ppv;
}
Example #17
mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t	sright,
	ipc_space_t	space)
{
	mach_port_name_t name;

	if (IP_VALID(sright)) {
		kern_return_t kr;

		kr = ipc_object_copyout(space, (ipc_object_t) sright,
					MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
		if (kr != KERN_SUCCESS) {
			ipc_port_release_send(sright);

			if (kr == KERN_INVALID_CAPABILITY)
				name = MACH_PORT_DEAD;
			else
				name = MACH_PORT_NULL;
		}
	} else
		name = CAST_MACH_PORT_TO_NAME(sright);

	return name;
}
Example #18
/*
 *	Routine:	port_name_to_semaphore
 *	Purpose:
 *		Convert from a port name in the current space to a semaphore.
 *		Produces a semaphore ref, which may be null.
 *	Conditions:
 *		Nothing locked.
 */
kern_return_t
port_name_to_semaphore(
	mach_port_name_t 	name,
	semaphore_t 		*semaphorep)
{
	ipc_port_t kern_port;
	kern_return_t kr;

	if (!MACH_PORT_VALID(name)) {
		*semaphorep = SEMAPHORE_NULL;
		return KERN_INVALID_NAME;
	}

	kr = ipc_object_translate(current_space(), name, MACH_PORT_RIGHT_SEND,
				  (ipc_object_t *) &kern_port);
	if (kr != KERN_SUCCESS) {
		*semaphorep = SEMAPHORE_NULL;
		return kr;
	}
	/* have the port locked */
	assert(IP_VALID(kern_port));

	*semaphorep = convert_port_to_semaphore(kern_port);
	ip_unlock(kern_port);

	return KERN_SUCCESS;
}
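The port name translated here is the same value user space gets back from semaphore_create(); a user-level semaphore_t is simply a send right whose kernel object this routine recovers. A small hedged usage sketch:

#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>

/* Illustrative: create, signal, and wait on a Mach semaphore. Passing
 * the returned name into a kernel interface is what exercises
 * port_name_to_semaphore() above. */
kern_return_t
semaphore_smoke_test(void)
{
	semaphore_t sem;
	kern_return_t kr;

	kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
	if (kr != KERN_SUCCESS)
		return kr;

	semaphore_signal(sem);
	kr = semaphore_wait(sem);

	semaphore_destroy(mach_task_self(), sem);
	return kr;
}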
Example #19
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
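The matching read side is mach_ports_lookup(), which returns copies of the registered send rights in out-of-line memory. A hedged sketch of the register/lookup pair; note that only TASK_PORT_REGISTER_MAX slots exist:

#include <mach/mach.h>

/* Illustrative: register a single send right in the task's registered-port
 * slots, then read the slots back and release the out-of-line array. */
kern_return_t
register_and_lookup(mach_port_t port)
{
	mach_port_t regs[1] = { port };
	mach_port_array_t found = NULL;
	mach_msg_type_number_t count = 0;
	kern_return_t kr;

	kr = mach_ports_register(mach_task_self(), regs, 1);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_ports_lookup(mach_task_self(), &found, &count);
	if (kr == KERN_SUCCESS && count > 0)
		vm_deallocate(mach_task_self(), (vm_address_t)found,
			      count * sizeof(found[0]));
	return kr;
}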
Example #20
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);
		
		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}
		
		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
Example #21
void
exception(
	integer_t _exception, 
	integer_t code, 
	integer_t subcode)
{
	ipc_thread_t self = current_thread();
	ipc_port_t exc_port;

	if (_exception == KERN_SUCCESS)
		panic("exception");

	/*
	 *	Optimized version of retrieve_thread_exception.
	 */

	ith_lock(self);
	assert(self->ith_self != IP_NULL);
	exc_port = self->ith_exception;
	if (!IP_VALID(exc_port)) {
		ith_unlock(self);
		exception_try_task(_exception, code, subcode);
		/*NOTREACHED*/
	}

	ip_lock(exc_port);
	ith_unlock(self);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		exception_try_task(_exception, code, subcode);
		/*NOTREACHED*/
	}

	/*
	 *	Make a naked send right for the exception port.
	 */

	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/*
	 *	If this exception port doesn't work,
	 *	we will want to try the task's exception port.
	 *	Indicate this by saving the exception state.
	 */

	self->ith_exc = _exception;
	self->ith_exc_code = code;
	self->ith_exc_subcode = subcode;

	exception_raise(exc_port,
			retrieve_thread_self_fast(self),
			retrieve_task_self_fast(self->task),
			_exception, code, subcode);
	/*NOTREACHED*/
}
Example #22
/*
 *	Routine:	convert_semaphore_to_port
 *	Purpose:
 *		Convert a semaphore reference to a send right to a
 *		semaphore port.
 *
 *		Consumes the semaphore reference.  If the semaphore
 *		port currently has no send rights (or doesn't exist
 *		yet), the reference is donated to the port to represent
 *		all extant send rights collectively.
 */
ipc_port_t
convert_semaphore_to_port (semaphore_t semaphore)
{
	ipc_port_t port, send;

	if (semaphore == SEMAPHORE_NULL)
		return (IP_NULL);

	/* caller is donating a reference */
	port = semaphore->port;

	if (!IP_VALID(port)) {
		port = ipc_port_alloc_kernel();
		assert(IP_VALID(port));
		ipc_kobject_set_atomically(port, (ipc_kobject_t) semaphore, IKOT_SEMAPHORE);

		/* If we lose the race, deallocate and pick up the other guy's port */
		if (!OSCompareAndSwapPtr(IP_NULL, port, &semaphore->port)) {
			ipc_port_dealloc_kernel(port);
			port = semaphore->port;
			assert(ip_kotype(port) == IKOT_SEMAPHORE);
			assert(port->ip_kobject == (ipc_kobject_t)semaphore);
		}
	}

	ip_lock(port);
	assert(ip_active(port));
	send = ipc_port_make_send_locked(port);

	if (1 == port->ip_srights) {
		ipc_port_t old_notify;

		/* transfer our ref to the port, and arm the no-senders notification */
		assert(IP_NULL == port->ip_nsrequest);
		ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
		/* port unlocked */
		assert(IP_NULL == old_notify);
	} else {
		/* piggyback on the existing port reference, so consume ours */
		ip_unlock(port);
		semaphore_dereference(semaphore);
	}
	return (send);
}
Example #23
/*  Get the mnl_name associated with local ipc_port <lport>.
 *  Returns MNL_NAME_NULL if <lport> is invalid or not a flipc port.
 */
static inline mnl_name_t mnl_name_from_port(ipc_port_t lport)
{
    mnl_name_t name = MNL_NAME_NULL;

    if (IP_VALID(lport)) {
        flipc_port_t fport = lport->ip_messages.data.port.fport;
        if (FPORT_VALID(fport))
            name = fport->obj.name;
    }
    return name;
}
Example #24
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	task->itk_self = IP_NULL;
	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);
	if (IP_VALID(task->itk_exception))
		ipc_port_release_send(task->itk_exception);
	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the space, leaving just a reference for it */

	ipc_space_destroy(task->itk_space);

	/* destroy the kernel port */

	ipc_port_dealloc_kernel(kport);
}
Example #25
void
exception_try_task(
	integer_t _exception, 
	integer_t code, 
	integer_t subcode)
{
	ipc_thread_t self = current_thread();
	task_t task = self->task;
	ipc_port_t exc_port;

	/*
	 *	Optimized version of retrieve_task_exception.
	 */

	itk_lock(task);
	assert(task->itk_self != IP_NULL);
	exc_port = task->itk_exception;
	if (!IP_VALID(exc_port)) {
		itk_unlock(task);
		exception_no_server();
		/*NOTREACHED*/
	}

	ip_lock(exc_port);
	itk_unlock(task);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		exception_no_server();
		/*NOTREACHED*/
	}

	/*
	 *	Make a naked send right for the exception port.
	 */

	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/*
	 *	This is the thread's last chance.
	 *	Clear the saved exception state.
	 */

	self->ith_exc = KERN_SUCCESS;

	exception_raise(exc_port,
			retrieve_thread_self_fast(self),
			retrieve_task_self_fast(task),
			_exception, code, subcode);
	/*NOTREACHED*/
}
Example #26
ipc_port_t
ipc_port_make_sonce(
	ipc_port_t	port)
{
	assert(IP_VALID(port));

	ip_lock(port);
	assert(ip_active(port));
	port->ip_sorights++;
	ip_reference(port);
	ip_unlock(port);

	return port;
}
Example #27
File: host.c Project: DJHartley/xnu
/*
 *      User interface for setting a special port.
 *
 *      Only permits the user to set a user-owned special port
 *      ID, rejecting a kernel-owned special port ID.
 *
 *      A special kernel port cannot be set up using this
 *      routine; use kernel_set_special_port() instead.
 */
kern_return_t
host_set_special_port(
        host_priv_t     host_priv,
        int             id,
        ipc_port_t      port)
{
	if (host_priv == HOST_PRIV_NULL ||
	    id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT ) {
		if (IP_VALID(port))
			ipc_port_release_send(port);
		return KERN_INVALID_ARGUMENT;
	}

	return kernel_set_special_port(host_priv, id, port);
}
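A hypothetical user-space caller, using one of the user-settable IDs above HOST_MAX_SPECIAL_KERNEL_PORT (HOST_AUTOMOUNTD_PORT is picked here purely for illustration):

#include <mach/mach.h>
#include <mach/host_special_ports.h>

/* Hypothetical: install a send right as a user-owned host special port.
 * Kernel-owned IDs (<= HOST_MAX_SPECIAL_KERNEL_PORT) are rejected by the
 * routine above. */
kern_return_t
install_automountd_port(mach_port_t host_priv, mach_port_t port)
{
	return host_set_special_port(host_priv, HOST_AUTOMOUNTD_PORT, port);
}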
Example #28
processor_set_name_t
convert_port_to_pset_name(
	ipc_port_t	port)
{
	boolean_t r;
	processor_set_t pset = PROCESSOR_SET_NULL;

	r = FALSE;
	while (!r && IP_VALID(port)) {
		ip_lock(port);
		r = ref_pset_port_locked(port, TRUE, &pset);
		/* port unlocked */
	}
	return pset;
}
Example #29
/*
 * fileport_port_to_fileglob
 *
 * Description: Obtain the fileglob associated with a given port.
 *
 * Parameters: port		A Mach port of type IKOT_FILEPORT.
 *
 * Returns:    NULL		The given Mach port did not reference a
 *				fileglob.
 *	       !NULL		The fileglob that is associated with the
 *				Mach port.
 *
 * Notes: The caller must have a reference on the fileport.
 */
struct fileglob *
fileport_port_to_fileglob(ipc_port_t port)
{
	struct fileglob *fg = NULL;

	if (!IP_VALID(port))
		return NULL;

	ip_lock(port);
	if (ip_active(port) && IKOT_FILEPORT == ip_kotype(port))
		fg = (void *)port->ip_kobject;
	ip_unlock(port);

	return fg;
}
Example #30
void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int			i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}