Example #1
void ipc_host_init(void)
{
	ipc_port_t	port;
	/*
	 *	Allocate and set up the two host ports.
	 */
	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_host_init");

	ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
	realhost.host_self = port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_host_init");

	ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
	realhost.host_priv_self = port;

	/*
	 *	Set up ipc for default processor set.
	 */
	ipc_pset_init(&default_pset);
	ipc_pset_enable(&default_pset);

	/*
	 *	And for master processor
	 */
	ipc_processor_init(master_processor);
}
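
Nearly every example on this page follows the same core idiom: allocate a kernel port with ipc_port_alloc_kernel, treat IP_NULL as a hard failure, then bind a kernel object to the port with ipc_kobject_set so that messages sent to the port dispatch to that object. A minimal sketch of the pattern (my_subsystem_init, my_obj, and IKOT_EXAMPLE are hypothetical placeholders, not names from the examples):

/* Sketch only: in real code, IKOT_EXAMPLE would be one of the
 * kernel's IKOT_* kobject types and my_obj an actual kernel structure. */
static struct { int unused; } my_obj;

void
my_subsystem_init(void)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();		/* kernel holds the receive right */
	if (port == IP_NULL)
		panic("my_subsystem_init");	/* init-time failure is fatal */

	/* Tag the port so messages to it dispatch to my_obj. */
	ipc_kobject_set(port, (ipc_kobject_t) &my_obj, IKOT_EXAMPLE);
}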
Example #2
File: ipc_host.c Project: TalAloni/xnu
void ipc_host_init(void)
{
	ipc_port_t	port;
	int i;

	lck_mtx_init(&realhost.lock, &host_notify_lock_grp, &host_notify_lock_attr);

	/*
	 *	Allocate and set up the two host ports.
	 */
	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_host_init");

	ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY);
	kernel_set_special_port(&realhost, HOST_SECURITY_PORT,
				ipc_port_make_send(port));

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_host_init");

	ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
	kernel_set_special_port(&realhost, HOST_PORT,
				ipc_port_make_send(port));

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_host_init");

	ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
	kernel_set_special_port(&realhost, HOST_PRIV_PORT,
				ipc_port_make_send(port));

	/* the rest of the special ports will be set up later */

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		realhost.exc_actions[i].port = IP_NULL;
	}/* for */

	/*
	 *	Set up ipc for default processor set.
	 */
	ipc_pset_init(&pset0);
	ipc_pset_enable(&pset0);

	/*
	 *	And for master processor
	 */
	ipc_processor_init(master_processor);
	ipc_processor_enable(master_processor);
}
Example #3
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t		notify;
    ipc_port_t		port;

    do {

	/* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;

        /* set kobject & type */
//	iokit_add_reference( obj );
	ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
	/* port unlocked */
        assert( notify == IP_NULL);
	gIOKitPortCount++;

    } while( FALSE);

    return( port );
}
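
This example, and #6 and #22 below, repeat a second idiom right after allocation: arm a no-senders notification so the kernel hears when the last send right to the port dies. Pulled out as a sketch, with arm_no_senders as a hypothetical helper name:

/* Hypothetical helper distilling the no-senders setup used above. */
static void
arm_no_senders(ipc_port_t port)
{
	ipc_port_t	notify;

	ip_lock(port);
	/* Make a send-once right on which the notification will be sent. */
	notify = ipc_port_make_sonce_locked(port);
	ipc_port_nsrequest(port, 1, notify, &notify);
	/* port is now unlocked; a fresh port has no previous request. */
	assert(notify == IP_NULL);
}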
Example #4
/* Utility routine to create a new ledger */
static ledger_t
ledger_allocate(
		ledger_item_t	limit,
		ledger_t	ledger_ledger,
		ledger_t	ledger_parent)
{
	ledger_t	ledger;

	ledger = (ledger_t)kalloc(sizeof(ledger_data_t));
	if (ledger == LEDGER_NULL)
		return(LEDGER_NULL);

	ledger->ledger_self = ipc_port_alloc_kernel();
	if (ledger->ledger_self == IP_NULL)
		return(LEDGER_NULL);

	ledger_lock_init(ledger);
	ledger->ledger_limit = limit;
	ledger->ledger_balance = 0;
	ledger->ledger_service_port = MACH_PORT_NULL;
	ledger->ledger_ledger = ledger_ledger;
	ledger->ledger_parent = ledger_parent;
	ipc_kobject_set(ledger->ledger_self, (ipc_kobject_t)ledger,
			IKOT_LEDGER);

	return(ledger);
}
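
One caveat worth flagging: if the port allocation in ledger_allocate fails, the kalloc'd ledger structure is leaked. A tighter error path, assuming the kfree interface used in example #23 below, would be:

	ledger->ledger_self = ipc_port_alloc_kernel();
	if (ledger->ledger_self == IP_NULL) {
		/* Free the ledger allocated above instead of leaking it. */
		kfree(ledger, sizeof(ledger_data_t));
		return(LEDGER_NULL);
	}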
Example #5
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */
	
	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;
	
	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
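
ipc_task_reset illustrates a locking discipline that recurs in example #26: allocate the replacement port before taking the lock (allocation may block), swap pointers while the lock is held, and release the displaced rights only after unlocking, since the release calls can themselves take locks. A distilled sketch, with struct my_obj, my_obj_lock/my_obj_unlock, and IKOT_EXAMPLE as hypothetical stand-ins:

/* Sketch of the swap-under-lock / release-after-unlock pattern. */
struct my_obj {
	ipc_port_t	self_port;	/* hypothetical object with a kernel port */
};

static void
reset_object_port(struct my_obj *obj)
{
	ipc_port_t	old_port, new_port;

	new_port = ipc_port_alloc_kernel();	/* may block: done before locking */
	if (new_port == IP_NULL)
		panic("reset_object_port");

	my_obj_lock(obj);			/* hypothetical lock */
	old_port = obj->self_port;
	obj->self_port = new_port;
	ipc_kobject_set(old_port, IKO_NULL, IKOT_NONE);		/* detach old port */
	ipc_kobject_set(new_port, (ipc_kobject_t) obj, IKOT_EXAMPLE);
	my_obj_unlock(obj);

	/* Rights are dropped outside the lock, as in the example above. */
	ipc_port_dealloc_kernel(old_port);
}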
Example #6
File: ipc_misc.c Project: SbIm/xnu-env
/*
 * fileport_alloc
 *
 * Description: Obtain a send right for the given fileglob, which must be
 *		referenced.
 *
 * Parameters: 	fg		A fileglob.
 *
 * Returns: 	Port of type IKOT_FILEPORT with fileglob set as its kobject. 
 * 		Port is returned with a send right.
 */
ipc_port_t
fileport_alloc(struct fileglob *fg)
{
	ipc_port_t fileport;
	ipc_port_t sendport;
	ipc_port_t notifyport;

	fileport = ipc_port_alloc_kernel();
	if (fileport == IP_NULL) {
		goto out;
	}

	ipc_kobject_set(fileport, (ipc_kobject_t)fg, IKOT_FILEPORT);
	notifyport = ipc_port_make_sonce(fileport);
	ip_lock(fileport); /* unlocked by ipc_port_nsrequest */
	ipc_port_nsrequest(fileport, 1, notifyport, &notifyport);

	sendport = ipc_port_make_send(fileport);
	if (!IP_VALID(sendport)) {
		panic("Couldn't allocate send right for fileport!\n");
	}

out:
	return fileport;
}
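
For context on how the returned right is consumed: a caller would typically move the send right into a task's IPC space and hand back the resulting name. A hypothetical caller sketch (fileport_name_for_fileglob is invented here; ipc_port_copyout_send and get_task_ipcspace are the kernel routines XNU of this vintage uses for this step):

/* Hypothetical caller: turn a referenced fileglob into a port name
 * in the given task's IPC space. */
static mach_port_name_t
fileport_name_for_fileglob(struct fileglob *fg, task_t task)
{
	ipc_port_t	port;

	port = fileport_alloc(fg);	/* comes back with one send right */
	if (!IP_VALID(port))
		return MACH_PORT_NULL;

	/* Consumes the send right and returns a name valid in that space. */
	return ipc_port_copyout_send(port, get_task_ipcspace(task));
}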
Example #7
void
ipc_pset_init(
	processor_set_t		pset)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_pset_init");
	pset->pset_self = port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_pset_init");
	pset->pset_name_self = port;
}
Example #8
/*
 *	Routine:	ipc_clock_init
 *	Purpose:
 *		Initialize ipc control of a clock.
 */
void
ipc_clock_init(
	clock_t		clock)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_clock_init");
	clock->cl_service = port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_clock_init");
	clock->cl_control = port;
}
Example #9
/*
 * Only called internally. Allocate an xmm_object port.
 * The xmm_object takes the mobj reference.
 */
ipc_port_t
xmm_object_allocate(
	xmm_obj_t	mobj)
{
	ipc_port_t xmm_object;

	/*
	 * Create an xmm object port.
	 */
	xmm_object = ipc_port_alloc_kernel();
	if (xmm_object == IP_NULL) {
		return IP_NULL;
	}
	IP_CLEAR_NMS(xmm_object);

	/*
	 * Associate the xmm obj with the xmm object port.
	 * We keep the xmm obj reference returned by xmm_svm_create.
	 */
	ipc_kobject_set(xmm_object, (ipc_kobject_t) mobj, IKOT_XMM_OBJECT);

	/*
	 * Return the port.
	 */
	return xmm_object;
}
Example #10
/*
 *	Routine:	convert_task_suspension_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.  
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate(task);

	return port;
}
Example #11
/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int				policy,
	int				value)
{
	semaphore_t		 s = SEMAPHORE_NULL;

	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	wait_queue_init(&s->wait_queue, policy); /* also inits lock */
	s->count = value;
	s->ref_count = 1;

	/*
	 *  Create and initialize the semaphore port
	 */
	s->port	= ipc_port_alloc_kernel();
	if (s->port == IP_NULL) {	
		/* This will deallocate the semaphore */	
		semaphore_dereference(s);
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 *
	 *  Associate the task with the new semaphore by having the
	 *  semaphores task pointer point to the owning task's structure.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	s->owner = task;
	s->active = TRUE;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}		  
Example #12
File: ipc_host.c Project: TalAloni/xnu
void
ipc_processor_init(
	processor_t	processor)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_processor_init");
	processor->processor_self = port;
}
Example #13
void
ipc_processor_init(
	processor_t	processor)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_processor_init");
	processor->processor_self = port;
	ipc_kobject_set(port, (ipc_kobject_t) processor, IKOT_PROCESSOR);
}
Example #14
/*
 *	Routine:	convert_mig_object_to_port [interface]
 *	Purpose:
 *		Base implementation of MIG outtrans routine to convert from
 *		a mig object reference to a new send right on the object's
 *		port.  The object reference is consumed.
 *	Returns:
 *		IP_NULL - Null MIG object supplied
 *		Otherwise, a newly made send right for the port
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_mig_object_to_port(
	mig_object_t	mig_object)
{
	ipc_port_t	port;
	boolean_t	deallocate = TRUE;

	if (mig_object == MIG_OBJECT_NULL)
		return IP_NULL;

	port = mig_object->port;
	while ((port == IP_NULL) ||
	       ((port = ipc_port_make_send(port)) == IP_NULL)) {
		ipc_port_t	previous;

		/*
		 * Either the port was never set up, or it was just
		 * deallocated out from under us by the no-senders
		 * processing.  In either case, we must:
		 *	Attempt to make one
		 * 	Arrange for no senders
		 *	Try to atomically register it with the object
		 *		Destroy it if we are raced.
		 */
		port = ipc_port_alloc_kernel();
		ip_lock(port);
		ipc_kobject_set_atomically(port,
					   (ipc_kobject_t) mig_object,
					   IKOT_MIG);

		/* make a sonce right for the notification */
		port->ip_sorights++;
		ip_reference(port);

		ipc_port_nsrequest(port, 1, port, &previous);
		/* port unlocked */

		assert(previous == IP_NULL);

		if (OSCompareAndSwapPtr((void *)IP_NULL, (void *)port,
											(void * volatile *)&mig_object->port)) {
			deallocate = FALSE;
		} else {
			ipc_port_dealloc_kernel(port);
			port = mig_object->port;
		}
	}

	if (deallocate)
		mig_object->pVtbl->Release((IMIGObject *)mig_object);

	return (port);
}
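
The loop above is a lock-free lazy-initialization pattern; example #16 below uses the same trick for semaphore ports. The idea: build a fully initialized port off to the side, publish it with a compare-and-swap, and have the loser of any race destroy its port and adopt the winner's. A skeleton with the MIG specifics stripped out (struct lazy_obj, lazy_object_port, and IKOT_EXAMPLE are hypothetical):

/* Hypothetical skeleton of the CAS publication pattern used above. */
struct lazy_obj {
	ipc_port_t	port;	/* IP_NULL until first use */
};

static ipc_port_t
lazy_object_port(struct lazy_obj *obj)
{
	ipc_port_t	port;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		return IP_NULL;	/* unlike the loop above, give up on failure */
	ipc_kobject_set(port, (ipc_kobject_t) obj, IKOT_EXAMPLE);

	if (OSCompareAndSwapPtr((void *)IP_NULL, (void *)port,
				(void * volatile *)&obj->port))
		return port;	/* we won: our port is now published */

	/* Raced: someone else published first; discard ours, take theirs. */
	ipc_port_dealloc_kernel(port);
	return obj->port;
}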
Example #15
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	kern_return_t kr;
	int i;


	kr = ipc_space_create(&space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");


	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;

	if (parent == TASK_NULL) {
		task->itk_exception = IP_NULL;
		task->itk_bootstrap = IP_NULL;
		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		task->itk_exception =
			ipc_port_copy_send(parent->itk_exception);
		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		itk_unlock(parent);
	}
}
Example #16
/*
 *	Routine:	convert_semaphore_to_port
 *	Purpose:
 *		Convert a semaphore reference to a send right to a
 *		semaphore port.
 *
 *		Consumes the semaphore reference.  If the semaphore
 *		port currently has no send rights (or doesn't exist
 *		yet), the reference is donated to the port to represent
 *		all extant send rights collectively.
 */
ipc_port_t
convert_semaphore_to_port (semaphore_t semaphore)
{
	ipc_port_t port, send;

	if (semaphore == SEMAPHORE_NULL)
		return (IP_NULL);

	/* caller is donating a reference */
	port = semaphore->port;

	if (!IP_VALID(port)) {
		port = ipc_port_alloc_kernel();
		assert(IP_VALID(port));
		ipc_kobject_set_atomically(port, (ipc_kobject_t) semaphore, IKOT_SEMAPHORE);

		/* If we lose the race, deallocate and pick up the other guy's port */
		if (!OSCompareAndSwapPtr(IP_NULL, port, &semaphore->port)) {
			ipc_port_dealloc_kernel(port);
			port = semaphore->port;
			assert(ip_kotype(port) == IKOT_SEMAPHORE);
			assert(port->ip_kobject == (ipc_kobject_t)semaphore);
		}
	}

	ip_lock(port);
	assert(ip_active(port));
	send = ipc_port_make_send_locked(port);

	if (1 == port->ip_srights) {
		ipc_port_t old_notify;

		/* transfer our ref to the port, and arm the no-senders notification */
		assert(IP_NULL == port->ip_nsrequest);
		ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
		/* port unlocked */
		assert(IP_NULL == old_notify);
	} else {
		/* piggyback on the existing port reference, so consume ours */
		ip_unlock(port);
		semaphore_dereference(semaphore);
	}
	return (send);
}
Example #17
void
device_service_create()
{
	master_device_port = ipc_port_alloc_kernel();
	if (master_device_port == IP_NULL)
	    panic("can't allocate master device port");

	ds_init();
	dev_lookup_init();
	net_io_init();
	device_pager_init();
	chario_init();
#ifdef FIPC
	fipc_init();
#endif

	(void) kernel_thread(kernel_task, io_done_thread, 0);
	(void) kernel_thread(kernel_task, net_thread, 0);
}
Example #18
void
ipc_thread_init(thread_t thread)
{
	ipc_port_t kport;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	ipc_thread_links_init(thread);
	ipc_kmsg_queue_init(&thread->ith_messages);

	ith_lock_init(thread);
	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);
	thread->ith_exception = IP_NULL;

	thread->ith_mig_reply = MACH_PORT_NULL;
	thread->ith_rpc_reply = IP_NULL;
}
Example #19
void
device_service_create(void)
{
	master_device_port = ipc_port_alloc_kernel();
	if (master_device_port == IP_NULL)
	    panic("can't allocate master device port");

	ipc_kobject_set(master_device_port, 1, IKOT_MASTER_DEVICE);
	ds_init();
	net_io_init();
	device_pager_init();
	datadev_init();

	(void) kernel_thread(kernel_task, io_done_thread, (char *)0);
	(void) kernel_thread(kernel_task, net_thread, (char *)0);
#if	XKMACHKERNEL && !DIPC_XKERN
	/*
	 * Initialize the x-kernel
	 */
	(void) kernel_thread(kernel_task, xkInit, (char *)0);
#endif	/* XKMACHKERNEL && !DIPC_XKERN */
}
Example #20
void
ipc_thread_init(
	thread_t	thread)
{
	ipc_port_t	kport;
	int			i;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		thread->exc_actions[i].port = IP_NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
}
Example #21
void
ipc_thread_init(
	thread_t	thread)
{
	ipc_port_t	kport;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	thread->ith_sself = ipc_port_make_send(kport);
	thread->exc_actions = NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
}
Example #22
File: net.c Project: ctos/bpi
static io_return_t
device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
	     dev_mode_t mode, char *name, device_t *devp)
{
  io_return_t err = D_SUCCESS;
  ipc_port_t notify;
  struct ifnet *ifp;
  struct linux_device *dev;
  struct net_data *nd;

  /* Search for the device.  */
  for (dev = dev_base; dev; dev = dev->next)
    if (dev->base_addr
	&& dev->base_addr != 0xffe0
	&& !strcmp (name, dev->name))
      break;
  if (!dev)
    return D_NO_SUCH_DEVICE;

  /* Allocate and initialize device data if this is the first open.  */
  nd = dev->net_data;
  if (!nd)
    {
      dev->net_data = nd = ((struct net_data *)
			    kalloc (sizeof (struct net_data)));
      if (!nd)
	{
	  err = D_NO_MEMORY;
	  goto out;
	}
      nd->dev = dev;
      nd->device.emul_data = nd;
      nd->device.emul_ops = &linux_net_emulation_ops;
      nd->port = ipc_port_alloc_kernel ();
      if (nd->port == IP_NULL)
	{
	  err = KERN_RESOURCE_SHORTAGE;
	  goto out;
	}
      ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
      notify = ipc_port_make_sonce (nd->port);
      ip_lock (nd->port);
      ipc_port_nsrequest (nd->port, 1, notify, &notify);
      assert (notify == IP_NULL);

      ifp = &nd->ifnet;
      ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
      ifp->if_flags = IFF_UP | IFF_RUNNING;
      ifp->if_mtu = dev->mtu;
      ifp->if_header_size = dev->hard_header_len;
      ifp->if_header_format = dev->type;
      ifp->if_address_size = dev->addr_len;
      ifp->if_address = dev->dev_addr;
      if_init_queues (ifp);

      if (dev->open)
	{
	  linux_intr_pri = SPL6;
	  if ((*dev->open) (dev))
	    err = D_NO_SUCH_DEVICE;
	}

    out:
      if (err)
	{
	  if (nd)
	    {
	      if (nd->port != IP_NULL)
		{
		  ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
		  ipc_port_dealloc_kernel (nd->port);
		}
	      kfree ((vm_offset_t) nd, sizeof (struct net_data));
	      nd = NULL;
	      dev->net_data = NULL;
	    }
	}
      else
	{
	  /* IPv6 heavily relies on multicasting (especially router and
	     neighbor solicits and advertisements), so enable reception of
	     those multicast packets by setting `LINUX_IFF_ALLMULTI'.  */
	  dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
	  skb_queue_head_init (&dev->buffs[0]);

	  if (dev->set_multicast_list)
	    dev->set_multicast_list (dev);
	}
      if (IP_VALID (reply_port))
	ds_device_open_reply (reply_port, reply_port_type,
			      err, dev_to_port (nd));
      return MIG_NO_REPLY;
    }

  *devp = &nd->device;
  return D_SUCCESS;
}
Example #23
File: sync_lock.c Project: CptFrazz/xnu
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */      
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t 	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t 	size;
	int 		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE; 


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {	
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THREAD_NULL;
		ulock->blocked   = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
Example #24
void
svm_create_new_copy(
	xmm_obj_t	mobj)
{
	xmm_obj_t old_copy, new_copy, kobj;
	kern_return_t kr;
	ipc_port_t old_copy_pager;
	ipc_port_t new_copy_pager;
	int k_copy;
	kern_return_t result;

	assert(xmm_obj_lock_held(mobj));

	/*
	 * Prevent others from examining copy until we are done.
	 */
	assert(! MOBJ->copy_in_progress);
	MOBJ->copy_in_progress = TRUE;
	xmm_obj_unlock(mobj);

	new_copy_pager = ipc_port_alloc_kernel();
	if (new_copy_pager == IP_NULL) {
		panic("svm_create_new_copy: ipc_port_alloc_kernel");
	}

	/* we hold a naked receive right for new_copy_pager */
	(void) ipc_port_make_send(new_copy_pager);
	/* now we also hold a naked send right for new_copy_pager */

	xmm_user_create(new_copy_pager, &new_copy);
	xmm_svm_create(new_copy, new_copy_pager, &new_copy);

	kr = xmm_memory_manager_export(new_copy, new_copy_pager);
	if (kr != KERN_SUCCESS) {
		panic("m_ksvm_copy: xmm_memory_manager_export: %x\n", kr);
	}

	/* xmm_user should now have a send right; we will share it */
	ipc_port_release_send(new_copy_pager);
	assert((new_copy_pager)->ip_srights > 0);

	/*
	 * Copy-call objects are temporary and noncachable.
	 */
	NEW_COPY->temporary = TRUE;
	NEW_COPY->may_cache = FALSE;

	/*
	 * Link old copy with new.
	 *
	 * XXX
	 * Grabbing references creates an object collapsing problem.
	 * Need to add vm_object_collapse-like code for when objs
	 * are only referenced by their copies.
	 */
	xmm_obj_lock(mobj);
	if ((old_copy = MOBJ->copy) != XMM_OBJ_NULL) {
		xmm_obj_lock(old_copy);
		xmm_obj_unlock(mobj);
		/*
		 * Remember old_copy pager, which must be valid
		 */
		old_copy_pager = OLD_COPY->memory_object;
		assert(IP_VALID(old_copy_pager));

		/*
		 * New copy steals reference to mobj from old copy.
		 * Old copy creates a reference to new copy.
		 */
		OLD_COPY->shadow = new_copy;
		NEW_COPY->copy = old_copy;
		xmm_obj_reference(new_copy);

		/*
		 * old_copy must not be destroyed before the new copy is stable.
		 */
		OLD_COPY->k_count++;

#if	MACH_PAGEMAP
		/*
		 * We freeze the old copy, now that it is isolated
		 * from the permanent object by the new copy and
		 * therefore will receive no more page pushes.
		 * Note that the old copy might already be frozen
		 * if it was the first copy of this object (see below).
		 */
		if (OLD_COPY->pagemap == VM_EXTERNAL_NULL) {
			svm_create_pagemap_for_copy(old_copy, mobj);
		} else
			xmm_obj_unlock(old_copy);
#else	/* MACH_PAGEMAP */
		xmm_obj_unlock(old_copy);
#endif	/* MACH_PAGEMAP */

		/*
		 * Create immediately attributes array for the new copy.
		 */
		NEW_COPY->bits.range = MOBJ->bits.range;
		NEW_COPY->num_pages = MOBJ->num_pages;
		svm_extend_new_copy(MOBJ->bits.bitmap,
				    MOBJ->bits.level, new_copy, 0);
	} else {
		/*
		 * Remember old_copy pager, which is null
		 */
		old_copy_pager = IP_NULL;

		/*
		 * New copy must create its own reference to mobj.
		 */
		NEW_COPY->copy = XMM_OBJ_NULL;
		xmm_obj_reference(mobj);
		xmm_obj_unlock(mobj);

#if	MACH_PAGEMAP
		/*
		 * We freeze the first copy of a copy-call object
		 * as soon as that copy is created (with no pages
		 * in it), to take advantage of the fact that many
		 * permanent objects never push any pages into
		 * their copies: executable files, for example.
		 */
		assert(NEW_COPY->pagemap == VM_EXTERNAL_NULL);
		NEW_COPY->existence_size = ptoa(MOBJ->num_pages);
		NEW_COPY->pagemap =
			vm_external_create(NEW_COPY->existence_size);
#endif	/* MACH_PAGEMAP */
	}

	/*
	 * Link mobj with its new copy.
	 * Appropriate references have been taken above.
	 */
	NEW_COPY->shadow = mobj;
	xmm_obj_lock(mobj);
	MOBJ->copy = new_copy;

	/*
	 * Set each page needs_copy so that we know to push a copy
	 * of the page if that page gets written.
	 *
	 * One might think that we should remove write protection
	 * on all pages here, since we know for a fact that the
	 * kernel has done so. However, this is not correct.
	 * We need to remember that the pages are possibly
	 * dirty; however, we cannot set them dirty, since
	 * they might not be. If we set a page dirty, then
	 * it must be dirty. If we leave a page written, then
	 * we know that we have to ask the kernel for the page,
	 * since it might be dirty, but we can tolerate it if
	 * the page is in fact not dirty.
	 *
	 * In other words, we have chosen to have write access
	 * be a hint that the page is dirty, but to have the
	 * dirty flag be truth, not a hint.
	 *
	 * This would seem to introduce a new problem, namely
	 * a writer making a write request. However, this isn't
	 * a new problem; it's just one we haven't seen or
	 * thought of before.
	 *
	 * XXX
	 * Pushing zero-fill pages seems like a total waste.
	 *
	 * XXX
	 * We used to set (obsoleted) readonly flag here -- why?
	 *
	 * Note that this object may previously have had a copy
	 * object and thus we may be resetting some set bits here.
	 * The fact that we always set all bits when we attach
	 * a new copy object is what makes it correct for
	 * M_GET_NEEDS_COPY to ignore needs_copy bits when there
	 * is no copy object. That is, we don't have to make the
	 * bits consistent when there is no copy object because
	 * we will make them consistent here when we attach a
	 * new copy object.
	 */
	svm_extend_set_copy(MOBJ->bits.bitmap, MOBJ->bits.level);

	/*
	 * Tell all kernels about new copy object, while
	 * asking them to protect their pages in the copied
	 * object.
	 *
	 * XXX
	 * We should mark "copy in progress" for any other kernels
	 * that come in here with an m_ksvm_copy, so that we can
	 * return the same copy.
	 *
	 * The memory_object_create_copy call is a synchronous rpc.
	 * This ensures that the pages are protected by the time we
	 * exit the loop. We could have an explicit return message
	 * to reduce latency.
	 *
	 * XXX
	 * We could reduce the number of calls in some cases
	 * by calling only those kernels which either have some
	 * write access or which have a shadow chain for this object.
	 */
	svm_klist_first(mobj, &kobj);
	xmm_obj_unlock(mobj);

	/*
	 * Reference the XMM memory_object port in order to avoid calling
	 * xmm_obj_destroy during copy creation,
	 */
	kr = xmm_object_reference(new_copy_pager);
	assert(kr == KERN_SUCCESS);

	while (kobj) {
		xmm_obj_unlock(kobj);
		ipc_port_copy_send(new_copy_pager);
		kr = K_CREATE_COPY(kobj, new_copy_pager, &result);
		assert(kr == KERN_SUCCESS);
		xmm_obj_lock(mobj);
		xmm_obj_lock(kobj);
		svm_klist_next(mobj, &kobj, FALSE);
	}

	/*
	 * Release the XMM memory_object port, destroying the new object
	 * if needed.
	 */
	xmm_obj_lock(new_copy);
	if (NEW_COPY->state == MOBJ_STATE_UNCALLED)
		svm_mobj_initialize(new_copy, TRUE, 0);
	xmm_obj_unlock(new_copy);
	xmm_object_release(new_copy_pager);

	/*
	 * Wake up anyone waiting for this copy to complete.
	 */
	xmm_obj_lock(mobj);
	MOBJ->copy_in_progress = FALSE;
	if (MOBJ->copy_wanted) {
		MOBJ->copy_wanted = FALSE;
		thread_wakeup(svm_copy_event(mobj));
	}

	/*
	 * Release the old_copy extra reference if needed.
	 */
	if (old_copy != XMM_OBJ_NULL) {
		xmm_obj_lock(old_copy);
		if (--OLD_COPY->k_count == 0 &&
		    OLD_COPY->state == MOBJ_STATE_SHOULD_TERMINATE) {
			xmm_obj_unlock(mobj);
			xmm_terminate_pending--;
			xmm_svm_destroy(old_copy); /* consumes lock */
			xmm_obj_lock(mobj);
		} else
			xmm_obj_unlock(old_copy);
	}
}
Example #25
void
default_pager_initialize(void)
{
	kern_return_t		kr;
	__unused static char	here[] = "default_pager_initialize";

	lck_grp_attr_setdefault(&default_pager_lck_grp_attr);
	lck_grp_init(&default_pager_lck_grp, "default_pager", &default_pager_lck_grp_attr);
	lck_attr_setdefault(&default_pager_lck_attr);	

	/*
	 * Vm variables.
	 */
#ifndef MACH_KERNEL
	vm_page_mask = vm_page_size - 1;
	assert((unsigned int) vm_page_size == vm_page_size);
	vm_page_shift = local_log2((unsigned int) vm_page_size);
#endif

	/*
	 * List of all vstructs.
	 */
	vstruct_zone = zinit(sizeof(struct vstruct),
			     10000 * sizeof(struct vstruct),
			     8192, "vstruct zone");
	zone_change(vstruct_zone, Z_CALLERACCT, FALSE);
	zone_change(vstruct_zone, Z_NOENCRYPT, TRUE);

	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();

	/*
	 * Exported DMM port.
	 */
	default_pager_object = ipc_port_alloc_kernel();


	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_object))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x\n", kr));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		unsigned int clsize;
		memory_object_default_t dmm;

		dmm = default_pager_object;
		assert((unsigned int) vm_page_size == vm_page_size);
		clsize = ((unsigned int) vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_priv_self(), &dmm, clsize);
		if ((kr != KERN_SUCCESS) ||
		    (dmm != MEMORY_OBJECT_DEFAULT_NULL))
			Panic("default memory manager");

	}
#endif	/* USER_PAGER */


}
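
Note that default_pager_initialize never checks the allocated port against IP_NULL (convert_mig_object_to_port in example #14 has the same gap before its ip_lock). Under the conventions used everywhere else on this page, and with the Panic wrapper this file already uses, one would expect roughly:

	default_pager_object = ipc_port_alloc_kernel();
	if (default_pager_object == IP_NULL)
		Panic("default_pager_initialize: ipc_port_alloc_kernel failed");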
Example #26
void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;	
	int		   i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_thread_reset");

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive 
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				old_exc_actions[i] = IP_NULL;
			} else {
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;		
			}
		}
	}

	thread_mtx_unlock(thread);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
Example #27
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	if (parent == TASK_NULL) {
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */
		
		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		    task->exc_actions[i].port =
		  		ipc_port_copy_send(parent->exc_actions[i].port);
		    task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
		    task->exc_actions[i].behavior = 
				parent->exc_actions[i].behavior;
		    task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}