void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;
    ipc_port_t rport;

    itk_lock(task);

    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);

    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

    rport = task->itk_resume;
    if (rport != IP_NULL) {
        /*
         * From this point onwards this task is no longer accepting
         * resumptions.
         *
         * There are still outstanding suspensions on this task,
         * even as it is being torn down. Disconnect the task
         * from the rport, thereby "orphaning" the rport. The rport
         * itself will go away only when the last suspension holder
         * destroys his SO right to it -- when he either
         * exits, or tries to actually use that last SO right to
         * resume this (now non-existent) task.
         */
        ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
    }

    itk_unlock(task);
}
void
ipc_host_init(void)
{
    ipc_port_t port;

    /*
     * Allocate and set up the two host ports.
     */
    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
    realhost.host_self = port;

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
    realhost.host_priv_self = port;

    /*
     * Set up ipc for default processor set.
     */
    ipc_pset_init(&default_pset);
    ipc_pset_enable(&default_pset);

    /*
     * And for master processor
     */
    ipc_processor_init(master_processor);
}
/*
 *  ipc_pset_enable:
 *
 *  Enable ipc access to a processor set.
 */
void
ipc_pset_enable(
    processor_set_t pset)
{
    ipc_kobject_set(pset->pset_self, (ipc_kobject_t) pset, IKOT_PSET);
    ipc_kobject_set(pset->pset_name_self, (ipc_kobject_t) pset, IKOT_PSET_NAME);
}
void
ipc_task_reset(
    task_t      task)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    int i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_task_reset");

    itk_lock(task);

    old_kport = task->itk_self;

    if (old_kport == IP_NULL) {
        /* the task is already terminated (can this happen?) */
        itk_unlock(task);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    task->itk_self = new_kport;
    old_sself = task->itk_sself;
    task->itk_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (!task->exc_actions[i].privileged) {
            old_exc_actions[i] = task->exc_actions[i].port;
            task->exc_actions[i].port = IP_NULL;
        } else {
            old_exc_actions[i] = IP_NULL;
        }
    }

    if (IP_VALID(task->itk_debug_control)) {
        ipc_port_release_send(task->itk_debug_control);
    }
    task->itk_debug_control = IP_NULL;

    itk_unlock(task);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        if (IP_VALID(old_exc_actions[i])) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
/*
 *  ipc_pset_disable:
 *
 *  Disable ipc access to a processor set by clearing the port objects.
 *  Caller must hold pset lock and a reference to the pset.  Ok to
 *  just decrement pset reference count as a result.
 */
void
ipc_pset_disable(
    processor_set_t pset)
{
    ipc_kobject_set(pset->pset_self, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(pset->pset_name_self, IKO_NULL, IKOT_NONE);
    pset->ref_count -= 2;
}
/*
 * Routine:  ipc_clock_enable
 * Purpose:
 *      Enable ipc access to a clock.
 */
void
ipc_clock_enable(
    clock_t     clock)
{
    ipc_kobject_set(clock->cl_service, (ipc_kobject_t) clock, IKOT_CLOCK);
    ipc_kobject_set(clock->cl_control, (ipc_kobject_t) clock, IKOT_CLOCK_CTRL);
}
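/*
 * A minimal user-space sketch of what the two ports wired up above are
 * for: host_get_clock_service() hands out a send right to cl_service,
 * which clock_get_time() then queries. This uses the standard
 * mach/clock.h interface; the helper name is ours.
 */
#include <mach/mach.h>
#include <mach/clock.h>

kern_return_t
read_system_clock(mach_timespec_t *ts)
{
    clock_serv_t clk;
    kern_return_t kr;

    kr = host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = clock_get_time(clk, ts);               /* goes via cl_service */
    mach_port_deallocate(mach_task_self(), clk);
    return kr;
}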
void
ipc_host_init(void)
{
    ipc_port_t port;
    int i;

    lck_mtx_init(&realhost.lock, &host_notify_lock_grp, &host_notify_lock_attr);

    /*
     * Allocate and set up the three host ports.
     */
    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY);
    kernel_set_special_port(&realhost, HOST_SECURITY_PORT,
        ipc_port_make_send(port));

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
    kernel_set_special_port(&realhost, HOST_PORT,
        ipc_port_make_send(port));

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
    kernel_set_special_port(&realhost, HOST_PRIV_PORT,
        ipc_port_make_send(port));

    /* the rest of the special ports will be set up later */

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        realhost.exc_actions[i].port = IP_NULL;
    }

    /*
     * Set up ipc for default processor set.
     */
    ipc_pset_init(&pset0);
    ipc_pset_enable(&pset0);

    /*
     * And for master processor
     */
    ipc_processor_init(master_processor);
    ipc_processor_enable(master_processor);
}
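/*
 * For illustration: a hedged user-space sketch of how the IKOT_HOST port
 * created above is typically reached. mach_host_self() returns a send
 * right to the host name port, usable for host_info() queries; the
 * helper name is ours.
 */
#include <mach/mach.h>

kern_return_t
query_host_basic_info(host_basic_info_data_t *info)
{
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    return host_info(mach_host_self(), HOST_BASIC_INFO,
        (host_info_t) info, &count);
}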
/*
 *  ipc_pset_enable:
 *
 *  Enable ipc access to a processor set.
 */
void
ipc_pset_enable(
    processor_set_t pset)
{
    pset_lock(pset);
    if (pset->active) {
        ipc_kobject_set(pset->pset_self, (ipc_kobject_t) pset, IKOT_PSET);
        ipc_kobject_set(pset->pset_name_self, (ipc_kobject_t) pset, IKOT_PSET_NAME);
        pset->ref_count += 2;
    }
    pset_unlock(pset);
}
void
ipc_task_disable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);

    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);

    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);

    itk_unlock(task);
}
/*
 * fileport_alloc
 *
 * Description: Obtain a send right for the given fileglob, which must be
 *              referenced.
 *
 * Parameters:  fg      A fileglob.
 *
 * Returns:     Port of type IKOT_FILEPORT with fileglob set as its kobject.
 *              Port is returned with a send right.
 */
ipc_port_t
fileport_alloc(struct fileglob *fg)
{
    ipc_port_t fileport;
    ipc_port_t sendport;
    ipc_port_t notifyport;

    fileport = ipc_port_alloc_kernel();
    if (fileport == IP_NULL) {
        goto out;
    }

    ipc_kobject_set(fileport, (ipc_kobject_t)fg, IKOT_FILEPORT);
    notifyport = ipc_port_make_sonce(fileport);
    ip_lock(fileport);  /* unlocked by ipc_port_nsrequest */
    ipc_port_nsrequest(fileport, 1, notifyport, &notifyport);

    sendport = ipc_port_make_send(fileport);
    if (!IP_VALID(sendport)) {
        panic("Couldn't allocate send right for fileport!\n");
    }

out:
    return fileport;
}
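/*
 * User-space counterpart, sketched under the assumption that the
 * fileport_makeport()/fileport_makefd() syscalls (which ultimately drive
 * fileport_alloc() above) are available. Darwin does not ship a public
 * header for them, so the prototypes below are declared by hand and
 * should be treated as assumptions.
 */
#include <mach/mach.h>

extern int fileport_makeport(int fd, mach_port_t *port); /* assumed prototype */
extern int fileport_makefd(mach_port_t port);            /* assumed prototype */

/* Wrap an open fd in a fileport send right that can be sent over IPC. */
int
fd_to_fileport(int fd, mach_port_t *portp)
{
    return fileport_makeport(fd, portp);    /* 0 on success, -1 + errno */
}

/* Receiving side: recover a fresh fd referencing the same open file. */
int
fileport_to_fd(mach_port_t port)
{
    return fileport_makefd(port);           /* new fd, or -1 + errno */
}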
/* Utility routine to create a new ledger */
static ledger_t
ledger_allocate(
    ledger_item_t   limit,
    ledger_t        ledger_ledger,
    ledger_t        ledger_parent)
{
    ledger_t ledger;

    ledger = (ledger_t)kalloc(sizeof(ledger_data_t));
    if (ledger == LEDGER_NULL)
        return(LEDGER_NULL);

    ledger->ledger_self = ipc_port_alloc_kernel();
    if (ledger->ledger_self == IP_NULL) {
        /* don't leak the ledger if the port allocation failed */
        kfree((vm_offset_t)ledger, sizeof(ledger_data_t));
        return(LEDGER_NULL);
    }

    ledger_lock_init(ledger);
    ledger->ledger_limit = limit;
    ledger->ledger_balance = 0;
    ledger->ledger_service_port = MACH_PORT_NULL;
    ledger->ledger_ledger = ledger_ledger;
    ledger->ledger_parent = ledger_parent;
    ipc_kobject_set(ledger->ledger_self, (ipc_kobject_t)ledger, IKOT_LEDGER);

    return(ledger);
}
/*
 * Routine: convert_task_suspension_token_to_port
 * Purpose:
 *      Convert from a task suspension token to a port.
 *      Consumes a task suspension token ref; produces a naked send-once right
 *      which may be invalid.
 * Conditions:
 *      Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
    task_suspension_token_t task)
{
    ipc_port_t port;

    task_lock(task);
    if (task->active) {
        if (task->itk_resume == IP_NULL) {
            task->itk_resume = ipc_port_alloc_kernel();
            if (!IP_VALID(task->itk_resume)) {
                panic("failed to create resume port");
            }

            ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
        }

        /*
         * Create a send-once right for each instance of a direct user-called
         * task_suspend2 call. Each time one of these send-once rights is abandoned,
         * the notification handler will resume the target task.
         */
        port = ipc_port_make_sonce(task->itk_resume);
        assert(IP_VALID(port));
    } else {
        port = IP_NULL;
    }

    task_unlock(task);
    task_suspension_token_deallocate(task);

    return port;
}
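/*
 * Caller-side sketch of the lifecycle the comment above describes,
 * assuming the task_suspend2()/task_resume2() interface: each
 * task_suspend2() call yields a token backed by a send-once right on
 * itk_resume, and consuming the token (or dying while holding it)
 * resumes the task. The helper name is ours.
 */
#include <mach/mach.h>
#include <mach/task.h>

kern_return_t
with_task_suspended(task_t target)
{
    task_suspension_token_t token = MACH_PORT_NULL;
    kern_return_t kr;

    kr = task_suspend2(target, &token);
    if (kr != KERN_SUCCESS)
        return kr;

    /* ... inspect the target while it cannot run ... */

    /* Consumes the token's send-once right; abandoning it instead
     * (e.g. by exiting) would resume the task via the no-senders path. */
    return task_resume2(token);
}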
/*
 * Only called internally.  Allocate an xmm_object port.
 * The xmm_object takes the mobj reference.
 */
ipc_port_t
xmm_object_allocate(
    xmm_obj_t   mobj)
{
    ipc_port_t xmm_object;

    /*
     * Create an xmm object port.
     */
    xmm_object = ipc_port_alloc_kernel();
    if (xmm_object == IP_NULL) {
        return IP_NULL;
    }
    IP_CLEAR_NMS(xmm_object);

    /*
     * Associate the xmm obj with the xmm object port.
     * We keep the xmm obj reference returned by xmm_svm_create.
     */
    ipc_kobject_set(xmm_object, (ipc_kobject_t) mobj, IKOT_XMM_OBJECT);

    /*
     * Return the port.
     */
    return xmm_object;
}
EXTERN ipc_port_t
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
{
    ipc_port_t  notify;
    ipc_port_t  port;

    do {
        /* Allocate port, keeping a reference for it. */
        port = ipc_port_alloc_kernel();
        if( port == IP_NULL)
            continue;

        /* set kobject & type */
//      iokit_add_reference( obj );
        ipc_kobject_set( port, (ipc_kobject_t) obj, type);

        /* Request no-senders notifications on the port. */
        ip_lock( port);
        notify = ipc_port_make_sonce_locked( port);
        ipc_port_nsrequest( port, 1, notify, &notify);
        /* port unlocked */
        assert( notify == IP_NULL);
        gIOKitPortCount++;

    } while( FALSE);

    return( port );
}
void
ipc_task_enable(
    task_t      task)
{
    ipc_port_t kport;
    ipc_port_t nport;

    itk_lock(task);

    kport = task->itk_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);

    nport = task->itk_nself;
    if (nport != IP_NULL)
        ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);

    itk_unlock(task);
}
/*
 * Routine:  semaphore_create
 *
 * Creates a semaphore.
 * The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
    task_t       task,
    semaphore_t  *new_semaphore,
    int          policy,
    int          value)
{
    semaphore_t s = SEMAPHORE_NULL;

    if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) {
        *new_semaphore = SEMAPHORE_NULL;
        return KERN_INVALID_ARGUMENT;
    }

    s = (semaphore_t) zalloc(semaphore_zone);

    if (s == SEMAPHORE_NULL) {
        *new_semaphore = SEMAPHORE_NULL;
        return KERN_RESOURCE_SHORTAGE;
    }

    wait_queue_init(&s->wait_queue, policy); /* also inits lock */
    s->count = value;
    s->ref_count = 1;

    /*
     * Create and initialize the semaphore port
     */
    s->port = ipc_port_alloc_kernel();
    if (s->port == IP_NULL) {
        /* This will deallocate the semaphore */
        semaphore_dereference(s);
        *new_semaphore = SEMAPHORE_NULL;
        return KERN_RESOURCE_SHORTAGE;
    }

    ipc_kobject_set(s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);

    /*
     * Associate the new semaphore with the task by adding
     * the new semaphore to the task's semaphore list.
     *
     * Associate the task with the new semaphore by having the
     * semaphore's task pointer point to the owning task's structure.
     */
    task_lock(task);
    enqueue_head(&task->semaphore_list, (queue_entry_t) s);
    task->semaphores_owned++;
    s->owner = task;
    s->active = TRUE;
    task_unlock(task);

    *new_semaphore = s;

    return KERN_SUCCESS;
}
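/*
 * A short user-level demo of the API this routine backs (the standard
 * mach/semaphore.h interface; the helper name is ours). semaphore_create()
 * below is the MIG call that lands in the kernel routine above.
 */
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>

int
semaphore_smoke_test(void)
{
    semaphore_t sem;

    if (semaphore_create(mach_task_self(), &sem,
            SYNC_POLICY_FIFO, 0) != KERN_SUCCESS)
        return -1;

    semaphore_signal(sem);  /* count: 0 -> 1 */
    semaphore_wait(sem);    /* count: 1 -> 0, returns immediately */

    return semaphore_destroy(mach_task_self(), sem) == KERN_SUCCESS ? 0 : -1;
}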
void
ipc_thread_disable(
    thread_t    thread)
{
    ipc_port_t kport = thread->ith_self;

    if (kport != IP_NULL)
        ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}
EXTERN kern_return_t
iokit_switch_object_port(
    ipc_port_t          port,
    io_object_t         obj,
    ipc_kobject_type_t  type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}
/*
 *  ipc_processor_enable:
 *
 *  Enable ipc control of processor by setting port object.
 */
void
ipc_processor_enable(
    processor_t processor)
{
    ipc_port_t myport;

    myport = processor->processor_self;
    ipc_kobject_set(myport, (ipc_kobject_t) processor, IKOT_PROCESSOR);
}
void
ipc_thread_enable(thread_t thread)
{
    ipc_port_t kport;

    ith_lock(thread);
    kport = thread->ith_self;
    if (kport != IP_NULL)
        ipc_kobject_set(kport, (ipc_kobject_t) thread, IKOT_THREAD);
    ith_unlock(thread);
}
/*
 *  ipc_processor_disable:
 *
 *  Disable ipc control of processor by clearing port object.
 */
void
ipc_processor_disable(
    processor_t processor)
{
    ipc_port_t myport;

    myport = processor->processor_self;
    if (myport == IP_NULL)
        return;
    ipc_kobject_set(myport, IKO_NULL, IKOT_NONE);
}
void
ipc_processor_init(
    processor_t processor)
{
    ipc_port_t port;

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_processor_init");

    processor->processor_self = port;
    ipc_kobject_set(port, (ipc_kobject_t) processor, IKOT_PROCESSOR);
}
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, IKO_NULL, IKOT_NONE);

//  iokit_remove_reference( obj );
    iokit_unlock_port(port);
    ipc_port_dealloc_kernel( port);

    gIOKitPortCount--;

    return( KERN_SUCCESS);
}
void
xmm_object_nuke(
    ipc_port_t  xmm_object)
{
    xmm_obj_t mobj;

    /*
     * Get and disassociate mobj from xmm object port.
     */
    assert(!IP_IS_REMOTE(xmm_object));
    assert(ip_kotype(xmm_object) == IKOT_XMM_OBJECT);
    mobj = (xmm_obj_t) xmm_object->ip_kobject;
    ipc_kobject_set(xmm_object, IKO_NULL, IKOT_NONE);

    /*
     * Destroy xmm object port and mobj.
     */
    xmm_object_destroy(xmm_object, mobj);
}
void
device_service_create(void)
{
    master_device_port = ipc_port_alloc_kernel();
    if (master_device_port == IP_NULL)
        panic("can't allocate master device port");

    /* no real kobject behind the master device port; a dummy value marks it in use */
    ipc_kobject_set(master_device_port, (ipc_kobject_t) 1, IKOT_MASTER_DEVICE);

    ds_init();
    net_io_init();
    device_pager_init();
    datadev_init();

    (void) kernel_thread(kernel_task, io_done_thread, (char *)0);
    (void) kernel_thread(kernel_task, net_thread, (char *)0);

#if XKMACHKERNEL && !DIPC_XKERN
    /*
     * Initialize the x-kernel
     */
    (void) kernel_thread(kernel_task, xkInit, (char *)0);
#endif  /* XKMACHKERNEL && !DIPC_XKERN */
}
void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t kport;
    int i;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
        thread->exc_actions[i].port = IP_NULL;

    ipc_kobject_set(kport, (ipc_kobject_t) thread, IKOT_THREAD);

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_init(
    thread_t    thread)
{
    ipc_port_t kport;

    kport = ipc_port_alloc_kernel();
    if (kport == IP_NULL)
        panic("ipc_thread_init");

    thread->ith_self = kport;
    thread->ith_sself = ipc_port_make_send(kport);
    thread->exc_actions = NULL;

    ipc_kobject_set(kport, (ipc_kobject_t) thread, IKOT_THREAD);

#if IMPORTANCE_INHERITANCE
    thread->ith_assertions = 0;
#endif

    ipc_kmsg_queue_init(&thread->ith_messages);

    thread->ith_rpc_reply = IP_NULL;
}
void
ipc_thread_reset(
    thread_t    thread)
{
    ipc_port_t old_kport, new_kport;
    ipc_port_t old_sself;
    ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
    boolean_t  has_old_exc_actions = FALSE;
    int        i;

    new_kport = ipc_port_alloc_kernel();
    if (new_kport == IP_NULL)
        panic("ipc_thread_reset");

    thread_mtx_lock(thread);

    old_kport = thread->ith_self;

    if (old_kport == IP_NULL) {
        /* the thread is already terminated (can this happen?) */
        thread_mtx_unlock(thread);
        ipc_port_dealloc_kernel(new_kport);
        return;
    }

    thread->ith_self = new_kport;
    old_sself = thread->ith_sself;
    thread->ith_sself = ipc_port_make_send(new_kport);
    ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
    ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

    /*
     * Only ports that were set by root-owned processes
     * (privileged ports) should survive
     */
    if (thread->exc_actions != NULL) {
        has_old_exc_actions = TRUE;
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            if (thread->exc_actions[i].privileged) {
                old_exc_actions[i] = IP_NULL;
            } else {
                old_exc_actions[i] = thread->exc_actions[i].port;
                thread->exc_actions[i].port = IP_NULL;
            }
        }
    }

    thread_mtx_unlock(thread);

    /* release the naked send rights */

    if (IP_VALID(old_sself))
        ipc_port_release_send(old_sself);

    if (has_old_exc_actions) {
        for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
            ipc_port_release_send(old_exc_actions[i]);
        }
    }

    /* destroy the kernel port */
    ipc_port_dealloc_kernel(old_kport);
}
static io_return_t
device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
             dev_mode_t mode, char *name, device_t *devp)
{
  io_return_t err = D_SUCCESS;
  ipc_port_t notify;
  struct ifnet *ifp;
  struct linux_device *dev;
  struct net_data *nd;

  /* Search for the device.  */
  for (dev = dev_base; dev; dev = dev->next)
    if (dev->base_addr
        && dev->base_addr != 0xffe0
        && !strcmp (name, dev->name))
      break;
  if (!dev)
    return D_NO_SUCH_DEVICE;

  /* Allocate and initialize device data if this is the first open.  */
  nd = dev->net_data;
  if (!nd)
    {
      dev->net_data = nd = ((struct net_data *)
                            kalloc (sizeof (struct net_data)));
      if (!nd)
        {
          err = D_NO_MEMORY;
          goto out;
        }
      nd->dev = dev;
      nd->device.emul_data = nd;
      nd->device.emul_ops = &linux_net_emulation_ops;
      nd->port = ipc_port_alloc_kernel ();
      if (nd->port == IP_NULL)
        {
          err = KERN_RESOURCE_SHORTAGE;
          goto out;
        }
      ipc_kobject_set (nd->port, (ipc_kobject_t) &nd->device, IKOT_DEVICE);
      notify = ipc_port_make_sonce (nd->port);
      ip_lock (nd->port);
      ipc_port_nsrequest (nd->port, 1, notify, &notify);
      assert (notify == IP_NULL);

      ifp = &nd->ifnet;
      ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
      ifp->if_flags = IFF_UP | IFF_RUNNING;
      ifp->if_mtu = dev->mtu;
      ifp->if_header_size = dev->hard_header_len;
      ifp->if_header_format = dev->type;
      ifp->if_address_size = dev->addr_len;
      ifp->if_address = dev->dev_addr;
      if_init_queues (ifp);

      if (dev->open)
        {
          linux_intr_pri = SPL6;
          if ((*dev->open) (dev))
            err = D_NO_SUCH_DEVICE;
        }

    out:
      if (err)
        {
          if (nd)
            {
              if (nd->port != IP_NULL)
                {
                  ipc_kobject_set (nd->port, IKO_NULL, IKOT_NONE);
                  ipc_port_dealloc_kernel (nd->port);
                }
              kfree ((vm_offset_t) nd, sizeof (struct net_data));
              nd = NULL;
              dev->net_data = NULL;
            }
        }
      else
        {
          /* IPv6 heavily relies on multicasting (especially router and
             neighbor solicits and advertisements), so enable reception of
             those multicast packets by setting `LINUX_IFF_ALLMULTI'.  */
          dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
          skb_queue_head_init (&dev->buffs[0]);

          if (dev->set_multicast_list)
            dev->set_multicast_list (dev);
        }
      if (IP_VALID (reply_port))
        ds_device_open_reply (reply_port, reply_port_type, err,
                              dev_to_port (nd));
      return MIG_NO_REPLY;
    }

  *devp = &nd->device;
  return D_SUCCESS;
}
/*
 * ROUTINE:  lock_set_create  [exported]
 *
 * Creates a lock set.
 * The port representing the lock set is returned as a parameter.
 */
kern_return_t
lock_set_create (
    task_t      task,
    lock_set_t  *new_lock_set,
    int         n_ulocks,
    int         policy)
{
    lock_set_t  lock_set = LOCK_SET_NULL;
    ulock_t     ulock;
    vm_size_t   size;
    int         x;

    *new_lock_set = LOCK_SET_NULL;

    if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
        return KERN_INVALID_ARGUMENT;

    if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
        return KERN_RESOURCE_SHORTAGE;

    size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks - 1));
    lock_set = (lock_set_t) kalloc(size);

    if (lock_set == LOCK_SET_NULL)
        return KERN_RESOURCE_SHORTAGE;

    lock_set_lock_init(lock_set);
    lock_set->n_ulocks = n_ulocks;
    lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

    /*
     * Create and initialize the lock set port
     */
    lock_set->port = ipc_port_alloc_kernel();
    if (lock_set->port == IP_NULL) {
        kfree(lock_set, size);
        return KERN_RESOURCE_SHORTAGE;
    }

    ipc_kobject_set(lock_set->port, (ipc_kobject_t) lock_set, IKOT_LOCK_SET);

    /*
     * Initialize each ulock in the lock set
     */
    for (x = 0; x < n_ulocks; x++) {
        ulock = (ulock_t) &lock_set->ulock_list[x];
        ulock_lock_init(ulock);
        ulock->lock_set = lock_set;
        ulock->holder = THREAD_NULL;
        ulock->blocked = FALSE;
        ulock->unstable = FALSE;
        ulock->ho_wait = FALSE;
        ulock->accept_wait = FALSE;
        wait_queue_init(&ulock->wait_queue, policy);
    }

    lock_set_ownership_set(lock_set, task);

    lock_set->active = TRUE;
    *new_lock_set = lock_set;

    return KERN_SUCCESS;
}
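/*
 * A hedged user-level sketch against the historical lock_set interface
 * generated from lock_set.defs (present in older macOS releases, since
 * removed); the helper name is ours. lock_set_create() below is the MIG
 * call that reaches the kernel routine above.
 */
#include <mach/mach.h>

kern_return_t
lock_set_smoke_test(void)
{
    lock_set_t ls;
    kern_return_t kr;

    kr = lock_set_create(mach_task_self(), &ls, 1, SYNC_POLICY_FIFO);
    if (kr != KERN_SUCCESS)
        return kr;

    kr = lock_acquire(ls, 0);       /* ulock ids are zero-based */
    if (kr == KERN_SUCCESS)
        kr = lock_release(ls, 0);

    (void) lock_set_destroy(mach_task_self(), ls);
    return kr;
}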