/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroys a port set: marks it inactive, evicts all member
 *		message queues, and wakes any threads blocked receiving on
 *		the set's queue.
 *	Conditions:
 *		The port set is locked and active on entry; it is unlocked
 *		(and possibly freed) on exit via ips_check_unlock.
 *		Consumes the reference the caller holds.
 *
 *	NOTE(review): a second definition of ipc_pset_destroy appears
 *	later in this file; presumably the two variants are selected by
 *	preprocessor conditionals not visible in this chunk — confirm.
 */
void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	spl_t	s;

	assert(ips_active(pset));

	/* Mark the set dead before tearing down membership. */
	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * remove all the member message queues
	 */
	ipc_mqueue_remove_all(&pset->ips_messages);

	/*
	 * Wake up any receivers blocked on the set's queue so they
	 * notice the set has died.  Interrupts must be blocked while
	 * the message queue lock is held.
	 */
	s = splsched();
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);
	splx(s);

	/* XXXX Perhaps ought to verify ips_thread_pool is empty */

	ips_release(pset);	/* consume the ref our caller gave us */
	ips_check_unlock(pset);
}
/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroys a port set: marks it inactive, notifies blocked
 *		receivers with MACH_RCV_PORT_DIED, and performs the common
 *		IPC-target teardown (which removes member queues).
 *	Conditions:
 *		The port set is locked and active on entry; it is unlocked
 *		(and possibly freed) on exit via ips_check_unlock.
 *		Consumes the reference the caller holds.
 *
 *	NOTE(review): this is the second definition of ipc_pset_destroy
 *	visible in this chunk; presumably the two variants are selected
 *	by preprocessor conditionals outside this view — confirm.
 */
void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	assert(ips_active(pset));

	/* Mark the set dead before waking waiters. */
	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/* Wake blocked receivers; they see MACH_RCV_PORT_DIED. */
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages, MACH_RCV_PORT_DIED);
	imq_unlock(&pset->ips_messages);

	/* Common destruction for the IPC target. */
	ipc_target_terminate(&pset->ips_target);

	ips_release(pset);	/* consume the ref our caller gave us */
	ips_check_unlock(pset);
}
/*
 *	Routine:	ipc_pset_move
 *	Purpose:
 *		Moves a port between port sets:
 *		  oset == nset:	no-op.
 *		  oset == NULL:	just add the port to nset.
 *		  nset == NULL:	just remove the port from oset.
 *		  otherwise:	atomically move the port from oset to nset.
 *	Conditions:
 *		The space is read-locked and active on entry; it is
 *		unlocked on exit.  The port and nset (if non-null) must be
 *		active.
 *	Returns:
 *		KERN_SUCCESS		Moved the port.
 *		KERN_NOT_IN_SET		nset is null and the port wasn't
 *					in a (live) set.
 */
kern_return_t
ipc_pset_move(
	ipc_space_t	space,
	ipc_port_t	port,
	ipc_pset_t	nset)
{
	ipc_pset_t oset;

	/*
	 *	While we've got the space locked, it holds refs for
	 *	the port and nset (because of the entries).  Also,
	 *	they must be alive.  While we've got port locked, it
	 *	holds a ref for oset, which might not be alive.
	 */

	ip_lock(port);
	assert(ip_active(port));

	oset = port->ip_pset;

	if (oset == nset) {
		/* the port is already in the new set:  a noop */

		is_read_unlock(space);
	} else if (oset == IPS_NULL) {
		/* just add port to the new set */

		ips_lock(nset);
		assert(ips_active(nset));
		is_read_unlock(space);

		ipc_pset_add(nset, port);
		ips_unlock(nset);
	} else if (nset == IPS_NULL) {
		/* just remove port from the old set */

		is_read_unlock(space);
		ips_lock(oset);

		ipc_pset_remove(oset, port);

		/*
		 *	If the old set died, the removal doesn't count:
		 *	clear oset so we report KERN_NOT_IN_SET below.
		 */
		if (ips_active(oset))
			ips_unlock(oset);
		else {
			ips_check_unlock(oset);
			oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
		}
	} else {
		/* atomically move port from oset to nset */

		/*
		 *	Take the two pset locks in address order to
		 *	avoid deadlock against a concurrent mover
		 *	locking the same pair in the opposite roles.
		 */
		if (oset < nset) {
			ips_lock(oset);
			ips_lock(nset);
		} else {
			ips_lock(nset);
			ips_lock(oset);
		}

		is_read_unlock(space);
		assert(ips_active(nset));

		ipc_pset_remove(oset, port);
		ipc_pset_add(nset, port);

		ips_unlock(nset);
		ips_check_unlock(oset);	/* KERN_NOT_IN_SET not a possibility */
	}

	ip_unlock(port);

	return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
		KERN_NOT_IN_SET : KERN_SUCCESS);
}
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Converts a name in a space to a locked message queue
 *		suitable for receiving, together with a reference to the
 *		underlying port or port set.  A receive right that is a
 *		member of a dead set is quietly pulled out of the set.
 *	Conditions:
 *		Nothing locked on entry.  On success, returns with the
 *		message queue locked and a reference held on *objectp.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead, or the name
 *					doesn't denote a receive right
 *					or port set.
 *		MACH_RCV_IN_SET		The receive right is a member of
 *					a live port set.
 */
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t	space,
	mach_port_t	name,
	ipc_mqueue_t	*mqueuep,
	ipc_object_t	*objectp)
{
	ipc_entry_t entry;
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;
		ipc_pset_t pset;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);

		pset = port->ip_pset;
		if (pset != IPS_NULL) {
			ips_lock(pset);
			if (ips_active(pset)) {
				/* can't receive directly from a member port */
				ips_unlock(pset);
				ip_unlock(port);
				return MACH_RCV_IN_SET;
			}

			/* the set is dead; quietly remove the port from it */
			ipc_pset_remove(pset, port);
			ips_check_unlock(pset);
			assert(port->ip_pset == IPS_NULL);
		}

		mqueue = &port->ip_messages;
	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 *	At this point, the object is locked and active,
	 *	the space is unlocked, and mqueue is initialized.
	 *
	 *	Take a ref, then swap the object lock for the
	 *	mqueue lock before handing both back to the caller.
	 */

	io_reference(object);
	imq_lock(mqueue);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}