/*
 * filt_machportdetach - detach a knote from the port set it watches.
 *
 * Unlinks the portset's wait queue from the knote/kqueue, clears the
 * knote's back-pointer, and drops the knote's reference on the portset.
 * Any wait-queue link structure recovered by the unlink is freed only
 * after all locks are dropped.
 */
static void
filt_machportdetach(
	struct knote *kn)
{
	ipc_pset_t pset = kn->kn_ptr.p_pset;
	wait_queue_link_t wql = WAIT_QUEUE_LINK_NULL;

	/*
	 * Unlink the portset wait queue from knote/kqueue,
	 * and release our reference on the portset.
	 */
	ips_lock(pset);
	(void)knote_unlink_wait_queue(kn, &pset->ips_messages.imq_wait_queue, &wql);
	kn->kn_ptr.p_pset = IPS_NULL;	/* knote no longer references the set */
	ips_unlock(pset);
	ips_release(pset);

	/* free the recovered link structure outside the pset lock */
	if (wql != WAIT_QUEUE_LINK_NULL)
		wait_queue_link_free(wql);
}
/*
 * ipc_port_clear_receiver - strip receive-side state from a port.
 *
 * Pulls the port out of its containing port set (if it is a member),
 * notifies anyone watching the port that the receive right died, and
 * resets the make-send count and sequence number.
 *
 * The port must be locked and active on entry.
 */
void
ipc_port_clear_receiver(
	ipc_port_t port)
{
	ipc_pset_t set;

	assert(ip_active(port));

	/* detach from the enclosing port set, if any */
	set = port->ip_pset;
	if (set != IPS_NULL) {
		ips_lock(set);
		ipc_pset_remove(set, port);
		ips_unlock(set);
		ips_release(set);
	}

	/* wake watchers with a port-died indication, then reset counters */
	ipc_port_changed(port, MACH_RCV_PORT_DIED);
	ipc_port_set_mscount(port, 0);
	port->ip_seqno = 0;
}
/*
 * ipc_pset_destroy - destroy a port set.
 *
 * Marks the set inactive, detaches every member port's message queue,
 * wakes any threads blocked receiving on the set so they discover the
 * change, and consumes the caller's reference.  The wait-queue link
 * structures recovered from the member queues are freed last, after
 * all locks are dropped.
 *
 * The pset must be locked and active on entry; the lock is released
 * (and the set possibly freed) before return.
 */
void
ipc_pset_destroy(
	ipc_pset_t pset)
{
	spl_t s;
	queue_head_t link_data;
	queue_t links = &link_data;
	wait_queue_link_t wql;

	queue_init(links);

	assert(ips_active(pset));
	/* clear the active bit first so late arrivals see a dead set */
	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * remove all the member message queues
	 */
	ipc_mqueue_remove_all(&pset->ips_messages, links);

	/*
	 * Set all waiters on the portset running to
	 * discover the change.
	 */
	s = splsched();
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);
	splx(s);

	ips_unlock(pset);
	ips_release(pset); /* consume the ref our caller gave us */

	/* free the unlinked wait-queue link structures, outside all locks */
	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		wait_queue_link_free(wql);
	}
}
/*
 * ipc_port_set_seqno - set the sequence number on a port.
 *
 * If the port is a member of a port set that has died, the stale
 * membership is torn down here before the assignment.  The port is
 * expected to be locked by the caller.
 *
 * BUGFIX: the original code returned from the active-pset branch while
 * still holding the pset lock (ips_lock with no matching ips_unlock);
 * the lock is now released on every path.
 */
void
ipc_port_set_seqno(
	ipc_port_t port,
	mach_port_seqno_t seqno)
{
	if (port->ip_pset != IPS_NULL) {
		ipc_pset_t pset = port->ip_pset;

		ips_lock(pset);
		if (!ips_active(pset)) {
			/* the set died out from under us: drop membership */
			ipc_pset_remove(pset, port);
			ips_unlock(pset);
			ips_release(pset);
			/* fall through: port is no longer in a set */
		} else {
			/* assign while still serialized with the set */
			port->ip_seqno = seqno;
			ips_unlock(pset);	/* BUGFIX: lock was leaked here */
			return;
		}
	}
	port->ip_seqno = seqno;
}
/*
 * filt_machport - kqueue filter routine for Mach port sets.
 *
 * Revalidates the port-set name in the caller's space, then attempts a
 * non-blocking receive on the set.  Depending on kn_sfflags, either the
 * message itself is received into the user buffer described by
 * kn_ext[0]/kn_ext[1], or only the identity/size of a pending message
 * is reported (via a forced MACH_RCV_TOO_LARGE probe).
 *
 * Returns nonzero if the knote should fire; on name change/death the
 * knote is marked EV_EOF|EV_ONESHOT.
 */
static int
filt_machport(
	struct knote *kn,
	__unused long hint)
{
	mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
	ipc_pset_t pset = IPS_NULL;
	wait_result_t wresult;
	thread_t self = current_thread();
	kern_return_t kr;
	mach_msg_option_t option;
	mach_msg_size_t size;

	/* never called from below */
	assert(hint == 0);

	/*
	 * called from user context. Have to validate the
	 * name. If it changed, we have an EOF situation.
	 */
	kr = ipc_object_translate(current_space(), name,
				  MACH_PORT_RIGHT_PORT_SET,
				  (ipc_object_t *)&pset);
	if (kr != KERN_SUCCESS || pset != kn->kn_ptr.p_pset || !ips_active(pset)) {
		kn->kn_data = 0;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		if (pset != IPS_NULL) {
			ips_unlock(pset);
		}
		return(1);
	}

	/* just use the reference from here on out */
	ips_reference(pset);
	ips_unlock(pset);

	/*
	 * Only honor supported receive options. If no options are
	 * provided, just force a MACH_RCV_TOO_LARGE to detect the
	 * name of the port and sizeof the waiting message.
	 */
	option = kn->kn_sfflags &
		(MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
		 MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);
	if (option & MACH_RCV_MSG) {
		self->ith_msg_addr = (mach_vm_address_t) kn->kn_ext[0];
		size = (mach_msg_size_t)kn->kn_ext[1];
	} else {
		/* no user buffer: probe only (see comment above) */
		option = MACH_RCV_LARGE;
		self->ith_msg_addr = 0;
		size = 0;
	}

	/*
	 * Set up to receive a message or the notification of a
	 * too large message. But never allow this call to wait.
	 * If the user provided additional options, like trailer
	 * options, pass those through here. But we don't support
	 * scatter lists through this interface.
	 */
	self->ith_object = (ipc_object_t)pset;
	self->ith_msize = size;
	self->ith_option = option;
	self->ith_receiver_name = MACH_PORT_NULL;
	self->ith_continuation = NULL;
	option |= MACH_RCV_TIMEOUT; // never wait
	self->ith_state = MACH_RCV_IN_PROGRESS;

	wresult = ipc_mqueue_receive_on_thread(
			&pset->ips_messages,
			option,
			size, /* max_size */
			0, /* immediate timeout */
			THREAD_INTERRUPTIBLE,
			self);
	/* immediate timeout means the receive can never block */
	assert(wresult == THREAD_NOT_WAITING);
	assert(self->ith_state != MACH_RCV_IN_PROGRESS);

	/*
	 * If we timed out, just release the reference on the
	 * portset and return zero.
	 */
	if (self->ith_state == MACH_RCV_TIMED_OUT) {
		ips_release(pset);
		return 0;
	}

	/*
	 * If we weren't attempting to receive a message
	 * directly, we need to return the port name in
	 * the kevent structure.
	 */
	if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
		assert(self->ith_state == MACH_RCV_TOO_LARGE);
		assert(self->ith_kmsg == IKM_NULL);
		kn->kn_data = self->ith_receiver_name;
		ips_release(pset);
		return 1;
	}

	/*
	 * Attempt to receive the message directly, returning
	 * the results in the fflags field.
	 */
	assert(option & MACH_RCV_MSG);
	kn->kn_ext[1] = self->ith_msize;
	kn->kn_data = MACH_PORT_NULL;
	kn->kn_fflags = mach_msg_receive_results();
	/* kmsg and pset reference consumed */

	/*
	 * if the user asked for the identity of ports containing a
	 * a too-large message, return it in the data field (as we
	 * do for messages we didn't try to receive).
	 */
	if ((kn->kn_fflags == MACH_RCV_TOO_LARGE) &&
	    (option & MACH_RCV_LARGE_IDENTITY))
		kn->kn_data = self->ith_receiver_name;
	return 1;
}
/*
 * Routine:	mach_port_allocate_full
 * Purpose:
 *	Allocate a right in a space, honoring quality-of-service
 *	options: a caller-chosen name (qosp->name, taken from *namep)
 *	and/or a preallocated message buffer (qosp->prealloc, sized
 *	by qosp->len) for receive rights.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		right allocated, name in *namep
 *	KERN_INVALID_TASK	space is null
 *	KERN_INVALID_VALUE	bad proto/right/name combination
 *	KERN_FAILURE		named allocation in a fast space
 *	KERN_RESOURCE_SHORTAGE	prealloc buffer unobtainable (or
 *				requested size overflows)
 *
 * BUGFIX: qosp->len is caller-controlled; the original computed
 * qosp->len + MAX_TRAILER_SIZE with no overflow check, so a huge len
 * could wrap to a tiny size and yield an undersized kalloc buffer.
 */
kern_return_t
mach_port_allocate_full(
	ipc_space_t space,
	mach_port_right_t right,
	mach_port_t proto,
	mach_port_qos_t *qosp,
	mach_port_name_t *namep)
{
	ipc_kmsg_t kmsg = IKM_NULL;
	kern_return_t kr;

	if (space == IS_NULL)
		return (KERN_INVALID_TASK);

	if (proto != MACH_PORT_NULL)
		return (KERN_INVALID_VALUE);

	if (qosp->name) {
		if (!MACH_PORT_VALID (*namep))
			return (KERN_INVALID_VALUE);
		if (is_fast_space (space))
			return (KERN_FAILURE);
	}

	if (qosp->prealloc) {
		mach_msg_size_t size = qosp->len + MAX_TRAILER_SIZE;

		if (right != MACH_PORT_RIGHT_RECEIVE)
			return (KERN_INVALID_VALUE);
		/* BUGFIX: reject requests whose size computation wrapped */
		if (size < qosp->len)
			return (KERN_RESOURCE_SHORTAGE);
		kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(size));
		if (kmsg == IKM_NULL)
			return (KERN_RESOURCE_SHORTAGE);
		ikm_init(kmsg, size);
	}

	switch (right) {
	    case MACH_PORT_RIGHT_RECEIVE:
	    {
		ipc_port_t port;

		if (qosp->name)
			kr = ipc_port_alloc_name(space, *namep, &port);
		else
			kr = ipc_port_alloc(space, namep, &port);
		if (kr == KERN_SUCCESS) {
			if (qosp->prealloc)
				ipc_kmsg_set_prealloc(kmsg, port);
			ip_unlock(port);
		} else if (qosp->prealloc)
			ipc_kmsg_free(kmsg);	/* don't leak on failure */
		break;
	    }
	    case MACH_PORT_RIGHT_PORT_SET:
	    {
		ipc_pset_t pset;

		if (qosp->name)
			kr = ipc_pset_alloc_name(space, *namep, &pset);
		else
			kr = ipc_pset_alloc(space, namep, &pset);
		if (kr == KERN_SUCCESS)
			ips_unlock(pset);
		break;
	    }
	    case MACH_PORT_RIGHT_DEAD_NAME:
		kr = ipc_object_alloc_dead(space, namep);
		break;
	    default:
		kr = KERN_INVALID_VALUE;
		break;
	}
	return (kr);
}
/*
 * Routine:	mach_port_move_member
 * Purpose:
 *	Move a receive right into (or out of) a port set.
 *	"after" names the destination set, or MACH_PORT_NULL
 *	to remove the port from all sets.
 * Conditions:
 *	Nothing locked on entry.
 * Returns:
 *	KERN_SUCCESS		moved (or removed) the right
 *	KERN_INVALID_TASK	space is null
 *	KERN_INVALID_RIGHT	member isn't a valid receive right,
 *				or "after" is dead / not a port set
 *	KERN_INVALID_NAME	"after" doesn't denote an entry
 */
kern_return_t
mach_port_move_member(
	ipc_space_t space,
	mach_port_name_t member,
	mach_port_name_t after)
{
	ipc_entry_t entry;
	ipc_port_t port;
	ipc_pset_t nset;
	kern_return_t kr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(member))
		return KERN_INVALID_RIGHT;

	if (after == MACH_PORT_DEAD)
		return KERN_INVALID_RIGHT;

	kr = ipc_right_lookup_read(space, member, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	/* resolve the destination set (IPS_NULL means "remove only") */
	if (after == MACH_PORT_NULL)
		nset = IPS_NULL;
	else {
		entry = ipc_entry_lookup(space, after);
		if (entry == IE_NULL) {
			is_read_unlock(space);
			return KERN_INVALID_NAME;
		}

		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			is_read_unlock(space);
			return KERN_INVALID_RIGHT;
		}

		nset = (ipc_pset_t) entry->ie_object;
		assert(nset != IPS_NULL);
	}

	/* hold the port lock across remove+add so the move is atomic */
	ip_lock(port);
	ipc_pset_remove_from_all(port);

	if (nset != IPS_NULL) {
		ips_lock(nset);
		kr = ipc_pset_add(nset, port);
		ips_unlock(nset);
	}
	ip_unlock(port);
	is_read_unlock(space);
	return kr;
}
/*
 * Routine:	ipc_pset_move
 * Purpose:
 *	Move the port from its current set (if any) to nset.
 *	nset may be IPS_NULL, meaning "remove from current set only".
 * Conditions:
 *	The space is read-locked and active on entry and is unlocked
 *	on return.  The port must be active.
 * Returns:
 *	KERN_SUCCESS	port moved/added/removed as requested
 *	KERN_NOT_IN_SET	nset was null and the port wasn't in a set
 */
kern_return_t
ipc_pset_move(
	ipc_space_t space,
	ipc_port_t port,
	ipc_pset_t nset)
{
	ipc_pset_t oset;

	/*
	 * While we've got the space locked, it holds refs for
	 * the port and nset (because of the entries).  Also,
	 * they must be alive.  While we've got port locked, it
	 * holds a ref for oset, which might not be alive.
	 */

	ip_lock(port);
	assert(ip_active(port));

	oset = port->ip_pset;

	if (oset == nset) {
		/* the port is already in the new set: a noop */
		is_read_unlock(space);
	} else if (oset == IPS_NULL) {
		/* just add port to the new set */
		ips_lock(nset);
		assert(ips_active(nset));
		is_read_unlock(space);

		ipc_pset_add(nset, port);
		ips_unlock(nset);
	} else if (nset == IPS_NULL) {
		/* just remove port from the old set */
		is_read_unlock(space);

		ips_lock(oset);
		ipc_pset_remove(oset, port);
		if (ips_active(oset))
			ips_unlock(oset);
		else {
			/* old set is dead: unlock may free it */
			ips_check_unlock(oset);
			oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
		}
	} else {
		/* atomically move port from oset to nset */
		/* take the two set locks in address order to avoid deadlock */
		if (oset < nset) {
			ips_lock(oset);
			ips_lock(nset);
		} else {
			ips_lock(nset);
			ips_lock(oset);
		}
		is_read_unlock(space);
		assert(ips_active(nset));

		ipc_pset_remove(oset, port);
		ipc_pset_add(nset, port);

		ips_unlock(nset);
		ips_check_unlock(oset); /* KERN_NOT_IN_SET not a possibility */
	}

	ip_unlock(port);

	return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
		KERN_NOT_IN_SET : KERN_SUCCESS);
}
/*
 * Routine:	ipc_mqueue_copyin
 * Purpose:
 *	Convert a name in a space into a message queue suitable for
 *	receiving: either a receive right's own queue (only if the
 *	port is not a member of a live set) or a port set's queue.
 *	Takes a reference on the underlying object and returns with
 *	the mqueue locked.
 * Conditions:
 *	Nothing locked on entry.
 * Returns:
 *	MACH_MSG_SUCCESS	*objectp referenced, *mqueuep locked
 *	MACH_RCV_INVALID_NAME	space dead, name unbound, or name does
 *				not denote a receive right or port set
 *	MACH_RCV_IN_SET		receive right belongs to a live set
 */
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t space,
	mach_port_t name,
	ipc_mqueue_t *mqueuep,
	ipc_object_t *objectp)
{
	ipc_entry_t entry;
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;
		ipc_pset_t pset;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);

		pset = port->ip_pset;
		if (pset != IPS_NULL) {
			ips_lock(pset);
			if (ips_active(pset)) {
				/* can't receive directly while in a live set */
				ips_unlock(pset);
				ip_unlock(port);
				return MACH_RCV_IN_SET;
			}
			/* dead set: clean up the stale membership */
			ipc_pset_remove(pset, port);
			ips_check_unlock(pset);
			assert(port->ip_pset == IPS_NULL);
		}

		mqueue = &port->ip_messages;
	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */
	io_reference(object);
	imq_lock(mqueue);	/* lock queue before dropping the object lock */
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}