kern_return_t mach_port_space_info( ipc_space_t space, ipc_info_space_t *infop, ipc_info_name_array_t *tablep, mach_msg_type_number_t *tableCntp, ipc_info_tree_name_array_t *treep, mach_msg_type_number_t *treeCntp) { ipc_info_name_t *table_info; unsigned int table_potential, table_actual; vm_offset_t table_addr; vm_size_t table_size = 0; /* Suppress gcc warning */ ipc_info_tree_name_t *tree_info; unsigned int tree_potential, tree_actual; vm_offset_t tree_addr; vm_size_t tree_size = 0; /* Suppress gcc warning */ ipc_tree_entry_t tentry; ipc_entry_t table; ipc_entry_num_t tsize; mach_port_index_t index; kern_return_t kr; if (space == IS_NULL) return KERN_INVALID_TASK; /* start with in-line memory */ table_info = *tablep; table_potential = *tableCntp; tree_info = *treep; tree_potential = *treeCntp; for (;;) { is_read_lock(space); if (!space->is_active) { is_read_unlock(space); if (table_info != *tablep) kmem_free(ipc_kernel_map, table_addr, table_size); if (tree_info != *treep) kmem_free(ipc_kernel_map, tree_addr, tree_size); return KERN_INVALID_TASK; } table_actual = space->is_table_size; tree_actual = space->is_tree_total; if ((table_actual <= table_potential) && (tree_actual <= tree_potential)) break; is_read_unlock(space); if (table_actual > table_potential) { if (table_info != *tablep) kmem_free(ipc_kernel_map, table_addr, table_size); table_size = round_page(table_actual * sizeof *table_info); kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size); if (kr != KERN_SUCCESS) { if (tree_info != *treep) kmem_free(ipc_kernel_map, tree_addr, tree_size); return KERN_RESOURCE_SHORTAGE; } table_info = (ipc_info_name_t *) table_addr; table_potential = table_size/sizeof *table_info; } if (tree_actual > tree_potential) { if (tree_info != *treep) kmem_free(ipc_kernel_map, tree_addr, tree_size); tree_size = round_page(tree_actual * sizeof *tree_info); kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size); if (kr != KERN_SUCCESS) { if (table_info != *tablep) 
kmem_free(ipc_kernel_map, table_addr, table_size); return KERN_RESOURCE_SHORTAGE; } tree_info = (ipc_info_tree_name_t *) tree_addr; tree_potential = tree_size/sizeof *tree_info; } } /* space is read-locked and active; we have enough wired memory */ infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD); infop->iis_table_size = space->is_table_size; infop->iis_table_next = space->is_table_next->its_size; infop->iis_tree_size = space->is_tree_total; infop->iis_tree_small = space->is_tree_small; infop->iis_tree_hash = space->is_tree_hash; table = space->is_table; tsize = space->is_table_size; for (index = 0; index < tsize; index++) { ipc_info_name_t *iin = &table_info[index]; ipc_entry_t entry = &table[index]; ipc_entry_bits_t bits = entry->ie_bits; iin->iin_name = MACH_PORT_MAKEB(index, bits); iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE; iin->iin_compat = FALSE; iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE; iin->iin_type = IE_BITS_TYPE(bits); iin->iin_urefs = IE_BITS_UREFS(bits); iin->iin_object = (vm_offset_t) entry->ie_object; iin->iin_next = entry->ie_next; iin->iin_hash = entry->ie_index; } for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0; tentry != ITE_NULL; tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { ipc_info_tree_name_t *iitn = &tree_info[index++]; ipc_info_name_t *iin = &iitn->iitn_name; ipc_entry_t entry = &tentry->ite_entry; ipc_entry_bits_t bits = entry->ie_bits; assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE); iin->iin_name = tentry->ite_name; iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE; iin->iin_compat = FALSE; iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? 
TRUE : FALSE; iin->iin_type = IE_BITS_TYPE(bits); iin->iin_urefs = IE_BITS_UREFS(bits); iin->iin_object = (vm_offset_t) entry->ie_object; iin->iin_next = entry->ie_next; iin->iin_hash = entry->ie_index; if (tentry->ite_lchild == ITE_NULL) iitn->iitn_lchild = MACH_PORT_NULL; else iitn->iitn_lchild = tentry->ite_lchild->ite_name; if (tentry->ite_rchild == ITE_NULL) iitn->iitn_rchild = MACH_PORT_NULL; else iitn->iitn_rchild = tentry->ite_rchild->ite_name; } ipc_splay_traverse_finish(&space->is_tree); is_read_unlock(space); if (table_info == *tablep) { /* data fit in-line; nothing to deallocate */ *tableCntp = table_actual; } else if (table_actual == 0) { kmem_free(ipc_kernel_map, table_addr, table_size); *tableCntp = 0; } else { vm_size_t size_used, rsize_used; vm_map_copy_t copy; /* kmem_alloc doesn't zero memory */ size_used = table_actual * sizeof *table_info; rsize_used = round_page(size_used); if (rsize_used != table_size) kmem_free(ipc_kernel_map, table_addr + rsize_used, table_size - rsize_used); if (size_used != rsize_used) memset((void *) (table_addr + size_used), 0, rsize_used - size_used); kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used, TRUE, ©); assert(kr == KERN_SUCCESS); *tablep = (ipc_info_name_t *) copy; *tableCntp = table_actual; } if (tree_info == *treep) { /* data fit in-line; nothing to deallocate */ *treeCntp = tree_actual; } else if (tree_actual == 0) { kmem_free(ipc_kernel_map, tree_addr, tree_size); *treeCntp = 0; } else { vm_size_t size_used, rsize_used; vm_map_copy_t copy; /* kmem_alloc doesn't zero memory */ size_used = tree_actual * sizeof *tree_info; rsize_used = round_page(size_used); if (rsize_used != tree_size) kmem_free(ipc_kernel_map, tree_addr + rsize_used, tree_size - rsize_used); if (size_used != rsize_used) memset((void *) (tree_addr + size_used), 0, rsize_used - size_used); kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used, TRUE, ©); assert(kr == KERN_SUCCESS); *treep = (ipc_info_tree_name_t *) copy; *treeCntp = 
tree_actual; } return KERN_SUCCESS; }
/*
 *	Routine:	ipc_space_destroy
 *	Purpose:
 *		Mark the space dead and clean up all of its capabilities.
 *		The teardown order matters: the space is deactivated under
 *		the write lock, concurrent table growers are waited out,
 *		and only then are the (now-private) entries cleaned.
 *	Conditions:
 *		Nothing locked on entry.  May block waiting for a
 *		concurrent table-growth operation to finish.
 */
void
ipc_space_destroy(
	ipc_space_t space)
{
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;
	boolean_t active;

	assert(space != IS_NULL);

	/* atomically deactivate; remember whether we were the one to do it */
	is_write_lock(space);
	active = space->is_active;
	space->is_active = FALSE;
	is_write_unlock(space);

	if (!active)
		return;	/* somebody else already destroyed it */

	/*
	 * If somebody is trying to grow the table,
	 * we must wait until they finish and figure
	 * out the space died.
	 */
	is_read_lock(space);
	while (space->is_growing) {
		assert_wait((event_t) space, FALSE);
		is_read_unlock(space);
		thread_block((void (*)(void)) 0);
		is_read_lock(space);
	}
	is_read_unlock(space);

	/*
	 * Now we can futz with it without having it locked.
	 */

	table = space->is_table;
	size = space->is_table_size;

	/* release every live right held in the table */
	for (index = 0; index < size; index++) {
		ipc_entry_t entry = &table[index];
		mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);

		if (type != MACH_PORT_TYPE_NONE) {
			mach_port_t name =
				MACH_PORT_MAKEB(index, entry->ie_bits);

			ipc_right_clean(space, name, entry);
		}
	}

	/*
	 * is_table_next points at the *next* table-size descriptor;
	 * back up one to free with the descriptor the table was
	 * actually allocated from.
	 */
	it_entries_free(space->is_table_next-1, table);

	/* clean the splay tree, destroying entries as we traverse (TRUE) */
	for (tentry = ipc_splay_traverse_start(&space->is_tree);
	     tentry != ITE_NULL;
	     tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
		mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
		mach_port_t name = tentry->ite_name;

		assert(type != MACH_PORT_TYPE_NONE);

		/* use object before ipc_right_clean releases ref */

		if (type == MACH_PORT_TYPE_SEND)
			ipc_hash_global_delete(space, tentry->ite_object,
					       name, tentry);

		ipc_right_clean(space, name, &tentry->ite_entry);
	}
	ipc_splay_traverse_finish(&space->is_tree);

	/*
	 * Because the space is now dead,
	 * we must release the "active" reference for it.
	 * Our caller still has his reference.
	 */
	is_release(space);
}
kern_return_t mach_port_names( ipc_space_t space, mach_port_name_t **namesp, mach_msg_type_number_t *namesCnt, mach_port_type_t **typesp, mach_msg_type_number_t *typesCnt) { ipc_entry_bits_t *capability; ipc_tree_entry_t tentry; ipc_entry_t table; ipc_entry_num_t tsize; mach_port_index_t index; ipc_entry_num_t actual; /* this many names */ ipc_port_timestamp_t timestamp; /* logical time of this operation */ mach_port_name_t *names; mach_port_type_t *types; kern_return_t kr; vm_size_t size; /* size of allocated memory */ vm_offset_t addr1; /* allocated memory, for names */ vm_offset_t addr2; /* allocated memory, for types */ vm_map_copy_t memory1; /* copied-in memory, for names */ vm_map_copy_t memory2; /* copied-in memory, for types */ /* safe simplifying assumption */ assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t)); if (space == IS_NULL) return KERN_INVALID_TASK; size = 0; for (;;) { ipc_entry_num_t bound; vm_size_t size_needed; is_read_lock(space); if (!space->is_active) { is_read_unlock(space); if (size != 0) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); } return KERN_INVALID_TASK; } /* upper bound on number of names in the space */ bound = space->is_table_size + space->is_tree_total; size_needed = round_page_32(bound * sizeof(mach_port_name_t)); if (size_needed <= size) break; is_read_unlock(space); if (size != 0) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); } size = size_needed; kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE); if (kr != KERN_SUCCESS) return KERN_RESOURCE_SHORTAGE; kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); return KERN_RESOURCE_SHORTAGE; } /* can't fault while we hold locks */ kr = vm_map_wire(ipc_kernel_map, addr1, addr1 + size, VM_PROT_READ|VM_PROT_WRITE, FALSE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); 
return KERN_RESOURCE_SHORTAGE; } kr = vm_map_wire(ipc_kernel_map, addr2, addr2 + size, VM_PROT_READ|VM_PROT_WRITE, FALSE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); return KERN_RESOURCE_SHORTAGE; } } /* space is read-locked and active */ names = (mach_port_name_t *) addr1; types = (mach_port_type_t *) addr2; actual = 0; timestamp = ipc_port_timestamp(); table = space->is_table; tsize = space->is_table_size; for (index = 0; index < tsize; index++) { ipc_entry_t entry = &table[index]; ipc_entry_bits_t bits = entry->ie_bits; if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) { mach_port_name_t name; name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits)); mach_port_names_helper(timestamp, entry, name, names, types, &actual, space); } } for (tentry = ipc_splay_traverse_start(&space->is_tree); tentry != ITE_NULL; tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { ipc_entry_t entry = &tentry->ite_entry; mach_port_name_t name = tentry->ite_name; assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE); mach_port_names_helper(timestamp, entry, name, names, types, &actual, space); } ipc_splay_traverse_finish(&space->is_tree); is_read_unlock(space); if (actual == 0) { memory1 = VM_MAP_COPY_NULL; memory2 = VM_MAP_COPY_NULL; if (size != 0) { kmem_free(ipc_kernel_map, addr1, size); kmem_free(ipc_kernel_map, addr2, size); } } else { vm_size_t size_used; vm_size_t vm_size_used; size_used = actual * sizeof(mach_port_name_t); vm_size_used = round_page_32(size_used); /* * Make used memory pageable and get it into * copied-in form. Free any unused memory. 
*/ kr = vm_map_unwire(ipc_kernel_map, addr1, addr1 + vm_size_used, FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_unwire(ipc_kernel_map, addr2, addr2 + vm_size_used, FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, addr1, size_used, TRUE, &memory1); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, addr2, size_used, TRUE, &memory2); assert(kr == KERN_SUCCESS); if (vm_size_used != size) { kmem_free(ipc_kernel_map, addr1 + vm_size_used, size - vm_size_used); kmem_free(ipc_kernel_map, addr2 + vm_size_used, size - vm_size_used); } } *namesp = (mach_port_name_t *) memory1; *namesCnt = actual; *typesp = (mach_port_type_t *) memory2; *typesCnt = actual; return KERN_SUCCESS; }
kern_return_t mach_port_space_info( ipc_space_t space, ipc_info_space_t *infop, ipc_info_name_array_t *tablep, mach_msg_type_number_t *tableCntp, ipc_info_tree_name_array_t *treep, mach_msg_type_number_t *treeCntp) { ipc_info_name_t *table_info; vm_offset_t table_addr; vm_size_t table_size, table_size_needed; ipc_info_tree_name_t *tree_info; vm_offset_t tree_addr; vm_size_t tree_size, tree_size_needed; ipc_tree_entry_t tentry; ipc_entry_t table; ipc_entry_num_t tsize; mach_port_index_t index; kern_return_t kr; vm_map_copy_t copy; if (space == IS_NULL) return KERN_INVALID_TASK; /* start with in-line memory */ table_size = 0; tree_size = 0; for (;;) { is_read_lock(space); if (!space->is_active) { is_read_unlock(space); if (table_size != 0) kmem_free(ipc_kernel_map, table_addr, table_size); if (tree_size != 0) kmem_free(ipc_kernel_map, tree_addr, tree_size); return KERN_INVALID_TASK; } table_size_needed = round_page(space->is_table_size * sizeof(ipc_info_name_t)); tree_size_needed = round_page(space->is_tree_total * sizeof(ipc_info_tree_name_t)); if ((table_size_needed == table_size) && (tree_size_needed == tree_size)) break; is_read_unlock(space); if (table_size != table_size_needed) { if (table_size != 0) kmem_free(ipc_kernel_map, table_addr, table_size); kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size_needed); if (kr != KERN_SUCCESS) { if (tree_size != 0) kmem_free(ipc_kernel_map, tree_addr, tree_size); return KERN_RESOURCE_SHORTAGE; } table_size = table_size_needed; } if (tree_size != tree_size_needed) { if (tree_size != 0) kmem_free(ipc_kernel_map, tree_addr, tree_size); kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size_needed); if (kr != KERN_SUCCESS) { if (table_size != 0) kmem_free(ipc_kernel_map, table_addr, table_size); return KERN_RESOURCE_SHORTAGE; } tree_size = tree_size_needed; } } /* space is read-locked and active; we have enough wired memory */ /* get the overall space info */ infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD); 
infop->iis_table_size = space->is_table_size; infop->iis_table_next = space->is_table_next->its_size; infop->iis_tree_size = space->is_tree_total; infop->iis_tree_small = space->is_tree_small; infop->iis_tree_hash = space->is_tree_hash; /* walk the table for this space */ table = space->is_table; tsize = space->is_table_size; table_info = (ipc_info_name_array_t)table_addr; for (index = 0; index < tsize; index++) { ipc_info_name_t *iin = &table_info[index]; ipc_entry_t entry = &table[index]; ipc_entry_bits_t bits; bits = entry->ie_bits; iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits)); iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE; iin->iin_type = IE_BITS_TYPE(bits); if (entry->ie_request) iin->iin_type |= MACH_PORT_TYPE_DNREQUEST; iin->iin_urefs = IE_BITS_UREFS(bits); iin->iin_object = (vm_offset_t) entry->ie_object; iin->iin_next = entry->ie_next; iin->iin_hash = entry->ie_index; } /* walk the splay tree for this space */ tree_info = (ipc_info_tree_name_array_t)tree_addr; for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0; tentry != ITE_NULL; tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { ipc_info_tree_name_t *iitn = &tree_info[index++]; ipc_info_name_t *iin = &iitn->iitn_name; ipc_entry_t entry = &tentry->ite_entry; ipc_entry_bits_t bits = entry->ie_bits; assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE); iin->iin_name = tentry->ite_name; iin->iin_collision = (bits & IE_BITS_COLLISION) ? 
TRUE : FALSE; iin->iin_type = IE_BITS_TYPE(bits); if (entry->ie_request) iin->iin_type |= MACH_PORT_TYPE_DNREQUEST; iin->iin_urefs = IE_BITS_UREFS(bits); iin->iin_object = (vm_offset_t) entry->ie_object; iin->iin_next = entry->ie_next; iin->iin_hash = entry->ie_index; if (tentry->ite_lchild == ITE_NULL) iitn->iitn_lchild = MACH_PORT_NULL; else iitn->iitn_lchild = tentry->ite_lchild->ite_name; if (tentry->ite_rchild == ITE_NULL) iitn->iitn_rchild = MACH_PORT_NULL; else iitn->iitn_rchild = tentry->ite_rchild->ite_name; } ipc_splay_traverse_finish(&space->is_tree); is_read_unlock(space); /* prepare the table out-of-line data for return */ if (table_size > 0) { if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t)) bzero((char *)&table_info[infop->iis_table_size], table_size - infop->iis_table_size * sizeof(ipc_info_name_t)); kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(table_addr), vm_map_round_page(table_addr + table_size), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, (vm_map_size_t)table_size, TRUE, ©); assert(kr == KERN_SUCCESS); *tablep = (ipc_info_name_t *)copy; *tableCntp = infop->iis_table_size; } else { *tablep = (ipc_info_name_t *)0; *tableCntp = 0; } /* prepare the tree out-of-line data for return */ if (tree_size > 0) { if (tree_size > infop->iis_tree_size * sizeof(ipc_info_tree_name_t)) bzero((char *)&tree_info[infop->iis_tree_size], tree_size - infop->iis_tree_size * sizeof(ipc_info_tree_name_t)); kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(tree_addr), vm_map_round_page(tree_addr + tree_size), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)tree_addr, (vm_map_size_t)tree_size, TRUE, ©); assert(kr == KERN_SUCCESS); *treep = (ipc_info_tree_name_t *)copy; *treeCntp = infop->iis_tree_size; } else { *treep = (ipc_info_tree_name_t *)0; *treeCntp = 0; } return KERN_SUCCESS; }
/*
 *	Routine:	mach_port_get_set_status
 *	Purpose:
 *		Return the members of a port set as an array of names.
 *		Memory is allocated and wired *before* taking the space
 *		lock (can't fault while locked); if the guess is too
 *		small, the buffer is freed and the whole lookup/scan
 *		is retried with a larger size.
 *	Conditions:
 *		Nothing locked on entry.  Allocates memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved members.
 *		KERN_INVALID_TASK	The space is null or dead.
 *		KERN_INVALID_RIGHT	Name doesn't denote a port set.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
mach_port_get_set_status(
	ipc_space_t space,
	mach_port_name_t name,
	mach_port_name_t **members,
	mach_msg_type_number_t *membersCnt)
{
	ipc_entry_num_t actual;		/* this many members */
	ipc_entry_num_t maxnames;	/* space for this many members */
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr;	/* allocated memory */
	vm_map_copy_t memory;	/* copied-in memory */

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(name))
		return KERN_INVALID_RIGHT;

	size = PAGE_SIZE;	/* initial guess */

	for (;;) {
		ipc_tree_entry_t tentry;
		ipc_entry_t entry, table;
		ipc_entry_num_t tsize;
		mach_port_index_t index;
		mach_port_name_t *names;
		ipc_pset_t pset;

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);

		/* (re)validate the name on every iteration of the loop */
		kr = ipc_right_lookup_read(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return kr;
		}
		/* space is read-locked and active */

		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
			is_read_unlock(space);
			kmem_free(ipc_kernel_map, addr, size);
			return KERN_INVALID_RIGHT;
		}

		pset = (ipc_pset_t) entry->ie_object;
		assert(pset != IPS_NULL);
		/* the port set must be active */

		names = (mach_port_name_t *) addr;
		maxnames = size / sizeof(mach_port_name_t);
		actual = 0;

		table = space->is_table;
		tsize = space->is_table_size;

		/* scan receive rights in the table for pset membership */
		for (index = 0; index < tsize; index++) {
			ipc_entry_t ientry = &table[index];

			if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t port =
					(ipc_port_t) ientry->ie_object;

				mach_port_gst_helper(pset, port,
						     maxnames, names, &actual);
			}
		}

		/* likewise for receive rights in the splay tree */
		for (tentry = ipc_splay_traverse_start(&space->is_tree);
		     tentry != ITE_NULL;
		     tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
			ipc_entry_bits_t bits = tentry->ite_bits;

			assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t port =
					(ipc_port_t) tentry->ite_object;

				mach_port_gst_helper(pset, port, maxnames,
						     names, &actual);
			}
		}
		ipc_splay_traverse_finish(&space->is_tree);
		is_read_unlock(space);

		if (actual <= maxnames)
			break;

		/* didn't have enough memory; allocate more */

		kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(actual * sizeof(mach_port_name_t)) +
			PAGE_SIZE;
	}

	if (actual == 0) {
		memory = VM_MAP_COPY_NULL;

		kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 * Make used memory pageable and get it into
		 * copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				   addr, addr + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &memory);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size)
			kmem_free(ipc_kernel_map,
				  addr + vm_size_used, size - vm_size_used);
	}

	*members = (mach_port_name_t *) memory;
	*membersCnt = actual;
	return KERN_SUCCESS;
}