boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store *)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (cur == VM_MAP_ENTRY_NULL) {
			panic("no entry");
		}
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				/* Address falls inside this entry. */
				*vm_entry = cur;
				return TRUE;
			}
			/*
			 * Entry ends at or before the address: descend right,
			 * remembering it as the closest preceding entry so far.
			 */
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		/* No entry precedes the address: return the map header. */
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}
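/*
 * Illustrative sketch, not part of the kernel sources: the RB-tree walk
 * above is a "containing or immediately preceding" interval lookup. The
 * same technique over a sorted array of non-overlapping [start, end)
 * ranges reduces to a binary search that tracks the closest predecessor.
 * All names here (demo_range, demo_lookup) are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_range {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
};

/*
 * Returns true with *idx = containing range if addr falls inside one;
 * otherwise returns false with *idx = index of the closest preceding
 * range, or (size_t)-1 when no range precedes addr (the analogue of
 * returning the map header above).
 */
static bool
demo_lookup(const struct demo_range *r, size_t n, unsigned long addr,
    size_t *idx)
{
	size_t lo = 0, hi = n;
	size_t prev = (size_t)-1;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		if (addr >= r[mid].start) {
			if (addr < r[mid].end) {
				*idx = mid;	/* containing range */
				return true;
			}
			prev = mid;		/* closest predecessor so far */
			lo = mid + 1;		/* descend right */
		} else {
			hi = mid;		/* descend left */
		}
	}
	*idx = prev;
	return false;
}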
int
get_vmsubmap_entries(vm_map_t map, vm_object_offset_t start, vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp) {
		vm_map_lock(map);
	}
	/* Advance past entries that start before the range of interest. */
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}
	/* Count entries starting within the range, recursing into submaps. */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
			    get_vmsubmap_entries(entry->object.sub_map,
			        entry->offset,
			        entry->offset + (entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp) {
		vm_map_unlock(map);
	}
	return total_entries;
}
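/*
 * Illustrative sketch (hypothetical names, not kernel code): the recursion
 * above flattens nested submaps into a single entry count. A minimal
 * self-contained analogue over a tree of entries, where a non-NULL
 * children pointer plays the role of is_sub_map:
 */
#include <stddef.h>

struct demo_entry {
	struct demo_entry *children;	/* non-NULL: this entry is a "submap" */
	size_t nchildren;
};

static int
demo_count_entries(const struct demo_entry *e, size_t n)
{
	int total = 0;

	for (size_t i = 0; i < n; i++) {
		if (e[i].children != NULL) {
			/* Recurse: a submap contributes its own entry count. */
			total += demo_count_entries(e[i].children, e[i].nchildren);
		} else {
			total += 1;
		}
	}
	return total;
}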
boolean_t
first_free_is_valid_ll(vm_map_t map)
{
	vm_map_entry_t entry, next;

	entry = vm_map_to_entry(map);
	next = entry->vme_next;
	/*
	 * Walk forward while the next entry starts in the same page as this
	 * entry ends (or starts); first_free should be the last entry before
	 * a page-aligned gap.
	 */
	while (vm_map_trunc_page(next->vme_start, VM_MAP_PAGE_MASK(map)) ==
	       vm_map_trunc_page(entry->vme_end, VM_MAP_PAGE_MASK(map)) ||
	       (vm_map_trunc_page(next->vme_start, VM_MAP_PAGE_MASK(map)) ==
	        vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) &&
	        next != vm_map_to_entry(map))) {
		entry = next;
		next = entry->vme_next;
		if (entry == vm_map_to_entry(map)) {
			break;
		}
	}
	if (map->first_free != entry) {
		printf("Bad first_free for map %p: %p should be %p\n",
		    map, map->first_free, entry);
		return FALSE;
	}
	return TRUE;
}
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = (vm_map_entry_t)map->holes_list;
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	/* Skip holes that lie below the first map entry. */
	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == (vm_map_entry_t)map->holes_list) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		/* A gap between consecutive entries must match a hole exactly. */
		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p",
				    hole_entry,
				    (unsigned long long)hole_entry->vme_start,
				    map_entry->vme_next,
				    (unsigned long long)map_entry->vme_end,
				    old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p",
				    hole_entry,
				    (unsigned long long)hole_entry->vme_end,
				    map_entry->vme_next,
				    (unsigned long long)map_entry->vme_next->vme_start,
				    old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == (vm_map_entry_t)map->holes_list) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}
void
vm_map_store_update(vm_map_t map, vm_map_entry_t entry, int update_type)
{
	switch (update_type) {
	case VM_MAP_ENTRY_CREATE:
		break;
	case VM_MAP_ENTRY_DELETE:
		/* Reset cached pointers that are about to go stale. */
		if (entry == map->first_free) {
			map->first_free = vm_map_to_entry(map);
		}
		if (entry == map->hint) {
			map->hint = vm_map_to_entry(map);
		}
		break;
	default:
		break;
	}
}
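/*
 * Illustrative sketch (hypothetical names): the DELETE case above is a
 * cache-invalidation pattern -- before an entry is freed, any cached
 * pointer that might reference it is reset to an always-valid sentinel
 * (the map header in the kernel code).
 */
struct demo_cache {
	void *hint;		/* last lookup result; may dangle after a free */
	void *sentinel;		/* always-valid placeholder */
};

static void
demo_cache_on_delete(struct demo_cache *c, void *dying)
{
	if (c->hint == dying) {
		c->hint = c->sentinel;	/* never leave a dangling hint */
	}
}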
boolean_t
projected_buffer_in_range(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;

	if (map == VM_MAP_NULL || map == kernel_map) {
		return FALSE;
	}

	/* Find the first entry at or after "start". */
	if (!vm_map_lookup_entry(map, start, &entry)) {
		entry = entry->vme_next;
	}
	/* Skip non-projected entries within the range. */
	while (entry != vm_map_to_entry(map) &&
	    entry->projected_on == 0 &&
	    entry->vme_start <= end) {
		entry = entry->vme_next;
	}
	return entry != vm_map_to_entry(map) && entry->vme_start <= end;
}
kern_return_t
projected_buffer_collect(vm_map_t map)
{
	vm_map_entry_t entry, next;

	if (map == VM_MAP_NULL || map == kernel_map) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Deallocating an entry invalidates it, so fetch "next" first. */
	for (entry = vm_map_first_entry(map);
	    entry != vm_map_to_entry(map);
	    entry = next) {
		next = entry->vme_next;
		if (entry->projected_on != 0) {
			projected_buffer_deallocate(map, entry->vme_start, entry->vme_end);
		}
	}
	return KERN_SUCCESS;
}
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map, addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
					 VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate(ipc_kernel_map, &addr, size,
		    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;
		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
			VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
				  addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
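/*
 * Illustrative sketch (hypothetical names, userspace analogue): the outer
 * loop above follows a "fill, then grow and retry" pattern -- write at most
 * `room` records, count how many were actually needed, and if the buffer
 * was too small, reallocate at twice the needed size and run the traversal
 * again. The fill callback is assumed to write at most `room` records and
 * report the total needed through *used.
 */
#include <stdlib.h>

static int
demo_fill_retry(int (*fill)(void *buf, size_t room, size_t *used, void *arg),
    void *arg, void **out_buf, size_t *out_used, size_t elem_size)
{
	void *buf = NULL;
	size_t size = 0;

	for (;;) {
		size_t room = size / elem_size;
		size_t used = 0;

		if (fill(buf, room, &used, arg) != 0) {
			free(buf);
			return -1;
		}
		if (used <= room) {	/* everything fit */
			*out_buf = buf;
			*out_used = used;
			return 0;
		}
		free(buf);		/* too small: double and retry */
		size = 2 * used * elem_size;
		buf = malloc(size);
		if (buf == NULL)
			return -1;
	}
}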
int
fill_procregioninfo(task_t task, uint64_t arg,
    struct proc_regioninfo_internal *pinfo,
    uint32_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;
	vm_map_offset_t start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	/* Take a reference so the map survives dropping the task lock. */
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	start = address;
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = entry->offset;
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = entry->alias;

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}

	extended.protection = entry->protection;
	extended.user_tag = entry->alias;
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, entry->offset,
	    entry->vme_end - start, &extended);

	if (extended.external_pager &&
	    extended.ref_count == 2 &&
	    extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);

	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uint32_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}
/*
 *	vm_map_store_lookup_entry_ll:	[ internal use only ]
 *
 *	Use the linked list to find the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
	register vm_map_t		map,
	register vm_map_offset_t	address,
	vm_map_entry_t			*entry)		/* OUT */
{
	register vm_map_entry_t cur;
	register vm_map_entry_t last;

	/*
	 *	Start looking either from the head of the
	 *	list, or from the hint.
	 */
	cur = map->hint;

	if (cur == vm_map_to_entry(map))
		cur = cur->vme_next;

	if (address >= cur->vme_start) {
		/*
		 *	Go from hint to end of list.
		 *
		 *	But first, make a quick check to see if
		 *	we are already looking at the entry we
		 *	want (which is usually the case).
		 *	Note also that we don't need to save the hint
		 *	here... it is the same hint (unless we are
		 *	at the header, in which case the hint didn't
		 *	buy us anything anyway).
		 */
		last = vm_map_to_entry(map);
		if ((cur != last) && (cur->vme_end > address)) {
			*entry = cur;
			return TRUE;
		}
	} else {
		/*
		 *	Go from start to hint, *inclusively*
		 */
		last = cur->vme_next;
		cur = vm_map_first_entry(map);
	}

	/*
	 *	Search linearly
	 */
	while (cur != last) {
		if (cur->vme_end > address) {
			if (address >= cur->vme_start) {
				/*
				 *	Save this lookup for future
				 *	hints, and return
				 */
				*entry = cur;
				SAVE_HINT_MAP_READ(map, cur);
				return TRUE;
			}
			break;
		}
		cur = cur->vme_next;
	}
	*entry = cur->vme_prev;
	SAVE_HINT_MAP_READ(map, *entry);

	return FALSE;
}
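/*
 * Illustrative sketch (hypothetical names, not kernel code): the hint-based
 * search above, reduced to a self-contained circular doubly linked list
 * with a sentinel node standing in for the map header. `list` is the
 * sentinel; *hint caches the last hit. Returns true with *out = containing
 * node, or false with *out = closest predecessor (the sentinel when no
 * node precedes addr).
 */
#include <stdbool.h>

struct demo_node {
	struct demo_node *next, *prev;
	unsigned long start, end;	/* [start, end); 0/0 in the sentinel */
};

static bool
demo_list_lookup(struct demo_node *list, struct demo_node **hint,
    unsigned long addr, struct demo_node **out)
{
	struct demo_node *cur = *hint, *last;

	if (cur == list)
		cur = cur->next;

	if (addr >= cur->start) {
		/* Fast path: the hint itself may already contain addr. */
		last = list;
		if (cur != last && cur->end > addr) {
			*out = cur;
			return true;
		}
	} else {
		/* addr lies below the hint: rescan from the first node. */
		last = cur->next;
		cur = list->next;
	}

	while (cur != last) {
		if (cur->end > addr) {
			if (addr >= cur->start) {
				*out = *hint = cur;	/* save for next time */
				return true;
			}
			break;
		}
		cur = cur->next;
	}
	*out = *hint = cur->prev;
	return false;
}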