/*
 * kmap_alloc_wait:
 *
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
        vm_offset_t addr;

        size = round_page(size);
        if (!swap_reserve(size))
                return (0);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's lock
                 * to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        swap_release(size);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, 0);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, MAP_ACC_CHARGED);
        vm_map_unlock(map);
        return (addr);
}
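/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller pairs kmap_alloc_wait() with kmap_free_wakeup(), which returns the
 * range to the submap and wakes any thread sleeping in the loop above via
 * the needs_wakeup flag.  The submap `args_map` and both helper names are
 * hypothetical.
 */
static void *
alloc_args_buf(vm_map_t args_map, vm_size_t len)
{
        /* May sleep until enough space frees up in the submap. */
        return ((void *)kmap_alloc_wait(args_map, len));
}

static void
free_args_buf(vm_map_t args_map, void *buf, vm_size_t len)
{
        /* Releases the range and wakes sleepers waiting for submap space. */
        kmap_free_wakeup(args_map, (vm_offset_t)buf, round_page(len));
}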
static int findspace_demo(void) {
  vm_map_t *orig = get_user_vm_map();
  vm_map_t *umap = vm_map_new();
  vm_map_activate(umap);

/* Two entries leaving a 0x5000-byte gap between addr2 and addr3. */
#define addr1 0x10000000
#define addr2 0x30000000
  vm_map_add_entry(umap, addr1, addr2, VM_PROT_NONE);
#define addr3 0x30005000
#define addr4 0x60000000
  vm_map_add_entry(umap, addr3, addr4, VM_PROT_NONE);

  vm_addr_t t;
  int n;

  /* A hint below the first entry is returned unchanged. */
  n = vm_map_findspace(umap, 0x00010000, PAGESIZE, &t);
  assert(n == 0 && t == 0x00010000);
  /* A hint at or inside the first entry lands in the gap right after it. */
  n = vm_map_findspace(umap, addr1, PAGESIZE, &t);
  assert(n == 0 && t == addr2);
  n = vm_map_findspace(umap, addr1 + 20 * PAGESIZE, PAGESIZE, &t);
  assert(n == 0 && t == addr2);
  /* The gap is 0x5000 bytes: a 0x6000 request doesn't fit, 0x5000 does. */
  n = vm_map_findspace(umap, addr1, 0x6000, &t);
  assert(n == 0 && t == addr4);
  n = vm_map_findspace(umap, addr1, 0x5000, &t);
  assert(n == 0 && t == addr2);

  /* Fill the gap exactly; the same request must now skip past addr4. */
  vm_map_add_entry(umap, t, t + 0x5000, VM_PROT_NONE);
  n = vm_map_findspace(umap, addr1, 0x5000, &t);
  assert(n == 0 && t == addr4);
  n = vm_map_findspace(umap, addr4, 0x6000, &t);
  assert(n == 0 && t == addr4);
  /* A request too large for any remaining hole fails. */
  n = vm_map_findspace(umap, 0, 0x40000000, &t);
  assert(n == -ENOMEM);

  /* Restore original vm_map. */
  vm_map_activate(orig);
  vm_map_delete(umap);

  klog("Test passed.");
  return KTEST_SUCCESS;
}
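/*
 * A simplified model of the first-fit search exercised by the asserts above
 * (a sketch for illustration, not the kernel's implementation): walk an
 * address-ordered entry list and return the first gap at or after `start`
 * large enough for `length` bytes.  The gap_entry_t type and the
 * findspace_model name are assumptions.
 */
typedef struct gap_entry {
  vm_addr_t start, end;   /* [start, end) is occupied */
  struct gap_entry *next; /* list kept in address order */
} gap_entry_t;

static int findspace_model(gap_entry_t *head, vm_addr_t start, size_t length,
                           vm_addr_t *addr_p) {
  vm_addr_t cand = start;
  for (gap_entry_t *e = head; e != NULL; e = e->next) {
    if (e->end <= cand)
      continue; /* entry lies entirely below the candidate address */
    if (cand + length <= e->start)
      break; /* the gap before this entry fits the request */
    cand = e->end; /* candidate overlaps this entry; skip past it */
  }
  /* A real implementation also fails with -ENOMEM past the map's end. */
  *addr_p = cand;
  return 0;
}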
/*
 * vm_contig_pg_kmap:
 *
 * Map a previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of the
 * kernel and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
        vm_offset_t addr, tmp_addr;
        vm_page_t pga = vm_page_array;
        int i, count;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_kmap: size must not be 0");
        crit_enter();
        lwkt_gettoken(&vm_token);

        /*
         * We've found a contiguous chunk that meets our requirements.
         * Allocate KVM, and assign phys pages and return a kernel VM
         * pointer.
         */
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0,
                             &addr) != KERN_SUCCESS) {
                /*
                 * XXX We almost never run out of kernel virtual
                 * space, so we don't make the allocated memory
                 * above available.
                 */
                vm_map_unlock(map);
                vm_map_entry_release(count);
                lwkt_reltoken(&vm_token);
                crit_exit();
                return (0);
        }

        /*
         * kernel_object maps 1:1 to kernel_map.
         */
        vm_object_hold(&kernel_object);
        vm_object_reference(&kernel_object);
        vm_map_insert(map, &count,
                      &kernel_object, addr, addr, addr + size,
                      VM_MAPTYPE_NORMAL,
                      VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        tmp_addr = addr;
        for (i = start; i < (start + size / PAGE_SIZE); i++) {
                vm_page_t m = &pga[i];
                vm_page_insert(m, &kernel_object, OFF_TO_IDX(tmp_addr));
                if ((flags & M_ZERO) && !(m->flags & PG_ZERO))
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
                m->flags = 0;
                tmp_addr += PAGE_SIZE;
        }

        vm_map_wire(map, addr, addr + size, 0);

        vm_object_drop(&kernel_object);
        lwkt_reltoken(&vm_token);
        crit_exit();
        return (addr);
}
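/*
 * Usage sketch (illustrative, not from the original source): allocate a
 * physically contiguous run, map it with vm_contig_pg_kmap(), and free it
 * with kmem_free() as the comment above prescribes.  The exact
 * vm_contig_pg_alloc() parameter list is assumed here, as are the helper
 * names contig_buf_alloc()/contig_buf_free().
 */
static void *
contig_buf_alloc(u_long size)
{
        vm_offset_t kva;
        int page_idx;

        /* Assumed signature: returns the vm_page_array index of the run. */
        page_idx = vm_contig_pg_alloc(size, 0, ~(vm_paddr_t)0,
                                      PAGE_SIZE, 0, M_ZERO);
        if (page_idx < 0)
                return (NULL);

        kva = vm_contig_pg_kmap(page_idx, size, &kernel_map, M_ZERO);
        if (kva == 0) {
                vm_contig_pg_free(page_idx, size); /* assumed counterpart */
                return (NULL);
        }
        return ((void *)kva);
}

static void
contig_buf_free(void *buf, u_long size)
{
        kmem_free(&kernel_map, (vm_offset_t)buf, round_page(size));
}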