/*
 * Allocate new wired pages in an object.
 * The object is assumed to be mapped into the kernel map or
 * a submap.
 */
void
kmem_alloc_pages(
    vm_object_t     object,
    vm_offset_t     offset,
    vm_offset_t     start,
    vm_offset_t     end,
    vm_prot_t       protection)
{
    /*
     * Mark the pmap region as not pageable.
     */
    pmap_pageable(kernel_pmap, start, end, FALSE);

    while (start < end) {
        vm_page_t   mem;

        vm_object_lock(object);

        /*
         * Allocate a page
         */
        while ((mem = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
            vm_object_unlock(object);
            VM_PAGE_WAIT((void (*)()) 0);
            vm_object_lock(object);
        }

        /*
         * Wire it down
         */
        vm_page_lock_queues();
        vm_page_wire(mem);
        vm_page_unlock_queues();
        vm_object_unlock(object);

        /*
         * Enter it in the kernel pmap
         */
        PMAP_ENTER(kernel_pmap, start, mem, protection, TRUE);

        vm_object_lock(object);
        PAGE_WAKEUP_DONE(mem);
        vm_object_unlock(object);

        start += PAGE_SIZE;
        offset += PAGE_SIZE;
    }
}
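/*
 * Illustrative sketch (not part of the original source): the usual caller
 * pattern for kmem_alloc_pages() is to reserve a kernel-map range backed by
 * a fresh object and then populate that range with wired pages.  The helper
 * name below is made up for the example, the use of vm_map_find_entry() is
 * an assumption about the surrounding VM interfaces, and error handling is
 * kept minimal.
 */
static kern_return_t
example_kmem_alloc_wired(
    vm_map_t        map,        /* kernel map or submap */
    vm_offset_t     *addrp,
    vm_size_t       size)
{
    vm_object_t     object;
    vm_map_entry_t  entry;
    vm_offset_t     addr;
    kern_return_t   kr;

    size = round_page(size);
    object = vm_object_allocate(size);

    /* Reserve address space and attach the new object (assumed API). */
    kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
                           VM_OBJECT_NULL, &entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(object);
        return kr;
    }
    entry->object.vm_object = object;
    entry->offset = 0;
    vm_map_unlock(map);

    /* Fill the reserved range with wired pages. */
    kmem_alloc_pages(object, 0, addr, addr + size, VM_PROT_DEFAULT);

    *addrp = addr;
    return KERN_SUCCESS;
}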
/*
 * Remap wired pages in an object into a new region.
 * The object is assumed to be mapped into the kernel map or
 * a submap.
 */
void
kmem_remap_pages(
    vm_object_t     object,
    vm_offset_t     offset,
    vm_offset_t     start,
    vm_offset_t     end,
    vm_prot_t       protection)
{
    /*
     * Mark the pmap region as not pageable.
     */
    pmap_pageable(kernel_pmap, start, end, FALSE);

    while (start < end) {
        vm_page_t   mem;

        vm_object_lock(object);

        /*
         * Find a page
         */
        if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
            panic("kmem_remap_pages");

        /*
         * Wire it down (again)
         */
        vm_page_lock_queues();
        vm_page_wire(mem);
        vm_page_unlock_queues();
        vm_object_unlock(object);

        /*
         * Enter it in the kernel pmap.  The page isn't busy,
         * but this shouldn't be a problem because it is wired.
         */
        PMAP_ENTER(kernel_pmap, start, mem, protection, TRUE);

        start += PAGE_SIZE;
        offset += PAGE_SIZE;
    }
}
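/*
 * Illustrative sketch (not part of the original source): kmem_remap_pages()
 * is typically paired with kmem_alloc_pages() when a wired allocation must
 * be grown by mapping the same object at a new, larger range.  This assumes
 * the caller has already reserved [newaddr, newaddr + newsize) against the
 * same object; the helper name is illustrative only.
 */
static void
example_kmem_grow_wired(
    vm_object_t     object,     /* object backing the old allocation */
    vm_offset_t     newaddr,    /* start of the newly reserved range */
    vm_size_t       oldsize,    /* size already backed by wired pages */
    vm_size_t       newsize)    /* total size of the new range */
{
    /* Remap (and re-wire) the pages that already exist in the object. */
    kmem_remap_pages(object, 0, newaddr, newaddr + oldsize, VM_PROT_DEFAULT);

    /* Allocate and wire fresh pages for the grown portion. */
    kmem_alloc_pages(object, oldsize, newaddr + oldsize, newaddr + newsize,
                     VM_PROT_DEFAULT);
}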
/*
 * Actually maps the vm_pages to the chosen virtual addresses
 * in the reserved range.
 * It may block during PMAP_ENTER().
 */
void
kkt_do_mappings(void)
{
    unsigned int    ind_range;
    vm_offset_t     virt_addr;
    vm_page_t       m;

    mutex_lock(&kkt_mapping_lock);
    simple_lock(&kkt_virt_lock);

    for (ind_range = 0; ind_range < KKT_VIRT_SIZE; ind_range++) {
        if (kkt_virt_status[ind_range] == MAP_VADDR) {
            virt_addr = kkt_virt_start_vaddr + (ind_range * PAGE_SIZE);
            m = kkt_virt_vmp[ind_range];
            kkt_virt_status[ind_range] = USED_VADDR;
            simple_unlock(&kkt_virt_lock);

            PMAP_ENTER(kernel_pmap, virt_addr, m,
                       VM_PROT_READ | VM_PROT_WRITE, FALSE);

            simple_lock(&kkt_virt_lock);
        }
    }
    simple_unlock(&kkt_virt_lock);
    mutex_unlock(&kkt_mapping_lock);
}
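/*
 * Illustrative sketch (not part of the original source): how a slot in the
 * KKT virtual window might be handed to kkt_do_mappings().  Only MAP_VADDR
 * and USED_VADDR appear above, so the free-slot marker FREE_VADDR used here
 * is an assumption, as is the helper name.  A caller records the page under
 * kkt_virt_lock, marks the slot MAP_VADDR, and lets kkt_do_mappings() enter
 * the translation.
 */
static vm_offset_t
example_kkt_map_page(vm_page_t m)
{
    unsigned int    ind_range;
    vm_offset_t     virt_addr = 0;

    simple_lock(&kkt_virt_lock);
    for (ind_range = 0; ind_range < KKT_VIRT_SIZE; ind_range++) {
        if (kkt_virt_status[ind_range] == FREE_VADDR) {  /* assumed marker */
            kkt_virt_vmp[ind_range] = m;
            kkt_virt_status[ind_range] = MAP_VADDR;
            virt_addr = kkt_virt_start_vaddr + (ind_range * PAGE_SIZE);
            break;
        }
    }
    simple_unlock(&kkt_virt_lock);

    if (virt_addr != 0)
        kkt_do_mappings();      /* performs the actual PMAP_ENTER() */

    return virt_addr;           /* 0 if no slot was available */
}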
kern_return_t
kmem_io_map_copyout(
    vm_map_t        map,
    vm_offset_t     *addr,          /* actual addr of data */
    vm_offset_t     *alloc_addr,    /* page aligned addr */
    vm_size_t       *alloc_size,    /* size allocated */
    vm_map_copy_t   copy,
    vm_size_t       min_size)       /* Do at least this much */
{
    vm_offset_t     myaddr, offset;
    vm_size_t       mysize, copy_size;
    kern_return_t   ret;
    vm_page_t       *page_list;
    vm_map_copy_t   new_copy;
    int             i;

    assert(copy->type == VM_MAP_COPY_PAGE_LIST);
    assert(min_size != 0);

    /*
     * Figure out the size in vm pages.
     */
    min_size += copy->offset - trunc_page(copy->offset);
    min_size = round_page(min_size);
    mysize = round_page(copy->offset + copy->size) -
        trunc_page(copy->offset);

    /*
     * If total size is larger than one page list and
     * we don't have to do more than one page list, then
     * only do one page list.
     *
     * XXX  Could be much smarter about this ... like trimming length
     * XXX  if we need more than one page list but not all of them.
     */
    copy_size = ptoa(copy->cpy_npages);
    if (mysize > copy_size && copy_size > min_size)
        mysize = copy_size;

    /*
     * Allocate some address space in the map (must be kernel
     * space).
     */
    myaddr = vm_map_min(map);
    ret = vm_map_enter(map, &myaddr, mysize,
                       (vm_offset_t) 0, TRUE,
                       VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
                       VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
    if (ret != KERN_SUCCESS)
        return(ret);

    /*
     * Tell the pmap module that this will be wired, and
     * enter the mappings.
     */
    pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);

    *addr = myaddr + (copy->offset - trunc_page(copy->offset));
    *alloc_addr = myaddr;
    *alloc_size = mysize;

    offset = myaddr;
    page_list = &copy->cpy_page_list[0];

    while (TRUE) {
        for (i = 0; i < copy->cpy_npages; i++, offset += PAGE_SIZE) {
            PMAP_ENTER(vm_map_pmap(map), offset, *page_list,
                       VM_PROT_READ, TRUE);
            page_list++;
        }

        if (offset == (myaddr + mysize))
            break;

        /*
         * Onward to the next page_list.  The extend_cont
         * leaves the current page list's pages alone;
         * they'll be cleaned up at discard.  Reset this
         * copy's continuation to discard the next one.
         */
        vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);

        if (ret != KERN_SUCCESS) {
            kmem_io_map_deallocate(map, myaddr, mysize);
            return(ret);
        }
        copy->cpy_cont = vm_map_copy_discard_cont;
        copy->cpy_cont_args = (char *) new_copy;
        copy = new_copy;
        page_list = &copy->cpy_page_list[0];
    }

    return(ret);
}
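/*
 * Illustrative sketch (not part of the original source): a device-level
 * caller would typically map a page-list copy object with
 * kmem_io_map_copyout(), perform its I/O against the returned address, and
 * then tear the wired mappings down with kmem_io_map_deallocate().  The map
 * name device_io_map and the helper name are assumptions for the example.
 */
static kern_return_t
example_io_with_copy(vm_map_copy_t copy, vm_size_t size)
{
    vm_offset_t     addr, alloc_addr;
    vm_size_t       alloc_size;
    kern_return_t   kr;

    /* Map at least `size` bytes of the copy's pages starting at addr. */
    kr = kmem_io_map_copyout(device_io_map, &addr, &alloc_addr,
                             &alloc_size, copy, size);
    if (kr != KERN_SUCCESS)
        return kr;

    /* ... perform the device I/O against the data at addr ... */

    /* Remove the wired mappings and free the address range. */
    kmem_io_map_deallocate(device_io_map, alloc_addr, alloc_size);
    return KERN_SUCCESS;
}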
kern_return_t
kernel_memory_allocate(
    register vm_map_t       map,
    register vm_offset_t    *addrp,
    register vm_size_t      size,
    register vm_offset_t    mask,
    int                     flags)
{
    vm_object_t             object;
    vm_object_offset_t      offset;
    vm_object_offset_t      pg_offset;
    vm_map_entry_t          entry;
    vm_map_offset_t         map_addr, fill_start;
    vm_map_offset_t         map_mask;
    vm_map_size_t           map_size, fill_size;
    kern_return_t           kr;
    vm_page_t               mem;
    vm_page_t               guard_page_list = NULL;
    vm_page_t               wired_page_list = NULL;
    int                     guard_page_count = 0;
    int                     wired_page_count = 0;
    int                     i;
    int                     vm_alloc_flags;

    if (! vm_kernel_ready) {
        panic("kernel_memory_allocate: VM is not ready");
    }

    if (size == 0) {
        *addrp = 0;
        return KERN_INVALID_ARGUMENT;
    }
    map_size = vm_map_round_page(size);
    map_mask = (vm_map_offset_t) mask;
    vm_alloc_flags = 0;

    /*
     * limit the size of a single extent of wired memory
     * to try and limit the damage to the system if
     * too many pages get wired down
     */
    if (map_size > (1 << 30)) {
        return KERN_RESOURCE_SHORTAGE;
    }

    /*
     * Guard pages:
     *
     * Guard pages are implemented as fictitious pages.  By placing guard pages
     * on either end of a stack, they can help detect cases where a thread walks
     * off either end of its stack.  They are allocated and set up here and attempts
     * to access those pages are trapped in vm_fault_page().
     *
     * The map_size we were passed may include extra space for
     * guard pages.  If those were requested, then back it out of fill_size
     * since vm_map_find_space() takes just the actual size not including
     * guard pages.  Similarly, fill_start indicates where the actual pages
     * will begin in the range.
     */
    fill_start = 0;
    fill_size = map_size;

    if (flags & KMA_GUARD_FIRST) {
        vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
        fill_start += PAGE_SIZE_64;
        fill_size -= PAGE_SIZE_64;
        if (map_size < fill_start + fill_size) {
            /* no space for a guard page */
            *addrp = 0;
            return KERN_INVALID_ARGUMENT;
        }
        guard_page_count++;
    }
    if (flags & KMA_GUARD_LAST) {
        vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
        fill_size -= PAGE_SIZE_64;
        if (map_size <= fill_start + fill_size) {
            /* no space for a guard page */
            *addrp = 0;
            return KERN_INVALID_ARGUMENT;
        }
        guard_page_count++;
    }
    wired_page_count = (int) (fill_size / PAGE_SIZE_64);
    assert(wired_page_count * PAGE_SIZE_64 == fill_size);

    for (i = 0; i < guard_page_count; i++) {
        for (;;) {
            mem = vm_page_grab_guard();

            if (mem != VM_PAGE_NULL)
                break;
            if (flags & KMA_NOPAGEWAIT) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out;
            }
            vm_page_more_fictitious();
        }
        mem->pageq.next = (queue_entry_t)guard_page_list;
        guard_page_list = mem;
    }

    for (i = 0; i < wired_page_count; i++) {
        uint64_t        unavailable;

        for (;;) {
            if (flags & KMA_LOMEM)
                mem = vm_page_grablo();
            else
                mem = vm_page_grab();

            if (mem != VM_PAGE_NULL)
                break;

            if (flags & KMA_NOPAGEWAIT) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out;
            }
            if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out;
            }
            unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;

            if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
                kr = KERN_RESOURCE_SHORTAGE;
                goto out;
            }
            VM_PAGE_WAIT();
        }
        mem->pageq.next = (queue_entry_t)wired_page_list;
        wired_page_list = mem;
    }

    /*
     * Allocate a new object (if necessary).  We must do this before
     * locking the map, or risk deadlock with the default pager.
     */
    if ((flags & KMA_KOBJECT) != 0) {
        object = kernel_object;
        vm_object_reference(object);
    } else {
        object = vm_object_allocate(map_size);
    }

    kr = vm_map_find_space(map, &map_addr,
                           fill_size, map_mask,
                           vm_alloc_flags, &entry);
    if (KERN_SUCCESS != kr) {
        vm_object_deallocate(object);
        goto out;
    }

    entry->object.vm_object = object;
    entry->offset = offset = (object == kernel_object) ? map_addr : 0;

    entry->wired_count++;

    if (flags & KMA_PERMANENT)
        entry->permanent = TRUE;

    if (object != kernel_object)
        vm_object_reference(object);
    vm_object_lock(object);
    vm_map_unlock(map);

    pg_offset = 0;

    if (fill_start) {
        if (guard_page_list == NULL)
            panic("kernel_memory_allocate: guard_page_list == NULL");

        mem = guard_page_list;
        guard_page_list = (vm_page_t)mem->pageq.next;
        mem->pageq.next = NULL;

        vm_page_insert(mem, object, offset + pg_offset);

        mem->busy = FALSE;
        pg_offset += PAGE_SIZE_64;
    }
    for (pg_offset = fill_start; pg_offset < fill_start + fill_size;
         pg_offset += PAGE_SIZE_64) {
        if (wired_page_list == NULL)
            panic("kernel_memory_allocate: wired_page_list == NULL");

        mem = wired_page_list;
        wired_page_list = (vm_page_t)mem->pageq.next;
        mem->pageq.next = NULL;
        mem->wire_count++;

        vm_page_insert(mem, object, offset + pg_offset);

        mem->busy = FALSE;
        mem->pmapped = TRUE;
        mem->wpmapped = TRUE;

        PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
                   VM_PROT_READ | VM_PROT_WRITE,
                   object->wimg_bits & VM_WIMG_MASK,
                   TRUE);

        if (flags & KMA_NOENCRYPT) {
            bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

            pmap_set_noencrypt(mem->phys_page);
        }
    }