/*
 * vm32_region_info_64:
 *
 * MACH_VM_DEBUG-only introspection: look up the VM map entry covering
 * (or following) 'address', fill in a vm_info_region_64_t describing it,
 * and return an out-of-line array describing the entry's VM object and
 * its whole shadow chain.  The OOL buffer is grown-and-retried until it
 * can hold every object in the chain, then trimmed and handed back to
 * the caller as a vm_map_copy_t.
 *
 * Returns KERN_FAILURE when MACH_VM_DEBUG is not configured,
 * KERN_INVALID_TASK for a null map, KERN_NO_SPACE when no entry exists
 * at or above 'address', KERN_RESOURCE_SHORTAGE on allocation failure.
 */
kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				/* no entry contains 'address'; use the next one up */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					/* ran off the end of the map */
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
						    addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = VME_SUBMAP(entry);
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* hand-over-hand: take the object lock before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/* walk the shadow chain, recording each object if there is room */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				     cobject->purgable == VM_PURGABLE_EMPTY);
			}

			/* count even when out of room, so we know how much to grow */
			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* over-allocate 2x in case the chain grows before the retry */
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate(ipc_kernel_map, &addr, size,
		    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;
		kr = vm_map_wire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ|VM_PROT_WRITE,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;
		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		/*
		 * FIX: was the mojibake '©' (a mangled '&copy'); the copyin
		 * consumes the wired buffer and produces the OOL descriptor
		 * returned to the caller.
		 */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used)
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
static void* commpage_allocate( vm_map_t submap, // commpage32_map or commpage_map64 size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED vm_prot_t uperm) { vm_offset_t kernel_addr = 0; // address of commpage in kernel map vm_offset_t zero = 0; vm_size_t size = area_used; // size actually populated vm_map_entry_t entry; ipc_port_t handle; kern_return_t kr; if (submap == NULL) panic("commpage submap is null"); if ((kr = vm_map(kernel_map, &kernel_addr, area_used, 0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_OSFMK), NULL, 0, FALSE, VM_PROT_ALL, VM_PROT_ALL, VM_INHERIT_NONE))) panic("cannot allocate commpage %d", kr); if ((kr = vm_map_wire(kernel_map, kernel_addr, kernel_addr+area_used, VM_PROT_DEFAULT|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), FALSE))) panic("cannot wire commpage: %d", kr); /* * Now that the object is created and wired into the kernel map, mark it so that no delay * copy-on-write will ever be performed on it as a result of mapping it into user-space. * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and * that would be a real disaster. * * JMM - What we really need is a way to create it like this in the first place. */ if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)) panic("cannot find commpage entry %d", kr); VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE; if ((kr = mach_make_memory_entry( kernel_map, // target map &size, // size kernel_addr, // offset (address in kernel map) uperm, // protections as specified &handle, // this is the object handle we get NULL ))) // parent_entry (what is this?) 
panic("cannot make entry for commpage %d", kr); if ((kr = vm_map_64( submap, // target map (shared submap) &zero, // address (map into 1st page in submap) area_used, // size 0, // mask VM_FLAGS_FIXED, // flags (it must be 1st page in submap) handle, // port is the memory entry we just made 0, // offset (map 1st page in memory entry) FALSE, // copy uperm, // cur_protection (R-only in user map) uperm, // max_protection VM_INHERIT_SHARE ))) // inheritance panic("cannot map commpage %d", kr); ipc_port_release(handle); /* Make the kernel mapping non-executable. This cannot be done * at the time of map entry creation as mach_make_memory_entry * cannot handle disjoint permissions at this time. */ kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE); assert (kr == KERN_SUCCESS); return (void*)(intptr_t)kernel_addr; // return address in kernel map }
/*
 * __CFMessagePortPerform:
 *
 * Run-loop callback for an incoming CFMessagePort Mach message.  Wraps
 * the payload (inline or out-of-line) in a no-copy CFData, invokes the
 * port's callout, and builds a reply message carrying the callout's
 * returned bytes (inline if small, out-of-line otherwise).  Returns the
 * reply message to be sent, or NULL if the port is no longer valid.
 */
static void *__CFMessagePortPerform(void *msg, CFIndex size, CFAllocatorRef allocator, void *info) {
    CFMessagePortRef ms = info;
    struct __CFMessagePortMachMessage *msgp = msg;
    struct __CFMessagePortMachMessage *replymsg;
    void *context_info;
    void (*context_release)(const void *);
    CFDataRef returnData, data = NULL;
    void *return_bytes = NULL;
    CFIndex return_len = 0;
    int32_t msgid;
    __CFMessagePortLock(ms);
    if (!__CFMessagePortIsValid(ms)) {
	__CFMessagePortUnlock(ms);
	return NULL;
    }
    // assert: 0 < (int32_t)msgp->head.msgh_id
    /* Retain the context info under the lock so it outlives the callout. */
    if (NULL != ms->_context.retain) {
	context_info = (void *)ms->_context.retain(ms->_context.info);
	context_release = ms->_context.release;
    } else {
	context_info = ms->_context.info;
	context_release = NULL;
    }
    __CFMessagePortUnlock(ms);
    /* Create no-copy, no-free-bytes wrapper CFData */
    if (0 == msgp->body.msgh_descriptor_count) {
	/* Inline payload; byteslen < 0 means malformed, so pass NULL data. */
	int32_t byteslen = CFSwapInt32LittleToHost(msgp->contents.msg0.byteslen);
	msgid = CFSwapInt32LittleToHost(msgp->contents.msg0.msgid);
	if (0 <= byteslen) {
	    data = CFDataCreateWithBytesNoCopy(kCFAllocatorSystemDefault, msgp->contents.msg0.bytes, byteslen, kCFAllocatorNull);
	}
    } else {
	/* Out-of-line payload; wrap the OOL region without copying. */
	msgid = CFSwapInt32LittleToHost(msgp->contents.msg1.msgid);
	data = CFDataCreateWithBytesNoCopy(kCFAllocatorSystemDefault, msgp->contents.msg1.desc.out_of_line.address, msgp->contents.msg1.desc.out_of_line.size, kCFAllocatorNull);
    }
    returnData = ms->_callout(ms, msgid, data, context_info);
    /* Now, returnData could be (1) NULL, (2) an ordinary data < MAX_INLINE,
    (3) ordinary data >= MAX_INLINE, (4) a no-copy data < MAX_INLINE,
    (5) a no-copy data >= MAX_INLINE.
    In cases (2) and (4), we send the return bytes inline in the Mach message,
    so can release the returnData object here.
    In cases (3) and (5), we'll send the data out-of-line, we need to
    create a copy of the memory, which we'll have the kernel autodeallocate
    for us on send.
    In case (4) also, the bytes in the return data may be part of the bytes
    in "data" that we sent into the callout, so if the incoming data
    was received out of line, we wouldn't be able to clean up the out-of-line
    wad until the message was sent either, if we didn't make the copy. */
    if (NULL != returnData) {
	return_len = CFDataGetLength(returnData);
	if (return_len < __CFMessagePortMaxInlineBytes) {
	    return_bytes = (void *)CFDataGetBytePtr(returnData);
	} else {
	    return_bytes = NULL;
	    /* NOTE(review): the vm_allocate result is not checked; if it
	     * fails, return_bytes stays NULL and the memmove below would
	     * fault — confirm whether a failure check should be added. */
	    vm_allocate(mach_task_self(), (vm_address_t *)&return_bytes, return_len, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MACH_MSG));
	    /* vm_copy would only be a win here if the source address
	    is page aligned; it is a lose in all other cases, since
	    the kernel will just do the memmove for us (but not in
	    as simple a way). */
	    memmove(return_bytes, CFDataGetBytePtr(returnData), return_len);
	}
    }
    /* Reply msgh_id is the negation of the request's, pairing the two. */
    replymsg = __CFMessagePortCreateMessage(allocator, true, msgp->head.msgh_remote_port, MACH_PORT_NULL, -1 * (int32_t)msgp->head.msgh_id, msgid, return_bytes, return_len);
    /* NOTE(review): replymsg is dereferenced without a NULL check —
     * presumably __CFMessagePortCreateMessage cannot fail here; verify. */
    if (1 == replymsg->body.msgh_descriptor_count) {
	/* Kernel auto-deallocates the OOL reply buffer on send. */
	replymsg->contents.msg1.desc.out_of_line.deallocate = true;
    }
    if (data) CFRelease(data);
    if (1 == msgp->body.msgh_descriptor_count) {
	/* Done with the incoming OOL payload; release it. */
	vm_deallocate(mach_task_self(), (vm_address_t)msgp->contents.msg1.desc.out_of_line.address, msgp->contents.msg1.desc.out_of_line.size);
    }
    if (returnData) CFRelease(returnData);
    if (context_release) {
	context_release(context_info);
    }
    return replymsg;
}
/*
 * _dispatch_alloc_try_create_heap:
 *
 * Try to create the magazine heap and publish it into *heap_ptr with a
 * compare-and-swap.  On Mach, mach_vm_map with an alignment mask yields
 * an already-aligned region; elsewhere, over-allocate by one magazine
 * with mmap, then trim the unaligned slop at both ends.  If another
 * thread wins the CAS race, the freshly mapped region is released
 * (or PROT_NONE'd under DISPATCH_DEBUG).
 */
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;	/* alignment mask for mach_vm_map */
	mach_vm_address_t vm_addr = vm_page_size;
	/* Retry on transient failures; only KERN_NO_SPACE is tolerated. */
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(
			VM_MEMORY_LIBDISPATCH), MEMORY_OBJECT_NULL, 0, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	/* Over-allocate by one magazine so we can realign by trimming. */
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) !=
			MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		/* Already magazine-aligned: all slop is the extra top magazine. */
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		/* Round up to the next magazine boundary; split slop between ends. */
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		/* Debug: keep slop mapped but inaccessible to catch stray writes. */
		(void)dispatch_assume_zero(mprotect((void *)region,
				bottom_slop_len, PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region,
				bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH
	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE,
				PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
/*
 * bsd_init:
 *
 * One-shot initialization of the BSD half of the kernel, run at boot on
 * the kernel task's thread.  Order is load-bearing: locks and kauth come
 * before the first credential, process 0 before anything that needs a
 * proc, VFS before root mount, networking after autoconf.  Panics on any
 * unrecoverable failure.
 */
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t	netboot = FALSE;
#endif

	/* Tracing of each init step; compiled out unless re-enabled. */
#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	throttle_init();

	printf(copyright);

	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();

	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

#if CONFIG_DEV_KMEM
	bsd_init_kprintf("calling dev_kmem_init\n");
	dev_kmem_init();
#endif

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));

	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");

	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/* Initialize System Override call */
	init_system_override();

	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread this point and is safe without pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

	kernproc->task = kernel_task;

	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;

#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);

	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;
	/* kern_proc, shouldn't call up to DS for group membership */
	temp_pcred.cr_flags = CRF_NOMEMBERD;
	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;

	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred);

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already exisiting) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);

	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
#endif

	/* Create the file descriptor table. */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur =
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for one process: launchd.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD),
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	/* NOTE(review): vestigial check — no semaphore_create call precedes
	 * it, so 'ret' still holds the kmem_suballoc result from above
	 * (which already panicked on failure).  Dead guard; confirm and
	 * remove upstream. */
	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if CONFIG_PROC_UUID_POLICY
	/* Initial proc_uuid_policy subsystem */
	bsd_init_kprintf("calling proc_uuid_policy_init()\n");
	proc_uuid_policy_init();
#endif

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
	audit_init();
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_waitq_init\n");
	select_waitq_init();

	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
	iptap_init();
#if FLOW_DIVERT
	flow_divert_init();
#endif	/* FLOW_DIVERT */
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
#ifndef CONFIG_MEMORYSTATUS
    #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif
	/* Initialise background freezing */
	bsd_init_kprintf("calling memorystatus_freeze_init\n");
	memorystatus_freeze_init();
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling memorystatus_init\n");
	memorystatus_init();
#endif /* CONFIG_MEMORYSTATUS */

	bsd_init_kprintf("calling macx_init\n");
	macx_init();

	bsd_init_kprintf("calling acct_init\n");
	acct_init();

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif
#if NGIF
	/* Initialize gif interface (after lo0) */
	gif_init();
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
#if CONTENT_FILTER
	cfil_init();
#endif

#if PACKET_MANGLER
	pkt_mnglr_init();
#endif	

#if NECP
	/* Initialize Network Extension Control Policies */
	necp_init();
#endif

	netagent_init();

	/* register user tunnel kernel control handler */
	utun_register_control();
#if IPSEC
	ipsec_register_control();
#endif /* IPSEC */
	netsrc_init();
	nstat_init();
	tcp_cc_init();
#if MPTCP
	mptcp_control_register();
#endif /* MPTCP */
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is  already zero'ed */
	microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start);

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */

	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
#endif

	pal_kernel_announce();

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif

	bsd_init_kprintf("done\n");
}
IOReturn IOAccelReadFramebuffer(io_service_t framebuffer, uint32_t width, uint32_t height, size_t rowBytes, vm_address_t * result, vm_size_t * bytecount) { IOReturn err; io_service_t accelerator; UInt32 framebufferIndex; size_t size = 0; UInt32 surfaceID = 155; vm_address_t buffer = 0; IOAccelConnect connect = MACH_PORT_NULL; IOAccelDeviceRegion * region = NULL; IOAccelSurfaceInformation surfaceInfo; IOGraphicsAcceleratorInterface ** interface = 0; IOBlitterPtr copyRegionProc; IOBlitCopyRegion op; IOBlitSurface dest; SInt32 quality = 0; *result = 0; *bytecount = 0; dest.interfaceRef = NULL; do { err = IOAccelFindAccelerator(framebuffer, &accelerator, &framebufferIndex); if (kIOReturnSuccess != err) continue; err = IOAccelCreateSurface(accelerator, surfaceID, kIOAccelSurfaceModeWindowedBit | kIOAccelSurfaceModeColorDepth8888, &connect); IOObjectRelease(accelerator); if (kIOReturnSuccess != err) continue; size = rowBytes * height; region = calloc(1, sizeof (IOAccelDeviceRegion) + sizeof(IOAccelBounds)); if (!region) continue; region->num_rects = 1; region->bounds.x = region->rect[0].x = 0; region->bounds.y = region->rect[0].y = 0; region->bounds.h = region->rect[0].h = height; region->bounds.w = region->rect[0].w = width; err = vm_allocate(mach_task_self(), &buffer, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_COREGRAPHICS_FRAMEBUFFERS)); if (kIOReturnSuccess != err) continue; err = IOAccelSetSurfaceFramebufferShapeWithBackingAndLength(connect, region, kIOAccelSurfaceShapeIdentityScaleBit| kIOAccelSurfaceShapeNonBlockingBit| //kIOAccelSurfaceShapeStaleBackingBit | kIOAccelSurfaceShapeNonSimpleBit, 0, (IOVirtualAddress) buffer, (UInt32) rowBytes, (UInt32) size); if (kIOReturnSuccess != err) continue; err = IOCreatePlugInInterfaceForService(framebuffer, kIOGraphicsAcceleratorTypeID, kIOGraphicsAcceleratorInterfaceID, (IOCFPlugInInterface ***)&interface, &quality ); if (kIOReturnSuccess != err) continue; err = (*interface)->GetBlitter(interface, kIOBlitAllOptions, 
(kIOBlitTypeCopyRegion | kIOBlitTypeOperationType0), kIOBlitSourceFramebuffer, ©RegionProc); if (kIOReturnSuccess != err) continue; err = (*interface)->AllocateSurface(interface, kIOBlitHasCGSSurface, &dest, (void *) surfaceID); if (kIOReturnSuccess != err) continue; err = (*interface)->SetDestination(interface, kIOBlitSurfaceDestination, &dest); if (kIOReturnSuccess != err) continue; op.region = region; op.deltaX = 0; op.deltaY = 0; err = (*copyRegionProc)(interface, kNilOptions, (kIOBlitTypeCopyRegion | kIOBlitTypeOperationType0), kIOBlitSourceFramebuffer, &op.operation, (void *) 0); if (kIOReturnSuccess != err) continue; (*interface)->Flush(interface, kNilOptions); err = IOAccelWriteLockSurfaceWithOptions(connect, kIOAccelSurfaceLockInBacking, &surfaceInfo, sizeof(surfaceInfo)); if (kIOReturnSuccess != err) continue; (void ) IOAccelWriteUnlockSurfaceWithOptions(connect, kIOAccelSurfaceLockInBacking); } while (false); if (dest.interfaceRef) (*interface)->FreeSurface(interface, kIOBlitHasCGSSurface, &dest); // destroy the surface if (connect) (void) IOAccelDestroySurface(connect); if (region) free(region); if (interface) IODestroyPlugInInterface((IOCFPlugInInterface **)interface); if (kIOReturnSuccess == err) { *result = buffer; *bytecount = size; } return (err); }
/*
 * Initialize a buffer memory descriptor, allocating its backing buffer.
 *
 * options    buffer options: direction, kIOMemoryAutoPrepare,
 *            kIOMemoryPageable, kIOMemoryPhysicallyContiguous, sharing and
 *            kIOMapCacheMask cache-mode bits.
 * capacity   size in bytes of the buffer to allocate; must be non-zero.
 * alignment  requested byte alignment (forced up to page_size when any
 *            sharing-type option is set).
 * inTask     task to allocate in; only kernel_task is allowed unless the
 *            buffer is pageable.
 *
 * Returns true on success, false on any allocation or argument failure.
 */
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    vm_map_t map = 0;
    // The single range is passed to super by reference, virtually addressed.
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    // Shared memory must be at least page aligned.
    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    // Only pageable buffers may live in a non-kernel task.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            // Lazily create the expansion data that records the target map.
            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            // Take a reference on the task's map before remembering it.
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer,
                              round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer,
                             round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
            // NOTE(review): on this failure path the vm_allocate()d range is
            // not released here — presumably the destructor/free() path cleans
            // it up via reserved->map; verify.
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t  size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            kr = mach_make_memory_entry( map,
                        &size, _ranges.v[0].address,
                        memEntryCacheMode, &sharedMem,
                        NULL );

            // A short entry means the range could not be fully covered;
            // drop the send right and treat it as a VM error.
            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
kern_return_t kernel_memory_allocate( register vm_map_t map, register vm_offset_t *addrp, register vm_size_t size, register vm_offset_t mask, int flags, vm_tag_t tag) { vm_object_t object; vm_object_offset_t offset; vm_object_offset_t pg_offset; vm_map_entry_t entry = NULL; vm_map_offset_t map_addr, fill_start; vm_map_offset_t map_mask; vm_map_size_t map_size, fill_size; kern_return_t kr, pe_result; vm_page_t mem; vm_page_t guard_page_list = NULL; vm_page_t wired_page_list = NULL; int guard_page_count = 0; int wired_page_count = 0; int i; int vm_alloc_flags; vm_prot_t kma_prot; if (! vm_kernel_ready) { panic("kernel_memory_allocate: VM is not ready"); } map_size = vm_map_round_page(size, VM_MAP_PAGE_MASK(map)); map_mask = (vm_map_offset_t) mask; vm_alloc_flags = VM_MAKE_TAG(tag); /* Check for zero allocation size (either directly or via overflow) */ if (map_size == 0) { *addrp = 0; return KERN_INVALID_ARGUMENT; } /* * limit the size of a single extent of wired memory * to try and limit the damage to the system if * too many pages get wired down * limit raised to 2GB with 128GB max physical limit */ if ( !(flags & KMA_VAONLY) && map_size > (1ULL << 31)) { return KERN_RESOURCE_SHORTAGE; } /* * Guard pages: * * Guard pages are implemented as ficticious pages. By placing guard pages * on either end of a stack, they can help detect cases where a thread walks * off either end of its stack. They are allocated and set up here and attempts * to access those pages are trapped in vm_fault_page(). * * The map_size we were passed may include extra space for * guard pages. If those were requested, then back it out of fill_size * since vm_map_find_space() takes just the actual size not including * guard pages. Similarly, fill_start indicates where the actual pages * will begin in the range. 
*/ fill_start = 0; fill_size = map_size; if (flags & KMA_GUARD_FIRST) { vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE; fill_start += PAGE_SIZE_64; fill_size -= PAGE_SIZE_64; if (map_size < fill_start + fill_size) { /* no space for a guard page */ *addrp = 0; return KERN_INVALID_ARGUMENT; } guard_page_count++; } if (flags & KMA_GUARD_LAST) { vm_alloc_flags |= VM_FLAGS_GUARD_AFTER; fill_size -= PAGE_SIZE_64; if (map_size <= fill_start + fill_size) { /* no space for a guard page */ *addrp = 0; return KERN_INVALID_ARGUMENT; } guard_page_count++; } wired_page_count = (int) (fill_size / PAGE_SIZE_64); assert(wired_page_count * PAGE_SIZE_64 == fill_size); for (i = 0; i < guard_page_count; i++) { for (;;) { mem = vm_page_grab_guard(); if (mem != VM_PAGE_NULL) break; if (flags & KMA_NOPAGEWAIT) { kr = KERN_RESOURCE_SHORTAGE; goto out; } vm_page_more_fictitious(); } mem->pageq.next = (queue_entry_t)guard_page_list; guard_page_list = mem; } if (! (flags & KMA_VAONLY)) { for (i = 0; i < wired_page_count; i++) { uint64_t unavailable; for (;;) { if (flags & KMA_LOMEM) mem = vm_page_grablo(); else mem = vm_page_grab(); if (mem != VM_PAGE_NULL) break; if (flags & KMA_NOPAGEWAIT) { kr = KERN_RESOURCE_SHORTAGE; goto out; } if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) { kr = KERN_RESOURCE_SHORTAGE; goto out; } unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE; if (unavailable > max_mem || map_size > (max_mem - unavailable)) { kr = KERN_RESOURCE_SHORTAGE; goto out; } VM_PAGE_WAIT(); } mem->pageq.next = (queue_entry_t)wired_page_list; wired_page_list = mem; } } /* * Allocate a new object (if necessary). We must do this before * locking the map, or risk deadlock with the default pager. 
*/ if ((flags & KMA_KOBJECT) != 0) { object = kernel_object; vm_object_reference(object); } else if ((flags & KMA_COMPRESSOR) != 0) { object = compressor_object; vm_object_reference(object); } else { object = vm_object_allocate(map_size); } kr = vm_map_find_space(map, &map_addr, fill_size, map_mask, vm_alloc_flags, &entry); if (KERN_SUCCESS != kr) { vm_object_deallocate(object); goto out; } if (object == kernel_object || object == compressor_object) { offset = map_addr; } else { offset = 0; } VME_OBJECT_SET(entry, object); VME_OFFSET_SET(entry, offset); if (object != compressor_object) entry->wired_count++; if (flags & KMA_PERMANENT) entry->permanent = TRUE; if (object != kernel_object && object != compressor_object) vm_object_reference(object); vm_object_lock(object); vm_map_unlock(map); pg_offset = 0; if (fill_start) { if (guard_page_list == NULL) panic("kernel_memory_allocate: guard_page_list == NULL"); mem = guard_page_list; guard_page_list = (vm_page_t)mem->pageq.next; mem->pageq.next = NULL; vm_page_insert(mem, object, offset + pg_offset); mem->busy = FALSE; pg_offset += PAGE_SIZE_64; } kma_prot = VM_PROT_READ | VM_PROT_WRITE; if (flags & KMA_VAONLY) { pg_offset = fill_start + fill_size; } else { for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) { if (wired_page_list == NULL) panic("kernel_memory_allocate: wired_page_list == NULL"); mem = wired_page_list; wired_page_list = (vm_page_t)mem->pageq.next; mem->pageq.next = NULL; mem->wire_count++; vm_page_insert_wired(mem, object, offset + pg_offset, tag); mem->busy = FALSE; mem->pmapped = TRUE; mem->wpmapped = TRUE; PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem, kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, PMAP_OPTIONS_NOWAIT, pe_result); if (pe_result == KERN_RESOURCE_SHORTAGE) { vm_object_unlock(object); PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? 
VM_MEM_STACK : 0), TRUE); vm_object_lock(object); } if (flags & KMA_NOENCRYPT) { bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE); pmap_set_noencrypt(mem->phys_page); } } }