/* * If we try to allocate memory occupied by superpages as normal pages * - the call should fail */ boolean_t test_reallocate() { mach_vm_address_t addr = 0, addr2; mach_vm_size_t size = SUPERPAGE_SIZE; int kr, ret; int i; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; /* attempt to allocate every sub-page of superpage */ for (i=0; i<SUPERPAGE_SIZE/PAGE_SIZE; i++) { addr2 = addr + i*PAGE_SIZE; size = PAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr2, size, 0); if ((ret = check_kr(kr, "mach_vm_allocate"))) { sprintf(error, "could allocate already allocated space, page %d", i); mach_vm_deallocate(mach_task_self(), addr, size); return FALSE; } } kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; return TRUE; }
/*
 * If we fork with active superpages
 * - the parent should still be able to access the superpages
 * - the child should not be able to access the superpages
 */
boolean_t
test_fork()
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = SUPERPAGE_SIZE;
	int kr, ret;
	pid_t pid;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB);
	if (!(ret = check_kr(kr, "mach_vm_allocate")))
		return ret;

	/* flush buffered output so the child does not re-emit it after fork */
	fflush(stdout);

	if ((pid = fork())) {
		/* parent: superpage must remain readable and writable */
		if (!(ret = check_rw(addr, size)))
			return ret;
		waitpid(pid, &ret, 0);
		/*
		 * NOTE(review): 'ret' is the raw wait status, not WEXITSTATUS().
		 * A zero status means the child exited with FALSE (0), i.e. the
		 * child COULD read the superpage; any non-zero status is treated
		 * as success here — confirm this matches the intended encoding.
		 */
		if (!ret) {
			sprintf(error, "child could access superpage");
			return ret;
		}
	} else {
		/* child: superpage must not be inherited, i.e. not readable */
		if (!(ret = check_nr(addr, size, NULL)))
			exit(ret);
		exit(TRUE);
	}

	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate")))
		return ret;

	return TRUE;
}
// Allocates a fresh anonymous VM region of at least 'size' bytes and wraps it,
// together with a Mach memory-entry port for sharing, in a SharedMemory object.
// Returns 0 on allocation or port-creation failure.
PassRefPtr<SharedMemory> SharedMemory::create(size_t size)
{
    ASSERT(size);

    // BUG FIX: 'address' is an in/out parameter; with VM_FLAGS_ANYWHERE the
    // kernel treats the incoming value as a placement hint, so it must not be
    // passed uninitialized.
    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
        return 0;
    }

    // Create a Mach port that represents the shared memory.
    mach_port_t port;
    memory_object_size_t memoryObjectSize = round_page(size);
    kr = mach_make_memory_entry_64(mach_task_self(), &memoryObjectSize, address, VM_PROT_DEFAULT, &port, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to create a mach port for shared memory. %s (%x)", mach_error_string(kr), kr);
        // Don't leak the freshly allocated region on failure.
        mach_vm_deallocate(mach_task_self(), address, round_page(size));
        return 0;
    }

    ASSERT(memoryObjectSize >= round_page(size));

    RefPtr<SharedMemory> sharedMemory(adoptRef(new SharedMemory));
    sharedMemory->m_size = size;
    sharedMemory->m_data = toPointer(address);
    sharedMemory->m_port = port;

    return sharedMemory.release();
}
/*
 * munmap(2): validate the user-supplied range and remove the mapping from
 * the current task's address space. All failures map to EINVAL per spec.
 */
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t user_addr = (mach_vm_offset_t) uap->addr;
	mach_vm_size_t user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	/* UNIX SPEC: the address must be page-aligned */
	if (user_addr & PAGE_MASK_64)
		return EINVAL;

	/* reject ranges that wrap around the top of the address space */
	if (user_addr + user_size < user_addr)
		return EINVAL;

	/* UNIX SPEC: a zero-length unmap is invalid */
	if (user_size == 0)
		return EINVAL;

	if (mach_vm_deallocate(current_map(), user_addr, user_size) != KERN_SUCCESS)
		return EINVAL;

	return 0;
}
/*
 * Remove one process-side SysV shared memory attachment.
 *
 * If 'deallocate' is non-zero, the region is first unmapped from the current
 * task's address space. The slot is then marked free, the detach time stamped,
 * and — if this was the last attachment of a segment already flagged
 * SHMSEG_REMOVED — the kernel segment itself is torn down.
 *
 * Returns 0 on success, EINVAL if the VM deallocation fails.
 */
static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s, int deallocate)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	mach_vm_size_t size;

	/* recover the segment index from the external shmid */
	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */
	if (deallocate) {
		result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
		if (result != KERN_SUCCESS)
			return EINVAL;
	}
	/* free this per-process mapping slot and record the detach time */
	shmmap_s->shmid = SHMID_UNALLOCATED;
	shmseg->u.shm_dtime = sysv_shmtime();
	/* last detach of a segment pending removal: destroy it */
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}
// Releases the VM region and the Mach memory-entry port owned by this object.
// In release-logging builds a failure is logged before asserting; otherwise a
// failure only trips the debug assertion.
SharedMemory::~SharedMemory()
{
    if (m_data) {
        kern_return_t kr = mach_vm_deallocate(mach_task_self(), toVMAddress(m_data), round_page(m_size));
#if RELEASE_LOG_DISABLED
        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
#else
        if (kr != KERN_SUCCESS) {
            RELEASE_LOG_ERROR(VirtualMemory, "%p - SharedMemory::~SharedMemory: Failed to deallocate shared memory. %{public}s (%x)", this, mach_error_string(kr), kr);
            ASSERT_NOT_REACHED();
        }
#endif
    }

    if (m_port) {
        // Drop our send right on the memory-entry port.
        kern_return_t kr = mach_port_deallocate(mach_task_self(), m_port);
#if RELEASE_LOG_DISABLED
        ASSERT_UNUSED(kr, kr == KERN_SUCCESS);
#else
        if (kr != KERN_SUCCESS) {
            RELEASE_LOG_ERROR(VirtualMemory, "%p - SharedMemory::~SharedMemory: Failed to deallocate port. %{public}s (%x)", this, mach_error_string(kr), kr);
            ASSERT_NOT_REACHED();
        }
#endif
    }
}
boolean_t test_fileio() { mach_vm_address_t addr1 = 0; mach_vm_address_t addr2 = 0; mach_vm_size_t size = SUPERPAGE_SIZE; int kr, ret; int fd; unsigned int bytes; /* allocate one superpage */ kr = mach_vm_allocate(mach_task_self(), &addr1, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate (1)"))) return ret; /* allocate base pages (superpage-sized) */ kr = mach_vm_allocate(mach_task_self(), &addr2, size, VM_FLAGS_ANYWHERE); if (!(ret = check_kr(kr, "mach_vm_allocate (2)"))) return ret; if ((fd = open(FILENAME, O_RDONLY))<0) { sprintf(error, "couldn't open %s", FILENAME); return FALSE; } fcntl(fd, F_NOCACHE, 1); /* read kernel into superpage */ if ((bytes = read(fd, (void*)(uintptr_t)addr1, SUPERPAGE_SIZE)) < SUPERPAGE_SIZE) { sprintf(error, "short read (1)"); return FALSE; } lseek(fd, 0, SEEK_SET); /* read kernel into base pages */ if ((bytes = read(fd, (void*)(uintptr_t)addr2, SUPERPAGE_SIZE)) < SUPERPAGE_SIZE) { sprintf(error, "short read (2)"); return FALSE; } close(fd); /* compare */ if (memcmp((void*)(uintptr_t)addr1, (void*)(uintptr_t)addr2, bytes)) { sprintf(error, "read data corrupt"); return FALSE; } kr = mach_vm_deallocate(mach_task_self(), addr1, size); if (!(ret = check_kr(kr, "mach_vm_deallocate (1)"))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr2, size); if (!(ret = check_kr(kr, "mach_vm_deallocate (2)"))) return ret; return TRUE; }
/*
 * Free a kext allocation previously made in g_kext_map.
 * Deallocation of a kext region is not expected to fail; treat failure
 * as a programming error.
 */
void
kext_free(vm_offset_t addr, vm_size_t size)
{
	kern_return_t rval;

	rval = mach_vm_deallocate(g_kext_map, addr, size);
	assert(rval == KERN_SUCCESS);
	(void)rval; /* FIX: silence unused-variable warning when assert() compiles out (NDEBUG) */
}
// Releases the VM region backing this shared-memory object, if one exists.
SharedMemory::~SharedMemory()
{
    if (m_data) {
        kern_return_t result = mach_vm_deallocate(mach_task_self(), toVMAddress(m_data), round_page(m_size));
        ASSERT_UNUSED(result, result == KERN_SUCCESS);
    }
}
// We should consider moving this into each MacThread. static void get_threads_profile_data(DNBProfileDataScanType scanType, task_t task, nub_process_t pid, std::vector<uint64_t> &threads_id, std::vector<std::string> &threads_name, std::vector<uint64_t> &threads_used_usec) { kern_return_t kr; thread_act_array_t threads; mach_msg_type_number_t tcnt; kr = task_threads(task, &threads, &tcnt); if (kr != KERN_SUCCESS) return; for (int i = 0; i < tcnt; i++) { thread_identifier_info_data_t identifier_info; mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT; kr = ::thread_info(threads[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&identifier_info, &count); if (kr != KERN_SUCCESS) continue; thread_basic_info_data_t basic_info; count = THREAD_BASIC_INFO_COUNT; kr = ::thread_info(threads[i], THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count); if (kr != KERN_SUCCESS) continue; if ((basic_info.flags & TH_FLAGS_IDLE) == 0) { nub_thread_t tid = MachThread::GetGloballyUniqueThreadIDForMachPortID (threads[i]); threads_id.push_back(tid); if ((scanType & eProfileThreadName) && (identifier_info.thread_handle != 0)) { struct proc_threadinfo proc_threadinfo; int len = ::proc_pidinfo(pid, PROC_PIDTHREADINFO, identifier_info.thread_handle, &proc_threadinfo, PROC_PIDTHREADINFO_SIZE); if (len && proc_threadinfo.pth_name[0]) { threads_name.push_back(proc_threadinfo.pth_name); } else { threads_name.push_back(""); } } else { threads_name.push_back(""); } struct timeval tv; struct timeval thread_tv; TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &thread_tv); TIME_VALUE_TO_TIMEVAL(&basic_info.system_time, &tv); timeradd(&thread_tv, &tv, &thread_tv); uint64_t used_usec = thread_tv.tv_sec * 1000000ULL + thread_tv.tv_usec; threads_used_usec.push_back(used_usec); } kr = mach_port_deallocate(mach_task_self(), threads[i]); } kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)threads, tcnt * sizeof(*threads)); }
/*
 * Release the locally mapped page (if any) and reset the associated
 * bookkeeping globals to their "unmapped" state.
 */
static void
fpm_mach_vm_deallocate()
{
	if (!local_page)
		return;

	mach_vm_deallocate(mach_task_self(), local_page, local_size);
	target_page_base = 0;
	local_page = 0;
	local_size = 0;
}
/*
 * Free a large allocation obtained from the platform's virtual-memory
 * allocator, dispatching on the host OS:
 *  - Windows: VirtualFree with MEM_RELEASE (size must be 0 per that API)
 *  - macOS:   mach_vm_deallocate on the current task
 *  - others:  munmap
 * 'size' must match the original allocation size on the mmap/mach paths.
 */
void
pixie_free_huge(void *p, size_t size)
{
#if defined (WIN32)
    VirtualFree(p, 0, MEM_RELEASE);
#elif defined (__APPLE__)
    mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)p, size);
#else
    munmap(p, size);
#endif
}
/*
 * Demonstrates VM inheritance across fork(): one page is marked
 * VM_INHERIT_SHARE (child sees parent's writes) and one VM_INHERIT_COPY
 * (child gets a copy-on-write snapshot). The parent stamps distinctive
 * values into each page, forks, waits for the child, and both sides dump
 * what they observe via peek_at_some_memory().
 *
 * NOTE(review): page_shared/page_cow appear to be file-scope globals and
 * FIRST_UINT32/OUT_ON_MACH_ERROR project macros — defined outside this view.
 */
int
main(void)
{
    kern_return_t kr;
    int status;
    mach_port_t mytask = mach_task_self();
    mach_vm_size_t size = (mach_vm_size_t)vm_page_size;

    kr = mach_vm_allocate(mytask, &page_shared, size, VM_FLAGS_ANYWHERE);
    OUT_ON_MACH_ERROR("vm_allocate", kr);

    kr = mach_vm_allocate(mytask, &page_cow, size, VM_FLAGS_ANYWHERE);
    OUT_ON_MACH_ERROR("vm_allocate", kr);

    /* pick the inheritance behavior each page will have in the child */
    kr = mach_vm_inherit(mytask, page_shared, size, VM_INHERIT_SHARE);
    OUT_ON_MACH_ERROR("vm_inherit(VM_INHERIT_SHARE)", kr);

    kr = mach_vm_inherit(mytask, page_cow, size, VM_INHERIT_COPY);
    OUT_ON_MACH_ERROR("vm_inherit(VM_INHERIT_COPY)", kr);

    /* distinctive patterns so parent/child writes are distinguishable */
    FIRST_UINT32(page_shared) = (unsigned int)0xAAAAAAAA;
    FIRST_UINT32(page_cow) = (unsigned int)0xBBBBBBBB;

    printf("%-12s%-8s%-10s%-12s%-10s%s\n", "Process", "Page", "Contents", "VM Object", "Refcount", "Event");

    peek_at_some_memory("parent", "before forking");

    if (fork() == 0)
        child_process(); // this will also exit the child

    wait(&status);

    peek_at_some_memory("parent", "after child is done");

out:
    /* reached on success and (via OUT_ON_MACH_ERROR) on any Mach failure */
    mach_vm_deallocate(mytask, page_shared, size);
    mach_vm_deallocate(mytask, page_cow, size);

    exit(0);
}
/*
 * Legacy vm_deallocate() entry point: a thin compatibility shim that
 * forwards directly to the 64-bit-clean mach_vm_deallocate().
 */
kern_return_t
vm_deallocate(
	mach_port_name_t task,
	vm_address_t address,
	vm_size_t size)
{
	return mach_vm_deallocate(task, address, size);
}
// Tears down this shared-memory object: the backing VM region is released
// only when this instance owns it (m_shouldVMDeallocateData), and the
// memory-entry port's send right is dropped if one was created.
SharedMemory::~SharedMemory()
{
    if (m_data && m_shouldVMDeallocateData) {
        kern_return_t result = mach_vm_deallocate(mach_task_self(), toVMAddress(m_data), round_page(m_size));
        ASSERT_UNUSED(result, result == KERN_SUCCESS);
    }

    if (m_port) {
        kern_return_t result = mach_port_deallocate(mach_task_self(), m_port);
        ASSERT_UNUSED(result, result == KERN_SUCCESS);
    }
}
/* * If we deallocate a sub-page of a superpage, * - the call should succeed * - make the complete memory inaccessible */ boolean_t test_deallocatesubpage() { int kr; int ret; mach_vm_address_t addr = 0; mach_vm_size_t size = SUPERPAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr + PAGE_SIZE, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; if (!(ret = check_nr(addr, size, NULL))) return ret; return TRUE; }
/*
 * If we deallocate a superpage,
 * - the call should succeed
 * - make the memory inaccessible
 *
 * Operates on global_addr/global_size, which a prior test is expected to
 * have populated; if no allocation survived, the test reports a skip.
 */
boolean_t
test_deallocate()
{
	mach_vm_size_t size = SUPERPAGE_SIZE;
	int kr, ret;

	if (!global_addr) {
		sprintf(error, "skipped deallocation");
		return FALSE;
	}

	kr = mach_vm_deallocate(mach_task_self(), global_addr, global_size);
	if (!(ret = check_kr(kr, "mach_vm_deallocate")))
		return ret;
	/* NOTE(review): probes 'size' (SUPERPAGE_SIZE) although 'global_size'
	 * was deallocated — confirm the two are always equal here. */
	if (!(ret = check_nr(global_addr, size, NULL)))
		return ret;

	return TRUE;
}
/* * Tests one allocation/deallocaton cycle; used in a loop this tests for leaks */ boolean_t test_alloc_dealloc() { mach_vm_address_t addr = 0; mach_vm_size_t size = SUPERPAGE_SIZE; int kr, ret; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; if (!(ret = check_addr0(addr, "mach_vm_allocate"))) return ret; if (!(ret = check_align(addr))) return ret; if (!(ret = check_rw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; return TRUE; }
/*
 * Deallocate [address, address+size) in the target process's task map.
 * Returns 1 on success, 0 on failure.
 */
int
virtual_free(int pid, mach_vm_address_t address, mach_vm_size_t size)
{
	vm_map_t port = getport(pid);
	kern_return_t err = mach_vm_deallocate(port, address, size);

	return (err == KERN_SUCCESS) ? 1 : 0;
}
/**
 * Free the memory mapping.
 *
 * @param mobj The mapped-memory object whose VM range and task reference
 * should be released. The object itself is not freed here.
 *
 * @note Unlike most free() functions in this API, this function is async-safe.
 */
void plcrash_async_mobject_free (plcrash_async_mobject_t *mobj) {
    kern_return_t kt;
    /* Prefer the 64-bit-clean mach_vm API when the platform provides it. */
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#else
    kt = vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#endif
    /* Deallocation failure is logged but not fatal (best-effort cleanup). */
    if (kt != KERN_SUCCESS)
        PLCF_DEBUG("vm_deallocate() failure: %d", kt);

    /* Decrement our task refcount */
    mach_port_mod_refs(mach_task_self(), mobj->task, MACH_PORT_RIGHT_SEND, -1);
}
/*
 * Fast-path (trap) implementation of mach_vm_deallocate().
 *
 * Resolves the caller-supplied task port name and only proceeds when it
 * names the current task — cross-task deallocation must go through the
 * full MIG path. Returns MACH_SEND_INVALID_DEST for an invalid or foreign
 * target, otherwise the result of mach_vm_deallocate().
 */
int
_kernelrpc_mach_vm_deallocate_trap(struct _kernelrpc_mach_vm_deallocate_args *args)
{
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_vm_deallocate(task->map, args->address, args->size);

done:
	/* drop the reference taken by port_name_to_task() */
	if (task)
		task_deallocate(task);
	return (rv);
}
/* * If we allocate a superpage of any size read-write without specifying an address * - the call should succeed * - not return 0 * - the memory should be readable and writable * If we deallocate it, * - the call should succeed * - make the memory inaccessible */ boolean_t test_allocate_size_any() { int kr; int ret; mach_vm_address_t addr = 0; mach_vm_size_t size = 2*PAGE_SIZE; /* will be rounded up to some superpage size */ kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_ANY); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; if (!(ret = check_addr0(addr, "mach_vm_allocate"))) return ret; if (!(ret = check_rw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; if (!(ret = check_nr(addr, size, NULL))) return ret; return TRUE; }
/* * If we allocate a 2 MB superpage read-write at a 2 MB aligned address, * - the call should succeed * - return the address we wished for * - the memory should be readable and writable * If we deallocate it, * - the call should succeed * - make the memory inaccessible */ boolean_t test_allocatefixed() { int kr; int ret; mach_vm_address_t addr = FIXED_ADDRESS1; mach_vm_size_t size = SUPERPAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; if (!(ret = check_addr(addr, FIXED_ADDRESS1, "mach_vm_allocate"))) return ret; if (!(ret = check_rw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; if (!(ret = check_nr(addr, size, NULL))) return ret; return TRUE; }
// Allocates an anonymous VM region of at least 'size' bytes and wraps it in a
// SharedMemory via createFromVMBuffer(), marking the result as owning the
// region. Returns 0 if either step fails; the region is released on the
// wrap-failure path.
PassRefPtr<SharedMemory> SharedMemory::create(size_t size)
{
    ASSERT(size);

    // BUG FIX: 'address' is an in/out placement hint for mach_vm_allocate;
    // it must not be passed uninitialized even with VM_FLAGS_ANYWHERE.
    mach_vm_address_t address = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        LOG_ERROR("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
        return 0;
    }

    RefPtr<SharedMemory> sharedMemory = createFromVMBuffer(toPointer(address), size);
    if (!sharedMemory) {
        mach_vm_deallocate(mach_task_self(), address, round_page(size));
        return 0;
    }

    // This instance owns the VM region and must deallocate it on destruction.
    sharedMemory->m_shouldVMDeallocateData = true;

    return sharedMemory.release();
}
/* * If we try to write-protect superpages * - the call should succeed * - the memory should remain readable * - the memory should not be writable */ boolean_t test_readonly() { int kr; int ret; mach_vm_address_t addr = 0; mach_vm_size_t size = SUPERPAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; mach_vm_protect(mach_task_self(), addr, size, 0, VM_PROT_READ); if (!(ret = check_kr(kr, "mach_vm_protect"))) return ret; if (!(ret = check_r(addr, size, NULL))) return ret; if (!(ret = check_nw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; return TRUE; }
/* * If we try to wire superpages * - the call should succeed * - the memory should remain readable and writable */ boolean_t test_wire() { int kr; int ret; mach_vm_address_t addr = 0; mach_vm_size_t size = SUPERPAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; kr = mach_vm_wire(mach_host_self(), mach_task_self(), addr, size, VM_PROT_WRITE | VM_PROT_READ); if (!geteuid()) /* may fail as user */ if (!(ret = check_kr(kr, "mach_vm_wire"))) return ret; if (!(ret = check_rw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; return TRUE; }
/*
 * Dispose of a dispatch_data buffer according to its destructor:
 *  - DESTRUCTOR_FREE:          the buffer came from malloc; free() it
 *  - DESTRUCTOR_NONE:          caller retains ownership; do nothing
 *  - DESTRUCTOR_VM_DEALLOCATE: (Mach only) release the VM region
 *  - otherwise:                a caller-supplied block; run it (async on
 *                              'queue', or the default global queue if none)
 *
 * Note the #if HAVE_MACH block deliberately brackets one arm of the
 * else-if chain; keep the chain intact when editing.
 */
static void
_dispatch_data_destroy_buffer(const void* buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor)
{
	if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) {
		free((void*)buffer);
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) {
		// do nothing
#if HAVE_MACH
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) {
		mach_vm_size_t vm_size = size;
		mach_vm_address_t vm_addr = (uintptr_t)buffer;
		mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
#endif
	} else {
		if (!queue) {
			queue = dispatch_get_global_queue(
					DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		}
		// run the custom destructor block, releasing it afterwards
		dispatch_async_f(queue, destructor,
				_dispatch_call_block_and_release);
	}
}
/* * If we try to wire superpages * - the call should fail * - the memory should remain readable and writable * Currently, superpages are always wired. */ boolean_t test_unwire() { int kr; int ret; mach_vm_address_t addr = 0; mach_vm_size_t size = SUPERPAGE_SIZE; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | VM_FLAGS_SUPERPAGE_SIZE_2MB); if (!(ret = check_kr(kr, "mach_vm_allocate"))) return ret; kr = mach_vm_wire(mach_host_self(), mach_task_self(), addr, size, VM_PROT_NONE); if ((ret = check_kr(kr, "mach_vm_wire"))) { sprintf(error, "could unwire"); return FALSE; } if (!(ret = check_rw(addr, size))) return ret; kr = mach_vm_deallocate(mach_task_self(), addr, size); if (!(ret = check_kr(kr, "mach_vm_deallocate"))) return ret; return TRUE; }
// Releases a buffer previously vended from the current task's VM.
// Returns true when the kernel accepted the deallocation.
bool ZGFreeBytes(const void *bytes, ZGMemorySize size)
{
	// BUG FIX: cast through mach_vm_address_t (always 64-bit) rather than
	// vm_offset_t (natural word size), matching mach_vm_deallocate()'s
	// parameter type and the sibling ZGDeallocateMemory(); a vm_offset_t
	// cast could truncate in a 32-bit build.
	return mach_vm_deallocate(current_task(), (mach_vm_address_t)bytes, size) == KERN_SUCCESS;
}
// Deallocates [address, address+size) in the given task's address space,
// reporting whether the kernel accepted the request.
bool ZGDeallocateMemory(ZGMemoryMap processTask, ZGMemoryAddress address, ZGMemorySize size)
{
	kern_return_t status = mach_vm_deallocate(processTask, address, size);
	return status == KERN_SUCCESS;
}