__private_extern__ uintptr_t __CFFindPointer(uintptr_t ptr, uintptr_t start) {
    vm_map_t task = mach_task_self();
    mach_vm_address_t address = start;
    for (;;) {
        mach_vm_size_t size = 0;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t object_name;
        kern_return_t ret = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO_64,
                                           (vm_region_info_t)&info, &count, &object_name);
        if (KERN_SUCCESS != ret) break;
        boolean_t scan = (info.protection & VM_PROT_WRITE) ? 1 : 0;
        if (scan) {
            uintptr_t *addr = (uintptr_t *)((uintptr_t)address);
            uintptr_t *end = (uintptr_t *)((uintptr_t)address + (uintptr_t)size);
            while (addr < end) {
                if ((uintptr_t *)start <= addr && *addr == ptr) {
                    return (uintptr_t)addr;
                }
                addr++;
            }
        }
        address += size;
    }
    return 0;
}
static void readmem(mach_vm_offset_t *buffer, mach_vm_address_t address, mach_vm_size_t size, pid_t pid, vm_region_basic_info_data_64_t *info) {
    // get task for pid
    vm_map_t port;
    kern_return_t kr;
    if (task_for_pid(mach_task_self(), pid, &port)) {
        fprintf(stderr, "[ERROR] Can't execute task_for_pid! Do you have the right permissions/entitlements?\n");
        exit(1);
    }
    // the count is expressed in natural_t units, not bytes
    mach_msg_type_number_t info_cnt = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name;
    mach_vm_size_t size_info;
    mach_vm_address_t address_info = address;
    kr = mach_vm_region(port, &address_info, &size_info, VM_REGION_BASIC_INFO_64,
                        (vm_region_info_t)info, &info_cnt, &object_name);
    if (kr) {
        fprintf(stderr, "[ERROR] mach_vm_region failed with error %d\n", (int)kr);
        exit(1);
    }
    // read memory - mach_vm_read_overwrite because we supply the buffer
    mach_vm_size_t nread;
    kr = mach_vm_read_overwrite(port, address, size, (mach_vm_address_t)buffer, &nread);
    if (kr || nread != size) {
        fprintf(stderr, "[ERROR] mach_vm_read_overwrite failed!\n");
        exit(1);
    }
}
static mach_vm_address_t RegionContainingAddress(mach_vm_address_t aAddress) {
    mach_port_t task;
    kern_return_t kr = task_for_pid(mach_task_self(), getpid(), &task);
    if (kr != KERN_SUCCESS) {
        return 0;
    }
    mach_vm_address_t address = aAddress;
    mach_vm_size_t size;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name;
    kr = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO_64,
                        reinterpret_cast<vm_region_info_t>(&info), &count, &object_name);
    if (kr != KERN_SUCCESS || size == 0 || address > aAddress || address + size <= aAddress) {
        // mach_vm_region failed, or couldn't find region at given address.
        return 0;
    }
    return address;
}
bool ZGRegionInfo(ZGMemoryMap processTask, ZGMemoryAddress *address, ZGMemorySize *size, ZGMemoryBasicInfo *regionInfo) {
    mach_port_t objectName = MACH_PORT_NULL;
    mach_msg_type_number_t regionInfoSize = VM_REGION_BASIC_INFO_COUNT_64;
    return mach_vm_region(processTask, address, size, VM_REGION_BASIC_INFO_64,
                          (vm_region_info_t)regionInfo, &regionInfoSize, &objectName) == KERN_SUCCESS;
}
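A minimal usage sketch for a wrapper like the one above, walking every region of a task by restarting the lookup just past the region most recently returned. It assumes ZGMemoryMap, ZGMemoryAddress, ZGMemorySize and ZGMemoryBasicInfo are typedefs for vm_map_t, mach_vm_address_t, mach_vm_size_t and vm_region_basic_info_data_64_t; that is not confirmed by the snippet itself, and listRegions is a made-up name.

#include <mach/mach.h>
#include <stdio.h>

// Sketch: enumerate a task's regions with the wrapper above (assumed typedefs noted in the lead-in).
static void listRegions(ZGMemoryMap processTask)
{
    ZGMemoryAddress address = 0;
    ZGMemorySize size = 0;
    ZGMemoryBasicInfo info;
    while (ZGRegionInfo(processTask, &address, &size, &info)) {
        printf("region 0x%llx-0x%llx protection 0x%x\n",
               (unsigned long long)address,
               (unsigned long long)(address + size),
               (unsigned int)info.protection);
        address += size; // resume the search just past this region
    }
}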
static vm_prot_t protectionOfRegion(mach_vm_address_t address) {
    mach_vm_size_t regionSize = 0;
    vm_region_basic_info_64 regionInfo;
    mach_msg_type_number_t regionInfoCount = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t objectName;
    if (mach_vm_region(mach_task_self(), &address, &regionSize, VM_REGION_BASIC_INFO_64,
                       (vm_region_info_t)&regionInfo, &regionInfoCount, &objectName))
        CRASH();
    return regionInfo.protection;
}
void findmemoryspace() {
    if (gspaces != NULL)
        free(gspaces);
    gspaces = (space *)malloc(sizeof(space) * MAX_SPACE_COUNT);
    gspace_count = 0;
    kern_return_t kr;
    mach_vm_size_t vmsize = 0, presize;
    mach_vm_address_t address = 0, preaddress;
    vm_region_extended_info_data_t info;
    mach_msg_type_number_t info_count;
    memory_object_name_t object;
    preaddress = address;
    presize = vmsize;
    do {
        address = preaddress + presize;
        info_count = VM_REGION_EXTENDED_INFO_COUNT;
        kr = mach_vm_region(gtask, &address, &vmsize, VM_REGION_EXTENDED_INFO,
                            (vm_region_info_t)&info, &info_count, &object);
        if (kr != KERN_SUCCESS) {
            // the task port may have gone stale; reacquire it and retry once
            kr = task_for_pid(mach_task_self(), gproc->kp_proc.p_pid, &gtask);
            kr = mach_vm_region(gtask, &address, &vmsize, VM_REGION_EXTENDED_INFO,
                                (vm_region_info_t)&info, &info_count, &object);
        }
        if (address != preaddress) {
            if (info.share_mode == SM_PRIVATE || info.share_mode == SM_COW) {
                space *sp = gspaces + gspace_count;
                sp->address = address;
                sp->size = vmsize;
                gspace_count++;
                printf("space %p-%p\n", (void *)(uintptr_t)address, (void *)(uintptr_t)(address + vmsize));
            }
            preaddress = address;
            presize = vmsize;
        } else {
            presize += vmsize;
        }
    } while (kr == KERN_SUCCESS);
    printf("found %d valid memory regions.\n", gspace_count);
}
bool vm_region_next(vm_map_t task, mach_vm_address_t start, vm_region_basic_info_data_64_t *out) {
    mach_vm_address_t address = start;
    mach_vm_size_t size = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name;
    kern_return_t ret = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO_64,
                                       (vm_region_info_t)&info, &count, &object_name);
    if (ret == KERN_SUCCESS && out != NULL)
        *out = info;
    return ret == KERN_SUCCESS;
}
int main() {
    kern_return_t kern_return;
    mach_port_t task;
    int pid = 0;
    printf("Enter PID to look up: ");
    scanf("%d", &pid);
    // Need to run this program as root (i.e. sudo) in order for this to work
    kern_return = task_for_pid(mach_task_self(), pid, &task);
    if (kern_return != KERN_SUCCESS) {
        printf("task_for_pid() failed, error %d - %s\n", kern_return, mach_error_string(kern_return));
        exit(1);
    }
    kern_return_t kret;
    vm_region_basic_info_data_64_t info;
    mach_vm_size_t size;
    mach_port_t object_name;
    mach_msg_type_number_t count;
    mach_vm_address_t firstRegionBegin = 0;
    mach_vm_address_t lastRegionEnd = 0;
    mach_vm_size_t fullSize = 0;
    count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_vm_address_t address = 1;
    int regionCount = 0;
    int flag = 0;
    while (flag == 0) {
        // Attempts to get the region info for the given task
        kret = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO_64,
                              (vm_region_info_t)&info, &count, &object_name);
        if (kret == KERN_SUCCESS) {
            if (regionCount == 0) {
                firstRegionBegin = address;
                regionCount += 1;
            }
            fullSize += size;
            address += size;
        } else
            flag = 1;
    }
    lastRegionEnd = address;
    printf("Base Address: %p\n", (void *)(uintptr_t)firstRegionBegin);
    printf("lastRegionEnd: %llu\n", (unsigned long long)lastRegionEnd);
    printf("fullSize: %llu\n", (unsigned long long)fullSize);
    return 0;
}
int virtual_query(int pid, mach_vm_address_t *baseaddr, unsigned int *prot, mach_vm_size_t *size) {
    task_t port = getport(pid);
    //fprintf(stderr, "virtual_query %x %x %x\n", pid, *baseaddr, *size);
    // since we are using mach_vm_region we should use the new structures - supports both 32 and 64 bits
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    struct vm_region_basic_info_64 info;
    mach_port_t objectName = MACH_PORT_NULL;
    mach_vm_address_t requested_base = *baseaddr;
    kern_return_t result = mach_vm_region(port, baseaddr, size, VM_REGION_BASIC_INFO_64,
                                          (vm_region_info_t)&info, &count, &objectName);
    // What can go wrong?
    // No allocated pages at or after the requested address:
    // we just make up a region covering the rest of memory.
    if (result != KERN_SUCCESS) {
        // fprintf(stderr, "[IMPLEMENTATION.C] virtual_query failing case 1\n");
#if __LP64__
        *size = 0xffffffffffffffff - requested_base + 1;
#else
        *size = 0xffffffff - requested_base + 1;
#endif
        *prot = PAGE_NOACCESS;
        return 0;
    }
    if (VM_REGION_BASIC_INFO_COUNT_64 != count) {
        fprintf(stderr, "vm_region returned a bad info count\n");
    }
    // Mac scans ahead to the next allocated region, Windows doesn't.
    // We just make up a region at the base that isn't accessible so that iterating through memory works :/
    // This will bring problems with 64-bit binaries because addressing starts at vmaddr 0x0000000100000000
    // and we can have requests for lower addresses.
    // FIXME
    if (*baseaddr > requested_base) {
        // fprintf(stderr, "[IMPLEMENTATION.C] virtual_query failing case 2, baseaddr=%p, requested_base=%p\n", (void *)*baseaddr, (void *)requested_base);
        *size = *baseaddr - requested_base;
        *baseaddr = requested_base;
        *prot = PAGE_NOACCESS;
        return 0;
    }
    // cool, worked
    *prot = XToWinProtection(info.protection);
    //fprintf(stderr, "Virtual query succeeded\n");
    return 0;
}
vm_address_t get_base_address(mach_port_t task) {
    kern_return_t kret;
    vm_region_basic_info_data_t info;
    mach_vm_size_t size;
    mach_port_t object_name;
    mach_msg_type_number_t count;
    mach_vm_address_t address = 1;
    count = VM_REGION_BASIC_INFO_COUNT_64;
    kret = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
                          (vm_region_info_t)&info, &count, &object_name);
    if (kret != KERN_SUCCESS)
        return 0;
    return (vm_address_t)address;
}
static void get_adr_base_and_size(void* address, void** base, size_t* size) {
    mach_port_t object_name = { 0 };
    task_t task = { 0 };
    vm_region_basic_info_data_t info = { 0 };
    mach_vm_size_t mach_vm_size = { 0 };
    mach_vm_address_t mach_vm_address = (size_t)address;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    kern_return_t status = task_for_pid(mach_task_self(), getpid(), &task);
    *base = 0;
    *size = 0;
    if (status)
        return;
    status = mach_vm_region(task, &mach_vm_address, &mach_vm_size, VM_REGION_BASIC_INFO,
                            (vm_region_info_t)&info, &count, &object_name);
    if (status)
        return;
    *base = (void*)(size_t)mach_vm_address;
    *size = (size_t)mach_vm_size;
}
kern_return_t get_object_id(mach_vm_address_t offset, int *obj_id, int *ref_count) {
    kern_return_t kr;
    mach_port_t unused;
    mach_vm_size_t size = (mach_vm_size_t)vm_page_size;
    mach_vm_address_t address = offset;
    vm_region_top_info_data_t info;
    mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
    kr = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_TOP_INFO,
                        (vm_region_info_t)&info, &count, &unused);
    if (kr == KERN_SUCCESS) {
        *obj_id = info.obj_id;
        *ref_count = info.ref_count;
    }
    return kr;
}
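As a rough usage sketch, not taken from the original source, the helper above could be exercised against a freshly allocated and touched page of the current task; the allocation strategy and the printed field names are illustrative only.

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <stdio.h>

// Sketch: allocate one page, touch it so it is resident, then ask VM_REGION_TOP_INFO about it.
int main(void)
{
    mach_vm_address_t addr = 0;
    if (mach_vm_allocate(mach_task_self(), &addr, vm_page_size, VM_FLAGS_ANYWHERE) != KERN_SUCCESS)
        return 1;
    *(volatile char *)(uintptr_t)addr = 1; // fault the page in

    int obj_id = 0, ref_count = 0;
    if (get_object_id(addr, &obj_id, &ref_count) == KERN_SUCCESS)
        printf("obj_id=%d ref_count=%d\n", obj_id, ref_count);

    mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
    return 0;
}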
static vm_prot_t getPermission(void* addr) {
    mach_vm_address_t address = (mach_vm_address_t)(uintptr_t)addr;
    kern_return_t result;
    mach_port_t object_name;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count;
    mach_vm_size_t size = 4096;
    count = VM_REGION_BASIC_INFO_COUNT_64;
    result = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                            (vm_region_info_t)&info, &count, &object_name);
    if (result == KERN_SUCCESS)
        return info.protection;
    return 0;
}
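A small, hedged example of how a helper like getPermission might be used within the same translation unit, for instance to verify that an address is writable before attempting to patch it; the is_writable name is made up here.

#include <mach/mach.h>

// Sketch: returns nonzero if the page containing addr is currently writable.
static int is_writable(void *addr)
{
    return (getPermission(addr) & VM_PROT_WRITE) != 0;
}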
static void darwin_debug_regions (task_t task, mach_vm_address_t address, int max)
{
  kern_return_t kret;
  vm_region_basic_info_data_64_t info, prev_info;
  mach_vm_address_t prev_address;
  mach_vm_size_t size, prev_size;
  mach_port_t object_name;
  mach_msg_type_number_t count;
  int nsubregions = 0;
  int num_printed = 0;

  count = VM_REGION_BASIC_INFO_COUNT_64;
  kret = mach_vm_region (task, &address, &size, VM_REGION_BASIC_INFO_64,
			 (vm_region_info_t) &info, &count, &object_name);
  if (kret != KERN_SUCCESS)
    {
      printf_filtered (_("No memory regions."));
      return;
    }
  memcpy (&prev_info, &info, sizeof (vm_region_basic_info_data_64_t));
  prev_address = address;
  prev_size = size;
  nsubregions = 1;

  for (;;)
    {
      int print = 0;
      int done = 0;

      address = prev_address + prev_size;

      /* Check to see if address space has wrapped around.  */
      if (address == 0)
        print = done = 1;

      if (!done)
        {
          count = VM_REGION_BASIC_INFO_COUNT_64;
          kret = mach_vm_region (task, &address, &size, VM_REGION_BASIC_INFO_64,
				 (vm_region_info_t) &info, &count, &object_name);
          if (kret != KERN_SUCCESS)
            {
              size = 0;
              print = done = 1;
            }
        }

      if (address != prev_address + prev_size)
        print = 1;

      if ((info.protection != prev_info.protection)
          || (info.max_protection != prev_info.max_protection)
          || (info.inheritance != prev_info.inheritance)
          || (info.shared != prev_info.shared)
          || (info.reserved != prev_info.reserved))
        print = 1;

      if (print)
        {
          printf_filtered (_("%s-%s %s/%s %s %s %s"),
                           paddress (target_gdbarch (), prev_address),
                           paddress (target_gdbarch (), prev_address + prev_size),
                           unparse_protection (prev_info.protection),
                           unparse_protection (prev_info.max_protection),
                           unparse_inheritance (prev_info.inheritance),
                           prev_info.shared ? _("shrd") : _("priv"),
                           prev_info.reserved ? _("reserved") : _("not-rsvd"));

          if (nsubregions > 1)
            printf_filtered (_(" (%d sub-rgn)"), nsubregions);

          printf_filtered (_("\n"));

          prev_address = address;
          prev_size = size;
          memcpy (&prev_info, &info, sizeof (vm_region_basic_info_data_64_t));
          nsubregions = 1;

          num_printed++;
        }
      else
        {
          prev_size += size;
          nsubregions++;
        }

      if ((max > 0) && (num_printed >= max))
        done = 1;

      if (done)
        break;
    }
}
unsigned long long darwin_virtual_size()
{
    kern_return_t error;
    task_t task;
    struct task_basic_info_64 taskinfo;
    cpu_type_t cputype;
    mach_msg_type_number_t count;
    mach_vm_size_t size;
    mach_vm_address_t address;
    mach_port_t object_name;
    vm_region_top_info_data_t info;
    mach_vm_size_t vsize;
    mach_vm_size_t empty;
    int has_shared_regions;

    empty = 0;
    count = TASK_BASIC_INFO_64_COUNT;
    task = mach_task_self();
    error = task_info(task, TASK_BASIC_INFO_64, (task_info_t)&taskinfo, &count);
    if (error != KERN_SUCCESS) {
        return 0;
    }
    vsize = taskinfo.virtual_size;
    cputype = cpu_type();

    // Go through all the vm regions and check to see if we should count them in the vsize or not
    for (address = 0, has_shared_regions = 0; ; address += size) {
        count = VM_REGION_TOP_INFO_COUNT;
        if (mach_vm_region(task, &address, &size, VM_REGION_TOP_INFO,
                           (vm_region_info_t)&info, &count, &object_name) != KERN_SUCCESS) {
            // There are no more vm regions to look at.
            break;
        }
        if (in_shared_region(cputype, address)) {
            // Check if this process has the globally shared text and data regions mapped in.
            // If so, set has_shared_regions to 1 and so we only check once.
            if (has_shared_regions == 0 && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t basic_info;
                count = VM_REGION_BASIC_INFO_COUNT_64;
                if (mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
                                   (vm_region_info_t)&basic_info, &count, &object_name) != KERN_SUCCESS) {
                    break;
                }
                if (basic_info.reserved) {
                    has_shared_regions = 1;
                }
            }
            // Skip the vm region if it is not a shared private region.
            if (info.share_mode != SM_PRIVATE) {
                continue;
            }
        }
        if (info.share_mode == SM_EMPTY) {
            empty += size;
        }
    }
    // Subtract out the globally shared text and data region.
    if (has_shared_regions == 1) {
        vsize -= shared_region_size(cputype);
    }
    // Subtract out the empty pages (pagezero, stack guard, etc.)
    vsize -= empty;
    return vsize;
}
// taken from vmmap.c ios clone
void macosx_debug_regions (task_t task, mach_vm_address_t address, int max)
{
    kern_return_t kret;
    mach_vm_address_t prev_address;
    /* @TODO: warning - potential overflow here - gotta fix this.. */
    vm_region_basic_info_data_t prev_info, info;
    mach_vm_size_t size, prev_size;
    mach_port_t object_name;
    mach_msg_type_number_t count;
    int nsubregions = 0;
    int num_printed = 0;

    count = VM_REGION_BASIC_INFO_COUNT_64;
    kret = mach_vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                           (vm_region_info_t) &info, &count, &object_name);
    if (kret) {
        printf ("mach_vm_region: Error %d - %s", kret, mach_error_string(kret));
        return;
    }
    memcpy (&prev_info, &info, sizeof (vm_region_basic_info_data_t));
    prev_address = address;
    prev_size = size;
    nsubregions = 1;
    self_sections_count = 0;

    for (;;) {
        int print = 0;
        int done = 0;

        address = prev_address + prev_size;

        /* Check to see if address space has wrapped around. */
        if (address == 0)
            print = done = 1;

        if (!done) {
            // Even on iOS, we use VM_REGION_BASIC_INFO_COUNT_64. This works.
            count = VM_REGION_BASIC_INFO_COUNT_64;
            kret = mach_vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                                   (vm_region_info_t) &info, &count, &object_name);
            if (kret != KERN_SUCCESS) {
                /* iOS 6 workaround - attempt to re-get the task port to avoid */
                /* "(ipc/send) invalid destination port" (1000003 or something) */
                task_for_pid (mach_task_self(), getpid (), &task);
                kret = mach_vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
                                       (vm_region_info_t) &info, &count, &object_name);
            }
            if (kret != KERN_SUCCESS) {
                fprintf (stderr, "mach_vm_region failed for address %p - Error: %x\n",
                         (void *)(uintptr_t)address, kret);
                size = 0;
                if (address >= 0x4000000)
                    return;
                print = done = 1;
            }
        }

        if (address != prev_address + prev_size)
            print = 1;

        if ((info.protection != prev_info.protection)
            || (info.max_protection != prev_info.max_protection)
            || (info.inheritance != prev_info.inheritance)
            || (info.shared != prev_info.shared)
            || (info.reserved != prev_info.reserved))
            print = 1;

        if (print) {
            int print_size;
            char *print_size_unit = "";

            if (num_printed == 0)
                printf ("Region ");
            else
                printf (" ... ");

            //findListOfBinaries(task, prev_address, prev_size);
            /* Quick hack to show size of segment, which GDB does not */
            print_size = prev_size;
            if (print_size > 1024) { print_size /= 1024; print_size_unit = "K"; }
            if (print_size > 1024) { print_size /= 1024; print_size_unit = "M"; }
            if (print_size > 1024) { print_size /= 1024; print_size_unit = "G"; }
            /* End Quick hack */
            printf (" %p - %p [%d%s](%x/%x; %d, %s, %s)",
                    (void *)(uintptr_t)prev_address,
                    (void *)(uintptr_t)(prev_address + prev_size),
                    print_size,
                    print_size_unit,
                    prev_info.protection,
                    prev_info.max_protection,
                    prev_info.inheritance,
                    prev_info.shared ? "shared" : "private",
                    prev_info.reserved ? "reserved" : "not-reserved");

            self_sections[self_sections_count].from = prev_address;
            self_sections[self_sections_count].to = prev_address + prev_size;
            self_sections[self_sections_count].perm = PERM_READ; //prev_info.protection;
            self_sections_count++;

            if (nsubregions > 1)
                printf (" (%d sub-regions)", nsubregions);

            printf ("\n");

            prev_address = address;
            prev_size = size;
            memcpy (&prev_info, &info, sizeof (vm_region_basic_info_data_t));
            nsubregions = 1;

            num_printed++;
        } else {
            prev_size += size;
            nsubregions++;
        }

        if ((max > 0) && (num_printed >= max)) {
            printf ("Max %d num_printed %d\n", max, num_printed);
            done = 1;
        }
        if (done)
            break;
    }
}
void MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;
    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr = 0, size = 0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO,
                                          (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS)
            break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.
            // If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY)
            {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO,
                                    (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS)
                    break;

                if (b_info.reserved)
                {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only
            // region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }

            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}