/* Return the next mapped address at or above addr in the target task by
 * walking its VM map with vm_region_recurse_64. The lowest mapped address
 * discovered is cached in the global `the_lower`, so later calls answer from
 * the cache without re-walking the map. `io` is unused here; the signature
 * matches the RIO plugin callback shape. */
static ut64 getNextValid(RIO *io, RIODesc *fd, ut64 addr) {
	struct vm_region_submap_info_64 info;
	vm_address_t address = MACH_VM_MIN_ADDRESS;
	vm_size_t size = (vm_size_t) 0;
	vm_size_t osize = (vm_size_t) 0;
	natural_t depth = 0;
	kern_return_t kr;
	int tid = RIOMACH_PID (fd->data);
	task_t task = pid_to_task (tid);
	ut64 lower = addr;
#if __arm64__ || __aarch64__
	size = osize = 16384; // arm64 page size, according to frida
#else
	size = osize = 4096;
#endif
	// Fast path: a previous walk already cached the lowest mapped address.
	if (the_lower) {
		if (addr < the_lower) return the_lower;
		return addr;
	}
	for (;;) {
		mach_msg_type_number_t info_count;
		// info_count is an in/out parameter of vm_region_recurse_64, so it
		// must be reset before every call.
		info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
		memset (&info, 0, sizeof (info));
		kr = vm_region_recurse_64 (task, &address, &size, &depth,
			(vm_region_recurse_info_t) &info, &info_count);
		if (kr != KERN_SUCCESS) {
			break;
		}
		// While `lower` still equals the caller's addr, replace it with the
		// base of the first region found.
		if (lower == addr) {
			lower = address;
		}
		if (info.is_submap) {
			// Descend into the submap: same address, one level deeper.
			depth++;
			continue;
		}
		// addr falls inside this mapped region: it is already valid.
		if (addr >= address && addr < address + size) {
			return addr;
		}
		if (address < lower) {
			lower = address;
		}
		if (size < 1) {
			// Guarantee forward progress even if a zero-sized region shows up.
			size = osize;
		}
		address += size;
		size = 0;
	}
	the_lower = lower;
	return lower;
}
// Enumerate the virtual memory regions of a process and emit one results row
// per region. With exe_only set, only the first (executable) region is
// reported and the dyld library lookup is skipped entirely.
void genProcessMemoryMap(int pid, QueryData& results, bool exe_only = false) {
  mach_port_t target_task = MACH_PORT_NULL;
  kern_return_t status = task_for_pid(mach_task_self(), pid, &target_task);
  if (status != KERN_SUCCESS) {
    // Cannot obtain a task port for this pid (permissions, invalid pid).
    return;
  }

  // Library path lookup table built from the dyld cache.
  std::map<vm_address_t, std::string> libraries;
  if (!exe_only) {
    genProcessLibraries(target_task, libraries);
  }

  // Walk the address space from offset 0, bounded by MAX_MEMORY_MAPS.
  vm_address_t region_start = 0;
  uint32_t recurse_depth = 0;
  for (size_t regions_seen = 0; regions_seen < MAX_MEMORY_MAPS; regions_seen++) {
    struct vm_region_submap_info_64 region_info;
    mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
    vm_size_t region_size = 0;
    status = vm_region_recurse_64(target_task,
                                  &region_start,
                                  &region_size,
                                  &recurse_depth,
                                  (vm_region_info_64_t)&region_info,
                                  &info_count);
    if (status == KERN_INVALID_ADDRESS) {
      // Walked past the end of the memory map.
      break;
    }
    if (region_info.is_submap) {
      // A submap increments the depth given to vm_region_recurse; retry the
      // same address one level deeper.
      recurse_depth++;
      continue;
    }
    genMemoryRegion(pid, region_start, region_size, region_info, libraries, results);
    if (exe_only) {
      break;
    }
    region_start += region_size;
  }

  if (target_task != MACH_PORT_NULL) {
    mach_port_deallocate(mach_task_self(), target_task);
  }
}
//it's not used (yet)
/* Heuristically locate the kernel base: walk the kernel task's memory map
 * starting at KERNEL_LOWER and look for the huge (>1 GiB) region the kernel
 * maps over itself; return its base plus IMAGE_OFFSET, or 0 on failure.
 * `___task` is only echoed for debugging. */
vm_address_t get_kernel_base(task_t ___task) {
	kern_return_t ret;
	task_t task;
	vm_region_submap_info_data_64_t info;
	unsigned int depth = 0;
	ut64 addr = KERNEL_LOWER; // lowest possible kernel base address
	vm_address_t result = (vm_address_t)0;
	int count;
	ret = task_for_pid (mach_task_self (), 0, &task);
	if (ret != KERN_SUCCESS) {
		return 0;
	}
	eprintf ("%d vs %d\n", task, ___task);
	for (count = 128; count; count--) {
		// get next memory region; use properly-typed locals instead of
		// punning a ut64 through vm_address_t*/vm_size_t* casts.
		vm_address_t naddr = (vm_address_t)addr;
		vm_size_t size = 0;
		// info_count is an in/out parameter: vm_region_recurse_64 writes the
		// returned count back, so it must be reset before EVERY call, not
		// just once before the loop.
		mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
		ret = vm_region_recurse_64 (task, &naddr, &size, &depth,
			(vm_region_info_t)&info, &info_count);
		if (ret != KERN_SUCCESS) {
			break;
		}
		if (size < 1) {
			break;
		}
		if (addr == (ut64)naddr) {
			addr += size;
			continue;
		}
		eprintf ("0x%08"PFMT64x" size 0x%08"PFMT64x" perm 0x%x\n",
			(ut64)addr, (ut64)size, info.max_protection);
		// the kernel maps over a GB of RAM at the address where it maps
		// itself so we use that fact to detect its position
		if (size > 1024 * 1024 * 1024) {
			result = (vm_address_t)(addr + IMAGE_OFFSET);
			break;
		}
		addr += size;
	}
	// Do not leak the kernel task port send right.
	mach_port_deallocate (mach_task_self (), task);
	return result;
}
/* Read a pointer at a fixed offset from the 17th region of a target process,
 * then read the int 0x14 bytes below where that pointer points, and print it.
 * Requires root (task_for_pid). */
int main() {
	mach_port_t process_to_write;
	kern_return_t error;
	int pid;

	if (getuid() && geteuid()) {
		printf("You need to be root to vm_write!\n");
		return 1;
	}
	printf("PID: ");
	if (scanf("%d", &pid) != 1) {
		printf("Error reading the PID!\n");
		return 1;
	}
	error = task_for_pid(mach_task_self(), pid, &process_to_write);
	if ((error != KERN_SUCCESS) || !MACH_PORT_VALID(process_to_write)) {
		// Previously execution fell through and kept using the invalid port.
		printf("Error getting the process!\n");
		return 1;
	}

	// Walk the region list; the base we want is the start of region #16
	// (0-indexed), i.e. the 17th non-submap region.
	kern_return_t krc = KERN_SUCCESS;
	vm_address_t address = 0;
	vm_size_t size = 0;
	uint32_t depth = 1;
	int c = 0;
	uintptr_t base = 0; // was uninitialized: UB if fewer than 17 regions exist
	while (1) {
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		krc = vm_region_recurse_64(process_to_write, &address, &size, &depth,
			(vm_region_info_64_t)&info, &count);
		if (krc == KERN_INVALID_ADDRESS) {
			break;
		}
		if (info.is_submap) {
			depth++;
		} else {
			if (c == 16) {
				base = (uintptr_t)address;
			}
			c++;
			address += size;
		}
	}
	if (c <= 16) {
		printf("Error getting the process!\n");
		return 1;
	}
	printf("%012" PRIxPTR "\n", base);

	int negativeOffset = 0x14;
	int positiveBaseOffset = 0x7ffb30;
	// vm_read reports the byte count through a mach_msg_type_number_t, not an int.
	mach_msg_type_number_t sz = 0;
	mach_msg_type_number_t sz2 = 0;
	vm_offset_t dataPointer = 0;
	vm_offset_t dataPointer2 = 0;

	printf("%012" PRIxPTR "\n", (uintptr_t)(base + positiveBaseOffset));
	error = vm_read(process_to_write, (uintptr_t)(base + positiveBaseOffset),
		sizeof(vm_address_t), &dataPointer, &sz);
	if (error == KERN_SUCCESS) {
		// dataPointer is the address of a local out-of-line buffer holding the
		// remote pointer value (cast explicitly; integer->pointer assignment
		// without a cast is a constraint violation).
		void **bytes = (void **)(uintptr_t)dataPointer;
		uintptr_t remote_ptr = (uintptr_t)*bytes;
		printf("%012" PRIxPTR "\n", remote_ptr);
		// Avoid arithmetic on void* (non-standard): compute in uintptr_t.
		error = vm_read(process_to_write, remote_ptr - (uintptr_t)negativeOffset,
			sizeof(int), &dataPointer2, &sz2);
		if (error == KERN_SUCCESS) {
			// Print the int value itself; the old code passed a void* to %d (UB).
			int *value = (int *)(uintptr_t)dataPointer2;
			printf("%d\n", *value);
			vm_deallocate(mach_task_self(), dataPointer2, sz2);
		}
		// vm_read hands ownership of the buffer to the caller; release it.
		vm_deallocate(mach_task_self(), dataPointer, sz);
	}
	mach_port_deallocate(mach_task_self(), process_to_write);
	return 0;
}
/* Print every top-level memory region of a target process as
 * "Found region: <start> to <end>". Requires root (task_for_pid). */
int main() {
	mach_port_t process_to_write;
	kern_return_t error;
	int pid;

	if (getuid() && geteuid()) {
		printf("You need to be root to vm_write!\n");
		return 1;
	}
	printf("PID: ");
	if (scanf("%d", &pid) != 1) {
		printf("Error reading the PID!\n");
		return 1;
	}
	error = task_for_pid(mach_task_self(), pid, &process_to_write);
	if ((error != KERN_SUCCESS) || !MACH_PORT_VALID(process_to_write)) {
		// Previously execution fell through and kept using the invalid port.
		printf("Error getting the process!\n");
		return 1;
	}

	kern_return_t krc = KERN_SUCCESS;
	vm_address_t address = 0;
	vm_size_t size = 0;
	uint32_t depth = 1;
	while (1) {
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		krc = vm_region_recurse_64(process_to_write, &address, &size, &depth,
			(vm_region_info_64_t)&info, &count);
		if (krc == KERN_INVALID_ADDRESS) {
			// Reached the end of the memory map.
			break;
		}
		if (info.is_submap) {
			// Descend into the submap at the same address.
			depth++;
		} else {
			// %p requires a void* argument; the old (uint32_t) casts both
			// truncated 64-bit addresses and were undefined behavior.
			printf("Found region: %p to %p\n",
				(void *)address, (void *)(address + size));
			address += size;
		}
	}
	mach_port_deallocate(mach_task_self(), process_to_write);
	return 0;
}
/* Return a trampoline chunk located "near" addr (within rel32 reach, per
 * IS_NEAR). First tries the existing zone free-lists; failing that, walks the
 * task's VM map looking for an unmapped gap near addr, allocates a page
 * there, carves it into ZONE_SIZE chunks seeding a free list, and hands one
 * out. Returns 0 on failure. */
vm_address_t find_near(vm_address_t addr) {
	struct current_zone* cur = &zbg;
	struct current_zone* prev = NULL;
	// Pass 1: reuse a chunk from any existing zone whose free list is near addr.
	while (cur) {
		if (cur->zone_free_list && IS_NEAR(addr,cur->zone_free_list)) {
			vm_address_t rtn = (vm_address_t)zalloc(&cur->zone_free_list);
			// Zone drained by this allocation: unlink and free its descriptor
			// (never the static list head &zbg, which has no prev).
			if (prev && !cur->zone_free_list) {
				prev->list_next = cur->list_next;
				free(cur);
			}
			printf("got NEAR tramp\n");
			return rtn;
		}
		prev = cur;
		cur = cur->list_next;
	}
	cur = &zbg;
	printf("allocating new near zone\n");
	kern_return_t kr = KERN_SUCCESS;
	vm_size_t size = 0;
	vm_size_t old_size = 0;
	vm_address_t old_address = 0;
	// Begin scanning 2GB-1 below addr so any gap found stays reachable by a
	// 32-bit relative displacement.
	vm_address_t address = addr - ((1ULL << 31ULL) - 1ULL);
	while (1) {
		mach_msg_type_number_t count;
		struct vm_region_submap_info_64 info;
		uint32_t nesting_depth;
		// NOTE(review): nesting_depth is read by vm_region_recurse_64 but is
		// never initialized, and the ++ in the submap branch is discarded when
		// the variable is re-declared next iteration -- submap descent likely
		// does not work as intended. Confirm and fix separately.
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = vm_region_recurse_64(mach_task_self_, &address, &size,
			&nesting_depth, (vm_region_info_64_t)&info, &count);
		if (kr == KERN_INVALID_ADDRESS) {
			break;
		} else if (kr) {
			mach_error("vm_region:", kr);
			break; /* last region done */
		}
		if (info.is_submap) {
			nesting_depth++;
		} else {
			// There is an unmapped gap between the previous region's end and
			// this region's start.
			if (old_address && old_address + old_size < address) {
				if ((IS_NEAR(addr, old_address+old_size))) {
					// zalloc precondition: zone size must be a multiple of 2
					// and at least sizeof(native_word_t).
					if (ZONE_SIZE % 2 || ZONE_SIZE < sizeof(native_word_t)) {
						exit(-1);
					}
					// NOTE(review): this pointer arithmetic scales old_size by
					// sizeof(native_word_t); (native_word_t*)(old_address +
					// old_size) was probably intended -- confirm.
					native_word_t* szfl = (native_word_t*)old_address + old_size;
					// Anywhere-allocation flag is 0: request the page exactly
					// at szfl (return value intentionally judged via szfl).
					vm_allocate(mach_task_self_, (vm_address_t*)&szfl, PAGE_SIZE, 0);
					if (!szfl) {
						if (kr == KERN_INVALID_ADDRESS) {
							return 0;
						}
						continue;
					}
					vm_protect(mach_task_self_, (vm_address_t)szfl, PAGE_SIZE, 0, VM_PROT_ALL);
					// Find (or append) a zone descriptor with an empty free
					// list to own the new page's chunks.
					if (cur->zone_free_list) {
						while (cur->list_next) {
							cur = cur->list_next;
							if (!cur->zone_free_list) {
								break;
							}
						}
						if (cur->zone_free_list) {
							cur->list_next = malloc(sizeof(struct current_zone));
							cur->list_next->zone_free_list = 0;
							cur->list_next->list_next = 0;
							cur = cur->list_next;
							assert(cur->zone_free_list == 0);
						}
					}
					// Carve the page into ZONE_SIZE chunks, seeding the free list.
					for (int i = 0; i < (PAGE_SIZE/ZONE_SIZE); i++) {
						zfree((void*)((native_word_t)&szfl[i*(ZONE_SIZE/sizeof(native_word_t))]), &cur->zone_free_list);
					}
					return (vm_address_t)zalloc(&cur->zone_free_list);
				}
			}
			old_address = address;
			old_size = size;
			address += size;
		}
	}
	return 0;
}