/* Single-step the inferior one instruction via ptrace(PT_STEP).
 * dbg: debugger handle; dbg->pid selects the target process.
 * Returns R_TRUE on success, R_FALSE if the step request failed.
 *
 * Fix: the original duplicated the whole ptrace/error-report sequence in
 * both preprocessor branches; the step itself is identical on every arch,
 * only the iOS hardware-single-step toggling is arm-specific. */
int xnu_step (RDebug *dbg) {
	int ret;
	int pid = dbg->pid;
	//debug_arch_x86_trap_set (dbg, 1);
	// TODO: not supported in all platforms. need dbg.swstep=
#if __arm__ || __arm64__ || __aarch64__
	/* iOS needs the hardware single-step bit armed around PT_STEP */
	ios_hwstep_enable (dbg, 1);
#endif
	ret = ptrace (PT_STEP, pid, (caddr_t)1, 0); //SIGINT
	if (ret != 0) {
		perror ("ptrace-step");
		eprintf ("mach-error: %d, %s\n", ret, MACH_ERROR_STRING (ret));
		ret = R_FALSE; /* do not wait for events */
	} else {
		ret = R_TRUE;
	}
#if __arm__ || __arm64__ || __aarch64__
	ios_hwstep_enable (dbg, 0);
#else
	//TODO handle the signals here in xnu. Now is only supported for linux
	/*r_debug_handle_signals (dbg);*/
#endif
	return ret;
}
task_t pid_to_task(int pid) { static task_t old_pid = -1; static task_t old_task = -1; task_t task = -1; int err; /* xlr8! */ if (old_task != -1 && old_pid == pid) return old_task; err = task_for_pid (mach_task_self (), (pid_t)pid, &task); if ((err != KERN_SUCCESS) || !MACH_PORT_VALID (task)) { task = task_for_pid_workaround (pid); if (task == -1) { eprintf ("Failed to get task %d for pid %d.\n", (int)task, (int)pid); eprintf ("Reason: 0x%x: %s\n", err, (char *)MACH_ERROR_STRING (err)); eprintf ("You probably need to run as root or sign the binary.\n" " Read doc/ios.md || doc/osx.md\n" " make -C binr/radare2 ios-sign || osx-sign\n"); return -1; } } old_pid = pid; old_task = task; return task; }
/* Apply PROT to the whole region [region_start, region_start+region_size)
 * in TASK.  The addr/size arguments are only used for debug logging here;
 * on Tiger protections are set at region granularity.
 * set_max: when true, change the region's maximum protection instead.
 * Returns the kern_return_t from mach_vm_protect. */
static kern_return_t
macosx_vm_protect_region (task_t task, mach_vm_address_t region_start,
		mach_vm_size_t region_size, mach_vm_address_t addr,
		mach_vm_size_t size, vm_prot_t prot, boolean_t set_max) {
	/* On Tiger we want to set protections at the region level. */
	mach_vm_address_t protect_addr = region_start;
	mach_vm_size_t protect_size = region_size;
	kern_return_t kret = mach_vm_protect (task, protect_addr,
		protect_size, set_max, prot);
#ifdef DEBUG_MACOSX_MUTILS
	mutils_debug ("macosx_vm_protect_region ( 0x%8.8llx ): [ 0x%8.8llx - 0x%8.8llx ) %s = %c%c%s => %s\n",
		(uint64_t) addr, (uint64_t) protect_addr,
		(uint64_t) (protect_addr + protect_size),
		set_max ? "max_prot" : "prot",
		prot & VM_PROT_COPY ? 'c' : '-',
		prot & VM_PROT_NO_CHANGE ? '!' : '-',
		g_macosx_protection_strs[prot & 7],
		kret ? MACH_ERROR_STRING (kret) : "0");
#endif
	return kret;
}
/* Write LEN bytes from BUFF into the inferior task's memory at ADDR.
 * Best-effort: failures are reported to stderr but LEN is returned
 * unconditionally, so callers cannot distinguish partial/failed writes.
 * NOTE(review): the page protections widened below are never restored
 * (see the XXX), leaving the target page rwx after the write. */
static int mach_write_at(RIOMach *riom, const void *buff, int len, ut64 addr) {
	task_t task = riom->task;
#if 0
	/* dead experiment: query the region's current protections first */
	/* get paVM_PROT_EXECUTEge perms */
	kern_return_t err;
	int ret, _basic64[VM_REGION_BASIC_INFO_COUNT_64];
	vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
	mach_msg_type_number_t infocnt;
	const int pagesize = 4096;
	vm_offset_t addrbase;
	mach_port_t objname;
	vm_size_t size = pagesize;
	eprintf (" 0x%llx\n", addr);
	infocnt = VM_REGION_BASIC_INFO_COUNT_64;
	addrbase = addr;
	size = len;
	// intentionally use VM_REGION_BASIC_INFO and get up-converted
	ret = vm_region_64 (task, &addrbase, &size, VM_REGION_BASIC_INFO_64,
		(vm_region_info_t)basic64, &infocnt, &objname);
	eprintf ("+ PERMS (%x) %llx\n", basic64->protection, addr);
	if (ret == -1) {
		eprintf ("Cant get vm region info\n");
	}
#endif
	/* get page perms */
	// XXX SHOULD RESTORE PERMS LATER!!!
	/* try rwx+COPY first; only if that fails, fall back to write-only;
	 * warn only when both attempts fail (brace-less nested ifs) */
	if (vm_protect (task, addr, len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE) != KERN_SUCCESS)
	//if (mach_vm_protect (task, addr, len, 0, VM_PROT_READ | VM_PROT_WRITE) != KERN_SUCCESS)
		if (vm_protect (task, addr, len, 0, VM_PROT_WRITE) != KERN_SUCCESS)
			eprintf ("cant change page perms to rw at 0x%"PFMT64x" with len= %d\n", addr, len);
	/* perform the actual write; errors are only logged */
	if (vm_write (task, (vm_address_t)addr, (vm_offset_t)buff,
			(mach_msg_type_number_t)len) != KERN_SUCCESS)
		eprintf ("cant write on memory\n");
	//if (vm_read_overwrite(task, addr, 4, buff, &sz)) { eprintf ("cannot overwrite\n"); }
#if 0
	/* dead experiment: restore the original protections afterwards */
	eprintf ("addrbase: %x\n", addrbase);
	eprintf ("change prems to %x\n", basic64->protection);
	int prot = 0;
	if (basic64->protection & 1) prot |= VM_PROT_EXECUTE;
	if (basic64->protection & 2) prot |= VM_PROT_WRITE;
	if (basic64->protection & 4) prot |= VM_PROT_READ;
	printf ("%d vs %d\n", prot, basic64->protection);
	int prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (vm_protect (task, addr, len, 0, prot) != KERN_SUCCESS) {
		//basic64->protection) != KERN_SUCCESS) {
		eprintf ("Oops (0x%"PFMT64x") error (%s)\n", addr,
			MACH_ERROR_STRING (err));
		eprintf ("cant change page perms to rx\n");
	}
#endif
	return len;
}
/* Query the innermost submap region of TASK containing ADDR.
 * On success fills *r_start/*r_size with the region bounds and
 * *prot/*max_prot with its current and maximum protections.
 * On failure all four outputs are zeroed (VM_PROT_NONE for the
 * protections) so callers always see initialized values.
 * Returns the kern_return_t from mach_vm_region_recurse. */
static kern_return_t macosx_vm_region_recurse_long (task_t task,
		mach_vm_address_t addr, mach_vm_address_t *r_start,
		mach_vm_size_t *r_size, vm_prot_t *prot, vm_prot_t *max_prot) {
	vm_region_submap_info_data_64_t r_long_data;
	mach_msg_type_number_t r_info_size;
	natural_t r_depth;
	kern_return_t kret;
	r_info_size = VM_REGION_SUBMAP_INFO_COUNT_64;
	/* large depth so we recurse down to the innermost submap */
	r_depth = 1000;
	*r_start = addr;
	kret = mach_vm_region_recurse (task, r_start, r_size, & r_depth,
		(vm_region_recurse_info_t) &r_long_data, &r_info_size);
	if (kret == KERN_SUCCESS) {
		*prot = r_long_data.protection;
		*max_prot = r_long_data.max_protection;
#ifdef DEBUG_MACOSX_MUTILS
		mutils_debug ("macosx_vm_region_recurse_long ( 0x%8.8llx ): [ 0x%8.8llx - 0x%8.8llx ) "
			"depth = %d, prot = %c%c%s max_prot = %c%c%s\n",
			(uint64_t) addr, (uint64_t) (*r_start),
			(uint64_t) (*r_start + *r_size), r_depth,
			*prot & VM_PROT_COPY ? 'c' : '-',
			*prot & VM_PROT_NO_CHANGE ? '!' : '-',
			g_macosx_protection_strs[*prot & 7],
			*max_prot & VM_PROT_COPY ? 'c' : '-',
			*max_prot & VM_PROT_NO_CHANGE ? '!' : '-',
			g_macosx_protection_strs[*max_prot & 7]);
#endif
	} else {
#ifdef DEBUG_MACOSX_MUTILS
		mutils_debug ("macosx_vm_region_recurse_long ( 0x%8.8llx ): ERROR %s\n",
			(uint64_t) addr, MACH_ERROR_STRING (kret));
#endif
		*r_start = 0;
		*r_size = 0;
		*prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
	}
	return kret;
}
/* Write the register block in BUF into the first thread of dbg->pid.
 * type selects the register class (R_REG_TYPE_DRX => debug registers).
 * Returns sizeof (R_DEBUG_REG_T) on success, R_FALSE on failure.
 * NOTE(review): returns sizeof (R_DEBUG_REG_T) even in the no-threads
 * branch — confirm whether callers treat that as success. */
int xnu_reg_write (RDebug *dbg, int type, const ut8 *buf, int size) {
	int ret;
	thread_array_t inferior_threads = NULL;
	unsigned int inferior_thread_count = 0;
	/* regs and gp_count look unused here, but THREAD_SET_STATE is a
	 * project macro that presumably captures them (and tid) from the
	 * enclosing scope — TODO confirm against the macro definition */
	R_DEBUG_REG_T *regs = (R_DEBUG_REG_T*)buf;
	unsigned int gp_count = R_DEBUG_STATE_SZ;
	ret = task_threads (pid_to_task (dbg->pid),
		&inferior_threads, &inferior_thread_count);
	if (ret != KERN_SUCCESS) {
		eprintf ("debug_getregs\n");
		return R_FALSE;
	}
	/* TODO: thread cannot be selected */
	if (inferior_thread_count > 0) {
		/* 44 vs 16: state word counts for 64/32-bit targets */
		gp_count = ((dbg->bits == R_SYS_BITS_64)) ? 44 : 16;
		// XXX: kinda spaguetti coz multi-arch
		int tid = inferior_threads[0];
#if __i386__ || __x86_64__
		switch (type) {
		case R_REG_TYPE_DRX:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_SET_STATE(x86_DEBUG_STATE64);
			} else {
				ret = THREAD_SET_STATE(x86_DEBUG_STATE32);
			}
			break;
		default:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_SET_STATE(x86_THREAD_STATE);
			} else {
				ret = THREAD_SET_STATE(i386_THREAD_STATE);
			}
			break;
		}
#else
		ret = THREAD_SET_STATE(R_DEBUG_STATE_T);
#endif
		if (ret != KERN_SUCCESS) {
			eprintf ("debug_setregs: Failed to set thread %d %d.error (%x). (%s)\n",
				(int)dbg->pid, pid_to_task (dbg->pid),
				(int)ret, MACH_ERROR_STRING (ret));
			perror ("thread_set_state");
			return R_FALSE;
		}
	} else {
		eprintf ("There are no threads!\n");
	}
	return sizeof (R_DEBUG_REG_T);
}
/* Obtain the Mach task port for PID via task_for_pid().
 * Prints diagnostics and returns -1 on failure; no caching here. */
static task_t pid_to_task(int pid) {
	task_t task = 0;
	int kr = task_for_pid (mach_task_self (), (pid_t)pid, &task);
	if (kr != KERN_SUCCESS || !MACH_PORT_VALID (task)) {
		eprintf ("Failed to get task %d for pid %d.\n", (int)task, (int)pid);
		eprintf ("Reason: 0x%x: %s\n", kr, MACH_ERROR_STRING (kr));
		eprintf ("You probably need to add user to procmod group.\n"
			" Or chmod g+s radare && chown root:procmod radare\n");
		eprintf ("FMI: http://developer.apple.com/documentation/Darwin/Reference/ManPages/man8/taskgated.8.html\n");
		return -1;
	}
	return task;
}
/* Emit a non-fatal warning describing a failed Mach call.
 * ret:  kern_return_t of the call; KERN_SUCCESS is silently ignored.
 * file/line/func: call-site location; func may be NULL. */
void mach_warn_error(kern_return_t ret, const char *file,
		unsigned int line, const char *func) {
	if (ret == KERN_SUCCESS) {
		return;
	}
	if (func == NULL) {
		func = "[UNKNOWN]";
	}
	/* BUGFIX: format was "(0x%ux)", which printf parses as %u followed by
	 * a literal 'x' — the code printed in decimal with a stray 'x'.  Use
	 * "0x%x" with a matching unsigned argument, as intended. */
	warning("error on line %u of \"%s\" in function \"%s\": %s (0x%x)",
		line, file, func, MACH_ERROR_STRING(ret), (unsigned int)ret);
}
/* Report a fatal error for a failed Mach call via error().
 * No-op when ret == KERN_SUCCESS; func may be NULL. */
void mach_check_error(kern_return_t ret, const char *file,
		unsigned int line, const char *func) {
	if (ret == KERN_SUCCESS) {
		return;
	}
	const char *fname = func ? func : "[UNKNOWN]";
	error("error on line %u of \"%s\" in function \"%s\": %s (0x%lx)\n",
		line, file, fname, MACH_ERROR_STRING(ret), (unsigned long)ret);
}
/* Read a register block of class TYPE from the selected thread of
 * dbg->pid into BUF.  Returns sizeof (R_DEBUG_REG_T) on success,
 * R_FALSE on failure. */
int xnu_reg_read (RDebug *dbg, int type, ut8 *buf, int size) {
	int ret;
	int pid = dbg->pid;
	thread_array_t inferior_threads = NULL;
	unsigned int inferior_thread_count = 0;
	/* regs, gp_count and tid look unused below, but THREAD_GET_STATE is
	 * a project macro that presumably captures them from the enclosing
	 * scope — TODO confirm against the macro definition */
	R_DEBUG_REG_T *regs = (R_DEBUG_REG_T*)buf;
	unsigned int gp_count = R_DEBUG_STATE_SZ;
	int tid = dbg->tid;
	ret = task_threads (pid_to_task (pid),
		&inferior_threads, &inferior_thread_count);
	if (ret != KERN_SUCCESS) {
		return R_FALSE;
	}
	/* out-of-range tid falls back to the pid itself ... */
	if (tid < 0 || tid >= inferior_thread_count) {
		dbg->tid = tid = dbg->pid;
	}
	/* ... which is then mapped to thread index 0 */
	if (tid == dbg->pid) {
		tid = 0;
	}
	if (inferior_thread_count > 0) {
		/* TODO: allow to choose the thread */
		gp_count = R_DEBUG_STATE_SZ;
		// XXX: kinda spaguetti coz multi-arch
#if __i386__ || __x86_64__
		/* NOTE(review): no default case — if type matches no case, ret
		 * keeps the KERN_SUCCESS from task_threads and buf is untouched */
		switch (type) {
		case R_REG_TYPE_SEG:
		case R_REG_TYPE_FLG:
		case R_REG_TYPE_GPR:
			ret = THREAD_GET_STATE ((dbg->bits == R_SYS_BITS_64)? x86_THREAD_STATE: i386_THREAD_STATE);
			break;
		case R_REG_TYPE_DRX:
			ret = THREAD_GET_STATE ((dbg->bits == R_SYS_BITS_64)? x86_DEBUG_STATE64: x86_DEBUG_STATE32);
			break;
		}
#elif __arm__ || __arm64__ || __aarch64__
		switch (type) {
		case R_REG_TYPE_FLG:
		case R_REG_TYPE_GPR:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_GET_STATE (ARM_THREAD_STATE64);
			} else {
				ret = THREAD_GET_STATE (ARM_THREAD_STATE);
			}
			break;
		case R_REG_TYPE_DRX:
			if (dbg->bits == R_SYS_BITS_64) {
				ret = THREAD_GET_STATE (ARM_DEBUG_STATE64);
			} else {
				ret = THREAD_GET_STATE (ARM_DEBUG_STATE32);
			}
			break;
		}
#else
		eprintf ("Unknown architecture\n");
#endif
		if (ret != KERN_SUCCESS) {
			eprintf (
				"debug_getregs: Failed to get thread %d %d.error (%x). (%s)\n",
				(int)pid, pid_to_task (pid), (int)ret,
				MACH_ERROR_STRING (ret)
			);
			perror ("thread_get_state");
			return R_FALSE;
		}
	} else eprintf ("There are no threads!\n");
	return sizeof (R_DEBUG_REG_T);
}
/* Transfer LEN bytes between MYADDR (our address space) and MEMADDR in
 * TASK.  write != 0 writes into the inferior, otherwise reads from it.
 * Walks the target regions page-map aware, temporarily widening write
 * protections when needed and restoring them afterwards.
 * Returns the number of bytes transferred (len - cur_len); a negative
 * return encodes the distance to the first mapped address when MEMADDR
 * itself is unmapped but a nearby region exists. */
int mach_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write, task_t task) {
	mach_vm_address_t r_start = 0;
	mach_vm_address_t r_end = 0;
	mach_vm_size_t r_size = 0;
	vm_prot_t orig_protection = 0;
	vm_prot_t max_orig_protection = 0;
	CORE_ADDR cur_memaddr;
	char *cur_myaddr;
	int cur_len;
	vm_size_t pagesize = child_get_pagesize ();
	kern_return_t kret;
	int ret;
	/* check for out-of-range address */
	/* (detects truncation when CORE_ADDR is wider than mach_vm_address_t) */
	r_start = memaddr;
	if (r_start != memaddr) {
		errno = EINVAL;
		return 0;
	}
	if (len == 0) {
		return 0;
	}
	CHECK_FATAL (myaddr != NULL);
	errno = 0;
	/* check for case where memory available only at address greater than address specified */
	{
		kret = macosx_get_region_info (task, memaddr, &r_start, &r_size,
			&orig_protection, &max_orig_protection);
		if (kret != KERN_SUCCESS) {
			return 0;
		}
		if (r_start > memaddr) {
			if ((r_start - memaddr) <= MINUS_INT_MIN) {
#ifdef DEBUG_MACOSX_MUTILS
				mutils_debug ("First available address near 0x%8.8llx is at 0x%8.8llx; returning\n",
					(uint64_t) memaddr, (uint64_t) r_start);
#endif
				/* negative distance to the first mapped address */
				return -(r_start - memaddr);
			} else {
#ifdef DEBUG_MACOSX_MUTILS
				mutils_debug ("First available address near 0x%8.8llx is at 0x%8.8llx"
					"(too far; returning 0)\n",
					(uint64_t) memaddr, (uint64_t) r_start);
#endif
				return 0;
			}
		}
	}
	cur_memaddr = memaddr;
	cur_myaddr = myaddr;
	cur_len = len;
	/* main loop: one region/page-chunk per iteration */
	while (cur_len > 0) {
		int changed_protections = 0;
		/* We want the inner-most map containing our address, so set the
		   recurse depth to some high value, and call mach_vm_region_recurse. */
		/* NOTE(review): kret is not checked here, unlike the call above
		   the loop — confirm a failure cannot leave stale region data */
		kret = macosx_get_region_info (task, cur_memaddr, &r_start, &r_size,
			&orig_protection, &max_orig_protection);
		if (r_start > cur_memaddr) {
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Next available region for address at 0x%8.8llx is 0x%8.8llx\n",
				(uint64_t) cur_memaddr, (uint64_t) r_start);
#endif
			break;
		}
		if (write) {
			/* Keep the execute permission if we modify protections. */
			vm_prot_t new_prot = VM_PROT_READ | VM_PROT_WRITE;
			/* Do we need to modify our protections? */
			if (orig_protection & VM_PROT_WRITE) {
				/* We don't need to modify our protections. */
				kret = KERN_SUCCESS;
#ifdef DEBUG_MACOSX_MUTILS
				mutils_debug ("We already have write access to the region "
					"containing: 0x%8.8llx, skipping permission modification.\n",
					(uint64_t) cur_memaddr);
#endif
			} else {
				changed_protections = 1;
				mach_vm_size_t prot_size;
				/* NOTE(review): the else branch looks suspicious — the
				   offset into the region, not the remaining size; kept
				   as-is from the original (Apple gdb heritage) */
				if (cur_len < r_size - (cur_memaddr - r_start))
					prot_size = cur_len;
				else
					prot_size = cur_memaddr - r_start;
				kret = macosx_vm_protect (task, r_start, r_size,
					cur_memaddr, prot_size, new_prot, 0);
				if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
					mutils_debug ("Without COPY failed: %s (0x%lx)\n",
						MACH_ERROR_STRING (kret), kret);
#endif
					/* retry with COPY (copy-on-write) added */
					kret = macosx_vm_protect (task, r_start, r_size,
						cur_memaddr, prot_size, VM_PROT_COPY | new_prot, 0);
				}
				if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
					mutils_debug ("Unable to add write access to region at 0x8.8llx: %s (0x%lx)\n",
						(uint64_t) r_start, MACH_ERROR_STRING (kret), kret);
#endif
					break;
				}
			}
		}
		r_end = r_start + r_size;
		/* region invariants: page aligned and containing cur_memaddr */
		CHECK_FATAL (r_start <= cur_memaddr);
		CHECK_FATAL (r_end >= cur_memaddr);
		CHECK_FATAL ((r_start % pagesize) == 0);
		CHECK_FATAL ((r_end % pagesize) == 0);
		CHECK_FATAL (r_end >= (r_start + pagesize));
		if ((cur_memaddr % pagesize) != 0) {
			/* unaligned head: transfer up to the next page boundary */
			int max_len = pagesize - (cur_memaddr % pagesize);
			int op_len = cur_len;
			if (op_len > max_len) {
				op_len = max_len;
			}
			ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr,
				op_len, write, task);
		} else if (cur_len >= pagesize) {
			/* aligned body: transfer whole pages within this region */
			int max_len = r_end - cur_memaddr;
			int op_len = cur_len;
			if (op_len > max_len) {
				op_len = max_len;
			}
			op_len -= (op_len % pagesize);
			ret = mach_xfer_memory_block (cur_memaddr, cur_myaddr,
				op_len, write, task);
		} else {
			/* sub-page tail */
			ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr,
				cur_len, write, task);
		}
		if (write) {
			/* This vm_machine_attribute isn't supported on i386,
			   so let's not try. */
#if defined (TARGET_POWERPC)
			vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
			kret = vm_machine_attribute (task, r_start, r_size,
				MATTR_CACHE, &flush);
			if (kret != KERN_SUCCESS) {
				static int nwarn = 0;
				nwarn++;
				if (nwarn <= MAX_INSTRUCTION_CACHE_WARNINGS) {
					/* NOTE(review): passes ret (byte count) to
					   MACH_ERROR_STRING — presumably kret was meant */
					warning ("Unable to flush data/instruction cache for region at 0x%8.8llx: %s",
						(uint64_t) r_start, MACH_ERROR_STRING (ret));
				}
				if (nwarn == MAX_INSTRUCTION_CACHE_WARNINGS) {
					warning ("Support for flushing the data/instruction cache on this "
						"machine appears broken");
					warning ("No further warning messages will be given.");
				}
			}
#endif
			/* Try and restore permissions on the minimal address range. */
			if (changed_protections) {
				mach_vm_size_t prot_size;
				if (cur_len < r_size - (cur_memaddr - r_start))
					prot_size = cur_len;
				else
					prot_size = cur_memaddr - r_start;
				kret = macosx_vm_protect (task, r_start, r_size,
					cur_memaddr, prot_size, orig_protection, 0);
				if (kret != KERN_SUCCESS) {
					warning ("Unable to restore original permissions for region at 0x%8.8llx",
						(uint64_t) r_start);
				}
			}
		}
		cur_memaddr += ret;
		cur_myaddr += ret;
		cur_len -= ret;
		if (ret == 0) {
			break;
		}
	}
	return len - cur_len;
}
/* Transfer LEN bytes (page-aligned address, whole pages only) between
 * MYADDR and MEMADDR in TASK.  write != 0 writes to the inferior via
 * mach_vm_write; otherwise reads via mach_vm_read plus memcpy.
 * Returns LEN on success, 0 on any failure.
 * Fix: two broken warning() formats — "(0x%ux)" and "(0x%ulx)" parse as
 * %u followed by literal text; replaced with "0x%lx" and a matching
 * (unsigned long) cast as used elsewhere in this file. */
static int mach_xfer_memory_block (CORE_ADDR memaddr, char *myaddr,
		int len, int write, task_t task) {
	vm_size_t pagesize = child_get_pagesize ();
	vm_offset_t mempointer;       /* local copy of inferior's memory */
	mach_msg_type_number_t memcopied;     /* for vm_read to use */
	kern_return_t kret;
	CHECK_FATAL ((memaddr % pagesize) == 0);
	CHECK_FATAL ((len % pagesize) == 0);
	if (!write) {
		kret = mach_vm_read (task, memaddr, len, &mempointer, &memcopied);
		if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n",
				(uint64_t) memaddr, (unsigned long) len,
				MACH_ERROR_STRING (kret), kret);
#endif
			return 0;
		}
		if (memcopied != len) {
			/* short read: release the partial buffer and fail */
			kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
			if (kret != KERN_SUCCESS) {
				warning ("Unable to deallocate memory used by failed read from inferior: %s (0x%lx)",
					MACH_ERROR_STRING (kret), (unsigned long) kret);
			}
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: "
				"vm_read returned %lu bytes instead of %lu\n",
				(uint64_t) memaddr, (unsigned long) len,
				(unsigned long) memcopied, (unsigned long) len);
#endif
			return 0;
		}
		memcpy (myaddr, ((unsigned char *) 0) + mempointer, len);
		kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
		if (kret != KERN_SUCCESS) {
			warning ("Unable to deallocate memory used by read from inferior: %s (0x%lx)",
				MACH_ERROR_STRING (kret), (unsigned long) kret);
			return 0;
		}
	} else {
		kret = mach_vm_write (task, memaddr, (pointer_t) myaddr, len);
		if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to write region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n",
				(uint64_t) memaddr, (unsigned long) len,
				MACH_ERROR_STRING (kret), kret);
#endif
			return 0;
		}
	}
	return len;
}
/* Transfer LEN bytes that lie entirely within a single page of TASK
 * (an unaligned head or sub-page tail of a larger transfer).
 * Reads pull in the whole containing page and copy the slice out;
 * writes go straight through mach_vm_write (see comment below for why
 * whole-page writes were abandoned).  Returns LEN on success, 0 on
 * failure.
 * Fix: warning() format "(0x%ulx)" parsed as %u + literal "lx";
 * replaced with "0x%lx" and an (unsigned long) cast, matching the
 * correct pattern already used in the short-read branch. */
static int mach_xfer_memory_remainder (CORE_ADDR memaddr, char *myaddr,
		int len, int write, task_t task) {
	vm_size_t pagesize = child_get_pagesize ();
	vm_offset_t mempointer;       /* local copy of inferior's memory */
	mach_msg_type_number_t memcopied;     /* for vm_read to use */
	CORE_ADDR pageaddr = memaddr - (memaddr % pagesize);
	kern_return_t kret;
	/* [memaddr, memaddr+len) must not cross a page boundary */
	CHECK_FATAL (((memaddr + len - 1) - ((memaddr + len - 1) % pagesize)) == pageaddr);
	if (!write) {
		kret = mach_vm_read (task, pageaddr, pagesize, &mempointer, &memcopied);
		if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to read page for region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n",
				(uint64_t) pageaddr, (unsigned long) len,
				MACH_ERROR_STRING (kret), kret);
#endif
			return 0;
		}
		if (memcopied != pagesize) {
			/* short read: release the partial buffer and fail */
			kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
			if (kret != KERN_SUCCESS) {
				warning ("Unable to deallocate memory used by failed read from inferior: %s (0x%lx)",
					MACH_ERROR_STRING (kret), (unsigned long) kret);
			}
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: "
				"vm_read returned %lu bytes instead of %lu\n",
				(uint64_t) pageaddr, (unsigned long) pagesize,
				(unsigned long) memcopied, (unsigned long) pagesize);
#endif
			return 0;
		}
		/* copy just the requested slice out of the page-sized buffer */
		memcpy (myaddr, ((unsigned char *) 0) + mempointer + (memaddr - pageaddr), len);
		kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
		if (kret != KERN_SUCCESS) {
			warning ("Unable to deallocate memory used to read from inferior: %s (0x%lx)",
				MACH_ERROR_STRING (kret), (unsigned long) kret);
			return 0;
		}
	} else {
		/* We used to read in a whole page, then modify the page contents,
		   then write that page back out.  I bet we did that so we didn't
		   break up page maps or something like that.  However, in Leopard
		   there's a bug in the shared cache implementation, such that if
		   we write into it with whole pages the maximum page protections
		   don't get set properly and we can no longer reset the execute
		   bit.  In 64 bit Leopard apps, the execute bit has to be set or
		   we can't run code from there.  If we figure out that not
		   writing whole pages causes problems of it's own, then we will
		   have to revisit this. */
#if defined (TARGET_POWERPC)
		vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
		/* This vm_machine_attribute only works on PPC, so no reason
		   to keep failing on x86... */
		/* NOTE(review): mempointer is never assigned on the write path,
		   so this PPC-only flush operates on an indeterminate address —
		   confirm before building with TARGET_POWERPC. */
		kret = vm_machine_attribute (mach_task_self (), mempointer,
			pagesize, MATTR_CACHE, &flush);
#ifdef DEBUG_MACOSX_MUTILS
		if (kret != KERN_SUCCESS) {
			mutils_debug ("Unable to flush GDB's address space after memcpy prior to vm_write: %s (0x%lx)\n",
				MACH_ERROR_STRING (kret), kret);
		}
#endif
#endif
		kret = mach_vm_write (task, memaddr, (pointer_t) myaddr, len);
		if (kret != KERN_SUCCESS) {
#ifdef DEBUG_MACOSX_MUTILS
			mutils_debug ("Unable to write region at 0x%8.8llx with length %lu to inferior: %s (0x%lx)\n",
				(uint64_t) memaddr, (unsigned long) len,
				MACH_ERROR_STRING (kret), kret);
#endif
			return 0;
		}
	}
	return len;
}