/* Forcibly terminate the thread PTHREAD.  The underlying Mach thread is
   suspended while the cancellation request is posted, aborted to knock it
   out of any in-kernel wait, then resumed so the cancellation can be
   delivered, and finally joined to reap it.  */
void
gdb_pthread_kill (pthread_t pthread)
{
  mach_port_t mthread;
  kern_return_t kret;
  int ret;

  mthread = pthread_mach_thread_np (pthread);

  kret = thread_suspend (mthread);
  MACH_CHECK_ERROR (kret);

  ret = pthread_cancel (pthread);
  if (ret != 0)
    {
      /* in case a macro has re-defined this function: */
#undef strerror
      /* pthread_cancel returns the error code directly and does not set
         errno, so report RET, not errno.  */
      warning ("Unable to cancel thread: %s (%d)", strerror (ret), ret);
      thread_terminate (mthread);
    }

  kret = thread_abort (mthread);
  MACH_CHECK_ERROR (kret);

  kret = thread_resume (mthread);
  MACH_CHECK_ERROR (kret);

  ret = pthread_join (pthread, NULL);
  if (ret != 0)
    {
      /* Likewise, pthread_join reports errors via its return value.  */
      warning ("Unable to join to canceled thread: %s (%d)",
               strerror (ret), ret);
    }
}
/* Forward the exception message EREQUEST to the exception port that was
   installed before gdb took over (saved in INFERIOR), wait for that
   handler's reply, and then send the reply back to the original
   requester.  The remote-port swap below is what redirects each message;
   the original reply port is stashed first so the final reply can be
   routed back to the real sender.  */
static void
next_exception_forwarded_reply (struct next_inferior_status *inferior,
                                struct next_exception_data *erequest)
{
  kern_return_t kret;
  struct next_exception_reply ereply;
  /* Remember where the reply must ultimately go.  */
  mach_port_t orig_reply_port = erequest->header.msg_remote_port;

  /* Re-address the request: destination becomes the saved (pre-gdb)
     exception port, and replies come back to our reply port.  */
  erequest->header.msg_remote_port = inferior->saved_exceptions.port;
  erequest->header.msg_local_port = inferior->exception_reply_port;

  inferior_debug ("sending exception to old exception port\n");
  kret = msg_send (&erequest->header, MSG_OPTION_NONE, 0);
  MACH_CHECK_ERROR (kret);

  ereply.header.msg_size = sizeof (struct next_exception_reply);
  ereply.header.msg_local_port = inferior->exception_reply_port;

  inferior_debug ("receiving exception reply from old exception port\n");
  kret = msg_receive (&ereply.header, RCV_LARGE, 0);
  MACH_CHECK_ERROR (kret);

  /* Relay the handler's reply to the original requester.  */
  ereply.header.msg_local_port = PORT_NULL;
  ereply.header.msg_remote_port = orig_reply_port;

  inferior_debug ("sending exception reply\n");
  kret = msg_send (&ereply.header, MSG_OPTION_NONE, 0);
  MACH_CHECK_ERROR (kret);
}
size_t Page_Size(void) { #if defined(__MACH30__) kern_return_t result; vm_size_t page_size = 0; result = host_page_size(mach_host_self(), &page_size); MACH_CHECK_ERROR(result); return (size_t)page_size; #else /* ! __MACH30__ */ static struct vm_statistics stats_data; static struct vm_statistics *stats = NULL; kern_return_t kret; if (stats == NULL) { kret = vm_statistics(mach_task_self(), &stats_data); MACH_CHECK_ERROR(kret); stats = &stats_data; } return stats->pagesize; #endif /* ! __MACH30__ */ }
static void info_mach_task_command (char *args, int from_tty) { union { struct task_basic_info basic; struct task_events_info events; struct task_thread_times_info thread_times; } task_info_data; kern_return_t result; unsigned int info_count; task_t task; CHECK_ARGS ("Task", args); sscanf (args, "0x%x", &task); printf_unfiltered ("TASK_BASIC_INFO:\n"); info_count = TASK_BASIC_INFO_COUNT; result = task_info (task, TASK_BASIC_INFO, (task_info_t) & task_info_data.basic, &info_count); MACH_CHECK_ERROR (result); PRINT_FIELD (&task_info_data.basic, suspend_count); PRINT_FIELD (&task_info_data.basic, virtual_size); PRINT_FIELD (&task_info_data.basic, resident_size); #if 0 PRINT_FIELD (&task_info_data.basic, user_time); PRINT_FIELD (&task_info_data.basic, system_time); printf_unfiltered ("\nTASK_EVENTS_INFO:\n"); info_count = TASK_EVENTS_INFO_COUNT; result = task_info (task, TASK_EVENTS_INFO, (task_info_t) & task_info_data.events, &info_count); MACH_CHECK_ERROR (result); PRINT_FIELD (&task_info_data.events, faults); PRINT_FIELD (&task_info_data.events, zero_fills); PRINT_FIELD (&task_info_data.events, reactivations); PRINT_FIELD (&task_info_data.events, pageins); PRINT_FIELD (&task_info_data.events, cow_faults); PRINT_FIELD (&task_info_data.events, messages_sent); PRINT_FIELD (&task_info_data.events, messages_received); #endif printf_unfiltered ("\nTASK_THREAD_TIMES_INFO:\n"); info_count = TASK_THREAD_TIMES_INFO_COUNT; result = task_info (task, TASK_THREAD_TIMES_INFO, (task_info_t) & task_info_data.thread_times, &info_count); MACH_CHECK_ERROR (result); #if 0 PRINT_FIELD (&task_info_data.thread_times, user_time); PRINT_FIELD (&task_info_data.thread_times, system_time); #endif }
/* Enable or disable single-stepping for THREAD by setting or clearing
   the x86 trap flag (TF) in the thread's saved EFLAGS/RFLAGS.  ENABLE
   is non-zero to turn single-stepping on.  (The source text had the
   address-of expressions mojibake-mangled as "(R)s"; they are restored
   here as "&regs".)  */
void
darwin_set_sstep (thread_t thread, int enable)
{
  x86_thread_state_t regs;
  unsigned int count = x86_THREAD_STATE_COUNT;
  kern_return_t kret;

  kret = thread_get_state (thread, x86_THREAD_STATE,
                           (thread_state_t) &regs, &count);
  if (kret != KERN_SUCCESS)
    {
      printf_unfiltered (_("darwin_set_sstep: error %x, thread=%x\n"),
                         kret, thread);
      return;
    }

  switch (regs.tsh.flavor)
    {
    case x86_THREAD_STATE32:
      {
        __uint32_t bit = enable ? X86_EFLAGS_T : 0;

        /* A pending sigreturn will restore EFLAGS itself; let that path
           handle the trap flag.  */
        if (enable && i386_darwin_sstep_at_sigreturn (&regs))
          return;
        if ((regs.uts.ts32.__eflags & X86_EFLAGS_T) == bit)
          return;               /* Already in the requested state.  */
        regs.uts.ts32.__eflags
          = (regs.uts.ts32.__eflags & ~X86_EFLAGS_T) | bit;
        kret = thread_set_state (thread, x86_THREAD_STATE,
                                 (thread_state_t) &regs, count);
        MACH_CHECK_ERROR (kret);
      }
      break;
#ifdef BFD64
    case x86_THREAD_STATE64:
      {
        __uint64_t bit = enable ? X86_EFLAGS_T : 0;

        if (enable && amd64_darwin_sstep_at_sigreturn (&regs))
          return;
        if ((regs.uts.ts64.__rflags & X86_EFLAGS_T) == bit)
          return;
        regs.uts.ts64.__rflags
          = (regs.uts.ts64.__rflags & ~X86_EFLAGS_T) | bit;
        kret = thread_set_state (thread, x86_THREAD_STATE,
                                 (thread_state_t) &regs, count);
        MACH_CHECK_ERROR (kret);
      }
      break;
#endif
    default:
      error (_("darwin_set_sstep: unknown flavour: %d"), regs.tsh.flavor);
    }
}
void fetch_inferior_registers (int regno) { thread_t current_thread = ptid_get_tid (inferior_ptid); if ((regno == -1) || PPC_MACOSX_IS_GP_REGNUM (regno) || PPC_MACOSX_IS_GSP_REGNUM (regno)) { gdb_ppc_thread_state_64_t gp_regs; unsigned int gp_count = GDB_PPC_THREAD_STATE_64_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_STATE_64, (thread_state_t) & gp_regs, &gp_count); if (ret != KERN_SUCCESS) { printf ("Error calling thread_get_state for GP registers for thread 0x%ulx", current_thread); MACH_CHECK_ERROR (ret); } ppc_macosx_fetch_gp_registers_64 (&gp_regs); } if ((regno == -1) || PPC_MACOSX_IS_FP_REGNUM (regno) || PPC_MACOSX_IS_FSP_REGNUM (regno)) { gdb_ppc_thread_fpstate_t fp_regs; unsigned int fp_count = GDB_PPC_THREAD_FPSTATE_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_FPSTATE, (thread_state_t) & fp_regs, &fp_count); if (ret != KERN_SUCCESS) { printf ("Error calling thread_get_state for FP registers for thread 0x%ulx", current_thread); MACH_CHECK_ERROR (ret); } ppc_macosx_fetch_fp_registers (&fp_regs); } if ((regno == -1) || PPC_MACOSX_IS_VP_REGNUM (regno) || PPC_MACOSX_IS_VSP_REGNUM (regno)) { gdb_ppc_thread_vpstate_t vp_regs; unsigned int vp_count = GDB_PPC_THREAD_VPSTATE_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_VPSTATE, (thread_state_t) & vp_regs, &vp_count); if (ret != KERN_SUCCESS) { printf ("Error calling thread_get_state for Vector registers for thread 0x%ulx", current_thread); MACH_CHECK_ERROR (ret); } ppc_macosx_fetch_vp_registers (&vp_regs); } }
static void info_mach_exceptions_command (const char *args, int from_tty) { int i; task_t task; kern_return_t kret; darwin_exception_info info; info.count = sizeof (info.ports) / sizeof (info.ports[0]); if (args != NULL) { if (strcmp (args, "saved") == 0) { if (ptid_equal (inferior_ptid, null_ptid)) printf_unfiltered (_("No inferior running\n")); darwin_inferior *priv = get_darwin_inferior (current_inferior ()); disp_exception (&priv->exception_info); return; } else if (strcmp (args, "host") == 0) { /* FIXME: This need a privilegied host port! */ kret = host_get_exception_ports (darwin_host_self, EXC_MASK_ALL, info.masks, &info.count, info.ports, info.behaviors, info.flavors); MACH_CHECK_ERROR (kret); disp_exception (&info); } else error (_("Parameter is saved, host or none")); } else { struct inferior *inf; if (ptid_equal (inferior_ptid, null_ptid)) printf_unfiltered (_("No inferior running\n")); inf = current_inferior (); darwin_inferior *priv = get_darwin_inferior (inf); kret = task_get_exception_ports (priv->task, EXC_MASK_ALL, info.masks, &info.count, info.ports, info.behaviors, info.flavors); MACH_CHECK_ERROR (kret); disp_exception (&info); } }
static void info_mach_ports_command (char *args, int from_tty) { port_name_array_t port_names_data; port_type_array_t port_types_data; unsigned int name_count, type_count; kern_return_t result; int index; task_t task; CHECK_ARGS ("Task", args); sscanf (args, "0x%x", &task); result = port_names (task, &port_names_data, &name_count, &port_types_data, &type_count); MACH_CHECK_ERROR (result); CHECK_FATAL (name_count == type_count); printf_unfiltered ("Ports for task %#x:\n", task); for (index = 0; index < name_count; ++index) { printf_unfiltered ("port name: %#x, type %#x\n", port_names_data[index], port_types_data[index]); } vm_deallocate (task_self (), (vm_address_t) port_names_data, (name_count * sizeof (mach_port_t))); vm_deallocate (task_self (), (vm_address_t) port_types_data, (type_count * sizeof (mach_port_type_t))); }
/* If the OS ABI is still unknown (e.g. when attaching), determine
   whether THREAD is running 32- or 64-bit code from the flavor of its
   general-purpose register state and update the architecture.  */
void
darwin_check_osabi (darwin_inferior *inf, thread_t thread)
{
  x86_thread_state_t gp_regs;
  struct gdbarch_info info;
  unsigned int gp_count = x86_THREAD_STATE_COUNT;
  kern_return_t ret;

  if (gdbarch_osabi (target_gdbarch) != GDB_OSABI_UNKNOWN)
    return;

  /* Attaching to a process.  Let's figure out what kind it is.  */
  ret = thread_get_state (thread, x86_THREAD_STATE,
                          (thread_state_t) &gp_regs, &gp_count);
  if (ret != KERN_SUCCESS)
    {
      MACH_CHECK_ERROR (ret);
      return;
    }

  gdbarch_info_init (&info);
  gdbarch_info_fill (&info);
  info.byte_order = gdbarch_byte_order (target_gdbarch);
  info.osabi = GDB_OSABI_DARWIN;
  if (gp_regs.tsh.flavor == x86_THREAD_STATE64)
    info.bfd_arch_info = bfd_lookup_arch (bfd_arch_i386, bfd_mach_x86_64);
  else
    info.bfd_arch_info = bfd_lookup_arch (bfd_arch_i386,
                                          bfd_mach_i386_i386);
  gdbarch_update_p (info);
}
static void info_mach_thread_command (char *args, int from_tty) { union { struct thread_basic_info basic; } thread_info_data; thread_t thread; kern_return_t result; unsigned int info_count; CHECK_ARGS (_("Thread"), args); sscanf (args, "0x%x", &thread); printf_unfiltered (_("THREAD_BASIC_INFO\n")); info_count = THREAD_BASIC_INFO_COUNT; result = thread_info (thread, THREAD_BASIC_INFO, (thread_info_t) & thread_info_data.basic, &info_count); MACH_CHECK_ERROR (result); #if 0 PRINT_FIELD (&thread_info_data.basic, user_time); PRINT_FIELD (&thread_info_data.basic, system_time); #endif PRINT_FIELD (&thread_info_data.basic, cpu_usage); PRINT_FIELD (&thread_info_data.basic, run_state); PRINT_FIELD (&thread_info_data.basic, flags); PRINT_FIELD (&thread_info_data.basic, suspend_count); PRINT_FIELD (&thread_info_data.basic, sleep_time); }
static void info_mach_threads_command (char *args, int from_tty) { thread_array_t threads; unsigned int thread_count; kern_return_t result; task_t task; int i; task = get_task_from_args (args); if (task == TASK_NULL) return; result = task_threads (task, &threads, &thread_count); MACH_CHECK_ERROR (result); printf_unfiltered (_("Threads in task %#x:\n"), task); for (i = 0; i < thread_count; ++i) { printf_unfiltered (_(" %#x\n"), threads[i]); mach_port_deallocate (task_self (), threads[i]); } vm_deallocate (task_self (), (vm_address_t) threads, (thread_count * sizeof (thread_t))); }
/* Release SIZE bytes of previously allocated pages starting at ADDRESS
   in this task's address space.  */
void
Page_Delete (void *address, size_t size)
{
  kern_return_t result = vm_deallocate (mach_task_self (),
                                        (vm_address_t) address, size);

  MACH_CHECK_ERROR (result);
}
void store_inferior_registers (int regno) { int current_pid; thread_t current_thread; current_pid = ptid_get_pid (inferior_ptid); current_thread = ptid_get_tid (inferior_ptid); validate_inferior_registers (regno); if ((regno == -1) || PPC_MACOSX_IS_GP_REGNUM (regno) || PPC_MACOSX_IS_GSP_REGNUM (regno)) { gdb_ppc_thread_state_64_t gp_regs; kern_return_t ret; ppc_macosx_store_gp_registers_64 (&gp_regs); ret = thread_set_state (current_thread, GDB_PPC_THREAD_STATE_64, (thread_state_t) & gp_regs, GDB_PPC_THREAD_STATE_64_COUNT); MACH_CHECK_ERROR (ret); } if ((regno == -1) || PPC_MACOSX_IS_FP_REGNUM (regno) || PPC_MACOSX_IS_FSP_REGNUM (regno)) { gdb_ppc_thread_fpstate_t fp_regs; kern_return_t ret; ppc_macosx_store_fp_registers (&fp_regs); ret = thread_set_state (current_thread, GDB_PPC_THREAD_FPSTATE, (thread_state_t) & fp_regs, GDB_PPC_THREAD_FPSTATE_COUNT); MACH_CHECK_ERROR (ret); } if ((regno == -1) || PPC_MACOSX_IS_VP_REGNUM (regno) || PPC_MACOSX_IS_VSP_REGNUM (regno)) { gdb_ppc_thread_vpstate_t vp_regs; kern_return_t ret; ppc_macosx_store_vp_registers (&vp_regs); ret = thread_set_state (current_thread, GDB_PPC_THREAD_VPSTATE, (thread_state_t) & vp_regs, GDB_PPC_THREAD_VPSTATE_COUNT); MACH_CHECK_ERROR (ret); } }
/* Make SIZE bytes of pages at ADDRESS readable and writable in this
   task's address space.  */
void
Page_AllowAccess (void *address, size_t size)
{
  kern_return_t result = vm_protect (mach_task_self (),
                                     (vm_address_t) address, size, 0,
                                     (VM_PROT_READ | VM_PROT_WRITE));

  MACH_CHECK_ERROR (result);
}
/* Remove all access rights from SIZE bytes of pages at ADDRESS in this
   task's address space; any touch will then fault.  */
void
Page_DenyAccess (void *address, size_t size)
{
  kern_return_t result = vm_protect (mach_task_self (),
                                     (vm_address_t) address, size, 0,
                                     VM_PROT_NONE);

  MACH_CHECK_ERROR (result);
}
void ppc_fetch_inferior_registers (int regno) { thread_t current_thread = ((struct inferior_list_entry *) current_inferior)->id; if ((regno == -1) || PPC_MACOSX_IS_GP_REGNUM (regno) || PPC_MACOSX_IS_GSP_REGNUM (regno)) { gdb_ppc_thread_state_64_t gp_regs; unsigned int gp_count = GDB_PPC_THREAD_STATE_64_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_STATE_64, (thread_state_t) & gp_regs, &gp_count); MACH_CHECK_ERROR (ret); ppc_macosx_fetch_gp_registers_64 (&gp_regs); } if ((regno == -1) || PPC_MACOSX_IS_FP_REGNUM (regno) || PPC_MACOSX_IS_FSP_REGNUM (regno)) { gdb_ppc_thread_fpstate_t fp_regs; unsigned int fp_count = GDB_PPC_THREAD_FPSTATE_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_FPSTATE, (thread_state_t) & fp_regs, &fp_count); MACH_CHECK_ERROR (ret); ppc_macosx_fetch_fp_registers (&fp_regs); } if ((regno == -1) || PPC_MACOSX_IS_VP_REGNUM (regno) || PPC_MACOSX_IS_VSP_REGNUM (regno)) { gdb_ppc_thread_vpstate_t vp_regs; unsigned int vp_count = GDB_PPC_THREAD_VPSTATE_COUNT; kern_return_t ret = thread_get_state (current_thread, GDB_PPC_THREAD_VPSTATE, (thread_state_t) & vp_regs, &vp_count); MACH_CHECK_ERROR (ret); ppc_macosx_fetch_vp_registers (&vp_regs); } }
/* Return non-zero iff THREAD is still one of TASK's threads.  If TASK
   itself has died, task_threads fails with one of a few well-known
   errors and we simply report the thread as invalid.  */
int
macosx_thread_valid (task_t task, thread_t thread)
{
  thread_array_t thread_list;
  unsigned int thread_count = 0;
  unsigned int found = 0;
  unsigned int i;
  kern_return_t kret;

  CHECK_FATAL (task != TASK_NULL);

  kret = task_threads (task, &thread_list, &thread_count);
#ifdef DEBUG_MACOSX_MUTILS
  mutils_debug ("macosx_thread_valid - task_threads (%d, %p, %d) returned 0x%lx\n",
                task, &thread_list, thread_count, kret);
#endif
  /* These errors indicate the task port is dead or bogus, not that
     anything unexpected happened.  */
  if ((kret == KERN_INVALID_ARGUMENT)
      || (kret == MACH_SEND_INVALID_RIGHT)
      || (kret == MACH_RCV_INVALID_NAME))
    {
      return 0;
    }
  MACH_CHECK_ERROR (kret);

  for (i = 0; i < thread_count; i++)
    {
      if (thread_list[i] == thread)
        {
          found = 1;
          break;
        }
    }

  /* The thread array is kernel-allocated out-of-line data.  */
  kret = vm_deallocate (mach_task_self (), (vm_address_t) thread_list,
                        (vm_size_t) (thread_count * sizeof (thread_t)));
  MACH_CHECK_ERROR (kret);

#ifdef DEBUG_MACOSX_MUTILS
  if (!found)
    {
      mutils_debug ("thread 0x%lx no longer valid for task 0x%lx\n",
                    (unsigned long) thread, (unsigned long) task);
    }
#endif
  return found;
}
/* Allocate SIZE bytes of fresh zero-filled pages anywhere in this
   task's address space and return their starting address.  */
void *
Page_Create (size_t size)
{
  vm_address_t page_addr = 0;
  kern_return_t result;

  /* Final argument (anywhere == 1) lets the kernel pick the address.  */
  result = vm_allocate (mach_task_self (), &page_addr, size, 1);
  MACH_CHECK_ERROR (result);
  return ((void *) page_addr);
}
/* Record TASK's current exception ports into INFO so they can be
   restored later by next_restore_exception_ports.  */
void
next_save_exception_ports (task_t task, struct next_exception_info *info)
{
  kern_return_t kret;

  /* Tell the kernel how many slots INFO has room for.  */
  info->count = (sizeof (info->ports) / sizeof (info->ports[0]));
  kret = task_get_exception_ports (task, EXC_MASK_ALL,
                                   info->masks, &info->count,
                                   info->ports, info->behaviors,
                                   info->flavors);
  MACH_CHECK_ERROR (kret);
}
/* Reinstall the exception ports previously saved in INFO (one
   task_set_exception_ports call per saved entry) on TASK.  */
void
next_restore_exception_ports (task_t task, struct next_exception_info *info)
{
  kern_return_t kret;
  int ix;

  for (ix = 0; ix < info->count; ix++)
    {
      kret = task_set_exception_ports (task, info->masks[ix],
                                       info->ports[ix],
                                       info->behaviors[ix],
                                       info->flavors[ix]);
      MACH_CHECK_ERROR (kret);
    }
}
static uint32_t i386_darwin_dr_get (int regnum) { thread_t current_thread; x86_debug_state_t dr_regs; kern_return_t ret; unsigned int dr_count = x86_DEBUG_STATE_COUNT; gdb_assert (regnum >= 0 && regnum <= DR_CONTROL); current_thread = ptid_get_tid (inferior_ptid); dr_regs.dsh.flavor = x86_DEBUG_STATE32; dr_regs.dsh.count = x86_DEBUG_STATE32_COUNT; dr_count = x86_DEBUG_STATE_COUNT; ret = thread_get_state (current_thread, x86_DEBUG_STATE, (thread_state_t) &dr_regs, &dr_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error reading debug registers " "thread 0x%x via thread_get_state\n"), (int) current_thread); MACH_CHECK_ERROR (ret); } switch (regnum) { case 0: return dr_regs.uds.ds32.__dr0; case 1: return dr_regs.uds.ds32.__dr1; case 2: return dr_regs.uds.ds32.__dr2; case 3: return dr_regs.uds.ds32.__dr3; case 4: return dr_regs.uds.ds32.__dr4; case 5: return dr_regs.uds.ds32.__dr5; case 6: return dr_regs.uds.ds32.__dr6; case 7: return dr_regs.uds.ds32.__dr7; default: return -1; } }
/* Return the host page size, querying the kernel only on the first
   call and caching the result.  */
vm_size_t
child_get_pagesize ()
{
  kern_return_t status;
  static vm_size_t g_cached_child_page_size = 0;

  /* The cache starts at 0 meaning "not yet fetched"; the old code
     compared it against -1, so host_page_size was never called and this
     function always returned 0.  */
  if (g_cached_child_page_size == 0)
    {
      status = host_page_size (mach_host_self (), &g_cached_child_page_size);
      /* This is probably being over-careful, since if we can't call
         host_page_size on ourselves, we probably aren't going to get
         much further.  */
      if (status != KERN_SUCCESS)
        g_cached_child_page_size = 0;
      MACH_CHECK_ERROR (status);
    }

  return g_cached_child_page_size;
}
static void info_mach_threads_command (char *args, int from_tty) { thread_array_t thread_array; unsigned int thread_count; kern_return_t result; task_t task; int i; CHECK_ARGS ("Task", args); sscanf (args, "0x%x", &task); result = task_threads (task, &thread_array, &thread_count); MACH_CHECK_ERROR (result); printf_unfiltered ("Threads in task %#x:\n", task); for (i = 0; i < thread_count; ++i) { printf_unfiltered (" %#x\n", thread_array[i]); } vm_deallocate (task_self (), (vm_address_t) thread_array, (thread_count * sizeof (thread_t))); }
/* Dump the receive-side status of PORT (a receive right in TASK) for
   debugging: queue limits, message counts, right counts and flags.  */
static void
darwin_debug_port_info (task_t task, mach_port_t port)
{
  mach_port_status_t port_status;
  mach_msg_type_number_t count = sizeof (port_status);
  kern_return_t kr;

  kr = mach_port_get_attributes (task, port, MACH_PORT_RECEIVE_STATUS,
                                 (mach_port_info_t) &port_status, &count);
  MACH_CHECK_ERROR (kr);

  printf_unfiltered (_("Port 0x%lx in task 0x%lx:\n"),
                     (unsigned long) port, (unsigned long) task);
  printf_unfiltered (_("  port set: 0x%x\n"), port_status.mps_pset);
  printf_unfiltered (_("     seqno: 0x%x\n"), port_status.mps_seqno);
  printf_unfiltered (_("   mscount: 0x%x\n"), port_status.mps_mscount);
  printf_unfiltered (_("    qlimit: 0x%x\n"), port_status.mps_qlimit);
  printf_unfiltered (_("  msgcount: 0x%x\n"), port_status.mps_msgcount);
  printf_unfiltered (_("  sorights: 0x%x\n"), port_status.mps_sorights);
  printf_unfiltered (_("   srights: 0x%x\n"), port_status.mps_srights);
  printf_unfiltered (_(" pdrequest: 0x%x\n"), port_status.mps_pdrequest);
  printf_unfiltered (_(" nsrequest: 0x%x\n"), port_status.mps_nsrequest);
  printf_unfiltered (_("     flags: 0x%x\n"), port_status.mps_flags);
}
/* Implement the "info mach-thread" command: parse a thread port from
   ARGS (hex), display the thread's basic info, and on ppc hosts also
   dump its general-purpose register state.  */
static void
info_mach_thread_command (char *args, int from_tty)
{
  union
  {
    struct thread_basic_info basic;
  } thread_info_data;
  thread_t thread;
  kern_return_t result;
  unsigned int info_count;

  CHECK_ARGS ("Thread", args);
  sscanf (args, "0x%x", &thread);

  printf_unfiltered ("THREAD_BASIC_INFO\n");
  info_count = THREAD_BASIC_INFO_COUNT;
  result = thread_info (thread,
                        THREAD_BASIC_INFO,
                        (thread_info_t) & thread_info_data.basic,
                        &info_count);
  MACH_CHECK_ERROR (result);
#if 0
  PRINT_FIELD (&thread_info_data.basic, user_time);
  PRINT_FIELD (&thread_info_data.basic, system_time);
#endif
  PRINT_FIELD (&thread_info_data.basic, cpu_usage);
  PRINT_FIELD (&thread_info_data.basic, run_state);
  PRINT_FIELD (&thread_info_data.basic, flags);
  PRINT_FIELD (&thread_info_data.basic, suspend_count);
  PRINT_FIELD (&thread_info_data.basic, sleep_time);
#ifdef __ppc__
  {
    union
    {
      struct __darwin_ppc_thread_state thread;
      struct __darwin_ppc_exception_state exception;
    } thread_state;
    int register_count, i;
    unsigned int *register_data;

    info_count = PPC_THREAD_STATE_COUNT;
    result = thread_get_state (thread,
                               PPC_THREAD_STATE,
                               (thread_state_t) & thread_state.thread,
                               &info_count);
    MACH_CHECK_ERROR (result);
    printf_unfiltered ("\nPPC_THREAD_STATE \n");
    /* Walk the 32 GP registers four per output row, relying on __r0
       through __r31 being laid out contiguously in the state struct.  */
    register_data = &thread_state.thread.__r0;
    register_count = 0;
    for (i = 0; i < 8; ++i)
      {
        printf_unfiltered ("r%02d: 0x%08x ", register_count++,
                           *register_data++);
        printf_unfiltered ("r%02d: 0x%08x ", register_count++,
                           *register_data++);
        printf_unfiltered ("r%02d: 0x%08x ", register_count++,
                           *register_data++);
        printf_unfiltered ("r%02d: 0x%08x\n", register_count++,
                           *register_data++);
      }
    /* Special-purpose registers follow the GP block.  */
    printf_unfiltered ("srr0: 0x%08x srr1: 0x%08x\n",
                       thread_state.thread.__srr0,
                       thread_state.thread.__srr1);
    printf_unfiltered ("cr:   0x%08x xer: 0x%08x\n",
                       thread_state.thread.__cr, thread_state.thread.__xer);
    printf_unfiltered ("lr:   0x%08x ctr: 0x%08x\n",
                       thread_state.thread.__lr, thread_state.thread.__ctr);
  }
#endif
}
/* (Re)initialize INFERIOR to track TASK (the Mach task of process PID):
   reset the structure, then allocate and configure the full set of ports
   gdb uses to talk to the inferior — notify, signal, dyld, exception and
   exception-reply — and optionally take over the task's exception port,
   saving the old one first so it can be restored later.  */
void
next_create_inferior_for_task (struct next_inferior_status *inferior,
                               task_t task, int pid)
{
  kern_return_t ret;

  CHECK_FATAL (inferior != NULL);

  next_inferior_destroy (inferior);
  next_inferior_reset (inferior);

  inferior->task = task;
  inferior->pid = pid;

  inferior->attached_in_ptrace = 0;
  inferior->stopped_in_ptrace = 0;

  inferior->suspend_count = 0;

  /* */

  dyld_init_paths (&inferior->dyld_status.path_info);

  /* get notification messages for current task */

  ret = port_allocate (task_self (), &inferior->notify_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->notify_port,
                          PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);
  if (inferior_bind_notify_port_flag)
    {
      ret = task_set_notify_port (task_self (), inferior->notify_port);
      MACH_CHECK_ERROR (ret);
    }

  /* initialize signal port */

  ret = port_allocate (task_self (), &inferior->signal_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->signal_port,
                          PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* initialize dyld port */

  /* NOTE(review): this is the only allocation checked with
     MACH_WARN_ERROR instead of MACH_CHECK_ERROR — presumably deliberate
     (dyld tracking is non-fatal?), but worth confirming.  */
  ret = port_allocate (task_self (), &inferior->dyld_port);
  MACH_WARN_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->dyld_port,
                          PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* initialize gdb exception port */

  ret = port_allocate (task_self (), &inferior->exception_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->exception_port,
                          PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  ret = port_allocate (task_self (), &inferior->exception_reply_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->exception_reply_port,
                          PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* commandeer inferior exception port */

  if (inferior_bind_exception_port_flag)
    {
      /* Save the old ports first so they can be restored on detach.  */
      next_save_exception_ports (inferior->task,
                                 &inferior->saved_exceptions);
      ret = task_set_exception_port (task, inferior->exception_port);
      MACH_CHECK_ERROR (ret);
    }

  inferior->last_thread = next_primary_thread_of_task (inferior->task);
}
/* Read register values from the inferior process. If REGNO is -1, do this for all registers. Otherwise, REGNO specifies which register (so we can save time). */ static void i386_darwin_fetch_inferior_registers (struct target_ops *ops, struct regcache *regcache, int regno) { thread_t current_thread = ptid_get_tid (inferior_ptid); int fetched = 0; struct gdbarch *gdbarch = get_regcache_arch (regcache); #ifdef BFD64 if (gdbarch_ptr_bit (gdbarch) == 64) { if (regno == -1 || amd64_native_gregset_supplies_p (gdbarch, regno)) { x86_thread_state_t gp_regs; unsigned int gp_count = x86_THREAD_STATE_COUNT; kern_return_t ret; ret = thread_get_state (current_thread, x86_THREAD_STATE, (thread_state_t) & gp_regs, &gp_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error calling thread_get_state for " "GP registers for thread 0x%ulx"), current_thread); MACH_CHECK_ERROR (ret); } amd64_supply_native_gregset (regcache, &gp_regs.uts, -1); fetched++; } if (regno == -1 || !amd64_native_gregset_supplies_p (gdbarch, regno)) { x86_float_state_t fp_regs; unsigned int fp_count = x86_FLOAT_STATE_COUNT; kern_return_t ret; ret = thread_get_state (current_thread, x86_FLOAT_STATE, (thread_state_t) & fp_regs, &fp_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error calling thread_get_state for " "float registers for thread 0x%ulx"), current_thread); MACH_CHECK_ERROR (ret); } amd64_supply_fxsave (regcache, -1, &fp_regs.ufs.fs64.__fpu_fcw); fetched++; } } else #endif { if (regno == -1 || regno < I386_NUM_GREGS) { i386_thread_state_t gp_regs; unsigned int gp_count = i386_THREAD_STATE_COUNT; kern_return_t ret; int i; ret = thread_get_state (current_thread, i386_THREAD_STATE, (thread_state_t) & gp_regs, &gp_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error calling thread_get_state for " "GP registers for thread 0x%ulx"), current_thread); MACH_CHECK_ERROR (ret); } for (i = 0; i < I386_NUM_GREGS; i++) regcache_raw_supply (regcache, i, (char *)&gp_regs + 
i386_darwin_thread_state_reg_offset[i]); fetched++; } if (regno == -1 || (regno >= I386_ST0_REGNUM && regno < I386_SSE_NUM_REGS)) { i386_float_state_t fp_regs; unsigned int fp_count = i386_FLOAT_STATE_COUNT; kern_return_t ret; ret = thread_get_state (current_thread, i386_FLOAT_STATE, (thread_state_t) & fp_regs, &fp_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error calling thread_get_state for " "float registers for thread 0x%ulx"), current_thread); MACH_CHECK_ERROR (ret); } i387_supply_fxsave (regcache, -1, &fp_regs.__fpu_fcw); fetched++; } } if (! fetched) { warning (_("unknown register %d"), regno); regcache_raw_supply (regcache, regno, NULL); } }
void store_inferior_registers (int regno) { int current_pid; thread_t current_thread; kern_return_t ret; current_pid = ptid_get_pid (inferior_ptid); current_thread = ptid_get_tid (inferior_ptid); validate_inferior_registers (regno); if ((regno == -1) || ARM_MACOSX_IS_GP_RELATED_REGNUM (regno)) { struct gdb_arm_thread_state gp_regs; arm_macosx_store_gp_registers (&gp_regs); ret = thread_set_state (current_thread, GDB_ARM_THREAD_STATE, (thread_state_t) & gp_regs, GDB_ARM_THREAD_STATE_COUNT); MACH_CHECK_ERROR (ret); } if ((regno == -1) || ARM_MACOSX_IS_VFP_RELATED_REGNUM (regno)) { enum arm_vfp_version vfp_version; vfp_version = gdbarch_tdep (current_gdbarch)->vfp_version; int fp_byte_size = -1; switch (vfp_version) { case ARM_VFP_UNSUPPORTED: /* No VFP support, so nothing to do. */ fp_byte_size = 0; break; case ARM_VFP_VERSION_1: { gdb_arm_thread_vfpv1_state_t fp_regs; arm_macosx_store_vfpv1_regs (&fp_regs); ret = thread_set_state (current_thread, GDB_ARM_THREAD_FPSTATE, (thread_state_t) & fp_regs, GDB_ARM_THREAD_FPSTATE_VFPV1_COUNT); MACH_CHECK_ERROR (ret); } break; case ARM_VFP_VERSION_3: { gdb_arm_thread_vfpv3_state_t fp_regs; arm_macosx_store_vfpv3_regs (&fp_regs); ret = thread_set_state (current_thread, GDB_ARM_THREAD_FPSTATE, (thread_state_t) & fp_regs, GDB_ARM_THREAD_FPSTATE_VFPV3_COUNT); MACH_CHECK_ERROR (ret); } break; default: error ("store_inferior_registers: unable to store ARM_THREAD_FPSTATE: " "unsupported vfp version: %d", (int)vfp_version); break; } } }
static void i386_darwin_dr_set (int regnum, uint32_t value) { int current_pid; thread_t current_thread; x86_debug_state_t dr_regs; kern_return_t ret; unsigned int dr_count = x86_DEBUG_STATE_COUNT; gdb_assert (regnum >= 0 && regnum <= DR_CONTROL); current_thread = ptid_get_tid (inferior_ptid); dr_regs.dsh.flavor = x86_DEBUG_STATE32; dr_regs.dsh.count = x86_DEBUG_STATE32_COUNT; dr_count = x86_DEBUG_STATE_COUNT; ret = thread_get_state (current_thread, x86_DEBUG_STATE, (thread_state_t) &dr_regs, &dr_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error reading debug registers " "thread 0x%x via thread_get_state\n"), (int) current_thread); MACH_CHECK_ERROR (ret); } switch (regnum) { case 0: dr_regs.uds.ds32.__dr0 = value; break; case 1: dr_regs.uds.ds32.__dr1 = value; break; case 2: dr_regs.uds.ds32.__dr2 = value; break; case 3: dr_regs.uds.ds32.__dr3 = value; break; case 4: dr_regs.uds.ds32.__dr4 = value; break; case 5: dr_regs.uds.ds32.__dr5 = value; break; case 6: dr_regs.uds.ds32.__dr6 = value; break; case 7: dr_regs.uds.ds32.__dr7 = value; break; } ret = thread_set_state (current_thread, x86_DEBUG_STATE, (thread_state_t) &dr_regs, dr_count); if (ret != KERN_SUCCESS) { printf_unfiltered (_("Error writing debug registers " "thread 0x%x via thread_get_state\n"), (int) current_thread); MACH_CHECK_ERROR (ret); } }
/* Write register values back to the inferior process.  REGNO is the
   register to store, or -1 for all registers.  Each state block is
   read from the thread, updated from the regcache, and written back,
   so unselected registers keep their current values.  */
static void
i386_darwin_store_inferior_registers (struct target_ops *ops,
                                      struct regcache *regcache,
                                      int regno)
{
  thread_t current_thread = ptid_get_tid (inferior_ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

#ifdef BFD64
  if (gdbarch_ptr_bit (gdbarch) == 64)
    {
      if (regno == -1 || amd64_native_gregset_supplies_p (gdbarch, regno))
        {
          x86_thread_state_t gp_regs;
          kern_return_t ret;
          unsigned int gp_count = x86_THREAD_STATE_COUNT;

          /* Read-modify-write: fetch the current GP state first.  */
          ret = thread_get_state
            (current_thread, x86_THREAD_STATE, (thread_state_t) &gp_regs,
             &gp_count);
          MACH_CHECK_ERROR (ret);
          gdb_assert (gp_regs.tsh.flavor == x86_THREAD_STATE64);
          gdb_assert (gp_regs.tsh.count == x86_THREAD_STATE64_COUNT);

          amd64_collect_native_gregset (regcache, &gp_regs.uts, regno);

          ret = thread_set_state (current_thread, x86_THREAD_STATE,
                                  (thread_state_t) &gp_regs,
                                  x86_THREAD_STATE_COUNT);
          MACH_CHECK_ERROR (ret);
        }

      if (regno == -1 || !amd64_native_gregset_supplies_p (gdbarch, regno))
        {
          x86_float_state_t fp_regs;
          kern_return_t ret;
          unsigned int fp_count = x86_FLOAT_STATE_COUNT;

          ret = thread_get_state
            (current_thread, x86_FLOAT_STATE, (thread_state_t) & fp_regs,
             &fp_count);
          MACH_CHECK_ERROR (ret);
          gdb_assert (fp_regs.fsh.flavor == x86_FLOAT_STATE64);
          gdb_assert (fp_regs.fsh.count == x86_FLOAT_STATE64_COUNT);

          /* __fpu_fcw is the first field of the fxsave block.  */
          amd64_collect_fxsave (regcache, regno, &fp_regs.ufs.fs64.__fpu_fcw);

          ret = thread_set_state (current_thread, x86_FLOAT_STATE,
                                  (thread_state_t) & fp_regs,
                                  x86_FLOAT_STATE_COUNT);
          MACH_CHECK_ERROR (ret);
        }
    }
  else
#endif
    {
      if (regno == -1 || regno < I386_NUM_GREGS)
        {
          i386_thread_state_t gp_regs;
          kern_return_t ret;
          unsigned int gp_count = i386_THREAD_STATE_COUNT;
          int i;

          ret = thread_get_state
            (current_thread, i386_THREAD_STATE, (thread_state_t) & gp_regs,
             &gp_count);
          MACH_CHECK_ERROR (ret);

          /* Copy only the requested register(s) into the state block.  */
          for (i = 0; i < I386_NUM_GREGS; i++)
            if (regno == -1 || regno == i)
              regcache_raw_collect
                (regcache, i,
                 (char *) &gp_regs + i386_darwin_thread_state_reg_offset[i]);

          ret = thread_set_state (current_thread, i386_THREAD_STATE,
                                  (thread_state_t) & gp_regs,
                                  i386_THREAD_STATE_COUNT);
          MACH_CHECK_ERROR (ret);
        }

      if (regno == -1
          || (regno >= I386_ST0_REGNUM && regno < I386_SSE_NUM_REGS))
        {
          i386_float_state_t fp_regs;
          unsigned int fp_count = i386_FLOAT_STATE_COUNT;
          kern_return_t ret;

          ret = thread_get_state
            (current_thread, i386_FLOAT_STATE, (thread_state_t) & fp_regs,
             &fp_count);
          MACH_CHECK_ERROR (ret);

          i387_collect_fxsave (regcache, regno, &fp_regs.__fpu_fcw);

          ret = thread_set_state (current_thread, i386_FLOAT_STATE,
                                  (thread_state_t) & fp_regs,
                                  i386_FLOAT_STATE_COUNT);
          MACH_CHECK_ERROR (ret);
        }
    }
}