/* Resynchronize gdbserver's thread list with the threads currently
   present in the NTO inferior NTO_INFERIOR.  Walk tids upward from 1,
   querying DCMD_PROC_TIDSTATUS for each; the returned status.tid is
   apparently rounded up to the next existing tid (TODO confirm against
   QNX procfs docs), so any tids skipped over are gone and their
   thread_info entries are removed.  Live tids not yet known are added.
   No-op if the process control descriptor is not open.  */
static void
nto_find_new_threads (struct nto_inferior *nto_inferior)
{
  pthread_t tid;

  TRACE ("%s pid:%d\n", __func__, nto_inferior->pid);

  if (nto_inferior->ctl_fd == -1)
    return;

  for (tid = 1;; ++tid)
    {
      procfs_status status;
      ptid_t ptid;
      int err;

      status.tid = tid;
      err = devctl (nto_inferior->ctl_fd, DCMD_PROC_TIDSTATUS, &status,
                    sizeof (status), 0);

      /* Query failed or no thread at or above TID: we are done.  */
      if (err != EOK || status.tid == 0)
        break;

      /* All threads in between are gone.  Remove any thread_info we
         still hold for them; the loop also removes status.tid itself
         when the kernel reports it STATE_DEAD.  */
      while (tid != status.tid || status.state == STATE_DEAD)
        {
          struct thread_info *ti;

          ptid = ptid_build (nto_inferior->pid, tid, 0);
          ti = find_thread_ptid (ptid);
          if (ti != NULL)
            {
              TRACE ("Removing thread %d\n", tid);
              remove_thread (ti);
            }
          if (tid == status.tid)
            break;
          ++tid;
        }

      /* TID is alive; register it if it is new to us.  */
      if (status.state != STATE_DEAD)
        {
          TRACE ("Adding thread %d\n", tid);
          ptid = ptid_build (nto_inferior->pid, tid, 0);
          if (!find_thread_ptid (ptid))
            add_thread (ptid, NULL);
        }
    }
}
/* Target hook: refresh gdb's thread list from the live NetBSD process.
   Iterates the kernel LWP list via PT_LWPINFO (pl_lwpid == 0 starts the
   scan and also terminates it) and adds any LWP not already known.
   Does nothing when the thread layer is inactive or there is no
   inferior.  NOTE(review): the scan queries the pid of inferior_ptid
   but builds new ptids from main_ptid's pid — presumably these are the
   same process; verify against the callers.  */
static void
nbsd_update_thread_list (struct target_ops *ops)
{
  int retval;
  ptid_t ptid;

  if (nbsd_thread_active == 0)
    return;

  if (ptid_equal (inferior_ptid, minus_one_ptid))
    {
      printf_filtered ("No process.\n");
      return;
    }

  if (target_has_execution)
    {
      struct ptrace_lwpinfo pl;

      /* pl_lwpid == 0 asks the kernel for the first LWP.  */
      pl.pl_lwpid = 0;
      retval = ptrace (PT_LWPINFO, ptid_get_pid(inferior_ptid),
                       (void *)&pl, sizeof(pl));
      /* Each successful call advances pl to the next LWP; a returned
         pl_lwpid of 0 marks the end of the list.  */
      while ((retval != -1) && pl.pl_lwpid != 0)
        {
          ptid = ptid_build (ptid_get_pid (main_ptid), pl.pl_lwpid, 0);
          if (!in_thread_list (ptid))
            add_thread (ptid);
          retval = ptrace (PT_LWPINFO, ptid_get_pid(inferior_ptid),
                           (void *)&pl, sizeof(pl));
        }
    }
}
/* bfd_map_over_sections callback: register one thread for each ".reg/TID"
   section of the core file.  REG_SECT_ARG is the ".reg" section; the
   thread whose section shares its file position becomes the current
   thread.  */
static void
add_to_thread_list (bfd *abfd, asection *asect, void *reg_sect_arg)
{
  asection *reg_sect = (asection *) reg_sect_arg;
  const char *sect_name = bfd_section_name (abfd, asect);
  int pid, lwpid, core_tid;
  ptid_t ptid;

  /* Only ".reg/NNN" sections describe threads.  */
  if (strncmp (sect_name, ".reg/", 5) != 0)
    return;

  core_tid = atoi (sect_name + 5);

  /* Cores from some systems carry no pid; substitute a fake one.  */
  pid = bfd_core_file_pid (core_bfd);
  if (pid == 0)
    {
      core_has_fake_pid = 1;
      pid = CORELOW_PID;
    }

  lwpid = core_tid;

  if (current_inferior ()->pid == 0)
    inferior_appeared (current_inferior (), pid);

  ptid = ptid_build (pid, lwpid, 0);
  add_thread (ptid);

  /* Warning, Will Robinson, looking at BFD private data! */
  if (reg_sect != NULL
      && asect->filepos == reg_sect->filepos)	/* Did we find .reg? */
    inferior_ptid = ptid;			/* Yes, make it current. */
}
static ptid_t find_active_thread (void) { int val; td_thread_t *thread; td_thread_info_t ti; struct ptrace_lwpinfo pl; if (!ptid_equal (cached_thread, minus_one_ptid)) return cached_thread; if (target_has_execution) { pl.pl_lwpid = 0; val = ptrace (PT_LWPINFO, ptid_get_pid(inferior_ptid), (void *)&pl, sizeof(pl)); while ((val != -1) && (pl.pl_lwpid != 0) && (pl.pl_event != PL_EVENT_SIGNAL)) { val = ptrace (PT_LWPINFO, ptid_get_pid(inferior_ptid), (void *)&pl, sizeof(pl)); } if (pl.pl_lwpid == 0) /* found no "active" thread, stay with current */ pl.pl_lwpid = inferior_ptid.lwp; } else { return inferior_ptid; } cached_thread = ptid_build (ptid_get_pid (main_ptid), pl.pl_lwpid, 0); return cached_thread; }
/* Attach to the thread described by TH_P/TI_P, recording its thread-db
   handle in the corresponding lwp_info.  Returns 1 on success, 0 if the
   LWP could not be attached (a warning is emitted).

   Fix: the messages printed the `(unsigned long)'-cast thread id with
   `%ld'; a `%ld' conversion with an unsigned long argument is a
   format-specifier mismatch (undefined behavior per C11 7.21.6.1).
   Use `%lu' to match the cast.  */
static int
attach_thread (const td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
  struct process_info *proc = current_process ();
  int pid = pid_of (proc);
  ptid_t ptid = ptid_build (pid, ti_p->ti_lid, 0);
  struct lwp_info *lwp;
  int err;

  if (debug_threads)
    debug_printf ("Attaching to thread %lu (LWP %d)\n",
                  (unsigned long) ti_p->ti_tid, ti_p->ti_lid);
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      warning ("Could not attach to thread %lu (LWP %d): %s\n",
               (unsigned long) ti_p->ti_tid, ti_p->ti_lid,
               linux_ptrace_attach_fail_reason_string (ptid, err));
      return 0;
    }

  lwp = find_lwp_pid (ptid);
  gdb_assert (lwp != NULL);
  /* Remember the thread-db handle so later queries need not re-map it.  */
  lwp->thread_known = 1;
  lwp->th = *th_p;

  return 1;
}
static void obsd_update_thread_list (struct target_ops *ops) { pid_t pid = ptid_get_pid (inferior_ptid); struct ptrace_thread_state pts; prune_threads (); if (ptrace (PT_GET_THREAD_FIRST, pid, (caddr_t)&pts, sizeof pts) == -1) perror_with_name (("ptrace")); while (pts.pts_tid != -1) { ptid_t ptid = ptid_build (pid, pts.pts_tid, 0); if (!in_thread_list (ptid)) { if (ptid_get_lwp (inferior_ptid) == 0) thread_change_ptid (inferior_ptid, ptid); else add_thread (ptid); } if (ptrace (PT_GET_THREAD_NEXT, pid, (caddr_t)&pts, sizeof pts) == -1) perror_with_name (("ptrace")); } }
/* thread-db callback: write register set REGSET for LWP from the raw
   buffer BUF, via the regcache carried in ARG.  Returns 0 on success or
   TD_ERR_INVAL for an unsupported register set.

   NOTE(review): even when REGSET is unrecognized the function still
   stores registers beneath; presumably harmless since the regcache was
   not modified, but confirm this is intended.  */
static int
nbsd_thread_proc_setregs (void *arg, int regset, int lwp, void *buf)
{
  struct nbsd_thread_proc_arg *a = (struct nbsd_thread_proc_arg *) arg;
  struct regcache *cache = a->cache;
  struct target_ops *ops = a->ops;
  struct cleanup *old_chain;
  struct target_ops *beneath = find_target_beneath (ops);
  int ret;

  ret = 0;
  /* inferior_ptid is rewritten below; restore it on the way out.  */
  old_chain = save_inferior_ptid ();
  switch (regset)
    {
    case 0:
      supply_gregset(cache, (gregset_t *)buf);
      break;
    case 1:
#ifdef HAVE_FPREGS
      supply_fpregset(cache, (fpregset_t *)buf);
#endif
      break;
    default:
      /* XXX need to handle other reg sets: SSE, AltiVec, etc. */
      ret = TD_ERR_INVAL;
    }

  /* Storing registers requires that inferior_ptid is a LWP value
     rather than a thread value. */
  inferior_ptid = ptid_build (ptid_get_pid (main_ptid), lwp, 0);
  beneath->to_store_registers (beneath, cache, -1);
  do_cleanups (old_chain);

  return ret;
}
static void nto_request_interrupt (void) { TRACE ("%s\n", __func__); nto_set_thread (ptid_build (nto_inferior.pid, 1, 0)); if (EOK != devctl (nto_inferior.ctl_fd, DCMD_PROC_STOP, NULL, 0, 0)) TRACE ("Error stopping inferior.\n"); }
/* Attach to process PID: open its /proc address-space node, stop it,
   arrange for SIGUSR1 delivery on process-stop events, and register the
   process and its threads with gdbserver.  Returns PID on success or -1
   on failure (after detaching/cleaning up).  */
static pid_t
do_attach (pid_t pid)
{
  procfs_status status;
  struct sigevent event;

  /* Drop any previously attached inferior first.  */
  if (nto_inferior.ctl_fd != -1)
    {
      close (nto_inferior.ctl_fd);
      init_nto_inferior (&nto_inferior);
    }
  xsnprintf (nto_inferior.nto_procfs_path, PATH_MAX - 1, "/proc/%d/as", pid);
  nto_inferior.ctl_fd = open (nto_inferior.nto_procfs_path, O_RDWR);
  if (nto_inferior.ctl_fd == -1)
    {
      TRACE ("Failed to open %s\n", nto_inferior.nto_procfs_path);
      init_nto_inferior (&nto_inferior);
      return -1;
    }
  /* Stop the process before we start inspecting it.  */
  if (devctl (nto_inferior.ctl_fd, DCMD_PROC_STOP, &status, sizeof (status), 0)
      != EOK)
    {
      do_detach ();
      return -1;
    }
  nto_inferior.pid = pid;
  /* Define a sigevent for process stopped notification.  */
  event.sigev_notify = SIGEV_SIGNAL_THREAD;
  event.sigev_signo = SIGUSR1;
  event.sigev_code = 0;
  event.sigev_value.sival_ptr = NULL;
  event.sigev_priority = -1;
  devctl (nto_inferior.ctl_fd, DCMD_PROC_EVENT, &event, sizeof (event), 0);

  if (devctl (nto_inferior.ctl_fd, DCMD_PROC_STATUS, &status, sizeof (status),
	      0) == EOK
      && (status.flags & _DEBUG_FLAG_STOPPED))
    {
      ptid_t ptid;
      struct process_info *proc;

      /* SIGCONT balances the stop so the process can run under our
	 control; the debug stop remains in effect.  */
      kill (pid, SIGCONT);
      ptid = ptid_build (status.pid, status.tid, 0);
      the_low_target.arch_setup ();
      proc = add_process (status.pid, 1);
      proc->tdesc = nto_tdesc;
      TRACE ("Adding thread: pid=%d tid=%ld\n", status.pid,
	     ptid_get_lwp (ptid));
      nto_find_new_threads (&nto_inferior);
    }
  else
    {
      do_detach ();
      return -1;
    }

  return pid;
}
/* Build the ptid gdbserver uses for a LynxOS thread.

   brobecker/2010-06-21: It looks like the LWP field in ptids
   should be distinct for each thread (see write_ptid where it
   writes the thread ID from the LWP).  So instead of storing
   the LynxOS tid in the tid field of the ptid, we store it in
   the lwp field.  */
static ptid_t
lynx_ptid_build (int pid, long tid)
{
  ptid_t result = ptid_build (pid, tid, 0);

  return result;
}
static void nbsd_thread_activate (void) { nbsd_thread_active = 1; main_ptid = inferior_ptid; cached_thread = minus_one_ptid; thread_change_ptid(inferior_ptid, ptid_build (ptid_get_pid (inferior_ptid), 1, 0)); nbsd_update_thread_list (NULL); inferior_ptid = find_active_thread (); }
/* Return the sim_inferior_data for INF, creating it (and, when
   SIM_INSTANCE_NEEDED is SIM_INSTANCE_NEEDED, a simulator instance) on
   first use.  NOTE(review): this definition appears truncated in this
   chunk — the trailing return and closing brace are not visible.  */
static struct sim_inferior_data *
get_sim_inferior_data (struct inferior *inf, int sim_instance_needed)
{
  SIM_DESC sim_desc = NULL;
  struct sim_inferior_data *sim_data
    = (struct sim_inferior_data *) inferior_data (inf, sim_inferior_data_key);

  /* Try to allocate a new sim instance, if needed.  We do this ahead of
     a potential allocation of a sim_inferior_data struct in order to
     avoid needlessly allocating that struct in the event that the
     sim instance allocation fails.  */
  if (sim_instance_needed == SIM_INSTANCE_NEEDED
      && (sim_data == NULL || sim_data->gdbsim_desc == NULL))
    {
      struct inferior *idup;
      sim_desc = sim_open (SIM_OPEN_DEBUG, &gdb_callback, exec_bfd, sim_argv);
      if (sim_desc == NULL)
	error (_("Unable to create simulator instance for inferior %d."),
	       inf->num);

      idup = iterate_over_inferiors (check_for_duplicate_sim_descriptor,
				     sim_desc);
      if (idup != NULL)
	{
	  /* We don't close the descriptor due to the fact that it's
	     shared with some other inferior.  If we were to close it,
	     that might needlessly muck up the other inferior.  Of
	     course, it's possible that the damage has already been
	     done...  Note that it *will* ultimately be closed during
	     cleanup of the other inferior.  */
	  sim_desc = NULL;
	  error (
 _("Inferior %d and inferior %d would have identical simulator state.\n"
   "(This simulator does not support the running of more than one inferior.)"),
		 inf->num, idup->num);
	}
    }

  if (sim_data == NULL)
    {
      sim_data = XCNEW(struct sim_inferior_data);
      set_inferior_data (inf, sim_inferior_data_key, sim_data);

      /* Allocate a ptid for this inferior.  */
      sim_data->remote_sim_ptid = ptid_build (next_pid, 0, next_pid);
      next_pid++;

      /* Initialize the other instance variables.  */
      sim_data->program_loaded = 0;
      sim_data->gdbsim_desc = sim_desc;
      sim_data->resume_siggnal = GDB_SIGNAL_0;
      sim_data->resume_step = 0;
    }
/* proc-service callback: write the general-purpose registers GREGSET
   into LWPID of the process PH refers to.  Always reports PS_OK.  */
ps_err_e
ps_lsetregs (gdb_ps_prochandle_t ph, lwpid_t lwpid, const prgregset_t gregset)
{
  ptid_t ptid;
  struct regcache *regcache;

  ptid = ptid_build (ptid_get_pid (ph->ptid), lwpid, 0);
  regcache = get_thread_arch_regcache (ptid, target_gdbarch ());

  supply_gregset (regcache, (const gdb_gregset_t *) gregset);
  target_store_registers (regcache, -1);

  return PS_OK;
}
/* proc-service callback: read the general-purpose registers of LWPID in
   the process PH refers to into GREGSET.  Always reports PS_OK.  */
ps_err_e
ps_lgetregs (gdb_ps_prochandle_t ph, lwpid_t lwpid, prgregset_t gregset)
{
  ptid_t ptid;
  struct regcache *regcache;

  ptid = ptid_build (ptid_get_pid (ph->ptid), lwpid, 0);
  regcache = get_thread_arch_regcache (ptid, target_gdbarch ());

  target_fetch_registers (regcache, -1);
  fill_gregset (regcache, (gdb_gregset_t *) gregset, -1);

  return PS_OK;
}
/* Module initializer: register the simulator target, the "sim" command,
   and the magic ptid used for the simulated thread.  */
void
_initialize_remote_sim (void)
{
  init_gdbsim_ops ();
  add_target (&gdbsim_ops);

  add_com ("sim", class_obscure, simulator_command,
	   _("Send a command to the simulator."));

  /* Yes, 42000 is arbitrary.  The only sense out of it, is that it
     isn't 0.  */
  remote_sim_ptid = ptid_build (42000, 0, 42000);
}
/* bfd_map_over_sections callback: for each ".reg/NNN" core section,
   register thread NNN (as an LWP of main_ptid's process) with gdb.
   REG_SECT_ARG is unused here.

   Fix: removed the unused local `td_thread_t *dummy' present in the
   original.  */
static void
nbsd_add_to_thread_list (bfd *abfd, asection *asect, PTR reg_sect_arg)
{
  int regval;

  /* Only ".reg/NNN" sections name threads.  */
  if (strncmp (bfd_section_name (abfd, asect), ".reg/", 5) != 0)
    return;

  regval = atoi (bfd_section_name (abfd, asect) + 5);

  add_thread (ptid_build (ptid_get_pid (main_ptid), regval, 0));
}
/* Return the ptid of the thread the CMA (DCE threads) library considers
   current: read the pointer to the current TCB from the inferior, read
   the TCB itself, and derive the unique thread id from it.  The TCB
   buffer is static, so this function is not reentrant.  */
static ptid_t
find_active_thread (void)
{
  static cma__t_int_tcb tcb;
  CORE_ADDR tcb_ptr;

  /* Fetch the pointer to the library's current-thread TCB ...  */
  read_memory ((CORE_ADDR) P_cma__g_current_thread, (char *) &tcb_ptr,
               sizeof tcb_ptr);
  /* ... then the TCB contents themselves.  */
  read_memory (tcb_ptr, (char *) &tcb, sizeof tcb);

  return (ptid_build (PIDGET (main_ptid), 0,
                      cma_thread_get_unique (&tcb.prolog.client_thread)));
}
/* thread-db callback: fetch register set REGSET of LWP into the raw
   buffer BUF, via the regcache carried in ARG.  Returns 0 on success or
   TD_ERR_INVAL for an unsupported register set.  */
static int
nbsd_thread_proc_getregs (void *arg, int regset, int lwp, void *buf)
{
  struct nbsd_thread_proc_arg *a = (struct nbsd_thread_proc_arg *) arg;
  struct regcache *cache = a->cache;
  struct target_ops *ops = a->ops;
  struct cleanup *old_chain;
  struct target_ops *beneath = find_target_beneath (ops);
  int ret;

  /* inferior_ptid is rewritten below; restore it on the way out.  */
  old_chain = save_inferior_ptid ();

  if (target_has_execution)
    {
      /* Fetching registers from a live process requires that
	 inferior_ptid is a LWP value rather than a thread value. */
      inferior_ptid = ptid_build (ptid_get_pid (main_ptid), lwp, 0);
      beneath->to_fetch_registers (beneath, cache, -1);
    }
  else
    {
      /* Fetching registers from a core process requires that
	 the PID value of inferior_ptid have the funky value that
	 the kernel drops rather than the real PID.  Gross.  */
      inferior_ptid = pid_to_ptid ((lwp << 16) | ptid_get_pid (main_ptid));
      beneath->to_fetch_registers (ops, cache, -1);
    }

  ret = 0;
  switch (regset)
    {
    case 0:
      fill_gregset (cache, (gregset_t *)buf, -1);
      break;
    case 1:
#ifdef HAVE_FPREGS
      fill_fpregset (cache, (fpregset_t *)buf, -1);
#endif
      break;
    default:
      /* XXX need to handle other reg sets: SSE, AltiVec, etc. */
      ret = TD_ERR_INVAL;
    }

  do_cleanups (old_chain);

  return ret;
}
/* Delete the thread (PID, TID) from gdbserver's thread list.  Quietly
   does nothing when the thread is unknown, or when it is the sole
   remaining thread (the last thread must survive its own exit).  */
static void
child_delete_thread (DWORD pid, DWORD tid)
{
  struct inferior_list_entry *entry;

  /* If the last thread is exiting, just return.  */
  if (all_threads.head == all_threads.tail)
    return;

  entry = find_inferior_id (&all_threads, ptid_build (pid, tid, 0));
  if (entry != NULL)
    delete_thread_info (entry);
}
/* Enumerate the LWPs of process PID with PT_GETNUMLWPS/PT_GETLWPLIST
   and add each one to gdb's thread list.  Called when attaching to a
   multi-threaded process; PID itself must not already be in the thread
   list.  When PT_LWP_EVENTS is available, LWPs already marked exited
   are skipped.  */
static void
fbsd_add_threads (pid_t pid)
{
  struct cleanup *cleanup;
  lwpid_t *lwps;
  int i, nlwps;

  gdb_assert (!in_thread_list (pid_to_ptid (pid)));
  nlwps = ptrace (PT_GETNUMLWPS, pid, NULL, 0);
  if (nlwps == -1)
    perror_with_name (("ptrace"));

  lwps = XCNEWVEC (lwpid_t, nlwps);
  cleanup = make_cleanup (xfree, lwps);

  /* Note: the kernel may return fewer LWPs than PT_GETNUMLWPS said.  */
  nlwps = ptrace (PT_GETLWPLIST, pid, (caddr_t) lwps, nlwps);
  if (nlwps == -1)
    perror_with_name (("ptrace"));

  for (i = 0; i < nlwps; i++)
    {
      ptid_t ptid = ptid_build (pid, lwps[i], 0);

      if (!in_thread_list (ptid))
	{
#ifdef PT_LWP_EVENTS
	  struct ptrace_lwpinfo pl;

	  /* Don't add exited threads.  Note that this is only called
	     when attaching to a multi-threaded process.  */
	  if (ptrace (PT_LWPINFO, lwps[i], (caddr_t) &pl, sizeof pl) == -1)
	    perror_with_name (("ptrace"));
	  if (pl.pl_flags & PL_FLAG_EXITED)
	    continue;
#endif
	  if (debug_fbsd_lwp)
	    fprintf_unfiltered (gdb_stdlog,
				"FLWP: adding thread for LWP %u\n",
				lwps[i]);
	  add_thread (ptid);
	}
    }
  do_cleanups (cleanup);
}
/* Register thread THREADID of the team described by TEAMDEBUGINFO: link
   a new thread_debug_info into the team's list and add the thread to
   gdb's thread DB.  Returns the (possibly pre-existing) debug info.
   Errors out when asked to add the debug nub thread or on OOM.  */
static thread_debug_info *
haiku_add_thread(team_debug_info *teamDebugInfo, thread_id threadID)
{
	struct thread_info *gdbThreadInfo;
	thread_debug_info *threadDebugInfo;

	if (threadID == teamDebugInfo->nub_thread) {
		error("haiku_thread_added(): Trying to add debug nub thread "
			"(%ld)\n", threadID);
	}

	// find the thread first
	threadDebugInfo = haiku_find_thread(teamDebugInfo, threadID);
	if (threadDebugInfo)
		return threadDebugInfo;

	// allocate a new thread debug info
	threadDebugInfo = XMALLOC(thread_debug_info);
	if (!threadDebugInfo)
		error("haiku_thread_added(): Out of memory!\n");

	// init and add it
	threadDebugInfo->thread = threadID;
	threadDebugInfo->next = teamDebugInfo->threads;
	threadDebugInfo->stopped = false;
	threadDebugInfo->last_event = NULL;
	teamDebugInfo->threads = threadDebugInfo;

	// add it to gdb's thread DB
	gdbThreadInfo = add_thread(ptid_build(teamDebugInfo->team, 0, threadID));

	// Note: In theory we could spare us the whole thread list management,
	// since gdb's thread DB is doing exactly the same. We could put our data
	// as thread_info::private. The only catch is that when the thread_info
	// is freed, xfree() is invoked on the private data directly, but there's
	// no callback invoked before that would allow us to do cleanup (e.g.
	// free last_event).

	TRACE(("haiku_add_thread(): team %ld thread %ld added: "
		"gdb thread info: %p\n", teamDebugInfo->team, threadID,
		gdbThreadInfo));

	return threadDebugInfo;
}
/* Unregister thread THREADID of the team described by TEAMDEBUGINFO:
   unlink and free its thread_debug_info (including any pending event)
   and remove the thread from gdb's thread DB.  Does nothing if the
   thread is not in the team's list.  */
static void
haiku_remove_thread(team_debug_info *teamDebugInfo, thread_id threadID)
{
	thread_debug_info **link;

	for (link = &teamDebugInfo->threads; *link != NULL;
			link = &(*link)->next) {
		thread_debug_info *entry = *link;

		if (entry->thread != threadID)
			continue;

		// unlink and free our bookkeeping for this thread
		*link = entry->next;
		if (entry->last_event)
			xfree(entry->last_event);
		xfree(entry);

		// remove it from gdb's thread DB
		delete_thread(ptid_build(teamDebugInfo->team, 0, threadID));
		return;
	}
}
/* bfd_map_over_sections callback: register one thread for each
   ".reg/TID" section of the core file.  REG_SECT_ARG is the ".reg"
   section; the thread whose section shares its file position becomes
   the current thread.  Marks the inferior's pid as fake when the core
   carries none.  */
static void
add_to_thread_list (bfd *abfd, asection *asect, void *reg_sect_arg)
{
  ptid_t ptid;
  int core_tid;
  int pid, lwpid;
  asection *reg_sect = (asection *) reg_sect_arg;
  int fake_pid_p = 0;
  struct inferior *inf;

  if (!startswith (bfd_section_name (abfd, asect), ".reg/"))
    return;

  core_tid = atoi (bfd_section_name (abfd, asect) + 5);

  /* Some cores carry no pid; substitute a well-known fake one.  */
  pid = bfd_core_file_pid (core_bfd);
  if (pid == 0)
    {
      fake_pid_p = 1;
      pid = CORELOW_PID;
    }

  lwpid = core_tid;

  inf = current_inferior ();
  if (inf->pid == 0)
    {
      inferior_appeared (inf, pid);
      inf->fake_pid_p = fake_pid_p;
    }

  ptid = ptid_build (pid, lwpid, 0);

  add_thread (ptid);

  /* Warning, Will Robinson, looking at BFD private data! */

  if (reg_sect != NULL
      && asect->filepos == reg_sect->filepos)	/* Did we find .reg? */
    inferior_ptid = ptid;			/* Yes, make it current. */
}
/* Register thread (PID, TID) with handle H and thread-local base TLB.
   Returns the existing record when the thread is already known;
   otherwise allocates one, adds it to the thread list, and gives the
   low target a chance to see the new thread.  */
static win32_thread_info *
child_add_thread (DWORD pid, DWORD tid, HANDLE h, void *tlb)
{
  ptid_t ptid = ptid_build (pid, tid, 0);
  win32_thread_info *th = thread_rec (ptid, FALSE);

  if (th != NULL)
    return th;

  th = xcalloc (1, sizeof (*th));
  th->tid = tid;
  th->h = h;
  th->thread_local_base = (CORE_ADDR) (uintptr_t) tlb;

  add_thread (ptid, th);

  if (the_low_target.thread_added != NULL)
    (*the_low_target.thread_added) (th);

  return th;
}
/* Attach to the thread described by TH_P/TI_P, recording its thread-db
   handle in the corresponding lwp_info and, when event reporting is in
   use, enabling thread events for it.  Returns 1 on success, 0 if the
   LWP could not be attached (a warning is emitted).

   Fix: ti_tid (a thread-library id, typically unsigned long) was passed
   to `%ld' with no cast — a format-specifier mismatch (undefined
   behavior per C11 7.21.6.1).  Cast to unsigned long and print with
   `%lu', matching the sibling implementation of this function.  */
static int
attach_thread (const td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
{
  struct process_info *proc = current_process ();
  int pid = pid_of (proc);
  ptid_t ptid = ptid_build (pid, ti_p->ti_lid, 0);
  struct lwp_info *lwp;
  int err;

  if (debug_threads)
    debug_printf ("Attaching to thread %lu (LWP %d)\n",
                  (unsigned long) ti_p->ti_tid, ti_p->ti_lid);
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      warning ("Could not attach to thread %lu (LWP %d): %s\n",
               (unsigned long) ti_p->ti_tid, ti_p->ti_lid,
               linux_ptrace_attach_fail_reason_string (ptid, err));
      return 0;
    }

  lwp = find_lwp_pid (ptid);
  gdb_assert (lwp != NULL);
  /* Remember the thread-db handle so later queries need not re-map it.  */
  lwp->thread_known = 1;
  lwp->th = *th_p;

  if (thread_db_use_events)
    {
      td_err_e err;
      struct thread_db *thread_db = proc->priv->thread_db;

      err = thread_db->td_thr_event_enable_p (th_p, 1);
      if (err != TD_OK)
        error ("Cannot enable thread event reporting for %d: %s",
               ti_p->ti_lid, thread_db_err_str (err));
    }

  return 1;
}
/* td_ta_map_lwp2thr/iteration callback: look up (and cache in *ARGP)
   the gdb thread_info for the thread-db handle THP.  Attaches to the
   thread if it is new; refuses to touch zombie/unknown threads.
   Returns 0 normally, TD_THR_ZOMBIE for a dead thread.  */
static int
thread_get_info_callback (const td_thrhandle_t *thp, void *argp)
{
  td_thrinfo_t ti;
  td_err_e err;
  ptid_t thread_ptid;
  struct thread_get_info_inout *inout;
  struct thread_db_info *info;

  inout = argp;
  info = inout->thread_db_info;

  err = info->td_thr_get_info_p (thp, &ti);
  if (err != TD_OK)
    error (_("thread_get_info_callback: cannot get thread info: %s"),
	   thread_db_err_str (err));

  /* Fill the cache.  */
  thread_ptid = ptid_build (info->pid, ti.ti_lid, 0);
  inout->thread_info = find_thread_ptid (thread_ptid);

  /* In the case of a zombie thread, don't continue.  We don't want to
     attach to it thinking it is a new thread.  */
  if (ti.ti_state == TD_THR_UNKNOWN || ti.ti_state == TD_THR_ZOMBIE)
    return TD_THR_ZOMBIE;

  if (inout->thread_info == NULL)
    {
      /* New thread.  Attach to it now (why wait?).  */
      if (!have_threads (thread_ptid))
	/* No threads yet: do a full scan instead of a single attach.  */
	thread_db_find_new_threads_1 (thread_ptid);
      else
	attach_thread (thread_ptid, thp, &ti);
      inout->thread_info = find_thread_ptid (thread_ptid);
      gdb_assert (inout->thread_info != NULL);
    }

  return 0;
}
/* Tally which CPU cores the tasks of process PID are running on.  For
   every numeric entry under /proc/PID/task, the core it last ran on
   (as reported by linux_common_core_of_thread) increments CORES[core].
   Returns the number of tasks counted; 0 if the directory cannot be
   read.  */
static int
get_cores_used_by_process (PID_T pid, int *cores)
{
  char taskdir[sizeof ("/proc/") + MAX_PID_T_STRLEN + sizeof ("/task") - 1];
  DIR *dir;
  struct dirent *dp;
  int task_count = 0;

  sprintf (taskdir, "/proc/%lld/task", pid);
  dir = opendir (taskdir);
  if (dir == NULL)
    return task_count;

  while ((dp = readdir (dir)) != NULL)
    {
      PID_T tid;
      int core;

      /* Skip ".", "..", and anything that cannot be a tid.  */
      if (!isdigit (dp->d_name[0])
	  || NAMELEN (dp) > MAX_PID_T_STRLEN)
	continue;

      sscanf (dp->d_name, "%lld", &tid);
      core = linux_common_core_of_thread (ptid_build ((pid_t) pid,
						      (pid_t) tid, 0));

      if (core >= 0)
	{
	  ++cores[core];
	  ++task_count;
	}
    }

  closedir (dir);

  return task_count;
}
/* Register thread (PID, TID) with handle H and thread-local base TLB.
   Returns the existing record when the thread is already known;
   otherwise allocates one, adds it to the thread list with a fresh
   register cache, and gives the low target a chance to see the new
   thread.  */
static win32_thread_info *
child_add_thread (DWORD pid, DWORD tid, HANDLE h, void *tlb)
{
  ptid_t ptid = ptid_build (pid, tid, 0);
  win32_thread_info *th = thread_rec (ptid, FALSE);

  if (th != NULL)
    return th;

  th = xcalloc (1, sizeof (*th));
  th->tid = tid;
  th->h = h;
  th->thread_local_base = (CORE_ADDR) (uintptr_t) tlb;

  add_thread (ptid, th);
  set_inferior_regcache_data ((struct thread_info *)
			      find_inferior_id (&all_threads, ptid),
			      new_register_cache ());

  if (the_low_target.thread_added != NULL)
    (*the_low_target.thread_added) (th);

  return th;
}
/* Return the ptid naming the thread a WaitForDebugEvent record EVENT
   refers to: process id in the pid field, thread id in the lwp field.  */
static ptid_t
debug_event_ptid (DEBUG_EVENT *event)
{
  DWORD pid = event->dwProcessId;
  DWORD tid = event->dwThreadId;

  return ptid_build (pid, tid, 0);
}
/* Target wait for the NTO inferior: block (via sigwaitinfo on SIGUSR1,
   the signal armed by do_attach's DCMD_PROC_EVENT) until the process
   reaches a debug stop, then translate the procfs_status into
   *OURSTATUS.  Returns the ptid of the stopped thread.  */
static ptid_t
nto_wait (ptid_t ptid,
	  struct target_waitstatus *ourstatus, int target_options)
{
  sigset_t set;
  siginfo_t info;
  procfs_status status;
  const int trace_mask = (_DEBUG_FLAG_TRACE_EXEC | _DEBUG_FLAG_TRACE_RD
			  | _DEBUG_FLAG_TRACE_WR | _DEBUG_FLAG_TRACE_MODIFY);

  TRACE ("%s\n", __func__);

  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;

  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);

  /* Spin on SIGUSR1 until the process is actually stopped.  */
  devctl (nto_inferior.ctl_fd, DCMD_PROC_STATUS, &status, sizeof (status), 0);
  while (!(status.flags & _DEBUG_FLAG_ISTOP))
    {
      sigwaitinfo (&set, &info);
      devctl (nto_inferior.ctl_fd, DCMD_PROC_STATUS, &status, sizeof (status),
	      0);
    }
  /* Pick up any threads created/destroyed while running.  */
  nto_find_new_threads (&nto_inferior);

  if (status.flags & _DEBUG_FLAG_SSTEP)
    {
      TRACE ("SSTEP\n");
      ourstatus->kind = TARGET_WAITKIND_STOPPED;
      ourstatus->value.sig = TARGET_SIGNAL_TRAP;
    }
  /* Was it a breakpoint?  */
  else if (status.flags & trace_mask)
    {
      TRACE ("STOPPED\n");
      ourstatus->kind = TARGET_WAITKIND_STOPPED;
      ourstatus->value.sig = TARGET_SIGNAL_TRAP;
    }
  else if (status.flags & _DEBUG_FLAG_ISTOP)
    {
      TRACE ("ISTOP\n");
      switch (status.why)
	{
	case _DEBUG_WHY_SIGNALLED:
	  TRACE ("  SIGNALLED\n");
	  ourstatus->kind = TARGET_WAITKIND_STOPPED;
	  ourstatus->value.sig
	    = target_signal_from_host (status.info.si_signo);
	  nto_inferior.exit_signo = ourstatus->value.sig;
	  break;
	case _DEBUG_WHY_FAULTED:
	  TRACE ("  FAULTED\n");
	  ourstatus->kind = TARGET_WAITKIND_STOPPED;
	  if (status.info.si_signo == SIGTRAP)
	    {
	      /* A trap fault is ours (breakpoint/step), not a crash.  */
	      ourstatus->value.sig = 0;
	      nto_inferior.exit_signo = 0;
	    }
	  else
	    {
	      ourstatus->value.sig
		= target_signal_from_host (status.info.si_signo);
	      nto_inferior.exit_signo = ourstatus->value.sig;
	    }
	  break;

	case _DEBUG_WHY_TERMINATED:
	  {
	    int waitval = 0;

	    TRACE ("  TERMINATED\n");
	    /* Reap the zombie; WNOHANG since it already terminated.  */
	    waitpid (ptid_get_pid (ptid), &waitval, WNOHANG);
	    if (nto_inferior.exit_signo)
	      {
		/* Abnormal death.  */
		ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
		ourstatus->value.sig = nto_inferior.exit_signo;
	      }
	    else
	      {
		/* Normal death.  */
		ourstatus->kind = TARGET_WAITKIND_EXITED;
		ourstatus->value.integer = WEXITSTATUS (waitval);
	      }
	    nto_inferior.exit_signo = 0;
	    break;
	  }

	case _DEBUG_WHY_REQUESTED:
	  TRACE ("REQUESTED\n");
	  /* We are assuming a requested stop is due to a SIGINT.  */
	  ourstatus->kind = TARGET_WAITKIND_STOPPED;
	  ourstatus->value.sig = TARGET_SIGNAL_INT;
	  nto_inferior.exit_signo = 0;
	  break;
	}
    }

  return ptid_build (status.pid, status.tid, 0);
}