/* Throw away THREAD_P's register cache and install a brand-new, empty
   one in its place.  Callback shape matches for_each_inferior, so this
   can be applied across the whole thread list.  */

static void
realloc_register_cache (struct inferior_list_entry *thread_p)
{
  struct thread_info *thr = (struct thread_info *) thread_p;
  struct regcache *old_cache
    = (struct regcache *) inferior_regcache_data (thr);

  /* Release the stale cache first, then attach a fresh one.  */
  free_register_cache (old_cache);
  set_inferior_regcache_data (thr, new_register_cache ());
}
static void regcache_realloc_one (struct inferior_list_entry *entry) { struct thread_info *thread = (struct thread_info *) entry; struct inferior_regcache_data *regcache; regcache = (struct inferior_regcache_data *) inferior_regcache_data (thread); if (regcache) { free_register_cache (regcache); set_inferior_regcache_data (thread, new_register_cache ()); } }
/* Create a new thread record for THREAD_ID, register it on the global
   thread list, and give it a fresh register cache.  TARGET_DATA is the
   backend's private per-thread state; GDB_ID is the id the remote
   protocol uses for this thread.  The first thread added becomes
   current_inferior.  */

void
add_thread (unsigned long thread_id, void *target_data, unsigned int gdb_id)
{
  /* Use xmalloc rather than bare malloc: the original code never
     checked the allocation, so an OOM would have crashed on the memset
     below.  xmalloc aborts cleanly instead.  */
  struct thread_info *new_thread
    = (struct thread_info *) xmalloc (sizeof (*new_thread));

  memset (new_thread, 0, sizeof (*new_thread));

  new_thread->entry.id = thread_id;

  add_inferior_to_list (&all_threads, &new_thread->entry);

  /* The first thread we hear about becomes the current one.  */
  if (current_inferior == NULL)
    current_inferior = new_thread;

  new_thread->target_data = target_data;
  set_inferior_regcache_data (new_thread, new_register_cache ());
  new_thread->gdb_id = gdb_id;
}
/* Allocate and register a thread record for THREAD_ID, attaching
   TARGET_DATA as the backend's private state.  The thread starts with
   an empty register cache, a "continue" resume disposition, and an
   ignored wait status.  The first thread added becomes
   current_inferior.  */

void
add_thread (ptid_t thread_id, void *target_data)
{
  struct thread_info *thread = xmalloc (sizeof (*thread));

  memset (thread, 0, sizeof (*thread));

  thread->entry.id = thread_id;
  thread->last_resume_kind = resume_continue;
  thread->last_status.kind = TARGET_WAITKIND_IGNORE;

  add_inferior_to_list (&all_threads, &thread->entry);

  /* Adopt this thread as current if none has been selected yet.  */
  if (current_inferior == NULL)
    current_inferior = thread;

  thread->target_data = target_data;
  set_inferior_regcache_data (thread, new_register_cache ());
}
struct regcache * get_thread_regcache (struct thread_info *thread, int fetch) { struct regcache *regcache; regcache = (struct regcache *) inferior_regcache_data (thread); /* Threads' regcaches are created lazily, because biarch targets add the main thread/lwp before seeing it stop for the first time, and it is only after the target sees the thread stop for the first time that the target has a chance of determining the process's architecture. IOW, when we first add the process's main thread we don't know which architecture/tdesc its regcache should have. */ if (regcache == NULL) { struct process_info *proc = get_thread_process (thread); gdb_assert (proc->tdesc != NULL); regcache = new_register_cache (proc->tdesc); set_inferior_regcache_data (thread, regcache); } if (fetch && regcache->registers_valid == 0) { struct thread_info *saved_thread = current_thread; current_thread = thread; /* Invalidate all registers, to prevent stale left-overs. */ memset (regcache->register_status, REG_UNAVAILABLE, regcache->tdesc->num_registers); fetch_inferior_registers (regcache, -1); current_thread = saved_thread; regcache->registers_valid = 1; } return regcache; }
/* Add a thread to the thread list.  TID is the Windows thread id, H an
   open handle to the thread.  Returns the (possibly pre-existing)
   thread record.  If hardware debug registers are in use, they are
   copied into the new thread's context so watchpoints stay armed.  */
static thread_info *
child_add_thread (DWORD tid, HANDLE h)
{
  thread_info *th;

  if ((th = thread_rec (tid, FALSE)))
    return th;

  /* xcalloc: zero-initialized and aborts on OOM.  The original used a
     bare malloc whose result was dereferenced without a NULL check.  */
  th = (thread_info *) xcalloc (1, sizeof (*th));
  th->tid = tid;
  th->h = h;

  add_thread (tid, th, (unsigned int) tid);
  set_inferior_regcache_data ((struct thread_info *)
			      find_inferior_id (&all_threads, tid),
			      new_register_cache ());

  /* Set the debug registers for the new thread if they are used.  */
  if (debug_registers_used)
    {
      /* Only change the value of the debug registers.  */
      th->context.ContextFlags = CONTEXT_DEBUGGER_DR;
      GetThreadContext (th->h, &th->context);
      th->context.Dr0 = dr[0];
      th->context.Dr1 = dr[1];
      th->context.Dr2 = dr[2];
      th->context.Dr3 = dr[3];
      /* th->context.Dr6 = dr[6];
	 FIXME: should we set dr6 also ??  */
      th->context.Dr7 = dr[7];
      SetThreadContext (th->h, &th->context);
      th->context.ContextFlags = 0;
    }

  return th;
}
/* Add a thread to the thread list.  TID is the Windows thread id, H an
   open handle to the thread.  Returns the (possibly pre-existing)
   thread record and notifies the low target so it can do per-thread
   setup.  */
static win32_thread_info *
child_add_thread (DWORD tid, HANDLE h)
{
  win32_thread_info *th;

  if ((th = thread_rec (tid, FALSE)))
    return th;

  /* xcalloc aborts on OOM; the original's bare calloc result was
     dereferenced without a NULL check.  (The later ptid_t revision of
     this function already uses xcalloc.)  */
  th = xcalloc (1, sizeof (*th));
  th->tid = tid;
  th->h = h;

  add_thread (tid, th, (unsigned int) tid);
  set_inferior_regcache_data ((struct thread_info *)
			      find_inferior_id (&all_threads, tid),
			      new_register_cache ());

  if (the_low_target.thread_added != NULL)
    (*the_low_target.thread_added) (th);

  return th;
}
struct regcache * get_thread_regcache (struct thread_info *thread, int fetch) { struct regcache *regcache; regcache = (struct regcache *) inferior_regcache_data (thread); /* Threads' regcaches are created lazily, because biarch targets add the main thread/lwp before seeing it stop for the first time, and it is only after the target sees the thread stop for the first time that the target has a chance of determining the process's architecture. IOW, when we first add the process's main thread we don't know which architecture/tdesc its regcache should have. */ if (regcache == NULL) { struct process_info *proc = get_thread_process (thread); if (proc->tdesc == NULL) fatal ("no target description"); regcache = new_register_cache (proc->tdesc); set_inferior_regcache_data (thread, regcache); } if (fetch && regcache->registers_valid == 0) { struct thread_info *saved_inferior = current_inferior; current_inferior = thread; fetch_inferior_registers (regcache, -1); current_inferior = saved_inferior; regcache->registers_valid = 1; } return regcache; }
/* Add a thread to the thread list.  PID/TID identify the Windows
   process and thread, H is an open handle to the thread, and TLB its
   thread-local storage base.  Returns the (possibly pre-existing)
   record and gives the low target a chance to do per-thread setup.  */
static win32_thread_info *
child_add_thread (DWORD pid, DWORD tid, HANDLE h, void *tlb)
{
  ptid_t ptid = ptid_build (pid, tid, 0);
  win32_thread_info *th = thread_rec (ptid, FALSE);

  /* Already known — nothing to do.  */
  if (th != NULL)
    return th;

  th = xcalloc (1, sizeof (*th));
  th->tid = tid;
  th->h = h;
  th->thread_local_base = (CORE_ADDR) (uintptr_t) tlb;

  add_thread (ptid, th);
  set_inferior_regcache_data ((struct thread_info *)
			      find_inferior_id (&all_threads, ptid),
			      new_register_cache ());

  if (the_low_target.thread_added != NULL)
    (*the_low_target.thread_added) (th);

  return th;
}
static void s390_arch_setup (void) { const struct target_desc *tdesc; struct regset_info *regset; /* Check whether the kernel supports extra register sets. */ int pid = pid_of (get_thread_lwp (current_inferior)); int have_regset_last_break = s390_check_regset (pid, NT_S390_LAST_BREAK, 8); int have_regset_system_call = s390_check_regset (pid, NT_S390_SYSTEM_CALL, 4); int have_regset_tdb = s390_check_regset (pid, NT_S390_TDB, 256); /* Update target_regsets according to available register sets. */ for (regset = s390_regsets; regset->fill_function != NULL; regset++) if (regset->get_request == PTRACE_GETREGSET) switch (regset->nt_type) { case NT_S390_LAST_BREAK: regset->size = have_regset_last_break? 8 : 0; break; case NT_S390_SYSTEM_CALL: regset->size = have_regset_system_call? 4 : 0; break; case NT_S390_TDB: regset->size = have_regset_tdb ? 256 : 0; default: break; } /* Assume 31-bit inferior process. */ if (have_regset_system_call) tdesc = tdesc_s390_linux32v2; else if (have_regset_last_break) tdesc = tdesc_s390_linux32v1; else tdesc = tdesc_s390_linux32; /* On a 64-bit host, check the low bit of the (31-bit) PSWM -- if this is one, we actually have a 64-bit inferior. */ #ifdef __s390x__ { unsigned int pswm; struct regcache *regcache = new_register_cache (tdesc); fetch_inferior_registers (regcache, find_regno (tdesc, "pswm")); collect_register_by_name (regcache, "pswm", &pswm); free_register_cache (regcache); if (pswm & 1) { if (have_regset_tdb) tdesc = tdesc_s390x_te_linux64; if (have_regset_system_call) tdesc = tdesc_s390x_linux64v2; else if (have_regset_last_break) tdesc = tdesc_s390x_linux64v1; else tdesc = tdesc_s390x_linux64; } /* For a 31-bit inferior, check whether the kernel supports using the full 64-bit GPRs. 
*/ else if (s390_get_hwcap (tdesc) & HWCAP_S390_HIGH_GPRS) { have_hwcap_s390_high_gprs = 1; if (have_regset_tdb) tdesc = tdesc_s390_te_linux64; else if (have_regset_system_call) tdesc = tdesc_s390_linux64v2; else if (have_regset_last_break) tdesc = tdesc_s390_linux64v1; else tdesc = tdesc_s390_linux64; } } #endif current_process ()->tdesc = tdesc; }