/*
 * Read the value of the named symbol from the executable object in the
 * current target's virtual address space into 'buf'.
 *
 * Returns the number of bytes read, or -1 on failure.  A short read
 * (fewer than the requested 'nbytes') is converted into an error via
 * set_errbytes() so callers see a uniform failure indication.
 */
ssize_t
mdb_readsym(void *buf, size_t nbytes, const char *name)
{
	ssize_t rbytes = mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT,
	    buf, nbytes, MDB_TGT_OBJ_EXEC, name);

	/*
	 * The cast avoids an implicit signed/unsigned comparison between
	 * ssize_t and size_t; rbytes is known to be positive here.
	 */
	if (rbytes > 0 && (size_t)rbytes < nbytes)
		return (set_errbytes(rbytes, nbytes));

	return (rbytes);
}
/*
 * Complete the machine-dependent (amd64) initialization of the kernel
 * target 't', then attempt to recover an initial register set from the
 * crash dump: first from panicbuf, then from panic_reg, and finally from
 * any stored CPU context (e.g. Xen core dumps).
 */
void
kt_amd64_init(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	panic_data_t pd;
	struct regs regs;
	uintptr_t addr;

	/*
	 * Initialize the machine-dependent parts of the kernel target
	 * structure.  Once this is complete and we fill in the ops
	 * vector, the target is now fully constructed and we can use
	 * the target API itself to perform the rest of our initialization.
	 */
	kt->k_rds = mdb_amd64_kregs;
	kt->k_regs = mdb_zalloc(sizeof (mdb_tgt_gregset_t), UM_SLEEP);
	kt->k_regsize = sizeof (mdb_tgt_gregset_t);
	kt->k_dcmd_regs = kt_regs;
	kt->k_dcmd_stack = kt_stack;
	kt->k_dcmd_stackv = kt_stackv;
	kt->k_dcmd_stackr = kt_stackv;
	kt->k_dcmd_cpustack = kt_cpustack;
	kt->k_dcmd_cpuregs = kt_cpuregs;

	t->t_ops = &kt_amd64_ops;

	(void) mdb_dis_select("amd64");

	/*
	 * Lookup the symbols corresponding to subroutines in locore.s where
	 * we expect a saved regs structure to be pushed on the stack.  When
	 * performing stack tracebacks we will attempt to detect interrupt
	 * frames by comparing the %rip value to these symbols.
	 */
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmnint", &kt->k_intr_sym, NULL);
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmntrap", &kt->k_trap_sym, NULL);

	/*
	 * Don't attempt to load any thread or register information if
	 * we're examining the live operating system.
	 */
	if (kt->k_symfile != NULL &&
	    strcmp(kt->k_symfile, "/dev/ksyms") == 0)
		return;

	/*
	 * If the panicbuf symbol is present and we can consume a panicbuf
	 * header of the appropriate version from this address, then we can
	 * initialize our current register set based on its contents.
	 * Prior to the re-structuring of panicbuf, our only register data
	 * was the panic_regs label_t, into which a setjmp() was performed,
	 * or the panic_reg register pointer, which was only non-zero if
	 * the system panicked as a result of a trap calling die().
	 */
	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &pd, sizeof (pd),
	    MDB_TGT_OBJ_EXEC, "panicbuf") == sizeof (pd) &&
	    pd.pd_version == PANICBUFVERS) {

		size_t pd_size = MIN(PANICBUFSIZE, pd.pd_msgoff);
		panic_data_t *pdp = mdb_zalloc(pd_size, UM_SLEEP);
		uint_t i, n;

		(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, pdp, pd_size,
		    MDB_TGT_OBJ_EXEC, "panicbuf");

		/*
		 * The name/value pairs follow the fixed panic_data_t header;
		 * the header already embeds the first panic_nv_t, hence the
		 * subtraction before dividing by the element size.
		 */
		n = (pd_size - (sizeof (panic_data_t) -
		    sizeof (panic_nv_t))) / sizeof (panic_nv_t);

		for (i = 0; i < n; i++) {
			(void) kt_putareg(t, kt->k_tid,
			    pdp->pd_nvdata[i].pnv_name,
			    pdp->pd_nvdata[i].pnv_value);
		}

		mdb_free(pdp, pd_size);
		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &addr, sizeof (addr),
	    MDB_TGT_OBJ_EXEC, "panic_reg") == sizeof (addr) && addr != 0 &&
	    mdb_tgt_vread(t, &regs, sizeof (regs), addr) == sizeof (regs)) {
		kt_regs_to_kregs(&regs, kt->k_regs);
		return;
	}

	/*
	 * If we can't read any panic regs, then our final try is for any CPU
	 * context that may have been stored (for example, in Xen core dumps).
	 */
	if (kt_kvmregs(t, 0, kt->k_regs) == 0)
		return;

	warn("failed to read panicbuf and panic_reg -- "
	    "current register set will be unavailable\n");
}
/*
 * Fill in 'utsp' with the kernel's utsname structure, read from the
 * target's "utsname" symbol in the executable object.  Returns the
 * result of the underlying target read.
 */
int
kt_uname(mdb_tgt_t *t, struct utsname *utsp)
{
	return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
	    sizeof (*utsp), MDB_TGT_OBJ_EXEC, "utsname"));
}
/*
 * Complete the machine-dependent (amd64) initialization of the kernel
 * target 't', then attempt to recover an initial register set for the
 * panicked thread: first from panicbuf, falling back to panic_reg.
 */
void
kt_amd64_init(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	panic_data_t pd;
	kreg_t *kregs;
	struct regs regs;
	uintptr_t addr;

	/*
	 * Initialize the machine-dependent parts of the kernel target
	 * structure.  Once this is complete and we fill in the ops
	 * vector, the target is now fully constructed and we can use
	 * the target API itself to perform the rest of our initialization.
	 */
	kt->k_rds = mdb_amd64_kregs;
	kt->k_regs = mdb_zalloc(sizeof (mdb_tgt_gregset_t), UM_SLEEP);
	kt->k_regsize = sizeof (mdb_tgt_gregset_t);
	kt->k_dcmd_regs = kt_regs;
	kt->k_dcmd_stack = kt_stack;
	kt->k_dcmd_stackv = kt_stackv;
	kt->k_dcmd_stackr = kt_stackv;

	t->t_ops = &kt_amd64_ops;
	kregs = kt->k_regs->kregs;

	(void) mdb_dis_select("amd64");

	/*
	 * Lookup the symbols corresponding to subroutines in locore.s where
	 * we expect a saved regs structure to be pushed on the stack.  When
	 * performing stack tracebacks we will attempt to detect interrupt
	 * frames by comparing the %rip value to these symbols.
	 */
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmnint", &kt->k_intr_sym, NULL);
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmntrap", &kt->k_trap_sym, NULL);

	/*
	 * Don't attempt to load any thread or register information if
	 * we're examining the live operating system.  The NULL check
	 * guards against a target with no symbol file set.
	 */
	if (kt->k_symfile != NULL &&
	    strcmp(kt->k_symfile, "/dev/ksyms") == 0)
		return;

	/*
	 * If the panicbuf symbol is present and we can consume a panicbuf
	 * header of the appropriate version from this address, then we can
	 * initialize our current register set based on its contents.
	 * Prior to the re-structuring of panicbuf, our only register data
	 * was the panic_regs label_t, into which a setjmp() was performed,
	 * or the panic_reg register pointer, which was only non-zero if
	 * the system panicked as a result of a trap calling die().
	 */
	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &pd, sizeof (pd),
	    MDB_TGT_OBJ_EXEC, "panicbuf") == sizeof (pd) &&
	    pd.pd_version == PANICBUFVERS) {

		size_t pd_size = MIN(PANICBUFSIZE, pd.pd_msgoff);
		panic_data_t *pdp = mdb_zalloc(pd_size, UM_SLEEP);
		uint_t i, n;

		(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, pdp, pd_size,
		    MDB_TGT_OBJ_EXEC, "panicbuf");

		/*
		 * The name/value pairs follow the fixed panic_data_t header;
		 * the header already embeds the first panic_nv_t, hence the
		 * subtraction before dividing by the element size.
		 */
		n = (pd_size - (sizeof (panic_data_t) -
		    sizeof (panic_nv_t))) / sizeof (panic_nv_t);

		for (i = 0; i < n; i++) {
			(void) kt_putareg(t, kt->k_tid,
			    pdp->pd_nvdata[i].pnv_name,
			    pdp->pd_nvdata[i].pnv_value);
		}

		mdb_free(pdp, pd_size);

	} else if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &addr, sizeof (addr),
	    MDB_TGT_OBJ_EXEC, "panic_reg") == sizeof (addr) && addr != 0 &&
	    mdb_tgt_vread(t, &regs, sizeof (regs), addr) == sizeof (regs)) {

		/* Copy the saved struct regs into our kreg_t array. */
		kregs[KREG_SAVFP] = regs.r_savfp;
		kregs[KREG_SAVPC] = regs.r_savpc;
		kregs[KREG_RDI] = regs.r_rdi;
		kregs[KREG_RSI] = regs.r_rsi;
		kregs[KREG_RDX] = regs.r_rdx;
		kregs[KREG_RCX] = regs.r_rcx;
		kregs[KREG_R8] = regs.r_r8;
		kregs[KREG_R9] = regs.r_r9;
		kregs[KREG_RAX] = regs.r_rax;
		kregs[KREG_RBX] = regs.r_rbx;
		kregs[KREG_RBP] = regs.r_rbp;
		kregs[KREG_R10] = regs.r_r10;
		kregs[KREG_R11] = regs.r_r11;
		kregs[KREG_R12] = regs.r_r12;
		kregs[KREG_R13] = regs.r_r13;
		kregs[KREG_R14] = regs.r_r14;
		kregs[KREG_R15] = regs.r_r15;
		kregs[KREG_FSBASE] = regs.r_fsbase;
		kregs[KREG_GSBASE] = regs.r_gsbase;
		kregs[KREG_DS] = regs.r_ds;
		kregs[KREG_ES] = regs.r_es;
		kregs[KREG_FS] = regs.r_fs;
		kregs[KREG_GS] = regs.r_gs;
		kregs[KREG_TRAPNO] = regs.r_trapno;
		kregs[KREG_ERR] = regs.r_err;
		kregs[KREG_RIP] = regs.r_rip;
		kregs[KREG_CS] = regs.r_cs;
		kregs[KREG_RFLAGS] = regs.r_rfl;
		kregs[KREG_RSP] = regs.r_rsp;
		kregs[KREG_SS] = regs.r_ss;

	} else {
		warn("failed to read panicbuf and panic_reg -- "
		    "current register set will be unavailable\n");
	}
}
/*
 * Walk the kernel's circular list of modctl structures (anchored at the
 * "modules" symbol) and register each loaded module with the kernel
 * target: read its krtld 'struct module', locate its .symtab/.strtab
 * section headers and symbol space, and append a kt_module_t describing
 * it to kt->k_modlist in load-dependency order.  Modules with missing
 * or corrupt metadata are skipped with a warning.
 */
static void
kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
{
	char name[MAXNAMELEN];
	uintptr_t addr, head;

	struct module kmod;
	struct modctl ctl;
	Shdr symhdr, strhdr;
	GElf_Sym sym;

	kt_module_t *km;

	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "modules", &sym, NULL) == -1) {
		warn("failed to get 'modules' symbol");
		return;
	}

	/* Sanity check: the list head must be readable before we walk it. */
	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
		warn("failed to read 'modules' struct");
		return;
	}

	addr = head = (uintptr_t)sym.st_value;

	/*
	 * Circular-list walk: each 'continue' below falls through to the
	 * while condition, which advances 'addr' to ctl.mod_next and stops
	 * when we come back around to the list head.
	 */
	do {
		if (addr == NULL)
			break; /* Avoid spurious NULL pointers in list */

		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
			warn("failed to read modctl at %p", (void *)addr);
			return;
		}

		if (ctl.mod_mp == NULL)
			continue; /* No associated krtld structure */

		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
		    (uintptr_t)ctl.mod_modname) <= 0) {
			warn("failed to read module name at %p",
			    (void *)ctl.mod_modname);
			continue;
		}

		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
		    name, (void *)addr);

		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
			warn("skipping duplicate module '%s', id=%d\n",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
		    (uintptr_t)ctl.mod_mp) == -1) {
			warn("failed to read module at %p\n",
			    (void *)ctl.mod_mp);
			continue;
		}

		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
		    kmod.strhdr == NULL) {
			/*
			 * If no buffer for the symbols has been allocated,
			 * or the shdrs for .symtab and .strtab are missing,
			 * then we're out of luck.
			 */
			continue;
		}

		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
		    (uintptr_t)kmod.symhdr) == -1) {
			warn("failed to read .symtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
		    (uintptr_t)kmod.strhdr) == -1) {
			warn("failed to read .strtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		/*
		 * Now get clever: f(*^ing krtld didn't used to bother
		 * updating its own kmod.symsize value.  We know that prior
		 * to this bug being fixed, symspace was a contiguous buffer
		 * containing .symtab, .strtab, and the symbol hash table in
		 * that order.  So if symsize is zero, recompute it as the
		 * size of .symtab plus the size of .strtab.  We don't need
		 * to load the hash table anyway since we re-hash all the
		 * symbols internally.
		 */
		if (kmod.symsize == 0)
			kmod.symsize = symhdr.sh_size + strhdr.sh_size;

		/*
		 * Similar logic can be used to make educated guesses
		 * at the values of kmod.symtbl and kmod.strings.
		 */
		if (kmod.symtbl == NULL)
			kmod.symtbl = kmod.symspace;
		if (kmod.strings == NULL)
			kmod.strings = kmod.symspace + symhdr.sh_size;

		/*
		 * Make sure things seem reasonable before we proceed
		 * to actually read and decipher the symspace.
		 */
		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
			warn("skipping module '%s', id=%d (corrupt symspace)\n",
			    name, ctl.mod_id);
			continue;
		}

		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
		km->km_name = strdup(name);

		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
		    (uintptr_t)km, MDB_NV_EXTNAME);

		/* Record where the module's symbol data lives in the dump. */
		km->km_datasz = kmod.symsize;
		km->km_symspace_va = (uintptr_t)kmod.symspace;
		km->km_symtab_va = (uintptr_t)kmod.symtbl;
		km->km_strtab_va = (uintptr_t)kmod.strings;
		km->km_symtab_hdr = symhdr;
		km->km_strtab_hdr = strhdr;
		km->km_text_va = (uintptr_t)kmod.text;
		km->km_text_size = kmod.text_size;
		km->km_data_va = (uintptr_t)kmod.data;
		km->km_data_size = kmod.data_size;
		km->km_bss_va = (uintptr_t)kmod.bss;
		km->km_bss_size = kmod.bss_size;

		if (kt->k_ctfvalid) {
			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
			km->km_ctf_size = kmod.ctfsize;
		}

		/*
		 * Add the module to the end of the list of modules in load-
		 * dependency order.  This is needed to load the corresponding
		 * debugger modules in the same order for layering purposes.
		 */
		mdb_list_append(&kt->k_modlist, km);

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_printf(mdb.m_out, " %s", name);
			mdb_iob_flush(mdb.m_out);
			kt_load_module(kt, t, km);
		}

	} while ((addr = (uintptr_t)ctl.mod_next) != head);
}