/*
 * Module entry point: build this dmod's dcmd and walker tables by counting
 * the common tables (and, in a _KERNEL build, the kernel-only tables),
 * allocating zero-filled arrays with one extra all-NULL sentinel entry,
 * and copying the pieces into place.
 */
const mdb_modinfo_t *
_mdb_init(void)
{
	uint_t d = 0, kd = 0, w = 0, kw = 0;
	const mdb_walker_t *wp;
	const mdb_dcmd_t *dp;

	for (dp = common_dcmds; dp->dc_name != NULL; dp++)
		d++;		/* count common dcmds */

	for (wp = common_walkers; wp->walk_name != NULL; wp++)
		w++;		/* count common walkers */

#ifdef _KERNEL
	for (dp = kernel_dcmds; dp->dc_name != NULL; dp++)
		kd++;		/* count kernel dcmds */

	for (wp = kernel_walkers; wp->walk_name != NULL; wp++)
		kw++;		/* count kernel walkers */
#endif

	/*
	 * The "+ 1" reserves the zeroed terminator entry that marks the end
	 * of each table (mdb_zalloc returns zero-filled memory).
	 */
	modinfo.mi_dcmds = mdb_zalloc(sizeof (*dp) * (d + kd + 1), UM_SLEEP);
	modinfo.mi_walkers = mdb_zalloc(sizeof (*wp) * (w + kw + 1), UM_SLEEP);

	bcopy(common_dcmds, (void *)modinfo.mi_dcmds, sizeof (*dp) * d);
	bcopy(common_walkers, (void *)modinfo.mi_walkers, sizeof (*wp) * w);

#ifdef _KERNEL
	/* Kernel-only entries are appended after the common ones. */
	bcopy(kernel_dcmds, (void *)
	    (modinfo.mi_dcmds + d), sizeof (*dp) * kd);
	bcopy(kernel_walkers, (void *)
	    (modinfo.mi_walkers + w), sizeof (*wp) * kw);
#endif

	return (&modinfo);
}
/*
 * Common initialization for hash-table walkers: snapshot the target's
 * bucket array of "hashlen" pointers at "addr", allocate a scratch buffer
 * of "size" bytes for reading entries, and prime the walk at the head of
 * the first bucket's chain.  "next" is the offset of the chain link within
 * an entry; "name" is used by the step function for diagnostics.
 */
static int
hash_walk_init(mdb_walk_state_t *wsp, uintptr_t addr, uint_t hashlen,
    const char *name, size_t size, size_t next)
{
	size_t nbytes = sizeof (uintptr_t) * hashlen;
	hashwalk_data_t *hwp;

	if (nbytes == 0) {
		mdb_warn("failed to walk hash: invalid hash length\n");
		return (WALK_ERR);
	}

	hwp = mdb_alloc(sizeof (hashwalk_data_t), UM_SLEEP);
	hwp->hw_hash = mdb_zalloc(nbytes, UM_SLEEP);

	/* Best-effort read; a failure leaves the zero-filled bucket array. */
	(void) mdb_vread(hwp->hw_hash, nbytes, addr);

	hwp->hw_hashlen = hashlen;
	hwp->hw_hashidx = 0;
	hwp->hw_name = name;
	hwp->hw_data = mdb_zalloc(size, UM_SLEEP);
	hwp->hw_size = size;
	hwp->hw_next = next;

	wsp->walk_addr = hwp->hw_hash[0];
	wsp->walk_data = hwp;

	return (WALK_NEXT);
}
mdb_io_t * kmdb_promio_create(char *name) { mdb_io_t *io; pio_data_t *pdp; ihandle_t hdl = kmdb_prom_get_handle(name); if (hdl == -1) return (NULL); io = mdb_zalloc(sizeof (mdb_io_t), UM_SLEEP); pdp = mdb_zalloc(sizeof (pio_data_t), UM_SLEEP); (void) strlcpy(pdp->pio_name, name, MAXPATHLEN); pdp->pio_fd = hdl; #ifdef __sparc pdp->pio_ti.c_oflag |= ONLCR; pdp->pio_ti.c_iflag |= ICRNL; #endif pdp->pio_ti.c_lflag |= ECHO; io->io_data = pdp; io->io_ops = &promio_ops; return (io); }
/* allocate memory, initialize the hash table and the LRU queue */
static void
vn_htable_init(vn_htable_t *hp, size_t vn_size)
{
	int i;
	int htable_size = MAX(vn_size, VN_LARGE);

	/*
	 * Try a large table without sleeping first; if memory is tight,
	 * fall back to a small table with a sleeping allocation.  Both
	 * allocations are garbage-collected (UM_GC).
	 */
	if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
	    * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
		htable_size = VN_SMALL;
		hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
		    * htable_size, UM_SLEEP|UM_GC);
	}

	hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
	    * htable_size, UM_SLEEP|UM_GC);

	/*
	 * Thread every buffer entry onto a doubly-linked LRU queue:
	 * endpoints first, then the interior entries.  NOTE(review): this
	 * assumes htable_size >= 2, i.e. VN_SMALL and VN_LARGE are both at
	 * least 2 — confirm against their definitions.
	 */
	hp->vn_q_first = &hp->vn_htable_buf[0];
	hp->vn_q_last = &hp->vn_htable_buf[htable_size - 1];
	hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
	hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];

	for (i = 1; i < (htable_size-1); i++) {
		hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
		hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
	}

	hp->vn_htable_size = htable_size;
	hp->vn_htable_buckets = htable_size;
}
/*
 * Sends a request to the driver to load the module. If/when the load has
 * completed successfully, kmdb_module_loaded is called.
 */
int
mdb_module_load(const char *fname, int mode)
{
	const char *modname = strbasename(fname);
	kmdb_wr_load_t *dlr;
	kmdb_modctl_t *kmc = NULL;
	const char *wformat = NULL;
	mdb_var_t *v;

	/* Reject illegal module names; wformat is set on failure. */
	if (!mdb_module_validate_name(modname, &wformat))
		goto module_load_err;

	/* Fail if a load or unload of this module is already in flight. */
	if ((v = mdb_nv_lookup(&mdb.m_dmodctl, modname)) != NULL) {
		kmc = MDB_NV_COOKIE(v);

		if (kmc->kmc_state == KMDB_MC_STATE_LOADING)
			wformat = "module %s is already being loaded\n";
		else
			wformat = "module %s is being unloaded\n";

		goto module_load_err;
	}

	/* Register a new modctl in the LOADING state. */
	kmc = mdb_zalloc(sizeof (kmdb_modctl_t), UM_SLEEP);
	kmc->kmc_loadmode = mode;
	kmc->kmc_modname = strdup(modname);
	kmc->kmc_state = KMDB_MC_STATE_LOADING;

	if (mdb_nv_insert(&mdb.m_dmodctl, modname, NULL,
	    (uintptr_t)kmc, 0) == NULL) {
		wformat = "module %s can't be registered for load\n";
		kmc_free(kmc);
		goto module_load_err;
	}

	/* Queue the load request for the driver. */
	dlr = mdb_zalloc(sizeof (kmdb_wr_load_t), UM_SLEEP);
	dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
	dlr->dlr_fname = strdup(fname);

	kmdb_wr_driver_notify(dlr);

	/*
	 * Unless the caller deferred completion, resume the target so the
	 * driver can process the request immediately.
	 */
	if (!(mode & MDB_MOD_DEFER) &&
	    mdb_tgt_continue(mdb.m_target, NULL) == 0)
		return (0);

	if (!(mode & MDB_MOD_SILENT))
		mdb_printf("%s load pending (:c to complete)\n", modname);

	return (0);

module_load_err:
	if (!(mode & MDB_MOD_SILENT))
		warn(wformat, modname);

	return (-1);
}
unsigned char *read_section_names(int fd, Elf32_Ehdr *h) { int offset; Elf32_Shdr section; unsigned char *buffer= NULL; offset = h->e_shoff + h->e_shentsize * h->e_shstrndx; lseek(fd, offset, SEEK_SET); read(fd, §ion, sizeof(Elf32_Shdr)); buffer = (unsigned char *)mdb_zalloc(section.sh_size, UM_SLEEP); if (buffer == NULL) { printf("Unable to allocate memory for data size=%d\n", section.sh_size); return NULL; } lseek(fd, section.sh_offset, SEEK_SET); /* printf("address of buffer is =%p reading string table of size = %d\n", buffer, section.sh_size);a */ if (read(fd, buffer, section.sh_size) != section.sh_size) { printf("Error reading string table\n"); free(buffer); return NULL; } return buffer; }
/*
 * Given a V8 ScopeInfo in "addr", load it into a newly-allocated
 * v8scopeinfo_t.  This will validate basic properties of the ScopeInfo.
 * "memflags" are used for memory allocation.
 *
 * Returns the scope info on success and NULL on failure.  On failure, the
 * scope info must not be used for anything.  (NOTE(review): the previous
 * comment described an out-parameter "sip" and a 0/-1 return, which did
 * not match this pointer-returning signature.)
 */
v8scopeinfo_t *
v8scopeinfo_load(uintptr_t addr, int memflags)
{
	v8scopeinfo_t *sip;

	if ((sip = mdb_zalloc(sizeof (*sip), memflags)) == NULL) {
		return (NULL);
	}

	/* Pull in the underlying FixedArray holding the ScopeInfo fields. */
	sip->v8si_addr = addr;
	if (read_heap_array(addr, &sip->v8si_elts, &sip->v8si_nelts,
	    memflags) != 0) {
		goto err;
	}

	/* The fixed header fields must be present ... */
	if (sip->v8si_nelts < V8_SCOPEINFO_IDX_FIRST_VARS) {
		v8_warn("array too short to be a ScopeInfo\n");
		goto err;
	}

	/* ... and the three counts must all be SMIs. */
	if (!V8_IS_SMI(sip->v8si_elts[V8_SCOPEINFO_IDX_NPARAMS]) ||
	    !V8_IS_SMI(sip->v8si_elts[V8_SCOPEINFO_IDX_NSTACKLOCALS]) ||
	    !V8_IS_SMI(sip->v8si_elts[V8_SCOPEINFO_IDX_NCONTEXTLOCALS])) {
		v8_warn("static ScopeInfo fields do not look like SMIs\n");
		goto err;
	}

	return (sip);

err:
	maybefree(sip, sizeof (*sip), memflags);
	return (NULL);
}
/*
 * Given a V8 Context in "addr", load it into a newly-allocated
 * v8context_t.  This will validate basic properties of the context.
 * "memflags" are used for memory allocation.  Returns a context on
 * success and NULL on failure.
 */
v8context_t *
v8context_load(uintptr_t addr, int memflags)
{
	v8context_t *ctxp;

	ctxp = mdb_zalloc(sizeof (*ctxp), memflags);
	if (ctxp == NULL)
		return (NULL);

	ctxp->v8ctx_addr = addr;

	/* Read the backing array and check it has the common slots. */
	if (read_heap_array(addr, &ctxp->v8ctx_elts, &ctxp->v8ctx_nelts,
	    memflags) != 0)
		goto fail;

	if (ctxp->v8ctx_nelts < V8_CONTEXT_NCOMMON) {
		v8_warn("%p: context array is too short\n", addr);
		goto fail;
	}

	return (ctxp);

fail:
	maybefree(ctxp, sizeof (*ctxp), memflags);
	return (NULL);
}
/*
 * Initialize the sv_dev walker: record the address bounds of the target's
 * sv_devs array so the step function can iterate from "start" to "end".
 */
static int
sv_dev_winit(mdb_walk_state_t *wsp)
{
	struct sv_dev_winfo *winfo;
	sv_dev_t *devs;
	int ndevs;

	winfo = mdb_zalloc(sizeof (struct sv_dev_winfo), UM_SLEEP);

	if (mdb_readvar(&devs, "sv_devs") == -1) {
		mdb_warn("failed to read 'sv_devs'");
		mdb_free(winfo, sizeof (struct sv_dev_winfo));
		return (WALK_ERR);
	}

	if (mdb_readvar(&ndevs, "sv_max_devices") == -1) {
		mdb_warn("failed to read 'sv_max_devices'");
		mdb_free(winfo, sizeof (struct sv_dev_winfo));
		return (WALK_ERR);
	}

	winfo->start = (uintptr_t)devs;
	winfo->end = (uintptr_t)(devs + ndevs);

	/* A global walk begins at the start of the array. */
	if (wsp->walk_addr == NULL)
		wsp->walk_addr = winfo->start;

	wsp->walk_data = winfo;
	return (WALK_NEXT);
}
/*
 * Decode and print the scsi_devid_desc_t at target address "addr", whose
 * total size is "len" bytes.  "spacer" is an indentation prefix for each
 * output line.
 */
void
print_scsi_devid_desc(uintptr_t addr, uint16_t len, char *spacer)
{
	scsi_devid_desc_t *id;

	if (len < sizeof (*id)) {
		mdb_warn("%sError: Devid Size = %d < sizeof(scsi_devid_desc_t)"
		    "\n", spacer, len);
		return;
	}

	/*
	 * Fix: allocate one byte beyond len so the NUL terminator stored at
	 * ident[ident_length] below can never land past the end of the
	 * buffer.  With exactly len bytes, the store overflowed by one when
	 * len == sizeof (*id) + ident_length - 1 (the smallest len the
	 * length check below accepts).
	 */
	id = mdb_zalloc(len + 1, UM_SLEEP);
	if (mdb_vread(id, len, addr) == -1) {
		mdb_warn("failed to read scsi_devid_desc at %p\n", addr);
		mdb_free(id, len + 1);
		return;
	}

	mdb_printf("%sTotal length:\t%d\n", spacer, len);
	mdb_printf("%sProtocol:\t%d => %-16s\n", spacer, id->protocol_id,
	    (id->protocol_id < ARRAY_SIZE(stmf_protocol_str)) ?
	    stmf_protocol_str[id->protocol_id] : "");
	mdb_printf("%sCode Set:\t%d\n", spacer, id->code_set);
	mdb_printf("%sIdent Length:\t%d\n", spacer, id->ident_length);

	/* Only print the ident if the full ident data fits within len. */
	if (len < sizeof (*id) + id->ident_length - 1) {
		mdb_printf("%s(Can not recognize ident data)\n", spacer);
	} else {
		id->ident[id->ident_length] = '\0';
		mdb_printf("%sIdent:\t\t%s\n", spacer, id->ident);
	}

	mdb_free(id, len + 1);
	mdb_printf("\n");
}
/*
 * Allocate a new value control block bound to variable "v".
 */
mdb_vcb_t *
mdb_vcb_create(mdb_var_t *v)
{
	mdb_vcb_t *new_vcb;

	new_vcb = mdb_zalloc(sizeof (mdb_vcb_t), UM_SLEEP);
	new_vcb->vc_var = v;

	return (new_vcb);
}
/*
 * Decipher and print transport id which is pointed by addr variable.
 * "tpd_len" is the total size in bytes; "spacer" prefixes each output
 * line.  Returns DCMD_OK, or DCMD_ERR on a size or read failure.
 */
static int
print_transport_id(uintptr_t addr, uint16_t tpd_len, char *spacer)
{
	scsi_transport_id_t *tpd;

	if (tpd_len < sizeof (*tpd)) {
		mdb_warn("%sError: Transport ID Size = %d < "
		    "sizeof (scsi_transport_id_t)\n", spacer, tpd_len);
		return (DCMD_ERR);
	}

	tpd = mdb_zalloc(tpd_len, UM_SLEEP);
	if (mdb_vread(tpd, tpd_len, addr) == -1) {
		mdb_warn("failed to read scsi_transport_id at %p\n", addr);
		mdb_free(tpd, tpd_len);
		return (DCMD_ERR);
	}

	mdb_printf("%sTotal length:\t%d\n", spacer, tpd_len);
	mdb_printf("%sProtocol:\t%d => %16s\n", spacer, tpd->protocol_id,
	    (tpd->protocol_id < ARRAY_SIZE(stmf_protocol_str)) ?
	    stmf_protocol_str[tpd->protocol_id] : "");
	mdb_printf("%sFormat Code:\t0x%x\n", spacer, tpd->format_code);

	/* Protocol-specific decoding of the identifier that follows. */
	switch (tpd->protocol_id) {
	case PROTOCOL_FIBRE_CHANNEL:
		{
			uint8_t *p =
			    ((scsi_fc_transport_id_t *)tpd)->port_name;
			mdb_printf("%sFC Port Name:\t%016llX\n", spacer,
			    nhconvert_8bytes(p));
		}
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
		break;

	case PROTOCOL_SRP:
		{
			uint8_t *p =
			    ((scsi_srp_transport_id_t *)tpd)->srp_name;
			/* Print 8 byte initiator extension and guid in order */
			mdb_printf("%sSRP Name:\t%016llX:%016llX\n", spacer,
			    nhconvert_8bytes(&p[8]), nhconvert_8bytes(&p[0]));
		}
		break;

	case PROTOCOL_iSCSI:
		mdb_printf("%sISCSI Name:\t%s\n", spacer,
		    ((iscsi_transport_id_t *)tpd)->iscsi_name);
		break;

	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default:
		/* No identifier decoding for these protocols. */
		break;
	}

	mdb_free(tpd, tpd_len);
	return (DCMD_OK);
}
/*
 * Allocate and initialize a tab-completion cookie.  Both the cookie and
 * its name/value table are garbage-collected (UM_GC).
 */
mdb_tab_cookie_t *
mdb_tab_init(void)
{
	mdb_tab_cookie_t *cookie;

	cookie = mdb_zalloc(sizeof (mdb_tab_cookie_t), UM_SLEEP | UM_GC);
	(void) mdb_nv_create(&cookie->mtc_nv, UM_SLEEP | UM_GC);

	return (cookie);
}
/*
 * Pretty-print the cyclic heap for "cpu" as an ASCII tree.  The tree is
 * rendered into a GC'd array of character rows, which are then emitted
 * one row at a time (blank rows are skipped).
 */
void
cyclic_pretty_dump(cyc_cpu_t *cpu)
{
	char **c;
	int i, j;
	int width = 80;
	int depth;
	cyc_index_t *heap;
	size_t hsize = sizeof (cyc_index_t) * cpu->cyp_size;

	heap = mdb_alloc(hsize, UM_SLEEP | UM_GC);

	if (mdb_vread(heap, hsize, (uintptr_t)cpu->cyp_heap) == -1) {
		mdb_warn("couldn't read heap at %p", (uintptr_t)cpu->cyp_heap);
		return;
	}

	/* depth = ceil(log2(nelems)) + 1 levels, LINES_PER_LEVEL rows each. */
	for (depth = 0; (1 << depth) < cpu->cyp_nelems; depth++)
		continue;
	depth++;
	depth = (depth + 1) * LINES_PER_LEVEL;

	c = mdb_zalloc(sizeof (char *) * depth, UM_SLEEP|UM_GC);

	for (i = 0; i < depth; i++)
		c[i] = mdb_zalloc(width, UM_SLEEP|UM_GC);

	cyclic_dump_node(cpu, heap, c, width, 0, 1, width - 2, 0);

	for (i = 0; i < depth; i++) {
		int dump = 0;

		/* Fill unrendered cells with spaces; note if any cell set. */
		for (j = 0; j < width - 1; j++) {
			if (c[i][j] == '\0')
				c[i][j] = ' ';
			else
				dump = 1;
		}

		c[i][width - 2] = '\n';

		/*
		 * Fix: pass the rendered row as an argument rather than as
		 * the format string, so '%' characters in rendered data
		 * can't be misinterpreted as conversion directives.
		 */
		if (dump)
			mdb_printf("%s", c[i]);
	}
}
/*
 * Initialize an sv hash-chain walker.  A starting address is mandatory;
 * the step function reuses the allocated sv_dev_t as scratch space.
 */
static int
sv_hash_winit(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL)
		return (WALK_ERR);

	wsp->walk_data = mdb_zalloc(sizeof (sv_dev_t), UM_SLEEP);

	return (WALK_NEXT);
}
/*
 * Dcmd: print the name of every feature bit set in the kernel's
 * x86_featureset bitmap, one per line.
 */
/*ARGSUSED*/
static int
x86_featureset_cmd(uintptr_t addr, uint_t flags, int argc,
    const mdb_arg_t *argv)
{
	void *fset;
	GElf_Sym sym;
	uintptr_t nptr;
	char name[128];
	int ii;
	size_t sz = sizeof (uchar_t) * BT_SIZEOFMAP(NUM_X86_FEATURES);

	if (argc != 0)
		return (DCMD_USAGE);

	/* Locate the kernel's array of feature-name string pointers. */
	if (mdb_lookup_by_name("x86_feature_names", &sym) == -1) {
		mdb_warn("couldn't find x86_feature_names");
		return (DCMD_ERR);
	}

	fset = mdb_zalloc(sz, UM_NOSLEEP);
	if (fset == NULL) {
		mdb_warn("failed to allocate memory for x86_featureset");
		return (DCMD_ERR);
	}

	/* Snapshot the feature bitmap from the target. */
	if (mdb_readvar(fset, "x86_featureset") != sz) {
		mdb_warn("failed to read x86_featureset");
		mdb_free(fset, sz);
		return (DCMD_ERR);
	}

	for (ii = 0; ii < NUM_X86_FEATURES; ii++) {
		if (!BT_TEST((ulong_t *)fset, ii))
			continue;

		/* Read the ii'th name pointer from x86_feature_names. */
		if (mdb_vread(&nptr, sizeof (char *),
		    sym.st_value + sizeof (void *) * ii) != sizeof (char *)) {
			mdb_warn("failed to read feature array %d", ii);
			mdb_free(fset, sz);
			return (DCMD_ERR);
		}

		/* Then the name string itself. */
		if (mdb_readstr(name, sizeof (name), nptr) == -1) {
			mdb_warn("failed to read feature %d", ii);
			mdb_free(fset, sz);
			return (DCMD_ERR);
		}

		mdb_printf("%s\n", name);
	}

	mdb_free(fset, sz);
	return (DCMD_OK);
}
/*
 * Create a kp_file_t for the mapping "kpm", load its ELF image and
 * dynamic symbol table, and append it to the target's file list.
 * Returns the new file, or NULL on failure (with partial state freed:
 * the error path destroys the gelf file if it was created, otherwise
 * the underlying io, and then the kp_file_t itself).
 */
static kp_file_t *
kp_file_create(mdb_tgt_t *t, kp_map_t *kpm, GElf_Half etype)
{
	kp_file_t *kpf = mdb_zalloc(sizeof (kp_file_t), UM_SLEEP);
	kp_data_t *kp = t->t_data;
	size_t dyns_sz;
	void *dyns;

	kpf->kpf_fio = kp_io_create(t, kpm);
	kpf->kpf_map = kpm;
	kpf->kpf_basename = strbasename(kpm->kpm_map.map_name);
	kpf->kpf_file = mdb_gelf_create(kpf->kpf_fio, etype, GF_PROGRAM);
	kpf->kpf_text_base = kpm->kpm_map.map_base;

	/* The main executable keeps the zero dyn base from mdb_zalloc(). */
	if (kpm != kp->kp_map_exec)
		kpf->kpf_dyn_base = kpf->kpf_text_base;

	if (kpf->kpf_file == NULL)
		goto err; /* Failed to create ELF file */

	mdb_dprintf(MDB_DBG_TGT, "loading symbols for %s\n",
	    kpm->kpm_map.map_name);

	/* If run-time linker data is available, hand over the DYN entries. */
	if ((kp->kp_rap != NULL) && (rd_get_dyns(kp->kp_rap,
	    kpf->kpf_text_base, &dyns, &dyns_sz) == RD_OK))
		mdb_gelf_dyns_set(kpf->kpf_file, dyns, dyns_sz);

	kpf->kpf_dynsym = mdb_gelf_symtab_create_dynamic(kpf->kpf_file,
	    MDB_TGT_DYNSYM);

	if (kpf->kpf_dynsym == NULL)
		goto err; /* Failed to create symbol table */

	/* Link the new file onto the tail of the target's file list. */
	kpm->kpm_file = kpf;

	if (kp->kp_file_tail != NULL)
		kp->kp_file_tail->kpf_next = kpf;
	else
		kp->kp_file_head = kpf;

	kp->kp_file_tail = kpf;
	kp->kp_num_files++;

	return (kpf);

err:
	if (kpf->kpf_file != NULL)
		mdb_gelf_destroy(kpf->kpf_file);
	else
		mdb_io_destroy(kpf->kpf_fio);
	mdb_free(kpf, sizeof (kp_file_t));
	return (NULL);
}
/*
 * Queue a request asking the driver to load every available dmod.
 * Callers must pass MDB_MOD_DEFER; completion happens when the target
 * next resumes.
 */
void
mdb_module_load_all(int mode)
{
	kmdb_wr_t *req;

	ASSERT(mode & MDB_MOD_DEFER);

	req = mdb_zalloc(sizeof (kmdb_wr_t), UM_SLEEP);
	req->wn_task = WNTASK_DMOD_LOAD_ALL;

	kmdb_wr_driver_notify(req);
}
/*
 * Per-thread callback for ::stacks: unwind the thread's stack, build a
 * stacks_entry_t for it, and either chain it onto a matching hash entry
 * (as a duplicate) or insert it as a new entry.
 */
/*ARGSUSED*/
int
stacks_thread_cb(uintptr_t addr, const void *ignored, void *cbarg)
{
	stacks_info_t *sip = cbarg;
	findstack_info_t *fsip = &sip->si_fsi;

	stacks_entry_t **sepp, *nsep, *sep;
	int idx;
	size_t depth;

	/* Skip threads that can't be read at all; other failures proceed. */
	if (stacks_findstack(addr, fsip, 0) != DCMD_OK &&
	    fsip->fsi_failed == FSI_FAIL_BADTHREAD) {
		mdb_warn("couldn't read thread at %p\n", addr);
		return (WALK_NEXT);
	}

	sip->si_count++;

	/* Build the entry, copying the unwound frames after the header. */
	depth = fsip->fsi_depth;
	nsep = mdb_zalloc(STACKS_ENTRY_SIZE(depth), UM_SLEEP);
	nsep->se_thread = addr;
	nsep->se_sp = fsip->fsi_sp;
	nsep->se_sobj_ops = fsip->fsi_sobj_ops;
	nsep->se_tstate = fsip->fsi_tstate;
	nsep->se_count = 1;
	nsep->se_overflow = fsip->fsi_overflow;
	nsep->se_depth = depth;
	nsep->se_failed = fsip->fsi_failed;
	nsep->se_panic = fsip->fsi_panic;

	for (idx = 0; idx < depth; idx++)
		nsep->se_stack[idx] = fsip->fsi_stack[idx];

	/*
	 * Search the hash chain for an entry with an identical stack.  If
	 * one exists, push this entry onto its dup list and bump its count
	 * instead of adding a new chain element.
	 */
	for (sepp = &sip->si_hash[stacks_hash_entry(nsep)];
	    (sep = *sepp) != NULL;
	    sepp = &sep->se_next) {

		if (stacks_entry_comp_impl(sep, nsep, 0) != 0)
			continue;

		nsep->se_dup = sep->se_dup;
		sep->se_dup = nsep;
		sep->se_count++;
		return (WALK_NEXT);
	}

	/* No match: append the new entry at the end of the chain. */
	nsep->se_next = NULL;
	*sepp = nsep;
	sip->si_entries++;

	return (WALK_NEXT);
}
/*
 * Initialize the sv_gclient walker.  Without an explicit start address,
 * begin at the head of the target's sv_gclients list.
 */
static int
sv_gclient_winit(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		if (mdb_readvar(&wsp->walk_addr, "sv_gclients") == -1) {
			mdb_warn("unable to read 'sv_gclients'");
			return (WALK_ERR);
		}
	}

	/* Scratch buffer the step function reads each node into. */
	wsp->walk_data = mdb_zalloc(sizeof (sv_gclient_t), UM_SLEEP);

	return (WALK_NEXT);
}
/*
 * Walker init for TRAPTRACE records: snapshot the trap_trace_ctl array
 * for all CPUs and prime each control structure's ttc_current pointer.
 */
int
ttrace_walk_init(mdb_walk_state_t *wsp)
{
	trap_trace_ctl_t *ttcp;
	size_t ttc_size = sizeof (trap_trace_ctl_t) * NCPU;
	int i;

	if (!ttrace_ttr_size_check())
		return (WALK_ERR);

	/*
	 * Fix: reject local walks before allocating.  The original
	 * allocated ttcp first and leaked ttc_size bytes on this error
	 * return.
	 */
	if (wsp->walk_addr != NULL) {
		mdb_warn("ttrace only supports global walks\n");
		return (WALK_ERR);
	}

	ttcp = mdb_zalloc(ttc_size, UM_SLEEP);

	if (mdb_readsym(ttcp, ttc_size, "trap_trace_ctl") == -1) {
		mdb_warn("symbol 'trap_trace_ctl' not found; "
		    "non-TRAPTRACE kernel?\n");
		mdb_free(ttcp, ttc_size);
		return (WALK_ERR);
	}

	/*
	 * We'll poach the ttc_current pointer (which isn't used for
	 * anything) to store a pointer to our current TRAPTRACE record.
	 * This allows us to only keep the array of trap_trace_ctl structures
	 * as our walker state (ttc_current may be the only kernel data
	 * structure member added exclusively to make writing the mdb walker
	 * a little easier).
	 */
	for (i = 0; i < NCPU; i++) {
		trap_trace_ctl_t *ttc = &ttcp[i];

		if (ttc->ttc_first == NULL)
			continue;

		/*
		 * Assign ttc_current to be the last completed record.
		 * Note that the error checking (i.e. in the ttc_next ==
		 * ttc_first case) is performed in the step function.
		 */
		ttc->ttc_current = ttc->ttc_next - sizeof (trap_trace_rec_t);
	}

	wsp->walk_data = ttcp;
	return (WALK_NEXT);
}
mdb_context_t * mdb_context_create(int (*func)(void)) { mdb_context_t *c = mdb_zalloc(sizeof (mdb_context_t), UM_NOSLEEP); size_t pagesize = sysconf(_SC_PAGESIZE); int prot = sysconf(_SC_STACK_PROT); static int zfd = -1; if (c == NULL) return (NULL); if (prot == -1) prot = PROT_READ | PROT_WRITE | PROT_EXEC; c->ctx_func = func; c->ctx_stacksize = pagesize * 4; c->ctx_stack = mmap(NULL, c->ctx_stacksize, prot, MAP_PRIVATE | MAP_ANON, -1, 0); /* * If the mmap failed with EBADFD, this kernel doesn't have MAP_ANON * support; fall back to opening /dev/zero, caching the fd, and using * that to mmap chunks of anonymous memory. */ if (c->ctx_stack == MAP_FAILED && errno == EBADF) { if (zfd == -1 && (zfd = open("/dev/zero", O_RDWR)) >= 0) (void) fcntl(zfd, F_SETFD, FD_CLOEXEC); if (zfd >= 0) { c->ctx_stack = mmap(NULL, c->ctx_stacksize, prot, MAP_PRIVATE, zfd, 0); } } c->ctx_uc.uc_flags = UC_ALL; if (c->ctx_stack == MAP_FAILED || getcontext(&c->ctx_uc) != 0) { mdb_free(c, sizeof (mdb_context_t)); return (NULL); } c->ctx_uc.uc_stack.ss_sp = c->ctx_stack; c->ctx_uc.uc_stack.ss_size = c->ctx_stacksize; c->ctx_uc.uc_stack.ss_flags = 0; c->ctx_uc.uc_link = NULL; makecontext(&c->ctx_uc, context_init, 1, c); return (c); }
static void kp_add_mapping(const mdb_map_t *pmp, void *data) { kp_map_t *kpm = mdb_zalloc(sizeof (kp_map_t), UM_SLEEP); kp_data_t *kp = data; bcopy(pmp, &kpm->kpm_map, sizeof (mdb_map_t)); if (kp->kp_map_tail != NULL) kp->kp_map_tail->kpm_next = kpm; else kp->kp_map_head = kpm; kp->kp_map_tail = kpm; kp->kp_num_maps++; }
/*
 * Initialize the kernel/debugger interface: allocate per-CPU save areas
 * and reset the module-change callback, watchpoint map, and trap-switch
 * flag from the auxv.
 */
static int
kaif_init(kmdb_auxv_t *kav)
{
	/* Allocate one save area per CPU. */
	kaif_ncpusave = kav->kav_ncpu;
	kaif_cpusave = mdb_zalloc(sizeof (kaif_cpusave_t) * kaif_ncpusave,
	    UM_SLEEP);

	kaif_modchg_cb = NULL;
	kaif_waptmap = 0;

	kaif_trap_switch = (kav->kav_flags & KMDB_AUXV_FL_NOTRPSWTCH) == 0;

	return (0);
}
/*
 * Walker init for cyclic trace records.  With no address, use the global
 * passive trace buffer; otherwise read the cyc_cpu_t at the given address.
 * Each trace index is backed up by one (wrapping) so the step function
 * starts at the most recent record.
 */
int
cyctrace_walk_init(mdb_walk_state_t *wsp)
{
	cyc_cpu_t *cpu;
	int i;

	cpu = mdb_zalloc(sizeof (cyc_cpu_t), UM_SLEEP);

	if (wsp->walk_addr == NULL) {
		/*
		 * If an address isn't provided, we'll use the passive buffer.
		 */
		GElf_Sym sym;
		cyc_tracebuf_t *tr = &cpu->cyp_trace[0];
		uintptr_t addr;

		if (mdb_lookup_by_name("cyc_ptrace", &sym) == -1) {
			mdb_warn("couldn't find passive buffer");
			/* Fix: the original leaked cpu on this path. */
			mdb_free(cpu, sizeof (cyc_cpu_t));
			return (-1);
		}

		addr = (uintptr_t)sym.st_value;

		if (mdb_vread(tr, sizeof (cyc_tracebuf_t), addr) == -1) {
			mdb_warn("couldn't read passive buffer");
			/* Fix: the original leaked cpu on this path. */
			mdb_free(cpu, sizeof (cyc_cpu_t));
			return (-1);
		}

		wsp->walk_addr = addr - offsetof(cyc_cpu_t, cyp_trace[0]);
	} else {
		if (mdb_vread(cpu, sizeof (cyc_cpu_t), wsp->walk_addr) == -1) {
			mdb_warn("couldn't read cyc_cpu at %p", wsp->walk_addr);
			mdb_free(cpu, sizeof (cyc_cpu_t));
			return (-1);
		}
	}

	/* Back each level's index up by one, wrapping at the buffer edge. */
	for (i = 0; i < CY_LEVELS; i++) {
		if (cpu->cyp_trace[i].cyt_ndx-- == 0)
			cpu->cyp_trace[i].cyt_ndx = CY_NTRACEREC - 1;
	}

	wsp->walk_data = cpu;
	return (0);
}
/*
 * Initialize the sv_maj walker.  With an explicit address, that address
 * becomes the first major entry; otherwise the sv_majors array is read
 * from the target.
 */
static int
sv_maj_winit(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != NULL) {
		sv_majors[0] = (sv_maj_t *)wsp->walk_addr;
	} else if (mdb_readvar(&sv_majors, "sv_majors") == -1) {
		mdb_warn("failed to read 'sv_majors'");
		return (WALK_ERR);
	}

	/* The walk iterates over the local sv_majors array itself. */
	wsp->walk_addr = (uintptr_t)&sv_majors[0];
	wsp->walk_data = mdb_zalloc(sizeof (sv_maj_t), UM_SLEEP);

	return (WALK_NEXT);
}
/*
 * read mmu parameters from kernel
 *
 * Fills in the file-scope mmu, khat and kernelbase state, detects a
 * paravirtualized (Xen) domain image, and (outside kmdb) snapshots the
 * MFN list.  Safe to call repeatedly: a nonzero mmu.num_level means the
 * work is already done.
 */
static void
init_mmu(void)
{
	struct as kas;

	if (mmu.num_level != 0)
		return;

	if (mdb_readsym(&mmu, sizeof (mmu), "mmu") == -1)
		mdb_warn("Can't use HAT information before mmu_init()\n");
	if (mdb_readsym(&kas, sizeof (kas), "kas") == -1)
		mdb_warn("Couldn't find kas - kernel's struct as\n");
	if (mdb_readsym(&kernelbase, sizeof (kernelbase), "kernelbase") == -1)
		mdb_warn("Couldn't find kernelbase\n");
	khat = kas.a_hat;

	/*
	 * Is this a paravirtualized domain image?
	 * (All three Xen symbols must be present; otherwise clear the list
	 * address so is_xpv below reads false.)
	 */
	if (mdb_readsym(&mfn_list_addr, sizeof (mfn_list_addr),
	    "mfn_list") == -1 ||
	    mdb_readsym(&xen_virt_start, sizeof (xen_virt_start),
	    "xen_virt_start") == -1 ||
	    mdb_readsym(&mfn_count, sizeof (mfn_count), "mfn_count") == -1) {
		mfn_list_addr = NULL;
	}

	is_xpv = mfn_list_addr != NULL;

#ifndef _KMDB
	/*
	 * recreate the local mfn_list
	 */
	if (is_xpv) {
		size_t sz = mfn_count * sizeof (pfn_t);
		mfn_list = mdb_zalloc(sz, UM_SLEEP);
		if (mdb_vread(mfn_list, sz, (uintptr_t)mfn_list_addr) == -1) {
			mdb_warn("Failed to read MFN list\n");
			mdb_free(mfn_list, sz);
			mfn_list = NULL;
		}
	}
#endif
}
/*
 * Return a GC'd string depicting a "width"-bit field in which bit "bit"
 * is shown as '1' or '0' (per "on") and every other position as '.'.
 */
const char *
mdb_one_bit(int width, int bit, int on)
{
	char *buf;
	int pos, j = 0;

	buf = mdb_zalloc(width + (width / NBNB) + 2, UM_GC | UM_SLEEP);

	/* Positions above the bit of interest. */
	for (pos = width - 1; pos > bit; pos--)
		SETBIT(buf, j, '.');

	SETBIT(buf, j, on ? '1' : '0');

	/* Positions below it. */
	for (pos = bit - 1; pos >= 0; pos--)
		SETBIT(buf, j, '.');

	return (buf);
}
/*
 * Return a GC'd string marking bit positions [start, stop] of a
 * "width"-bit field with 'x' and all remaining positions with '.'.
 */
const char *
mdb_inval_bits(int width, int start, int stop)
{
	char *buf;
	int pos, j = 0;

	buf = mdb_zalloc(width + (width / NBNB) + 2, UM_GC | UM_SLEEP);

	/* Above the invalid range. */
	for (pos = width - 1; pos > stop; pos--)
		SETBIT(buf, j, '.');

	/* The invalid range itself. */
	for (pos = stop; pos >= start; pos--)
		SETBIT(buf, j, 'x');

	/* Below it. */
	for (; pos >= 0; pos--)
		SETBIT(buf, j, '.');

	return (buf);
}
/*
 * Queue a request asking the driver to unload the dmod described by
 * "kmc", marking the modctl as UNLOADING.  Unless the caller deferred
 * completion, resume the target so the driver can process the request.
 */
static void
kmdb_module_request_unload(kmdb_modctl_t *kmc, const char *modname, int mode)
{
	kmdb_wr_unload_t *req;

	req = mdb_zalloc(sizeof (kmdb_wr_unload_t), UM_SLEEP);
	req->dur_node.wn_task = WNTASK_DMOD_UNLOAD;
	req->dur_modname = strdup(modname);
	req->dur_modctl = kmc->kmc_modctl;

	kmdb_wr_driver_notify(req);

	kmc->kmc_state = KMDB_MC_STATE_UNLOADING;

	if (!(mode & MDB_MOD_DEFER) &&
	    mdb_tgt_continue(mdb.m_target, NULL) == 0)
		return;

	if (!(mode & MDB_MOD_SILENT))
		mdb_printf("%s unload pending (:c to complete)\n", modname);
}