/*
 * Determine the return address for the current frame.  Typically this is the
 * fr_savpc value from the current frame, but we also perform some special
 * handling to see if we are stopped on one of the first two instructions of a
 * typical function prologue, in which case %ebp will not be set up yet.
 */
int
mdb_ia32_step_out(mdb_tgt_t *t, uintptr_t *p, kreg_t pc, kreg_t fp, kreg_t sp,
    mdb_instr_t curinstr)
{
	struct frame fr;
	GElf_Sym s;
	char buf[1];

	enum {
		M_PUSHL_EBP	= 0x55, /* pushl %ebp */
		M_MOVL_EBP	= 0x8b  /* movl %esp, %ebp */
	};

	if (mdb_tgt_lookup_by_addr(t, pc, MDB_TGT_SYM_FUZZY,
	    buf, 0, &s, NULL) == 0) {
		if (pc == s.st_value && curinstr == M_PUSHL_EBP)
			fp = sp - 4;
		else if (pc == s.st_value + 1 && curinstr == M_MOVL_EBP)
			fp = sp;
	}

	if (mdb_tgt_vread(t, &fr, sizeof (fr), fp) == sizeof (fr)) {
		*p = fr.fr_savpc;
		return (0);
	}

	return (-1); /* errno is set for us */
}
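/*
 * A standalone sketch (not part of MDB) of why the prologue cases above pick
 * the frame pointer the way they do.  Before "pushl %ebp" executes, the
 * return address sits at %esp and no frame is linked yet, so treating
 * fp = sp - 4 makes fr_savpc (at fp + 4) line up with it; after the push but
 * before "movl %esp, %ebp", the saved %ebp is at %esp and the return address
 * at %esp + 4, so fp = sp works.  The mock_frame layout mirrors struct frame;
 * the addresses below are made up.
 */
#include <assert.h>
#include <stdint.h>

struct mock_frame {
	uintptr_t fr_savfp;		/* saved caller frame pointer */
	uintptr_t fr_savpc;		/* return address */
};

int
main(void)
{
	/* Simulated stack while stopped on "pushl %ebp": [sp] = return pc */
	uintptr_t stack[2] = { 0, 0x08048123 };
	uintptr_t sp = (uintptr_t)&stack[1];

	/* Case 1 above: fp = sp minus one word */
	struct mock_frame *fr = (struct mock_frame *)(sp - sizeof (uintptr_t));
	assert(fr->fr_savpc == 0x08048123);

	return (0);
}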
/*
 * Given a return address (%eip), determine the likely number of arguments
 * that were pushed on the stack prior to its execution.  We do this by
 * expecting that a typical call sequence consists of pushing arguments on
 * the stack, executing a call instruction, and then performing an add
 * on %esp to restore it to the value prior to pushing the arguments for
 * the call.  We attempt to detect such an add, and divide the addend
 * by the size of a word to determine the number of pushed arguments.
 */
static uint_t
kvm_argcount(mdb_tgt_t *t, uintptr_t eip, ssize_t size)
{
	uint8_t ins[6];
	ulong_t n;

	enum {
		M_MODRM_ESP = 0xc4,	/* Mod/RM byte indicates %esp */
		M_ADD_IMM32 = 0x81,	/* ADD imm32 to r/m32 */
		M_ADD_IMM8  = 0x83	/* ADD imm8 to r/m32 */
	};

	if (mdb_tgt_vread(t, ins, sizeof (ins), eip) != sizeof (ins))
		return (0);

	if (ins[1] != M_MODRM_ESP)
		return (0);

	switch (ins[0]) {
	case M_ADD_IMM32:
		n = ins[2] + (ins[3] << 8) + (ins[4] << 16) + (ins[5] << 24);
		break;

	case M_ADD_IMM8:
		n = ins[2];
		break;

	default:
		n = 0;
	}

	return (MIN((ssize_t)n, size) / sizeof (long));
}
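/*
 * A standalone sketch (not part of MDB) of the addend decoding that
 * kvm_argcount() performs.  The bytes 0x83 0xc4 0x10 encode
 * "addl $0x10, %esp"; dividing the 16-byte addend by the 4-byte word size
 * yields four pushed arguments.  decode_argcount() is a hypothetical helper
 * that only restates the switch above for illustration.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static unsigned int
decode_argcount(const uint8_t *ins, size_t wordsize)
{
	unsigned long n;

	if (ins[1] != 0xc4)		/* Mod/RM byte must indicate %esp */
		return (0);

	if (ins[0] == 0x81)		/* ADD imm32 to r/m32 */
		n = ins[2] | (ins[3] << 8) | (ins[4] << 16) |
		    ((unsigned long)ins[5] << 24);
	else if (ins[0] == 0x83)	/* ADD imm8 to r/m32 */
		n = ins[2];
	else
		return (0);

	return (n / wordsize);
}

int
main(void)
{
	const uint8_t add_imm8[6] = { 0x83, 0xc4, 0x10 }; /* addl $0x10, %esp */

	assert(decode_argcount(add_imm8, 4) == 4);
	return (0);
}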
ssize_t
mdb_vread(void *buf, size_t nbytes, uintptr_t addr)
{
	ssize_t rbytes = mdb_tgt_vread(mdb.m_target, buf, nbytes, addr);

	if (rbytes > 0 && rbytes < nbytes)
		return (set_errbytes(rbytes, nbytes));

	return (rbytes);
}
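/*
 * A hedged sketch of how a debugger module dcmd might consume mdb_vread():
 * read an object at the given address and fail cleanly on a short read.
 * "struct foo" and foo_dcmd() are hypothetical; mdb_vread(), mdb_warn(),
 * mdb_printf(), and the DCMD_* constants are the standard MDB module API.
 */
#include <sys/mdb_modapi.h>

struct foo {
	int f_count;
};

static int
foo_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct foo f;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_vread(&f, sizeof (f), addr) != sizeof (f)) {
		mdb_warn("failed to read struct foo at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("f_count = %d\n", f.f_count);
	return (DCMD_OK);
}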
/*
 * We cannot rely on pr_instr, because if we hit a breakpoint or the user has
 * artificially modified memory, it will no longer be correct.
 */
static uint32_t
pt_read_instr(mdb_tgt_t *t)
{
	const lwpstatus_t *psp = &Pstatus(t->t_pshandle)->pr_lwp;
	uint32_t ret = 0;

	(void) mdb_tgt_vread(t, &ret, sizeof (ret), psp->pr_reg[R_PC]);

	return (ret);
}
ssize_t
mdb_readvar(void *buf, const char *name)
{
	GElf_Sym sym;

	if (mdb_tgt_lookup_by_name(mdb.m_target, MDB_TGT_OBJ_EXEC,
	    name, &sym, NULL))
		return (-1);

	if (mdb_tgt_vread(mdb.m_target, buf, sym.st_size,
	    (uintptr_t)sym.st_value) == sym.st_size)
		return ((ssize_t)sym.st_size);

	return (-1);
}
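/*
 * A hedged sketch of mdb_readvar() usage: copy a global variable from the
 * target's executable into a local buffer by symbol name.  "physmem" is used
 * only as a familiar example symbol; the wrapper function is illustrative.
 */
#include <sys/mdb_modapi.h>

static int
show_physmem(void)
{
	ulong_t pages;

	if (mdb_readvar(&pages, "physmem") == -1) {
		mdb_warn("failed to read 'physmem'");
		return (-1);
	}

	mdb_printf("physmem = %lu pages\n", pages);
	return (0);
}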
static void
kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
{
	km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);

	(void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);

	km->km_symbuf = (void *)
	    KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);

	km->km_strtab = (char *)
	    KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);

	km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
	    &km->km_symtab_hdr, km->km_symbuf,
	    &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
}
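/*
 * A hedged sketch of the relocation arithmetic a KT_RELOC_BUF()-style macro
 * performs in kt_load_module() above: the symbol space is copied from the
 * target virtual address km_symspace_va into the local buffer km_data, so a
 * target address inside that region maps to buffer + (va - base).  The macro
 * and addresses below are assumptions written for illustration, not
 * necessarily MDB's definition.
 */
#include <assert.h>
#include <stdint.h>

#define	MY_RELOC_BUF(va, base, buf) \
	((uintptr_t)(buf) + ((uintptr_t)(va) - (uintptr_t)(base)))

int
main(void)
{
	char local[64];				/* local copy of the symspace */
	uintptr_t base = 0xfe800000;		/* target VA of the symspace */
	uintptr_t symtab_va = base + 16;	/* target VA of .symtab */

	assert(MY_RELOC_BUF(symtab_va, base, local) == (uintptr_t)local + 16);
	return (0);
}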
static int
kaif_brkpt_arm(uintptr_t addr, mdb_instr_t *instrp)
{
	mdb_instr_t bkpt = KAIF_BREAKPOINT_INSTR;

	if (kaif_toxic_text(addr)) {
		warn("%a cannot be a breakpoint target\n", addr);
		return (set_errno(EMDB_TGTNOTSUP));
	}

	if (mdb_tgt_vread(mdb.m_target, instrp, sizeof (mdb_instr_t), addr) !=
	    sizeof (mdb_instr_t))
		return (-1); /* errno is set for us */

	if (mdb_tgt_vwrite(mdb.m_target, &bkpt, sizeof (mdb_instr_t), addr) !=
	    sizeof (mdb_instr_t))
		return (-1); /* errno is set for us */

	return (0);
}
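/*
 * A hedged sketch of the inverse operation: write the instruction saved by
 * kaif_brkpt_arm() back over the breakpoint.  The helper name and its void
 * return are assumptions for illustration; the mdb_tgt_vwrite() call simply
 * mirrors the arming code above.
 */
static void
my_brkpt_disarm(uintptr_t addr, mdb_instr_t instr)
{
	if (mdb_tgt_vwrite(mdb.m_target, &instr, sizeof (mdb_instr_t),
	    addr) != sizeof (mdb_instr_t))
		warn("failed to restore instruction at %a\n", addr);
}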
void
kt_amd64_init(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	panic_data_t pd;
	struct regs regs;
	uintptr_t addr;

	/*
	 * Initialize the machine-dependent parts of the kernel target
	 * structure.  Once this is complete and we fill in the ops
	 * vector, the target is now fully constructed and we can use
	 * the target API itself to perform the rest of our initialization.
	 */
	kt->k_rds = mdb_amd64_kregs;
	kt->k_regs = mdb_zalloc(sizeof (mdb_tgt_gregset_t), UM_SLEEP);
	kt->k_regsize = sizeof (mdb_tgt_gregset_t);
	kt->k_dcmd_regs = kt_regs;
	kt->k_dcmd_stack = kt_stack;
	kt->k_dcmd_stackv = kt_stackv;
	kt->k_dcmd_stackr = kt_stackv;
	kt->k_dcmd_cpustack = kt_cpustack;
	kt->k_dcmd_cpuregs = kt_cpuregs;

	t->t_ops = &kt_amd64_ops;

	(void) mdb_dis_select("amd64");

	/*
	 * Look up the symbols corresponding to subroutines in locore.s where
	 * we expect a saved regs structure to be pushed on the stack.  When
	 * performing stack tracebacks we will attempt to detect interrupt
	 * frames by comparing the %eip value to these symbols.
	 */
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmnint", &kt->k_intr_sym, NULL);

	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmntrap", &kt->k_trap_sym, NULL);

	/*
	 * Don't attempt to load any thread or register information if
	 * we're examining the live operating system.
	 */
	if (kt->k_symfile != NULL && strcmp(kt->k_symfile, "/dev/ksyms") == 0)
		return;

	/*
	 * If the panicbuf symbol is present and we can consume a panicbuf
	 * header of the appropriate version from this address, then we can
	 * initialize our current register set based on its contents.
	 * Prior to the re-structuring of panicbuf, our only register data
	 * was the panic_regs label_t, into which a setjmp() was performed,
	 * or the panic_reg register pointer, which was only non-zero if
	 * the system panicked as a result of a trap calling die().
	 */
	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &pd, sizeof (pd),
	    MDB_TGT_OBJ_EXEC, "panicbuf") == sizeof (pd) &&
	    pd.pd_version == PANICBUFVERS) {

		size_t pd_size = MIN(PANICBUFSIZE, pd.pd_msgoff);
		panic_data_t *pdp = mdb_zalloc(pd_size, UM_SLEEP);
		uint_t i, n;

		(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, pdp, pd_size,
		    MDB_TGT_OBJ_EXEC, "panicbuf");

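		/*
		 * panic_data_t embeds one panic_nv_t in its header, so
		 * subtract (header size minus one entry) before dividing by
		 * the entry size to get the number of name/value pairs.
		 */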
		n = (pd_size - (sizeof (panic_data_t) -
		    sizeof (panic_nv_t))) / sizeof (panic_nv_t);

		for (i = 0; i < n; i++) {
			(void) kt_putareg(t, kt->k_tid,
			    pdp->pd_nvdata[i].pnv_name,
			    pdp->pd_nvdata[i].pnv_value);
		}

		mdb_free(pdp, pd_size);

		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &addr, sizeof (addr),
	    MDB_TGT_OBJ_EXEC, "panic_reg") == sizeof (addr) && addr != NULL &&
	    mdb_tgt_vread(t, &regs, sizeof (regs), addr) == sizeof (regs)) {
		kt_regs_to_kregs(&regs, kt->k_regs);
		return;
	}

	/*
	 * If we can't read any panic regs, then our final try is for any CPU
	 * context that may have been stored (for example, in Xen core dumps).
	 */
	if (kt_kvmregs(t, 0, kt->k_regs) == 0)
		return;

	warn("failed to read panicbuf and panic_reg -- "
	    "current register set will be unavailable\n");
}
static int
kaif_step(void)
{
	kreg_t pc, fl, oldfl, newfl, sp;
	mdb_tgt_addr_t npc;
	mdb_instr_t instr;
	int emulated = 0, rchk = 0;
	size_t pcoff = 0;

	(void) kmdb_dpi_get_register("pc", &pc);

	if (kaif_toxic_text(pc)) {
		warn("%a cannot be stepped\n", pc);
		return (set_errno(EMDB_TGTNOTSUP));
	}

	if ((npc = mdb_dis_nextins(mdb.m_disasm, mdb.m_target,
	    MDB_TGT_AS_VIRT, pc)) == pc) {
		warn("failed to decode instruction at %a for step\n", pc);
		return (set_errno(EINVAL));
	}

	/*
	 * Stepping behavior depends on the type of instruction.  It does not
	 * depend on the presence of a REX prefix, as the action we take for a
	 * given instruction doesn't currently vary for 32-bit instructions
	 * versus their 64-bit counterparts.
	 */
	do {
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}
	} while (pcoff++, (instr >= M_REX_LO && instr <= M_REX_HI && !rchk++));

	switch (instr) {
	case M_IRET:
		warn("iret cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_INT3:
	case M_INTX:
	case M_INTO:
		warn("int cannot be stepped\n");
		return (set_errno(EMDB_TGTNOTSUP));

	case M_ESC:
		if (mdb_tgt_vread(mdb.m_target, &instr, sizeof (mdb_instr_t),
		    pc + pcoff) != sizeof (mdb_instr_t)) {
			warn("failed to read at %p for step",
			    (void *)(pc + pcoff));
			return (-1);
		}

		switch (instr) {
		case M_SYSRET:
			warn("sysret cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		case M_SYSEXIT:
			warn("sysexit cannot be stepped\n");
			return (set_errno(EMDB_TGTNOTSUP));
		}
		break;

	/*
	 * Some instructions need to be emulated.  We need to prevent direct
	 * manipulations of EFLAGS, so we'll emulate cli, sti.  pushfl and
	 * popfl also receive special handling, as they manipulate both EFLAGS
	 * and %esp.
	 */
	case M_CLI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl &= ~KREG_EFLAGS_IF_MASK;
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_STI:
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		fl |= (1 << KREG_EFLAGS_IF_SHIFT);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, fl);

		emulated = 1;
		break;

	case M_POPF:
		/*
		 * popfl will restore a pushed EFLAGS from the stack, and could
		 * in so doing cause IF to be turned on, if only for a brief
		 * period.  To avoid this, we'll secretly replace the stack's
		 * EFLAGS with our decaffeinated brand.  We'll then manually
		 * load our EFLAGS copy with the real version after the step.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);

		if (mdb_tgt_vread(mdb.m_target, &newfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to read " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		fl = (fl & ~KREG_EFLAGS_IF_MASK) | KREG_EFLAGS_TF_MASK;

		if (mdb_tgt_vwrite(mdb.m_target, &fl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update " FLAGS_REG_NAME
			    " at %p for popfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}
		break;
	}

	if (emulated) {
		(void) kmdb_dpi_set_register("pc", npc);
		return (0);
	}

	/* Do the step with IF off, and TF (step) on */
	(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &oldfl);
	(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
	    ((oldfl | (1 << KREG_EFLAGS_TF_SHIFT)) & ~KREG_EFLAGS_IF_MASK));

	kmdb_dpi_resume_master(); /* ... there and back again ... */

	/* EFLAGS has now changed, and may require tuning */

	switch (instr) {
	case M_POPF:
		/*
		 * Use the EFLAGS we grabbed before the pop - see the pre-step
		 * M_POPF comment.
		 */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, newfl);
		return (0);

	case M_PUSHF:
		/*
		 * We pushed our modified EFLAGS (with IF and TF turned off)
		 * onto the stack.  Replace the pushed version with our
		 * unmodified one.
		 */
		(void) kmdb_dpi_get_register("sp", &sp);

		if (mdb_tgt_vwrite(mdb.m_target, &oldfl, sizeof (kreg_t),
		    sp) != sizeof (kreg_t)) {
			warn("failed to update pushed " FLAGS_REG_NAME
			    " at %p after pushfl step\n", (void *)sp);
			return (set_errno(EMDB_TGTNOTSUP)); /* XXX ? */
		}

		/* Go back to using the EFLAGS we were using before the step */
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME, oldfl);
		return (0);

	default:
		/*
		 * The stepped instruction may have altered EFLAGS.  We only
		 * really care about the value of IF, and we know the stepped
		 * instruction didn't alter it, so we can simply copy the
		 * pre-step value.  We'll also need to turn TF back off.
		 */
		(void) kmdb_dpi_get_register(FLAGS_REG_NAME, &fl);
		(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
		    ((fl & ~(KREG_EFLAGS_TF_MASK|KREG_EFLAGS_IF_MASK)) |
		    (oldfl & KREG_EFLAGS_IF_MASK)));
		return (0);
	}
}
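/*
 * A standalone sketch (not part of kmdb) of the flag arithmetic used around
 * the single step above, written with the architectural x86 EFLAGS bit
 * positions (TF is bit 8, IF is bit 9); the KREG_EFLAGS_* constants are
 * assumed to encode the same bits.  The step runs with TF set so the CPU
 * traps after one instruction and with IF clear so no interrupt is taken
 * mid-step.
 */
#include <assert.h>
#include <stdint.h>

#define	MY_EFLAGS_TF	(1UL << 8)	/* single-step trap flag */
#define	MY_EFLAGS_IF	(1UL << 9)	/* interrupt enable flag */

int
main(void)
{
	uint64_t oldfl = 0x202;		/* IF set, TF clear */
	uint64_t stepfl = (oldfl | MY_EFLAGS_TF) & ~MY_EFLAGS_IF;

	assert(stepfl & MY_EFLAGS_TF);		/* will trap after one instruction */
	assert(!(stepfl & MY_EFLAGS_IF));	/* interrupts masked during the step */
	return (0);
}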
/*
 * Return the address of the next instruction following a call, or return -1
 * and set errno to EAGAIN if the target should just single-step.  We perform
 * a bit of disassembly on the current instruction in order to determine if it
 * is a call and how many bytes should be skipped, depending on the exact form
 * of the call instruction that is being used.
 */
int
mdb_ia32_next(mdb_tgt_t *t, uintptr_t *p, kreg_t pc, mdb_instr_t curinstr)
{
	uint8_t m;

	enum {
		M_CALL_REL = 0xe8, /* call near with relative displacement */
		M_CALL_REG = 0xff, /* call near indirect or call far register */

		M_MODRM_MD = 0xc0, /* mask for Mod/RM byte Mod field */
		M_MODRM_OP = 0x38, /* mask for Mod/RM byte opcode field */
		M_MODRM_RM = 0x07, /* mask for Mod/RM byte R/M field */

		M_MD_IND   = 0x00, /* Mod code for [REG] */
		M_MD_DSP8  = 0x40, /* Mod code for disp8[REG] */
		M_MD_DSP32 = 0x80, /* Mod code for disp32[REG] */
		M_MD_REG   = 0xc0, /* Mod code for REG */

		M_OP_IND   = 0x10, /* Opcode for call near indirect */
		M_RM_DSP32 = 0x05  /* R/M code for disp32 */
	};

	/*
	 * If the opcode is a near call with relative displacement, assume the
	 * displacement is a rel32 from the next instruction.
	 */
	if (curinstr == M_CALL_REL) {
		*p = pc + sizeof (mdb_instr_t) + sizeof (uint32_t);
		return (0);
	}

	/*
	 * If the opcode is a call near indirect or call far register opcode,
	 * read the subsequent Mod/RM byte to perform additional decoding.
	 */
	if (curinstr == M_CALL_REG) {
		if (mdb_tgt_vread(t, &m, sizeof (m), pc + 1) != sizeof (m))
			return (-1); /* errno is set for us */

		/*
		 * If the Mod/RM opcode extension indicates a near indirect
		 * call, then skip the appropriate number of additional
		 * bytes depending on the addressing form that is used.
		 */
		if ((m & M_MODRM_OP) == M_OP_IND) {
			switch (m & M_MODRM_MD) {
			case M_MD_DSP8:
				*p = pc + 3; /* skip pr_instr, m, disp8 */
				break;
			case M_MD_DSP32:
				*p = pc + 6; /* skip pr_instr, m, disp32 */
				break;
			case M_MD_IND:
				if ((m & M_MODRM_RM) == M_RM_DSP32) {
					*p = pc + 6;
					break; /* skip pr_instr, m, disp32 */
				}
				/* FALLTHRU */
			case M_MD_REG:
				*p = pc + 2; /* skip pr_instr, m */
				break;
			}
			return (0);
		}
	}

	return (set_errno(EAGAIN));
}
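/*
 * A standalone sketch (not part of MDB) checking the instruction-length
 * arithmetic that mdb_ia32_next() applies to a few concrete call encodings:
 * "e8 rel32" (5 bytes), "ff d0" = call *%eax (2 bytes), and "ff 15 disp32" =
 * call *disp32 (6 bytes).  call_length() is a hypothetical helper that only
 * restates the Mod/RM cases above.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
call_length(const uint8_t *ins)
{
	uint8_t m;

	if (ins[0] == 0xe8)			/* call rel32 */
		return (1 + 4);

	if (ins[0] != 0xff)
		return (0);			/* not a form decoded above */

	m = ins[1];
	if ((m & 0x38) != 0x10)			/* not a near indirect call */
		return (0);

	switch (m & 0xc0) {
	case 0x40:				/* disp8[REG] */
		return (3);
	case 0x80:				/* disp32[REG] */
		return (6);
	case 0x00:				/* [REG], or disp32 if R/M = 5 */
		return ((m & 0x07) == 0x05 ? 6 : 2);
	default:				/* REG */
		return (2);
	}
}

int
main(void)
{
	const uint8_t call_rel32[5] = { 0xe8 };		/* call rel32 */
	const uint8_t call_reg[2] = { 0xff, 0xd0 };	/* call *%eax */
	const uint8_t call_disp32[6] = { 0xff, 0x15 };	/* call *disp32 */

	assert(call_length(call_rel32) == 5);
	assert(call_length(call_reg) == 2);
	assert(call_length(call_disp32) == 6);
	return (0);
}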
int
mdb_ia32_kvm_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gsp,
    mdb_tgt_stack_f *func, void *arg)
{
	mdb_tgt_gregset_t gregs;
	kreg_t *kregs = &gregs.kregs[0];
	int got_pc = (gsp->kregs[KREG_EIP] != 0);
	int err;

	struct fr {
		uintptr_t fr_savfp;
		uintptr_t fr_savpc;
		long fr_argv[32];
	} fr;

	uintptr_t fp = gsp->kregs[KREG_EBP];
	uintptr_t pc = gsp->kregs[KREG_EIP];
	uintptr_t lastfp = 0;

	ssize_t size;
	uint_t argc;
	int detect_exception_frames = 0;
	int advance_tortoise = 1;
	uintptr_t tortoise_fp = 0;
#ifndef	_KMDB
	int xp;

	if ((mdb_readsym(&xp, sizeof (xp), "xpv_panicking") != -1) && (xp > 0))
		detect_exception_frames = 1;
#endif

	bcopy(gsp, &gregs, sizeof (gregs));

	while (fp != 0) {
		if (fp & (STACK_ALIGN - 1)) {
			err = EMDB_STKALIGN;
			goto badfp;
		}
		if ((size = mdb_tgt_vread(t, &fr, sizeof (fr), fp)) >=
		    (ssize_t)(2 * sizeof (uintptr_t))) {
			size -= (ssize_t)(2 * sizeof (uintptr_t));
			argc = kvm_argcount(t, fr.fr_savpc, size);
		} else {
			err = EMDB_NOMAP;
			goto badfp;
		}

		if (tortoise_fp == 0) {
			tortoise_fp = fp;
		} else {
			/*
			 * Advance tortoise_fp every other frame, so we detect
			 * cycles with Floyd's tortoise/hare.
			 */
			if (advance_tortoise != 0) {
				struct fr tfr;

				if (mdb_tgt_vread(t, &tfr, sizeof (tfr),
				    tortoise_fp) != sizeof (tfr)) {
					err = EMDB_NOMAP;
					goto badfp;
				}

				tortoise_fp = tfr.fr_savfp;
			}

			if (fp == tortoise_fp) {
				err = EMDB_STKFRAME;
				goto badfp;
			}
		}

		advance_tortoise = !advance_tortoise;

		if (got_pc && func(arg, pc, argc, fr.fr_argv, &gregs) != 0)
			break;

		kregs[KREG_ESP] = kregs[KREG_EBP];

		lastfp = fp;
		fp = fr.fr_savfp;
		/*
		 * The Xen hypervisor marks a stack frame as belonging to
		 * an exception by inverting the bits of the pointer to
		 * that frame.  We attempt to identify these frames by
		 * inverting the pointer and seeing if it is within 0xfff
		 * bytes of the last frame.
		 */
		if (detect_exception_frames)
			if ((fp != 0) && (fp < lastfp) &&
			    ((lastfp ^ ~fp) < 0xfff))
				fp = ~fp;

		kregs[KREG_EBP] = fp;
		kregs[KREG_EIP] = pc = fr.fr_savpc;

		got_pc = (pc != 0);
	}

	return (0);

badfp:
	mdb_printf("%p [%s]", fp, mdb_strerror(err));
	return (set_errno(err));
}
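/*
 * A hedged sketch of a callback that mdb_ia32_kvm_stack_iter() could invoke
 * per frame.  The parameter list is inferred from the call site above,
 * func(arg, pc, argc, fr.fr_argv, &gregs); the printing is illustrative and
 * arg/gregs are unused here.
 */
static int
print_frame(void *arg, uintptr_t pc, uint_t argc, const long *argv,
    const mdb_tgt_gregset_t *gregs)
{
	uint_t i;

	mdb_printf("%a(", pc);
	for (i = 0; i < argc; i++)
		mdb_printf("%s%lx", i == 0 ? "" : ", ", argv[i]);
	mdb_printf(")\n");

	return (0);	/* nonzero would stop the iteration above */
}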
void
kt_amd64_init(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;

	panic_data_t pd;
	kreg_t *kregs;
	struct regs regs;
	uintptr_t addr;

	/*
	 * Initialize the machine-dependent parts of the kernel target
	 * structure.  Once this is complete and we fill in the ops
	 * vector, the target is now fully constructed and we can use
	 * the target API itself to perform the rest of our initialization.
	 */
	kt->k_rds = mdb_amd64_kregs;
	kt->k_regs = mdb_zalloc(sizeof (mdb_tgt_gregset_t), UM_SLEEP);
	kt->k_regsize = sizeof (mdb_tgt_gregset_t);
	kt->k_dcmd_regs = kt_regs;
	kt->k_dcmd_stack = kt_stack;
	kt->k_dcmd_stackv = kt_stackv;
	kt->k_dcmd_stackr = kt_stackv;

	t->t_ops = &kt_amd64_ops;
	kregs = kt->k_regs->kregs;

	(void) mdb_dis_select("amd64");

	/*
	 * Look up the symbols corresponding to subroutines in locore.s where
	 * we expect a saved regs structure to be pushed on the stack.  When
	 * performing stack tracebacks we will attempt to detect interrupt
	 * frames by comparing the %eip value to these symbols.
	 */
	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmnint", &kt->k_intr_sym, NULL);

	(void) mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "cmntrap", &kt->k_trap_sym, NULL);

	/*
	 * Don't attempt to load any thread or register information if
	 * we're examining the live operating system.
	 */
	if (strcmp(kt->k_symfile, "/dev/ksyms") == 0)
		return;

	/*
	 * If the panicbuf symbol is present and we can consume a panicbuf
	 * header of the appropriate version from this address, then we can
	 * initialize our current register set based on its contents.
	 * Prior to the re-structuring of panicbuf, our only register data
	 * was the panic_regs label_t, into which a setjmp() was performed,
	 * or the panic_reg register pointer, which was only non-zero if
	 * the system panicked as a result of a trap calling die().
	 */
	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &pd, sizeof (pd),
	    MDB_TGT_OBJ_EXEC, "panicbuf") == sizeof (pd) &&
	    pd.pd_version == PANICBUFVERS) {

		size_t pd_size = MIN(PANICBUFSIZE, pd.pd_msgoff);
		panic_data_t *pdp = mdb_zalloc(pd_size, UM_SLEEP);
		uint_t i, n;

		(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, pdp, pd_size,
		    MDB_TGT_OBJ_EXEC, "panicbuf");

		n = (pd_size - (sizeof (panic_data_t) -
		    sizeof (panic_nv_t))) / sizeof (panic_nv_t);

		for (i = 0; i < n; i++) {
			(void) kt_putareg(t, kt->k_tid,
			    pdp->pd_nvdata[i].pnv_name,
			    pdp->pd_nvdata[i].pnv_value);
		}

		mdb_free(pdp, pd_size);

	} else if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &addr, sizeof (addr),
	    MDB_TGT_OBJ_EXEC, "panic_reg") == sizeof (addr) && addr != NULL &&
	    mdb_tgt_vread(t, &regs, sizeof (regs), addr) == sizeof (regs)) {

		kregs[KREG_SAVFP] = regs.r_savfp;
		kregs[KREG_SAVPC] = regs.r_savpc;
		kregs[KREG_RDI] = regs.r_rdi;
		kregs[KREG_RSI] = regs.r_rsi;
		kregs[KREG_RDX] = regs.r_rdx;
		kregs[KREG_RCX] = regs.r_rcx;
		kregs[KREG_R8] = regs.r_r8;
		kregs[KREG_R9] = regs.r_r9;
		kregs[KREG_RAX] = regs.r_rax;
		kregs[KREG_RBX] = regs.r_rbx;
		kregs[KREG_RBP] = regs.r_rbp;
		kregs[KREG_R10] = regs.r_r10;
		kregs[KREG_R11] = regs.r_r11;
		kregs[KREG_R12] = regs.r_r12;
		kregs[KREG_R13] = regs.r_r13;
		kregs[KREG_R14] = regs.r_r14;
		kregs[KREG_R15] = regs.r_r15;
		kregs[KREG_FSBASE] = regs.r_fsbase;
		kregs[KREG_GSBASE] = regs.r_gsbase;
		kregs[KREG_DS] = regs.r_ds;
		kregs[KREG_ES] = regs.r_es;
		kregs[KREG_FS] = regs.r_fs;
		kregs[KREG_GS] = regs.r_gs;
		kregs[KREG_TRAPNO] = regs.r_trapno;
		kregs[KREG_ERR] = regs.r_err;
		kregs[KREG_RIP] = regs.r_rip;
		kregs[KREG_CS] = regs.r_cs;
		kregs[KREG_RFLAGS] = regs.r_rfl;
		kregs[KREG_RSP] = regs.r_rsp;
		kregs[KREG_SS] = regs.r_ss;

	} else {
		warn("failed to read panicbuf and panic_reg -- "
		    "current register set will be unavailable\n");
	}
}
static void
kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
{
	char name[MAXNAMELEN];
	uintptr_t addr, head;

	struct module kmod;
	struct modctl ctl;
	Shdr symhdr, strhdr;
	GElf_Sym sym;

	kt_module_t *km;

	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "modules", &sym, NULL) == -1) {
		warn("failed to get 'modules' symbol");
		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
		warn("failed to read 'modules' struct");
		return;
	}

	addr = head = (uintptr_t)sym.st_value;

	do {
		if (addr == NULL)
			break; /* Avoid spurious NULL pointers in list */

		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
			warn("failed to read modctl at %p", (void *)addr);
			return;
		}

		if (ctl.mod_mp == NULL)
			continue; /* No associated krtld structure */

		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
		    (uintptr_t)ctl.mod_modname) <= 0) {
			warn("failed to read module name at %p",
			    (void *)ctl.mod_modname);
			continue;
		}

		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
		    name, (void *)addr);

		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
			warn("skipping duplicate module '%s', id=%d\n",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
		    (uintptr_t)ctl.mod_mp) == -1) {
			warn("failed to read module at %p\n",
			    (void *)ctl.mod_mp);
			continue;
		}

		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
		    kmod.strhdr == NULL) {
			/*
			 * If no buffer for the symbols has been allocated,
			 * or the shdrs for .symtab and .strtab are missing,
			 * then we're out of luck.
			 */
			continue;
		}

		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
		    (uintptr_t)kmod.symhdr) == -1) {
			warn("failed to read .symtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
		    (uintptr_t)kmod.strhdr) == -1) {
			warn("failed to read .strtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		/*
		 * Now get clever: f(*^ing krtld didn't used to bother updating
		 * its own kmod.symsize value.  We know that prior to this bug
		 * being fixed, symspace was a contiguous buffer containing
		 * .symtab, .strtab, and the symbol hash table in that order.
		 * So if symsize is zero, recompute it as the size of .symtab
		 * plus the size of .strtab.  We don't need to load the hash
		 * table anyway since we re-hash all the symbols internally.
		 */
		if (kmod.symsize == 0)
			kmod.symsize = symhdr.sh_size + strhdr.sh_size;

		/*
		 * Similar logic can be used to make educated guesses
		 * at the values of kmod.symtbl and kmod.strings.
		 */
		if (kmod.symtbl == NULL)
			kmod.symtbl = kmod.symspace;
		if (kmod.strings == NULL)
			kmod.strings = kmod.symspace + symhdr.sh_size;

		/*
		 * Make sure things seem reasonable before we proceed
		 * to actually read and decipher the symspace.
		 */
		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
			warn("skipping module '%s', id=%d (corrupt symspace)\n",
			    name, ctl.mod_id);
			continue;
		}

		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
		km->km_name = strdup(name);

		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
		    (uintptr_t)km, MDB_NV_EXTNAME);

		km->km_datasz = kmod.symsize;
		km->km_symspace_va = (uintptr_t)kmod.symspace;
		km->km_symtab_va = (uintptr_t)kmod.symtbl;
		km->km_strtab_va = (uintptr_t)kmod.strings;
		km->km_symtab_hdr = symhdr;
		km->km_strtab_hdr = strhdr;
		km->km_text_va = (uintptr_t)kmod.text;
		km->km_text_size = kmod.text_size;
		km->km_data_va = (uintptr_t)kmod.data;
		km->km_data_size = kmod.data_size;
		km->km_bss_va = (uintptr_t)kmod.bss;
		km->km_bss_size = kmod.bss_size;

		if (kt->k_ctfvalid) {
			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
			km->km_ctf_size = kmod.ctfsize;
		}

		/*
		 * Add the module to the end of the list of modules in load-
		 * dependency order.  This is needed to load the corresponding
		 * debugger modules in the same order for layering purposes.
		 */
		mdb_list_append(&kt->k_modlist, km);

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_printf(mdb.m_out, " %s", name);
			mdb_iob_flush(mdb.m_out);
			kt_load_module(kt, t, km);
		}

	} while ((addr = (uintptr_t)ctl.mod_next) != head);
}