Example #1
db_addr_t
BP_REGS(db_regs_t *regs)
{
    struct soft_segment_descriptor softseg;

    sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(regs->tf_ds & 0xffff)].sd, &softseg);
    return(regs->tf_ebp + softseg.ssd_base);
}
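
Example #1 turns a saved frame-pointer value into a linear address the debugger can use: sdtossd() unpacks the raw GDT entry named by the saved selector into a soft_segment_descriptor, and the segment base from that descriptor is added to tf_ebp. The stand-alone sketch below shows what that unpacking amounts to under the standard i386 descriptor layout; the structure and helper names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/*
 * User-space sketch of the descriptor unpacking done by sdtossd(): the
 * base and limit of an i386 GDT entry are scattered across the 8 raw
 * bytes and are reassembled here into flat fields.  Names are
 * illustrative only.
 */
struct flat_seg {
	uint32_t base;		/* linear base address */
	uint32_t limit;		/* 20-bit limit, units chosen by gran */
	unsigned type:5;	/* segment type (includes the S bit) */
	unsigned dpl:2;		/* descriptor privilege level */
	unsigned present:1;	/* segment present */
	unsigned def32:1;	/* default operand size (D/B bit) */
	unsigned gran:1;	/* granularity: 0 = bytes, 1 = 4KB pages */
};

static void
decode_descriptor(const uint8_t d[8], struct flat_seg *s)
{
	s->limit   = d[0] | (d[1] << 8) | ((d[6] & 0x0f) << 16);
	s->base    = d[2] | (d[3] << 8) | (d[4] << 16) |
		     ((uint32_t)d[7] << 24);
	s->type    = d[5] & 0x1f;
	s->dpl     = (d[5] >> 5) & 3;
	s->present = (d[5] >> 7) & 1;
	s->def32   = (d[6] >> 6) & 1;
	s->gran    = (d[6] >> 7) & 1;
}

int
main(void)
{
	/* A flat 4GB ring-0 code segment, as typically found in a GDT. */
	uint8_t raw[8] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00 };
	struct flat_seg s;

	decode_descriptor(raw, &s);
	printf("base=0x%x limit=0x%x type=0x%x dpl=%u gran=%u\n",
	       s.base, s.limit, s.type, s.dpl, s.gran);
	return 0;
}

With a flat (zero-base) segment the addition in BP_REGS() is a no-op, but going through the descriptor keeps the translation correct for non-flat segments as well.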
Example #2
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	if (lapic_usable)
		kprintf("lapic id = %u\n", LAPIC_READID);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
	       frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 *	 change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer	        = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer	        = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
	       softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
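
The "fault code" line above decodes the error code the CPU pushes for a page fault; the bit assignments are architectural, and the kernel's PGEX_* constants carry those values: bit 0 distinguishes a protection violation from a not-present page, bit 1 write from read, bit 2 user from supervisor, and bit 4 flags an instruction fetch when no-execute is in effect. Below is a minimal stand-alone version of the same decoding, with the masks written out explicitly rather than taken from the kernel headers.

#include <stdio.h>

/*
 * x86 page-fault error code bits (architecturally defined; the kernel's
 * PGEX_* constants have these values).
 */
#define PF_P	0x01	/* 0: page not present, 1: protection violation */
#define PF_W	0x02	/* 0: read, 1: write */
#define PF_U	0x04	/* 0: supervisor mode, 1: user mode */
#define PF_I	0x10	/* 1: instruction fetch (NX/XD enabled) */

static void
print_fault_code(unsigned code)
{
	printf("fault code = %s %s %s, %s\n",
	       code & PF_U ? "user" : "supervisor",
	       code & PF_W ? "write" : "read",
	       code & PF_I ? "instruction" : "data",
	       code & PF_P ? "protection violation" : "page not present");
}

int
main(void)
{
	print_fault_code(0x06);	/* user write to a non-present page */
	print_fault_code(0x11);	/* supervisor instruction fetch, protection */
	return 0;
}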
Example #3
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging: try to catch kernel faults on the user address
		 * space when not inside an onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
				      td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0,
					       "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if JG
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, so no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
#ifdef DDB
		if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) {
#else
		if (bootverbose) {
#endif
			kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
			    "pid=%d cpu=%d p_comm=%s\n",
			    ftype, fault_flags,
			    (void *)frame->tf_addr,
			    (void *)frame->tf_rip,
			    p->p_pid, mycpu->gd_cpuid, p->p_comm);
		}
#ifdef DDB
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
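
The nogo path above shows the kernel's fault-recovery hook: if a kernel-mode fault cannot be satisfied but the current pcb has pcb_onfault set (as copyin()/copyout()-style routines arrange), trap_pfault() rewrites tf_rip to that recovery address instead of calling trap_fatal(). The same arm-a-recovery-point pattern can be sketched in user space with sigsetjmp/siglongjmp and a SIGSEGV handler; this is only an analogy to the kernel mechanism, with made-up names, not DragonFly code.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

/* User-space analogy to pcb_onfault: a jump buffer acts as the
 * "recovery address" that the fault handler resumes at. */
static sigjmp_buf onfault;
static volatile sig_atomic_t armed;

static void
segv_handler(int sig)
{
	if (armed) {
		armed = 0;
		siglongjmp(onfault, 1);	/* resume at the recovery point */
	}
	/* No recovery point armed: re-raise and die, like trap_fatal(). */
	signal(sig, SIG_DFL);
	raise(sig);
}

/* Probe an address for readability, recovering from the fault if any. */
static int
try_read(const volatile char *p)
{
	int ok;

	if (sigsetjmp(onfault, 1) == 0) {
		armed = 1;
		(void)*p;		/* may fault */
		ok = 1;
	} else {
		ok = 0;			/* arrived here via siglongjmp */
	}
	armed = 0;
	return ok;
}

int
main(void)
{
	char c = 'x';

	signal(SIGSEGV, segv_handler);
	printf("valid address:   %s\n", try_read(&c) ? "ok" : "fault");
	printf("invalid address: %s\n", try_read((char *)0x10) ? "ok" : "fault");
	return 0;
}

The sigsetjmp(..., 1) matters: it saves the signal mask so SIGSEGV is unblocked again after the longjmp, much as the kernel's recovery path has to leave the trap handler cleanly.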

static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= 0x%lx\n", eva);
		kprintf("fault code		= %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%lx:0x%lx\n",
	       frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer	        = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer	        = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment		= base 0x%lx, limit 0x%lx, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
	       softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu\n",
		    (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
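
Both trap_fatal() variants above work on raw segment selectors: IDXSEL() extracts the descriptor-table index from tf_cs before indexing the GDT, ISPL() reads the privilege level from the low two bits to choose between "user" and "kernel", and GSEL(GDATA_SEL, SEL_KPL) builds a kernel selector for the stack-segment printout. The selector layout is architectural (RPL in bits 1:0, table indicator in bit 2, index in bits 15:3); the sketch below mirrors the kernel macros, but the GDATA_SEL slot number is only an example value.

#include <stdio.h>

/*
 * x86 segment selector layout (architectural):
 *   bits 1:0  RPL (requested privilege level)
 *   bit  2    TI  (0 = GDT, 1 = LDT)
 *   bits 15:3 descriptor table index
 */
#define SEL_KPL		0		/* kernel privilege level */
#define SEL_UPL		3		/* user privilege level */
#define ISPL(sel)	((sel) & 3)		/* privilege level of a selector */
#define IDXSEL(sel)	(((sel) >> 3) & 0x1fff)	/* descriptor index */
#define GSEL(idx, rpl)	(((idx) << 3) | (rpl))	/* build a GDT selector */

#define GDATA_SEL	2		/* example GDT slot, not the kernel's */

int
main(void)
{
	unsigned user_cs = GSEL(5, SEL_UPL);	/* hypothetical user %cs */
	unsigned kern_ss = GSEL(GDATA_SEL, SEL_KPL);

	printf("user_cs=0x%04x idx=%u rpl=%u (%s)\n", user_cs,
	       IDXSEL(user_cs), ISPL(user_cs),
	       ISPL(user_cs) == SEL_UPL ? "user" : "kernel");
	printf("kern_ss=0x%04x idx=%u rpl=%u\n", kern_ss,
	       IDXSEL(kern_ss), ISPL(kern_ss));
	return 0;
}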
Example #4
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
        		frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address	= %p\n", (void *)eva);
		kprintf("fault code		= %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer	= 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer	        = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer	        = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment		= base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf("			= DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	kprintf("processor eflags	= ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process		= ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread          = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
/**
 *  XXX FIXME:
 *	we probably SHOULD have stopped the other CPUs before now!
 *	another CPU COULD have been touching cpl at this moment...
 */
	kprintf(" <- SMP: XXX");
	kprintf("\n");

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number		= %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
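
The "processor eflags" block decodes individual bits of the saved flags word; the positions are architectural and match the kernel's PSL_* masks: TF (trace) is bit 8, IF (interrupt enable) bit 9, IOPL occupies bits 13:12 (hence the shift by 12), NT (nested task) is bit 14, RF (resume) bit 16 and VM (virtual-8086 mode) bit 17. A stand-alone sketch of the same decoding with the masks spelled out:

#include <stdio.h>

/* Architectural EFLAGS/RFLAGS bit masks (the kernel's PSL_* values). */
#define FL_TF	0x00000100	/* trace trap */
#define FL_IF	0x00000200	/* interrupts enabled */
#define FL_IOPL	0x00003000	/* I/O privilege level, bits 13:12 */
#define FL_NT	0x00004000	/* nested task */
#define FL_RF	0x00010000	/* resume flag */
#define FL_VM	0x00020000	/* virtual-8086 mode */

static void
print_flags(unsigned long rflags)
{
	printf("processor eflags = ");
	if (rflags & FL_TF)
		printf("trace trap, ");
	if (rflags & FL_IF)
		printf("interrupt enabled, ");
	if (rflags & FL_NT)
		printf("nested task, ");
	if (rflags & FL_RF)
		printf("resume, ");
	if (rflags & FL_VM)
		printf("vm86, ");
	printf("IOPL = %lu\n", (rflags & FL_IOPL) >> 12);
}

int
main(void)
{
	print_flags(0x00000202);	/* typical value: IF set, IOPL 0 */
	print_flags(0x00013202);	/* IF and RF set, IOPL 3 */
	return 0;
}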