/*
 * Enter the kernel debugger for traps taken in supervisor mode.
 * Returns non-zero when the debugger handled the trap and 0 for
 * user-mode traps, which are left to the normal trap path.
 */
int
db_trap_glue(struct trapframe *tf)
{

	/* User-mode (problem state) traps are not ours to handle. */
	if (tf->srr1 & PSL_PR)
		return (0);
	return (kdb_trap(tf->exc, 0, tf));
}
/*
 * Received keyboard interrupt sequence.
 */
void
kdb_kbd_trap(struct trapframe *tf)
{

	/* Only enter the debugger if it is idle and enabled at boot. */
	if (db_active != 0 || (boothowto & RB_KDB) == 0)
		return;
	printf("\n\nkernel: keyboard interrupt\n");
	kdb_trap(-1, tf);
}
/*
 * Received keyboard interrupt sequence.
 */
void
kdb_kbd_trap(struct trapframe64 *tf)
{
	/*
	 * Only drop into the debugger if it is not already active.
	 * The usual (boothowto & RB_KDB) gate is deliberately commented
	 * out, so the break sequence works regardless of boot flags.
	 */
	if (db_active == 0 /* && (boothowto & RB_KDB) */) {
		printf("\n\nkernel: keyboard interrupt tf=%p\n", tf);
		kdb_trap(-1, tf);
	}
}
/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	/* Give DTrace first crack at kernel-mode faults raised in a probe. */
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = fsr & FSR_WNR ? "write" : "read";
	/* No coming back: quiesce the CPU before dumping state. */
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR carries no valid address for an imprecise abort. */
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n", mode,
		    tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Full register dump to aid post-mortem debugging. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0,
	    tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4,
	    tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8,
	    tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
/*
 * Received keyboard interrupt sequence.
 */
void
kdb_kintr(db_regs_t *regs)
{

	/* Ignore the sequence while the debugger is already running. */
	if (db_active != 0)
		return;
	/* Honour the break sequence only when KDB was enabled at boot. */
	if ((boothowto & RB_KDB) == 0)
		return;

	printf("\n\nkernel: keyboard interrupt\n");
	kdb_trap(-1, regs);
}
/*
 * dab_fatal() handles the following data aborts:
 *
 *  FAULT_WRTBUF_0 - Vector Exception
 *  FAULT_WRTBUF_1 - Terminal Exception
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
    struct ksig *ksig)
{
	const char *mode;

#ifdef KDTRACE_HOOKS
	/* Let DTrace claim kernel-mode faults taken inside a probe. */
	if (!TRAP_USERMODE(tf)) {
		if (dtrace_trap_func != NULL &&
		    (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK))
			return (0);
	}
#endif

	mode = TRAP_USERMODE(tf) ? "user" : "kernel";

	/* No recovery expected: quiesce the CPU before dumping state. */
	disable_interrupts(PSR_I|PSR_F);
	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s'\n", mode,
		    data_aborts[fsr & FAULT_TYPE_MASK].desc);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR is not meaningful for imprecise aborts. */
		if ((fsr & FAULT_IMPRECISE) == 0)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n", mode,
		    tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Full register dump for post-mortem debugging. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0,
	    tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4,
	    tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8,
	    tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (TRAP_USERMODE(tf))
		printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	/* A successful debugger entry lets the caller resume. */
	if (debugger_on_panic || kdb_active)
		if (kdb_trap(fsr, 0, tf))
			return (0);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
/*
 * Report an unrecoverable trap: dump the frame, optionally hand
 * control to KDB, and panic if the debugger does not resume us.
 */
static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if (debugger_on_panic || kdb_active) {
		if (kdb_trap(frame->exc, 0, frame))
			return;
	}
#endif
	panic("%s trap", trapname(frame->exc));
}
int ddb_trap_glue(struct trapframe *frame) { #ifdef PPC_IBM4XX if ((frame->srr1 & PSL_PR) == 0) return kdb_trap(frame->exc, frame); #else /* PPC_OEA */ if ((frame->srr1 & PSL_PR) == 0 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC || (frame->exc == EXC_PGM && (frame->srr1 & 0x20000)) || frame->exc == EXC_BPT || frame->exc == EXC_DSI)) { int type = frame->exc; if (type == EXC_PGM && (frame->srr1 & 0x20000)) { type = T_BREAKPOINT; } return kdb_trap(type, frame); } #endif return 0; }
/*
 * Handle a prefetch abort (instruction-fetch fault): fatal in
 * kernel mode, otherwise fault the missing text page in on behalf
 * of the current lwp.
 */
void
prefetch_abort_handler(struct trapframe *tf)
{
	vaddr_t pc;
	struct proc *p;
	struct lwp *l;

	/* Enable interrupts if they were enabled before the trap. */
	if ((tf->tf_r15 & R15_IRQ_DISABLE) == 0)
		int_on();

	/*
	 * XXX Not done yet:
	 * Check if the page being requested is already present.  If
	 * so, call the undefined instruction handler instead (ARM3 ds
	 * p15).
	 */

	uvmexp.traps++;
	/* Fall back to lwp0 when no lwp is current (early boot). */
	l = curlwp;
	if (l == NULL)
		l = &lwp0;
	p = l->l_proc;

	if ((tf->tf_r15 & R15_MODE) == R15_MODE_USR) {
		/* Record the user trapframe and refresh cached creds. */
		l->l_addr->u_pcb.pcb_tf = tf;
		LWP_CACHE_CREDS(l, p);
	}

	if ((tf->tf_r15 & R15_MODE) != R15_MODE_USR) {
#ifdef DDB
		db_printf("Prefetch abort in kernel mode\n");
		kdb_trap(T_FAULT, tf);
#else
#ifdef DEBUG
		printf("Prefetch abort:\n");
		printregs(tf);
#endif
		panic("prefetch abort in kernel mode");
#endif
	}

	/* User-mode prefetch abort */
	pc = tf->tf_r15 & R15_PC;

	do_fault(tf, l, &p->p_vmspace->vm_map, pc, VM_PROT_EXECUTE);

	userret(l);
}
/*
 * abort_debug() handles the following abort:
 *
 *  FAULT_DEBUG - Debug Event
 *
 * User-mode debug events become SIGTRAP; kernel-mode events drop
 * into KDB when it is compiled in.
 */
static __inline void
abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, u_int usermode,
    u_int far)
{

	if (!usermode) {
#ifdef KDB
		kdb_trap(T_BREAKPOINT, 0, tf);
#else
		printf("No debugger in kernel.\n");
#endif
		return;
	}

	struct thread *td = curthread;

	call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
	userret(td, tf);
}
/*
 * This is called by locore for supervisor-mode trace and
 * breakpoint traps.  This is separate from trap() above
 * so that breakpoints in trap() will work.
 *
 * If we have both DDB and KGDB, let KGDB see it first,
 * because KGDB will just return 0 if not connected.
 */
void
trap_kdebug(int type, struct trapframe tf)
{

#ifdef KGDB
	/* KGDB gets the first look; it declines when not connected. */
	if (kgdb_trap(type, &tf) != 0)
		return;
#endif
#ifdef DDB
	/* Next, offer the trap to DDB. */
	if (kdb_trap(type, &tf) != 0)
		return;
#endif
	/* Nobody claimed it: drop into the PROM temporarily... */
	(void)_nodb_trap(type, &tf);
}
/*
 * dab_fatal() handles the following data aborts:
 *
 *  FAULT_WRTBUF_0 - Vector Exception
 *  FAULT_WRTBUF_1 - Terminal Exception
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l,
    ksiginfo_t *ksi)
{
	const char * const mode = TRAP_USERMODE(tf) ? "user" : "kernel";

	if (l != NULL) {
		printf("Fatal %s mode data abort: '%s'\n", mode,
		    data_aborts[fsr & FAULT_TYPE_MASK].desc);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR only holds a meaningful address for precise aborts. */
		if ((fsr & FAULT_IMPRECISE) == 0)
			printf("%08x, ", far);
		else
			printf("Invalid, ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n", mode,
		    tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Full register dump for post-mortem debugging. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", tf->tf_r0,
	    tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", tf->tf_r4,
	    tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", tf->tf_r8,
	    tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (TRAP_USERMODE(tf))
		printf("usp=%08x, ulr=%08x", tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x", tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#if defined(DDB) || defined(KGDB)
	kdb_trap(T_FAULT, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
/*
 * Handle a prefetch abort: fatal in kernel mode (drops into DDB or
 * panics), otherwise fault the instruction page in for the current
 * lwp.
 */
void
prefetch_abort_handler(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;

	/* Enable interrupts if they were enabled before the trap. */
	if ((tf->tf_r15 & R15_IRQ_DISABLE) == 0)
		int_on();

	/*
	 * XXX Not done yet:
	 * Check if the page being requested is already present.  If
	 * so, call the undefined instruction handler instead (ARM3 ds
	 * p15).
	 */

	curcpu()->ci_data.cpu_ntrap++;

	if (TRAP_USERMODE(tf)) {
		lwp_settrapframe(l, tf);
		LWP_CACHE_CREDS(l, p);
	} else {
#ifdef DDB
		/*
		 * NOTE(review): with DDB, execution falls through to the
		 * user fault path below after kdb_trap() returns, even for
		 * a kernel-mode abort — confirm this is intended.
		 */
		db_printf("Prefetch abort in kernel mode\n");
		kdb_trap(T_FAULT, tf);
#else
#ifdef DEBUG
		printf("Prefetch abort:\n");
		printregs(tf);
#endif
		panic("prefetch abort in kernel mode");
#endif
	}

	/* User-mode prefetch abort */
	vaddr_t pc = tf->tf_r15 & R15_PC;

	do_fault(tf, l, &p->p_vmspace->vm_map, pc, VM_PROT_EXECUTE);

	userret(l);
}
/*
 * Called by locore.s for an unexpected interrupt.
 * XXX - Almost identical to trap_kdebug...
 */
void
straytrap(struct trapframe tf)
{
	const int type = -1;

	printf("unexpected trap; vector=0x%x at pc=0x%x\n",
	    tf.tf_vector, tf.tf_pc);

#ifdef KGDB
	/* Let KGDB handle it (if connected) */
	if (kgdb_trap(type, &tf) != 0)
		return;
#endif
#ifdef DDB
	/* Let DDB handle it. */
	if (kdb_trap(type, &tf) != 0)
		return;
#endif
	/* Drop into the PROM temporarily... */
	(void)_nodb_trap(type, &tf);
}
void trap(struct trapframe *frame) { struct thread *td; struct proc *p; int sig, type, user; ksiginfo_t ksi; #ifdef KDB if (kdb_active) { kdb_reenter(); return; } #endif PCPU_INC(cnt.v_trap); td = curthread; p = td->td_proc; type = frame->exc; sig = 0; user = (frame->srr1 & PSL_PR) ? 1 : 0; CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm, trapname(type), user ? "user" : "kernel"); if (user) { td->td_frame = frame; if (td->td_ucred != p->p_ucred) cred_update_thread(td); /* User Mode Traps */ switch (type) { case EXC_DSI: case EXC_ISI: sig = trap_pfault(frame, 1); break; case EXC_SC: syscall(frame); break; case EXC_ALI: if (fix_unaligned(td, frame) != 0) sig = SIGBUS; else frame->srr0 += 4; break; case EXC_DEBUG: /* Single stepping */ mtspr(SPR_DBSR, mfspr(SPR_DBSR)); frame->srr1 &= ~PSL_DE; frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM || DBCR0_IC); sig = SIGTRAP; break; case EXC_PGM: /* Program exception */ #ifdef FPU_EMU sig = fpu_emulate(frame, (struct fpreg *)&td->td_pcb->pcb_fpu); #else /* XXX SIGILL for non-trap instructions. */ sig = SIGTRAP; #endif break; default: trap_fatal(frame); } } else { /* Kernel Mode Traps */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); switch (type) { case EXC_DEBUG: mtspr(SPR_DBSR, mfspr(SPR_DBSR)); kdb_trap(frame->exc, 0, frame); return; case EXC_DSI: if (trap_pfault(frame, 0) == 0) return; break; case EXC_MCHK: if (handle_onfault(frame)) return; break; #ifdef KDB case EXC_PGM: if (frame->cpu.booke.esr & ESR_PTR) kdb_trap(EXC_PGM, 0, frame); return; #endif default: break; } trap_fatal(frame); } if (sig != 0) { if (p->p_sysent->sv_transtrap != NULL) sig = (p->p_sysent->sv_transtrap)(sig, type); ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = type; /* XXX, not POSIX */ /* ksi.ksi_addr = ? */ ksi.ksi_trapno = type; trapsignal(td, &ksi); } userret(td, frame); }
/*
 * trap(frame): exception, fault, and trap interface to the kernel.
 * Dispatches on the trap number saved in the frame: kernel-mode
 * faults are recovered where possible (copyin/copyout onfault,
 * page faults) or are fatal; user-mode traps become signals.
 */
/*ARGSUSED*/
void
trap(struct trapframe *frame)
{
	struct proc *p = curproc;
	int type = (int)frame->tf_trapno;
	struct pcb *pcb;
	extern char doreti_iret[], resume_iret[];
	caddr_t onfault;
	int error;
	uint64_t cr2;
	union sigval sv;

	uvmexp.traps++;

	/* pcb may be NULL very early in boot. */
	pcb = (p != NULL && p->p_addr != NULL) ? &p->p_addr->u_pcb : NULL;

#ifdef DEBUG
	if (trapdebug) {
		printf("trap %d code %lx rip %lx cs %lx rflags %lx cr2 %lx "
		       "cpl %x\n",
		    type, frame->tf_err, frame->tf_rip, frame->tf_cs,
		    frame->tf_rflags, rcr2(), curcpu()->ci_ilevel);
		printf("curproc %p\n", curproc);
		if (curproc)
			printf("pid %d\n", p->p_pid);
	}
#endif

	if (!KERNELMODE(frame->tf_cs, frame->tf_rflags)) {
		/* Trap came from user mode: tag the type with T_USER. */
		type |= T_USER;
		p->p_md.md_regs = frame;
	}

	switch (type) {

	default:
	we_re_toast:
#ifdef KGDB
		if (kgdb_trap(type, frame))
			return;
		else {
			/*
			 * If this is a breakpoint, don't panic
			 * if we're not connected.
			 */
			if (type == T_BPTFLT) {
				printf("kgdb: ignored %s\n", trap_type[type]);
				return;
			}
		}
#endif
#ifdef DDB
		if (kdb_trap(type, 0, frame))
			return;
#endif
		if (frame->tf_trapno < trap_types)
			printf("fatal %s", trap_type[frame->tf_trapno]);
		else
			printf("unknown trap %ld", (u_long)frame->tf_trapno);
		printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor");
		printf("trap type %d code %lx rip %lx cs %lx rflags %lx cr2 "
		       " %lx cpl %x rsp %lx\n",
		    type, frame->tf_err, (u_long)frame->tf_rip, frame->tf_cs,
		    frame->tf_rflags, rcr2(), curcpu()->ci_ilevel,
		    frame->tf_rsp);
		panic("trap type %d, code=%lx, pc=%lx",
		    type, frame->tf_err, frame->tf_rip);
		/*NOTREACHED*/

	case T_PROTFLT:
	case T_SEGNPFLT:
	case T_ALIGNFLT:
	case T_TSSFLT:
		if (p == NULL)
			goto we_re_toast;
		/* Check for copyin/copyout fault. */
		if (pcb->pcb_onfault != 0) {
			error = EFAULT;
copyfault:
			/* Resume at the registered fault handler. */
			frame->tf_rip = (u_int64_t)pcb->pcb_onfault;
			frame->tf_rax = error;
			return;
		}

		/*
		 * Check for failure during return to user mode.
		 * We do this by looking at the address of the
		 * instruction that faulted.
		 */
		if (frame->tf_rip == (u_int64_t)doreti_iret) {
			frame->tf_rip = (u_int64_t)resume_iret;
			return;
		}
		goto we_re_toast;

	case T_PROTFLT|T_USER:		/* protection fault */
	case T_TSSFLT|T_USER:
	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_NMI|T_USER:
#ifdef TRAP_SIGDEBUG
		printf("pid %d (%s): BUS at rip %lx addr %lx\n",
		    p->p_pid, p->p_comm, frame->tf_rip, rcr2());
		frame_dump(frame);
#endif
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGBUS, type & ~T_USER, BUS_OBJERR, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_ALIGNFLT|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGBUS, type & ~T_USER, BUS_ADRALN, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
#ifdef TRAP_SIGDEBUG
		printf("pid %d (%s): ILL at rip %lx addr %lx\n",
		    p->p_pid, p->p_comm, frame->tf_rip, rcr2());
		frame_dump(frame);
#endif
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_ASTFLT|T_USER:		/* Allow process switch */
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_LOCK();
			ADDUPROF(p);
			KERNEL_UNLOCK();
		}
		/* Allow a forced task switch. */
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		goto out;

	case T_BOUND|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_FLTSUB, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_OFLOW|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTOVF, sv);
		KERNEL_UNLOCK();
		goto out;
	case T_DIVIDE|T_USER:
		sv.sival_ptr = (void *)frame->tf_rip;
		KERNEL_LOCK();
		trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTDIV, sv);
		KERNEL_UNLOCK();
		goto out;

	case T_ARITHTRAP|T_USER:
	case T_XMM|T_USER:
		fputrap(frame);
		goto out;

	case T_PAGEFLT:			/* allow page faults in kernel mode */
		if (p == NULL)
			goto we_re_toast;
		cr2 = rcr2();
		KERNEL_LOCK();
		goto faultcommon;

	case T_PAGEFLT|T_USER: {	/* page fault */
		vaddr_t va, fa;
		struct vmspace *vm;
		struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

		cr2 = rcr2();
		KERNEL_LOCK();
faultcommon:
		vm = p->p_vmspace;
		if (vm == NULL)
			goto we_re_toast;
		fa = cr2;
		va = trunc_page((vaddr_t)cr2);
		/*
		 * It is only a kernel address space fault iff:
		 *	1. (type & T_USER) == 0  and
		 *	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_PAGEFLT && va >= VM_MIN_KERNEL_ADDRESS)
			map = kernel_map;
		else
			map = &vm->vm_map;
		/* Derive the access type from the hardware error code. */
		if (frame->tf_err & PGEX_W)
			ftype = VM_PROT_WRITE;
		else if (frame->tf_err & PGEX_I)
			ftype = VM_PROT_EXECUTE;
		else
			ftype = VM_PROT_READ;

#ifdef DIAGNOSTIC
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %lx\n", va);
			goto we_re_toast;
		}
#endif

		/* Fault the original page in. */
		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		error = uvm_fault(map, va, frame->tf_err & PGEX_P?
		    VM_FAULT_PROTECT : VM_FAULT_INVALID, ftype);
		pcb->pcb_onfault = onfault;
		if (error == 0) {
			if (map != kernel_map)
				uvm_grow(p, va);

			if (type == T_PAGEFLT) {
				KERNEL_UNLOCK();
				return;
			}
			KERNEL_UNLOCK();
			goto out;
		}
		if (error == EACCES) {
			error = EFAULT;
		}

		if (type == T_PAGEFLT) {
			/* Kernel fault: fall back to onfault or die. */
			if (pcb->pcb_onfault != 0) {
				KERNEL_UNLOCK();
				goto copyfault;
			}
			printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n",
			    map, va, ftype, error);
			goto we_re_toast;
		}
		if (error == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    p->p_cred && p->p_ucred ?
			    (int)p->p_ucred->cr_uid : -1);
			sv.sival_ptr = (void *)fa;
			trapsignal(p, SIGKILL, T_PAGEFLT, SEGV_MAPERR, sv);
		} else {
#ifdef TRAP_SIGDEBUG
			printf("pid %d (%s): SEGV at rip %lx addr %lx\n",
			    p->p_pid, p->p_comm, frame->tf_rip, va);
			frame_dump(frame);
#endif
			sv.sival_ptr = (void *)fa;
			trapsignal(p, SIGSEGV, T_PAGEFLT, SEGV_MAPERR, sv);
		}
		KERNEL_UNLOCK();
		break;
	}

	case T_TRCTRAP:
		goto we_re_toast;

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
#ifdef MATH_EMULATE
	trace:
#endif
		KERNEL_LOCK();
		trapsignal(p, SIGTRAP, type &~ T_USER, TRAP_BRKPT, sv);
		KERNEL_UNLOCK();
		break;

#if NISA > 0
	case T_NMI:
#if defined(KGDB) || defined(DDB)
		/* NMI can be hooked up to a pushbutton for debugging */
		printf ("NMI ... going to debugger\n");
#ifdef KGDB
		if (kgdb_trap(type, frame))
			return;
#endif
#ifdef DDB
		if (kdb_trap(type, 0, frame))
			return;
#endif
#endif /* KGDB || DDB */
		/* machine/parity/power fail/"kitchen sink" faults */
		if (x86_nmi() != 0)
			goto we_re_toast;
		else
			return;
#endif /* NISA > 0 */
	}

	if ((type & T_USER) == 0)
		return;
out:
	userret(p);
}
/*
 * Print a detailed report of an unrecoverable trap (registers,
 * code-segment descriptor, page-fault decoding), give DDB a chance
 * to take over, and finally panic with the trap's message.
 */
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	/* Decode the faulting code-segment descriptor from the GDT. */
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	if (lapic_usable)
		kprintf("lapic id = %u\n", LAPIC_READID);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
		    code & PGEX_U ? "user" : "supervisor",
		    code & PGEX_W ? "write" : "read",
		    code & PGEX_I ? "instruction" : "data",
		    code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
	    frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * NOTE: in 64-bit mode traps push rsp/ss even if no ring
		 * change occurs.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
	    softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	/* A successful debugger entry lets the system continue. */
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Undefined-instruction exception handler: routes coprocessor/VFP
 * instructions to registered handlers, implements ptrace and kernel
 * breakpoints, and delivers SIGILL for genuinely bad instructions.
 */
void
undefinedinstruction(struct trapframe *frame)
{
	struct thread *td;
	u_int fault_pc;
	int fault_instruction;
	int fault_code;
	int coprocessor;
	struct undefined_handler *uh;
	int error;
#ifdef VERBOSE_ARM32
	int s;
#endif
	ksiginfo_t ksi;

	/* Enable interrupts if they were enabled before the exception. */
	/*
	 * NOTE(review): __predict_true() wraps only the bit-test, so this
	 * reads "(expect(x)) == 0" — the branch hint is applied to the
	 * wrong value, although the comparison itself is still correct.
	 */
	if (__predict_true(frame->tf_spsr & PSR_I) == 0)
		enable_interrupts(PSR_I);
	if (__predict_true(frame->tf_spsr & PSR_F) == 0)
		enable_interrupts(PSR_F);

	VM_CNT_INC(v_trap);

	fault_pc = frame->tf_pc;

	/*
	 * Get the current thread/proc structure or thread0/proc0 if there is
	 * none.
	 */
	td = curthread == NULL ? &thread0 : curthread;

	coprocessor = 0;
	if ((frame->tf_spsr & PSR_T) == 0) {
		/* ARM (32-bit) instruction set. */
		/*
		 * Make sure the program counter is correctly aligned so we
		 * don't take an alignment fault trying to read the opcode.
		 */
		if (__predict_false((fault_pc & 3) != 0)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGILL;
			ksi.ksi_code = ILL_ILLADR;
			ksi.ksi_addr = (u_int32_t *)(intptr_t) fault_pc;
			trapsignal(td, &ksi);
			userret(td, frame);
			return;
		}

		/*
		 * Should use fuword() here .. but in the interests of
		 * squeezing every bit of speed we will just use ReadWord().
		 * We know the instruction can be read as was just executed
		 * so this will never fail unless the kernel is screwed up
		 * in which case it does not really matter does it ?
		 */
		fault_instruction = *(u_int32_t *)fault_pc;

		/* Check for coprocessor instruction */

		/*
		 * According to the datasheets you only need to look at bit
		 * 27 of the instruction to tell the difference between and
		 * undefined instruction and a coprocessor instruction
		 * following an undefined instruction trap.
		 */
		if (ARM_COPROC_INSN(fault_instruction))
			coprocessor = ARM_COPROC(fault_instruction);
		else {		/* check for special instructions */
			if (ARM_VFP_INSN(fault_instruction))
				coprocessor = COPROC_VFP; /* vfp / simd */
		}
	} else {
#if __ARM_ARCH >= 7
		/* Thumb mode: reassemble a possibly 32-bit-wide opcode. */
		fault_instruction = *(uint16_t *)fault_pc;
		if (THUMB_32BIT_INSN(fault_instruction)) {
			fault_instruction <<= 16;
			fault_instruction |= *(uint16_t *)(fault_pc + 2);

			/*
			 * Is it a Coprocessor, Advanced SIMD, or
			 * Floating-point instruction.
			 */
			if (THUMB_COPROC_INSN(fault_instruction)) {
				if (THUMB_COPROC_UNDEFINED(fault_instruction)) {
					/* undefined insn */
				} else if (THUMB_VFP_INSN(fault_instruction))
					coprocessor = COPROC_VFP;
				else
					coprocessor =
					    THUMB_COPROC(fault_instruction);
			}
		}
#else
		/*
		 * No support for Thumb-2 on this cpu
		 */
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLADR;
		ksi.ksi_addr = (u_int32_t *)(intptr_t) fault_pc;
		trapsignal(td, &ksi);
		userret(td, frame);
		return;
#endif
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) {
		/*
		 * Modify the fault_code to reflect the USR/SVC state at
		 * time of fault.
		 */
		fault_code = FAULT_USER;
		td->td_frame = frame;
	} else
		fault_code = 0;

	/* OK this is where we do something about the instruction. */
	LIST_FOREACH(uh, &undefined_handlers[coprocessor], uh_link)
		if (uh->uh_handler(fault_pc, fault_instruction, frame,
		    fault_code) == 0)
			break;

	if (fault_code & FAULT_USER) {
		/* TODO: No support for ptrace from Thumb-2 */
		if ((frame->tf_spsr & PSR_T) == 0 &&
		    fault_instruction == PTRACE_BREAKPOINT) {
			/*
			 * Clear the ptrace single-step breakpoint and
			 * deliver SIGILL only if that fails.
			 */
			PROC_LOCK(td->td_proc);
			_PHOLD(td->td_proc);
			error = ptrace_clear_single_step(td);
			_PRELE(td->td_proc);
			PROC_UNLOCK(td->td_proc);
			if (error != 0) {
				ksiginfo_init_trap(&ksi);
				ksi.ksi_signo = SIGILL;
				ksi.ksi_code = ILL_ILLOPC;
				ksi.ksi_addr = (u_int32_t *)(intptr_t) fault_pc;
				trapsignal(td, &ksi);
			}
			return;
		}
	}

	if (uh == NULL && (fault_code & FAULT_USER)) {
		/* Fault has not been handled */
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
		ksi.ksi_addr = (u_int32_t *)(intptr_t) fault_pc;
		trapsignal(td, &ksi);
	}

	if ((fault_code & FAULT_USER) == 0) {
		if (fault_instruction == KERNEL_BREAKPOINT) {
#ifdef KDB
			kdb_trap(T_BREAKPOINT, 0, frame);
#else
			printf("No debugger in kernel.\n");
#endif
			return;
		} else
			panic("Undefined instruction in kernel.\n");
	}

	userret(td, frame);
}
void trap(struct trapframe *frame) { struct thread *td = curthread; struct proc *p = td->td_proc; int i = 0, ucode = 0, code; u_int type; register_t addr = 0; vm_offset_t eva; ksiginfo_t ksi; #ifdef POWERFAIL_NMI static int lastalert = 0; #endif PCPU_INC(cnt.v_trap); type = frame->tf_trapno; #ifdef SMP /* Handler for NMI IPIs used for stopping CPUs. */ if (type == T_NMI) { if (ipi_nmi_handler() == 0) goto out; } #endif /* SMP */ #ifdef KDB if (kdb_active) { kdb_reenter(); goto out; } #endif if (type == T_RESERVED) { trap_fatal(frame, 0); goto out; } #ifdef HWPMC_HOOKS /* * CPU PMCs interrupt using an NMI so we check for that first. * If the HWPMC module is active, 'pmc_hook' will point to * the function to be called. A return value of '1' from the * hook means that the NMI was handled by it and that we can * return immediately. */ if (type == T_NMI && pmc_intr && (*pmc_intr)(PCPU_GET(cpuid), frame)) goto out; #endif if (type == T_MCHK) { if (!mca_intr()) trap_fatal(frame, 0); goto out; } #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in it's per-cpu flags to indicate that it doesn't * want to fault. On returning from the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. 
*/ if ((type == T_PROTFLT || type == T_PAGEFLT) && dtrace_trap_func != NULL) if ((*dtrace_trap_func)(frame, type)) goto out; if (type == T_DTRACE_PROBE || type == T_DTRACE_RET || type == T_BPTFLT) { struct reg regs; fill_frame_regs(frame, ®s); if (type == T_DTRACE_PROBE && dtrace_fasttrap_probe_ptr != NULL && dtrace_fasttrap_probe_ptr(®s) == 0) goto out; if (type == T_BPTFLT && dtrace_pid_probe_ptr != NULL && dtrace_pid_probe_ptr(®s) == 0) goto out; if (type == T_DTRACE_RET && dtrace_return_probe_ptr != NULL && dtrace_return_probe_ptr(®s) == 0) goto out; } #endif if ((frame->tf_eflags & PSL_I) == 0) { /* * Buggy application or kernel code has disabled * interrupts and then trapped. Enabling interrupts * now is wrong, but it is better than running with * interrupts disabled until they are accidentally * enabled later. */ if (ISPL(frame->tf_cs) == SEL_UPL || (frame->tf_eflags & PSL_VM)) uprintf( "pid %ld (%s): trap %d with interrupts disabled\n", (long)curproc->p_pid, curthread->td_name, type); else if (type != T_BPTFLT && type != T_TRCTRAP && frame->tf_eip != (int)cpu_switch_load_gs) { /* * XXX not quite right, since this may be for a * multiple fault in user mode. */ printf("kernel trap %d with interrupts disabled\n", type); /* * Page faults need interrupts disabled until later, * and we shouldn't enable interrupts while holding * a spin lock or if servicing an NMI. */ if (type != T_NMI && type != T_PAGEFLT && td->td_md.md_spinlock_count == 0) enable_intr(); } } eva = 0; code = frame->tf_err; if (type == T_PAGEFLT) { /* * For some Cyrix CPUs, %cr2 is clobbered by * interrupts. This problem is worked around by using * an interrupt gate for the pagefault handler. We * are finally ready to read %cr2 and then must * reenable interrupts. * * If we get a page fault while in a critical section, then * it is most likely a fatal kernel page fault. 
The kernel * is already going to panic trying to get a sleep lock to * do the VM lookup, so just consider it a fatal trap so the * kernel can print out a useful trap message and even get * to the debugger. * * If we get a page fault while holding a non-sleepable * lock, then it is most likely a fatal kernel page fault. * If WITNESS is enabled, then it's going to whine about * bogus LORs with various VM locks, so just skip to the * fatal trap handling directly. */ eva = rcr2(); if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, "Kernel page fault") != 0) trap_fatal(frame, eva); else enable_intr(); } if ((ISPL(frame->tf_cs) == SEL_UPL) || ((frame->tf_eflags & PSL_VM) && !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) { /* user trap */ td->td_pticks = 0; td->td_frame = frame; addr = frame->tf_eip; if (td->td_ucred != p->p_ucred) cred_update_thread(td); switch (type) { case T_PRIVINFLT: /* privileged instruction fault */ i = SIGILL; ucode = ILL_PRVOPC; break; case T_BPTFLT: /* bpt instruction fault */ case T_TRCTRAP: /* trace trap */ enable_intr(); frame->tf_eflags &= ~PSL_T; i = SIGTRAP; ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT); break; case T_ARITHTRAP: /* arithmetic trap */ #ifdef DEV_NPX ucode = npxtrap(); if (ucode == -1) goto userout; #else ucode = 0; #endif i = SIGFPE; break; /* * The following two traps can happen in * vm86 mode, and, if so, we want to handle * them specially. */ case T_PROTFLT: /* general protection fault */ case T_STKFLT: /* stack fault */ if (frame->tf_eflags & PSL_VM) { i = vm86_emulate((struct vm86frame *)frame); if (i == 0) goto user; break; } i = SIGBUS; ucode = (type == T_PROTFLT) ? 
BUS_OBJERR : BUS_ADRERR; break; case T_SEGNPFLT: /* segment not present fault */ i = SIGBUS; ucode = BUS_ADRERR; break; case T_TSSFLT: /* invalid TSS fault */ i = SIGBUS; ucode = BUS_OBJERR; break; case T_DOUBLEFLT: /* double fault */ default: i = SIGBUS; ucode = BUS_OBJERR; break; case T_PAGEFLT: /* page fault */ i = trap_pfault(frame, TRUE, eva); #if defined(I586_CPU) && !defined(NO_F00F_HACK) if (i == -2) { /* * The f00f hack workaround has triggered, so * treat the fault as an illegal instruction * (T_PRIVINFLT) instead of a page fault. */ type = frame->tf_trapno = T_PRIVINFLT; /* Proceed as in that case. */ ucode = ILL_PRVOPC; i = SIGILL; break; } #endif if (i == -1) goto userout; if (i == 0) goto user; if (i == SIGSEGV) ucode = SEGV_MAPERR; else { if (prot_fault_translation == 0) { /* * Autodetect. * This check also covers the images * without the ABI-tag ELF note. */ if (SV_CURPROC_ABI() == SV_ABI_FREEBSD && p->p_osrel >= P_OSREL_SIGSEGV) { i = SIGSEGV; ucode = SEGV_ACCERR; } else { i = SIGBUS; ucode = BUS_PAGE_FAULT; } } else if (prot_fault_translation == 1) { /* * Always compat mode. */ i = SIGBUS; ucode = BUS_PAGE_FAULT; } else { /* * Always SIGSEGV mode. */ i = SIGSEGV; ucode = SEGV_ACCERR; } } addr = eva; break; case T_DIVIDE: /* integer divide fault */ ucode = FPE_INTDIV; i = SIGFPE; break; #ifdef DEV_ISA case T_NMI: #ifdef POWERFAIL_NMI #ifndef TIMER_FREQ # define TIMER_FREQ 1193182 #endif if (time_second - lastalert > 10) { log(LOG_WARNING, "NMI: power fail\n"); sysbeep(880, hz); lastalert = time_second; } goto userout; #else /* !POWERFAIL_NMI */ /* machine/parity/power fail/"kitchen sink" faults */ if (isa_nmi(code) == 0) { #ifdef KDB /* * NMI can be hooked up to a pushbutton * for debugging. */ if (kdb_on_nmi) { printf ("NMI ... 
going to debugger\n"); kdb_trap(type, 0, frame); } #endif /* KDB */ goto userout; } else if (panic_on_nmi) panic("NMI indicates hardware failure"); break; #endif /* POWERFAIL_NMI */ #endif /* DEV_ISA */ case T_OFLOW: /* integer overflow fault */ ucode = FPE_INTOVF; i = SIGFPE; break; case T_BOUND: /* bounds check fault */ ucode = FPE_FLTSUB; i = SIGFPE; break; case T_DNA: #ifdef DEV_NPX KASSERT(PCB_USER_FPU(td->td_pcb), ("kernel FPU ctx has leaked")); /* transparent fault (due to context switch "late") */ if (npxdna()) goto userout; #endif uprintf("pid %d killed due to lack of floating point\n", p->p_pid); i = SIGKILL; ucode = 0; break; case T_FPOPFLT: /* FPU operand fetch fault */ ucode = ILL_COPROC; i = SIGILL; break; case T_XMMFLT: /* SIMD floating-point exception */ ucode = 0; /* XXX */ i = SIGFPE; break; } } else { /* kernel trap */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); switch (type) { case T_PAGEFLT: /* page fault */ (void) trap_pfault(frame, FALSE, eva); goto out; case T_DNA: #ifdef DEV_NPX KASSERT(!PCB_USER_FPU(td->td_pcb), ("Unregistered use of FPU in kernel")); if (npxdna()) goto out; #endif break; case T_ARITHTRAP: /* arithmetic trap */ case T_XMMFLT: /* SIMD floating-point exception */ case T_FPOPFLT: /* FPU operand fetch fault */ /* * XXXKIB for now disable any FPU traps in kernel * handler registration seems to be overkill */ trap_fatal(frame, 0); goto out; /* * The following two traps can happen in * vm86 mode, and, if so, we want to handle * them specially. 
*/ case T_PROTFLT: /* general protection fault */ case T_STKFLT: /* stack fault */ if (frame->tf_eflags & PSL_VM) { i = vm86_emulate((struct vm86frame *)frame); if (i != 0) /* * returns to original process */ vm86_trap((struct vm86frame *)frame); goto out; } if (type == T_STKFLT) break; /* FALL THROUGH */ case T_SEGNPFLT: /* segment not present fault */ if (PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL) break; /* * Invalid %fs's and %gs's can be created using * procfs or PT_SETREGS or by invalidating the * underlying LDT entry. This causes a fault * in kernel mode when the kernel attempts to * switch contexts. Lose the bad context * (XXX) so that we can continue, and generate * a signal. */ if (frame->tf_eip == (int)cpu_switch_load_gs) { PCPU_GET(curpcb)->pcb_gs = 0; #if 0 PROC_LOCK(p); kern_psignal(p, SIGBUS); PROC_UNLOCK(p); #endif goto out; } if (td->td_intr_nesting_level != 0) break; /* * Invalid segment selectors and out of bounds * %eip's and %esp's can be set up in user mode. * This causes a fault in kernel mode when the * kernel tries to return to user mode. We want * to get this fault so that we can fix the * problem here and not have to check all the * selectors and pointers when the user changes * them. */ if (frame->tf_eip == (int)doreti_iret) { frame->tf_eip = (int)doreti_iret_fault; goto out; } if (frame->tf_eip == (int)doreti_popl_ds) { frame->tf_eip = (int)doreti_popl_ds_fault; goto out; } if (frame->tf_eip == (int)doreti_popl_es) { frame->tf_eip = (int)doreti_popl_es_fault; goto out; } if (frame->tf_eip == (int)doreti_popl_fs) { frame->tf_eip = (int)doreti_popl_fs_fault; goto out; } if (PCPU_GET(curpcb)->pcb_onfault != NULL) { frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault; goto out; } break; case T_TSSFLT: /* * PSL_NT can be set in user mode and isn't cleared * automatically when the kernel is entered. This * causes a TSS fault when the kernel attempts to * `iret' because the TSS link is uninitialized. 
We * want to get this fault so that we can fix the * problem here and not every time the kernel is * entered. */ if (frame->tf_eflags & PSL_NT) { frame->tf_eflags &= ~PSL_NT; goto out; } break; case T_TRCTRAP: /* trace trap */ if (frame->tf_eip == (int)IDTVEC(lcall_syscall)) { /* * We've just entered system mode via the * syscall lcall. Continue single stepping * silently until the syscall handler has * saved the flags. */ goto out; } if (frame->tf_eip == (int)IDTVEC(lcall_syscall) + 1) { /* * The syscall handler has now saved the * flags. Stop single stepping it. */ frame->tf_eflags &= ~PSL_T; goto out; } /* * Ignore debug register trace traps due to * accesses in the user's address space, which * can happen under several conditions such as * if a user sets a watchpoint on a buffer and * then passes that buffer to a system call. * We still want to get TRCTRAPS for addresses * in kernel space because that is useful when * debugging the kernel. */ if (user_dbreg_trap() && !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL)) { /* * Reset breakpoint bits because the * processor doesn't */ load_dr6(rdr6() & 0xfffffff0); goto out; } /* * FALLTHROUGH (TRCTRAP kernel mode, kernel address) */ case T_BPTFLT: /* * If KDB is enabled, let it handle the debugger trap. * Otherwise, debugger traps "can't happen". */ #ifdef KDB if (kdb_trap(type, 0, frame)) goto out; #endif break; #ifdef DEV_ISA case T_NMI: #ifdef POWERFAIL_NMI if (time_second - lastalert > 10) { log(LOG_WARNING, "NMI: power fail\n"); sysbeep(880, hz); lastalert = time_second; } goto out; #else /* !POWERFAIL_NMI */ /* machine/parity/power fail/"kitchen sink" faults */ if (isa_nmi(code) == 0) { #ifdef KDB /* * NMI can be hooked up to a pushbutton * for debugging. */ if (kdb_on_nmi) { printf ("NMI ... 
going to debugger\n"); kdb_trap(type, 0, frame); } #endif /* KDB */ goto out; } else if (panic_on_nmi == 0) goto out; /* FALLTHROUGH */ #endif /* POWERFAIL_NMI */ #endif /* DEV_ISA */ } trap_fatal(frame, eva); goto out; } /* Translate fault for emulators (e.g. Linux) */ if (*p->p_sysent->sv_transtrap) i = (*p->p_sysent->sv_transtrap)(i, type); ksiginfo_init_trap(&ksi); ksi.ksi_signo = i; ksi.ksi_code = ucode; ksi.ksi_addr = (void *)addr; ksi.ksi_trapno = type; trapsignal(td, &ksi); #ifdef DEBUG if (type <= MAX_TRAP_MSG) { uprintf("fatal process exception: %s", trap_msg[type]); if ((type == T_PAGEFLT) || (type == T_PROTFLT)) uprintf(", fault VA = 0x%lx", (u_long)eva); uprintf("\n"); } #endif user: userret(td, frame); mtx_assert(&Giant, MA_NOTOWNED); KASSERT(PCB_USER_FPU(td->td_pcb), ("Return from trap with kernel FPU ctx leaked")); userout: out: return; }
/*
 * trap(frame): exception, fault, and trap interface to BSD kernel.
 *
 * This common code is called from assembly language IDT gate entry routines
 * that prepare a suitable stack frame, and restore this frame after the
 * exception has been processed. Note that the effect is as if the arguments
 * were passed call by reference.
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	struct proc *p;
	struct pcb *pcb;
	extern char fusubail[], kcopy_fault[], return_address_fault[],
	    IDTVEC(osyscall)[];
	struct trapframe *vframe;
	ksiginfo_t ksi;
	void *onfault;
	int type, error;
	uint32_t cr2;
	bool pfail;

	if (__predict_true(l != NULL)) {
		pcb = lwp_getpcb(l);
		p = l->l_proc;
	} else {
		/*
		 * this can happen eg. on break points in early on boot.
		 */
		pcb = NULL;
		p = NULL;
	}
	type = frame->tf_trapno;

#ifdef DEBUG
	if (trapdebug) {
		trap_print(frame, l);
	}
#endif
	/*
	 * A fault taken in user mode is tagged with T_USER so the switch
	 * below can distinguish user from kernel instances of each trap.
	 */
	if (type != T_NMI && !KERNELMODE(frame->tf_cs, frame->tf_eflags)) {
		type |= T_USER;
		l->l_md.md_regs = frame;
		pcb->pcb_cr2 = 0;
		LWP_CACHE_CREDS(l, p);
	}

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if ((type == T_PROTFLT || type == T_PAGEFLT) &&
	    dtrace_trap_func != NULL) {
		if ((*dtrace_trap_func)(frame, type)) {
			return;
		}
	}
#endif

	switch (type) {

	case T_ASTFLT:
		/*FALLTHROUGH*/

	default:
	we_re_toast:
		/* Unhandled trap: give the debugger(s) a shot, then panic. */
		if (type == T_TRCTRAP)
			check_dr0();
		else
			trap_print(frame, l);
		if (kdb_trap(type, 0, frame))
			return;
		if (kgdb_trap(type, frame))
			return;
		/*
		 * If this is a breakpoint, don't panic if we're not connected.
		 */
		if (type == T_BPTFLT && kgdb_disconnected()) {
			printf("kgdb: ignored %s\n", trap_type[type]);
			return;
		}
		panic("trap");
		/*NOTREACHED*/

	case T_PROTFLT:
	case T_SEGNPFLT:
	case T_ALIGNFLT:
	case T_TSSFLT:
		if (p == NULL)
			goto we_re_toast;
		/* Check for copyin/copyout fault. */
		onfault = onfault_handler(pcb, frame);
		if (onfault != NULL) {
			/*
			 * copyefault: resume at the registered onfault
			 * handler with EFAULT; copyfault: resume with a
			 * caller-chosen 'error' already set.
			 */
copyefault:
			error = EFAULT;
copyfault:
			frame->tf_eip = (uintptr_t)onfault;
			frame->tf_eax = error;
			return;
		}

		/*
		 * Check for failure during return to user mode.
		 * This can happen loading invalid values into the segment
		 * registers, or during the 'iret' itself.
		 *
		 * We do this by looking at the instruction we faulted on.
		 * The specific instructions we recognize only happen when
		 * returning from a trap, syscall, or interrupt.
		 */

kernelfault:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_ACCERR;
		ksi.ksi_trap = type;

		switch (*(u_char *)frame->tf_eip) {
		case 0xcf:	/* iret */
			/*
			 * The 'iret' instruction faulted, so we have the
			 * 'user' registers saved after the kernel %eip:%cs:%fl
			 * of the 'iret' and below that the user %eip:%cs:%fl
			 * the 'iret' was processing.
			 * We must delete the 3 words of kernel return address
			 * from the stack to generate a normal stack frame
			 * (eg for sending a SIGSEGV).
			 */
			vframe = (void *)((int *)frame + 3);
			if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags))
				goto we_re_toast;
			memmove(vframe, frame,
			    offsetof(struct trapframe, tf_eip));
			/* Set the faulting address to the user %eip */
			ksi.ksi_addr = (void *)vframe->tf_eip;
			break;
		case 0x8e:
			/* First opcode byte of a 'mov seg' — check the rest. */
			switch (*(uint32_t *)frame->tf_eip) {
			case 0x8e242c8e:	/* mov (%esp,%gs), then */
			case 0x0424648e:	/* mov 0x4(%esp),%fs */
			case 0x0824448e:	/* mov 0x8(%esp),%es */
			case 0x0c245c8e:	/* mov 0xc(%esp),%ds */
				break;
			default:
				goto we_re_toast;
			}
			/*
			 * We faulted loading one if the user segment registers.
			 * The stack frame containing the user registers is
			 * still valid and is just below the %eip:%cs:%fl of
			 * the kernel fault frame.
			 */
			vframe = (void *)(&frame->tf_eflags + 1);
			if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags))
				goto we_re_toast;
			/* There is no valid address for the fault */
			break;
		default:
			goto we_re_toast;
		}
		/*
		 * We might have faulted trying to execute the
		 * trampoline for a local (nested) signal handler.
		 * Only generate SIGSEGV if the user %cs isn't changed.
		 * (This is only strictly necessary in the 'iret' case.)
		 */
		if (!pmap_exec_fixup(&p->p_vmspace->vm_map, vframe, pcb)) {
			/* Save outer frame for any signal return */
			l->l_md.md_regs = vframe;
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
		/* Return to user by reloading the user frame */
		trap_return_fault_return(vframe);
		/* NOTREACHED */

	case T_PROTFLT|T_USER:		/* protection fault */
	case T_TSSFLT|T_USER:
	case T_SEGNPFLT|T_USER:
	case T_STKFLT|T_USER:
	case T_ALIGNFLT|T_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_addr = (void *)rcr2();
		switch (type) {
		case T_SEGNPFLT|T_USER:
		case T_STKFLT|T_USER:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case T_TSSFLT|T_USER:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			break;
		case T_ALIGNFLT|T_USER:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRALN;
			break;
		case T_PROTFLT|T_USER:
#ifdef VM86
			if (frame->tf_eflags & PSL_VM) {
				vm86_gpfault(l, type & ~T_USER);
				goto out;
			}
#endif
			/*
			 * If pmap_exec_fixup does something,
			 * let's retry the trap.
			 */
			if (pmap_exec_fixup(&p->p_vmspace->vm_map, frame, pcb)){
				goto out;
			}
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			KASSERT(0);
			break;
		}
		goto trapsignal;

	case T_PRIVINFLT|T_USER:	/* privileged instruction fault */
	case T_FPOPFLT|T_USER:		/* coprocessor operand fault */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *) frame->tf_eip;
		switch (type) {
		case T_PRIVINFLT|T_USER:
			ksi.ksi_code = ILL_PRVOPC;
			break;
		case T_FPOPFLT|T_USER:
			ksi.ksi_code = ILL_COPROC;
			break;
		default:
			ksi.ksi_code = 0;
			break;
		}
		goto trapsignal;

	case T_ASTFLT|T_USER:
		/* Allow process switch. */
		//curcpu()->ci_data.cpu_nast++;
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		/* Allow a forced task switch. */
		if (curcpu()->ci_want_resched) {
			preempt();
		}
		goto out;

	case T_BOUND|T_USER:
	case T_OFLOW|T_USER:
	case T_DIVIDE|T_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_addr = (void *)frame->tf_eip;
		switch (type) {
		case T_BOUND|T_USER:
			ksi.ksi_code = FPE_FLTSUB;
			break;
		case T_OFLOW|T_USER:
			ksi.ksi_code = FPE_INTOVF;
			break;
		case T_DIVIDE|T_USER:
			ksi.ksi_code = FPE_INTDIV;
			break;
		default:
			ksi.ksi_code = 0;
			break;
		}
		goto trapsignal;

	case T_PAGEFLT:
		/* Allow page faults in kernel mode. */
		if (__predict_false(l == NULL))
			goto we_re_toast;

		/*
		 * fusubail is used by [fs]uswintr() to prevent page faulting
		 * from inside the profiling interrupt.
		 */
		onfault = pcb->pcb_onfault;
		if (onfault == fusubail || onfault == return_address_fault) {
			goto copyefault;
		}
		if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) {
			goto we_re_toast;
		}

		cr2 = rcr2();
		goto faultcommon;

	case T_PAGEFLT|T_USER: {	/* page fault */
		register vaddr_t va;
		register struct vmspace *vm;
		register struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

		cr2 = rcr2();
faultcommon:
		/* Shared tail for both kernel- and user-mode page faults. */
		vm = p->p_vmspace;
		if (__predict_false(vm == NULL)) {
			goto we_re_toast;
		}
		pcb->pcb_cr2 = cr2;
		va = trunc_page((vaddr_t)cr2);
		/*
		 * It is only a kernel address space fault iff:
		 *	1. (type & T_USER) == 0 and
		 *	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_PAGEFLT && va >= KERNBASE)
			map = kernel_map;
		else
			map = &vm->vm_map;
		if (frame->tf_err & PGEX_W)
			ftype = VM_PROT_WRITE;
		else if (frame->tf_err & PGEX_X)
			ftype = VM_PROT_EXECUTE;
		else
			ftype = VM_PROT_READ;

#ifdef DIAGNOSTIC
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %lx\n", va);
			goto we_re_toast;
		}
#endif

		/* Fault the original page in. */
		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		error = uvm_fault(map, va, ftype);
		pcb->pcb_onfault = onfault;
		if (error == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);

			pfail = false;
			while (type == T_PAGEFLT) {
				/*
				 * we need to switch pmap now if we're in
				 * the middle of copyin/out.
				 *
				 * but we don't need to do so for kcopy as
				 * it never touch userspace.
				 */
				kpreempt_disable();
				if (curcpu()->ci_want_pmapload) {
					onfault = onfault_handler(pcb, frame);
					if (onfault != kcopy_fault) {
						pmap_load();
					}
				}
				/*
				 * We need to keep the pmap loaded and
				 * so avoid being preempted until back
				 * into the copy functions.  Disable
				 * interrupts at the hardware level before
				 * re-enabling preemption.  Interrupts
				 * will be re-enabled by 'iret' when
				 * returning back out of the trap stub.
				 * They'll only be re-enabled when the
				 * program counter is once again in
				 * the copy functions, and so visible
				 * to cpu_kpreempt_exit().
				 */
#ifndef XEN
				x86_disable_intr();
#endif
				l->l_nopreempt--;
				if (l->l_nopreempt > 0 || !l->l_dopreempt ||
				    pfail) {
					return;
				}
#ifndef XEN
				x86_enable_intr();
#endif
				/*
				 * If preemption fails for some reason,
				 * don't retry it.  The conditions won't
				 * change under our nose.
				 */
				pfail = kpreempt(0);
			}
			goto out;
		}

		if (type == T_PAGEFLT) {
			/* Kernel-mode fault that uvm_fault couldn't satisfy. */
			onfault = onfault_handler(pcb, frame);
			if (onfault != NULL)
				goto copyfault;
			printf("uvm_fault(%p, %#lx, %d) -> %#x\n",
			    map, va, ftype, error);
			goto kernelfault;
		}

		/* User-mode fault: translate the uvm_fault error to a signal. */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)cr2;
		switch (error) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			error = EFAULT;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}

#ifdef TRAP_SIGDEBUG
		printf("pid %d.%d (%s): signal %d at eip %x addr %lx "
		    "error %d\n", p->p_pid, l->l_lid, p->p_comm, ksi.ksi_signo,
		    frame->tf_eip, va, error);
#endif
		(*p->p_emul->e_trapsignal)(l, &ksi);
		break;
	}

	case T_TRCTRAP:
		/* Check whether they single-stepped into a lcall. */
		if (frame->tf_eip == (int)IDTVEC(osyscall))
			return;
		if (frame->tf_eip == (int)IDTVEC(osyscall) + 1) {
			frame->tf_eflags &= ~PSL_T;
			return;
		}
		goto we_re_toast;

	case T_BPTFLT|T_USER:		/* bpt instruction fault */
	case T_TRCTRAP|T_USER:		/* trace trap */
		/*
		 * Don't go single-stepping into a RAS.
		 */
		if (p->p_raslist == NULL ||
		    (ras_lookup(p, (void *)frame->tf_eip) == (void *)-1)) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_trap = type & ~T_USER;
			if (type == (T_BPTFLT|T_USER))
				ksi.ksi_code = TRAP_BRKPT;
			else
				ksi.ksi_code = TRAP_TRACE;
			ksi.ksi_addr = (void *)frame->tf_eip;
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
		break;

	case T_NMI:
		if (nmi_dispatch(frame))
			return;
		/* NMI can be hooked up to a pushbutton for debugging */
		if (kgdb_trap(type, frame))
			return;
		if (kdb_trap(type, 0, frame))
			return;
		/* machine/parity/power fail/"kitchen sink" faults */
#if NMCA > 0
		mca_nmi();
#endif
		x86_nmi();
	}

	if ((type & T_USER) == 0)
		return;
out:
	/* Back to user mode: deliver pending ASTs/signals via userret(). */
	userret(l);
	return;
trapsignal:
	/* Deliver the ksiginfo built above, then return to user mode. */
	ksi.ksi_trap = type & ~T_USER;
	(*p->p_emul->e_trapsignal)(l, &ksi);
	userret(l);
}
/*
 * trap() - kernel trap and fault dispatcher.
 *
 * Classifies the hardware trap in 'frame' as a vm86, user, or kernel
 * trap, handles the recoverable cases (page faults, FPU restore, doreti
 * fixups, debugger hooks, NMI), and converts unrecoverable user faults
 * into signals via trapsignal().  Unrecoverable kernel faults end in
 * trap_fatal().
 */
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_FOOF_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}
	--gd->gd_trap_nesting_level;

	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs)==SEL_UPL || (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	if (in_vm86call) {
		/* Trap taken while the kernel itself is in a vm86 call. */
		if (frame->tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			KKASSERT(get_mplock_count(curthread) > 0);
			i = vm86_emulate((struct vm86frame *)frame);
			KKASSERT(get_mplock_count(curthread) > 0);
			if (i != 0) {
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)frame,
					  have_mplock);
				KKASSERT(0); /* NOT REACHED */
			}
			goto out2;
		}
		switch (type) {
			/*
			 * these traps want either a process context, or
			 * assume a normal userspace trap.
			 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);
				if (i == 0)
					goto out;
				break;
			}
			i = SIGBUS;
			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
			break;
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process then the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

#if NNPX > 0
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here
			 */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(frame);
			if (i == 0) {
				if (!(frame->tf_eflags & PSL_T))
					goto out2;
				frame->tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			trap_pfault(frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */
			if (npxdna())
				goto out2;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define	MAYBE_DORETI_FAULT(where, whereto)				\
	do {								\
		if (frame->tf_eip == (int)where) {			\
			frame->tf_eip = (int)whereto;			\
			goto out2;					\
		}							\
	} while (0)
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
						   doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
						   doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
						   doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
						   doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
						   doreti_popl_gs_fault);
				/*
				 * NOTE: cpu doesn't push esp on kernel trap
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    (int)&frame->tf_esp) {
					frame->tf_eip =
					    (register_t)td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap (type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#  define TIMER_FREQ 1193182
#endif
handle_powerfail:
		{
			/* Rate-limit the power-fail alert to one per 10s. */
			static unsigned lastalert = 0;

			if (time_uptime - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_uptime;
			}
			/* YYY mp count */
			goto out2;
		}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		/* Unhandled kernel trap. */
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	/* Return-to-user path: handle ASTs/signals, then leave the kernel. */
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %zd/%zd",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * trap_fatal(frame, eva):
 *
 *	Report an unrecoverable trap: dump the trap type, faulting
 *	address (for T_PAGEFLT 'eva' is the %cr2 value), instruction and
 *	stack pointers, the code-segment descriptor, a breakdown of
 *	eflags, and the current process/thread, then give DDB a chance
 *	to take over before panicking.  Returns only if the debugger
 *	handles the trap.
 */
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	/* Decode the faulting %cs descriptor from this CPU's GDT. */
	sdtossd(&gdt[mycpu->gd_cpuid * NGDT + IDXSEL(frame->tf_cs & 0xffff)].sd,
	    &softseg);

	if (type <= MAX_TRAP_MSG)
		kprintf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic.id = %08x\n", lapic->id);
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = %p\n", (void *)eva);
		kprintf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" :
					"page not present");
	}
	kprintf("instruction pointer = 0x%x:0x%x\n",
	    frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		/*
		 * Same-privilege traps do not push %ss:%esp, so report
		 * the kernel data selector and the frame's own end.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	kprintf("stack pointer = 0x%x:0x%x\n", ss, esp);
	kprintf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	kprintf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	    softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		kprintf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		kprintf("vm86, ");
	kprintf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu (%s)\n",
		    (u_long)curproc->p_pid,
		    curproc->p_comm ? curproc->p_comm : "");
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");
	/**
	 *  XXX FIXME:
	 *	we probably SHOULD have stopped the other CPUs before now!
	 *	another CPU COULD have been touching cpl at this moment...
	 */
	kprintf(" <- SMP: XXX");
	kprintf("\n");

	/*
	 * NOTE(review): the former "#ifdef KDB if (kdb_trap(&psl)) ..."
	 * block was removed: 'psl' is not declared anywhere in this
	 * function, so the code could never compile with KDB defined,
	 * and the DDB hook below already provides the debugger entry.
	 */
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * trap() - VAX trap and fault handler, entered from the locore trap stub
 * with the saved register context in 'tf'.
 *
 * Decodes tf->tf_trap, services page/translation faults through
 * uvm_fault() (honoring a registered pcb_onfault recovery address for
 * kernel-mode faults), converts remaining user-mode traps into signals,
 * and finishes with userret() for traps taken from user mode.  Several
 * case fall-throughs below are intentional (see the hardware-bug comment).
 */
void
trap(struct trapframe *tf)
{
	u_int sig = 0, type = tf->tf_trap, code = 0;
	u_int rv, addr;
	bool trapsig = true;
	const bool usermode = USERMODE_P(tf);
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	struct pcb * const pcb = lwp_getpcb(l);
	u_quad_t oticks = 0;
	struct vmspace *vm;
	struct vm_map *map;
	vm_prot_t ftype;
	void *onfault = pcb->pcb_onfault;

	KASSERT(p != NULL);

	curcpu()->ci_data.cpu_ntrap++;
	if (usermode) {
		type |= T_USER;
		oticks = p->p_sticks;
		l->l_md.md_utf = tf;
		LWP_CACHE_CREDS(l, p);
	}

	/*
	 * Strip the write/PTE-fetch modifier bits for the switch below;
	 * T_WRITE is re-tested explicitly in the fault path.
	 */
	type &= ~(T_WRITE|T_PTEFETCH);

#ifdef TRAPDEBUG
	if(tf->tf_trap==7) goto fram;
	if(faultdebug)printf("Trap: type %lx, code %lx, pc %lx, psl %lx\n",
	    tf->tf_trap, tf->tf_code, tf->tf_pc, tf->tf_psl);
fram:
#endif
	switch (type) {

	default:
#ifdef DDB
		kdb_trap(tf);
#endif
		panic("trap: type %x, code %x, pc %x, psl %x",
		    (u_int)tf->tf_trap, (u_int)tf->tf_code,
		    (u_int)tf->tf_pc, (u_int)tf->tf_psl);

	case T_KSPNOTVAL:
		panic("%d.%d (%s): KSP invalid %#x@%#x pcb %p fp %#x psl %#x)",
		    p->p_pid, l->l_lid, l->l_name ? l->l_name : "??",
		    mfpr(PR_KSP), (u_int)tf->tf_pc, pcb,
		    (u_int)tf->tf_fp, (u_int)tf->tf_psl);

	case T_TRANSFLT|T_USER:
	case T_TRANSFLT:
		/*
		 * BUG! BUG! BUG! BUG! BUG!
		 * Due to a hardware bug (at in least KA65x CPUs) a double
		 * page table fetch trap will cause a translation fault
		 * even if access in the SPT PTE entry specifies 'no access'.
		 * In for example section 6.4.2 in VAX Architecture
		 * Reference Manual it states that if a page both are invalid
		 * and have no access set, a 'access violation fault' occurs.
		 * Therefore, we must fall through here...
		 */
#ifdef nohwbug
		panic("translation fault");
#endif

	case T_PTELEN|T_USER:	/* Page table length exceeded */
	case T_ACCFLT|T_USER:
		if (tf->tf_code < 0) { /* Check for kernel space */
			sig = SIGSEGV;
			code = SEGV_ACCERR;
			break;
		}

	case T_PTELEN:
#ifndef MULTIPROCESSOR
		/*
		 * If we referred to an address beyond the end of the system
		 * page table, it may be due to a failed CAS
		 * restartable-atomic-sequence.  If it is, restart it at the
		 * beginning and restart.
		 */
		{
			extern const uint8_t cas32_ras_start[], cas32_ras_end[];

			if (tf->tf_code == CASMAGIC
			    && tf->tf_pc >= (uintptr_t) cas32_ras_start
			    && tf->tf_pc < (uintptr_t) cas32_ras_end) {
				/* Rewind to the start of the RAS and retry. */
				tf->tf_pc = (uintptr_t) cas32_ras_start;
				trapsig = false;
				break;
			}
		}
		/* FALLTHROUGH */
#endif
	case T_ACCFLT:
#ifdef TRAPDEBUG
		if(faultdebug)printf("trap accflt type %lx, code %lx, pc %lx, psl %lx\n",
		    tf->tf_trap, tf->tf_code, tf->tf_pc, tf->tf_psl);
#endif
#ifdef DIAGNOSTIC
		if (p == 0)
			panic("trap: access fault: addr %lx code %lx",
			    tf->tf_pc, tf->tf_code);
		if (tf->tf_psl & PSL_IS)
			panic("trap: pflt on IS");
#endif

		/*
		 * Page tables are allocated in pmap_enter(). We get
		 * info from below if it is a page table fault, but
		 * UVM may want to map in pages without faults, so
		 * because we must check for PTE pages anyway we don't
		 * bother doing it here.
		 */
		addr = trunc_page(tf->tf_code);
		/* Negative (kernel-space) addresses fault against kernel_map. */
		if (!usermode && (tf->tf_code < 0)) {
			vm = NULL;
			map = kernel_map;
		} else {
			vm = p->p_vmspace;
			map = &vm->vm_map;
		}

		if (tf->tf_trap & T_WRITE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		/* Clear pcb_onfault around uvm_fault() so a recursive
		 * fault is not misdirected to the recovery address. */
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, addr, ftype);
		pcb->pcb_onfault = onfault;
		if (rv != 0) {
			if (!usermode) {
				/* Kernel fault: honor a registered
				 * copyin/copyout recovery address first. */
				if (onfault) {
					pcb->pcb_onfault = NULL;
					tf->tf_pc = (unsigned)onfault;
					tf->tf_psl &= ~PSL_FPD;
					tf->tf_r0 = rv;
					return;
				}
				printf("r0=%08lx r1=%08lx r2=%08lx r3=%08lx ",
				    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
				printf("r4=%08lx r5=%08lx r6=%08lx r7=%08lx\n",
				    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
				printf(
				    "r8=%08lx r9=%08lx r10=%08lx r11=%08lx\n",
				    tf->tf_r8, tf->tf_r9, tf->tf_r10,
				    tf->tf_r11);
				printf("ap=%08lx fp=%08lx sp=%08lx pc=%08lx\n",
				    tf->tf_ap, tf->tf_fp, tf->tf_sp, tf->tf_pc);
				panic("SEGV in kernel mode: pc %#lx addr %#lx",
				    tf->tf_pc, tf->tf_code);
			}
			/* Map the uvm_fault() errno to signal/siginfo code. */
			switch (rv) {
			case ENOMEM:
				printf("UVM: pid %d (%s), uid %d killed: "
				       "out of swap\n",
				       p->p_pid, p->p_comm,
				       l->l_cred ?
				       kauth_cred_geteuid(l->l_cred) : -1);
				sig = SIGKILL;
				code = SI_NOINFO;
				break;
			case EINVAL:
				code = BUS_ADRERR;
				sig = SIGBUS;
				break;
			case EACCES:
				code = SEGV_ACCERR;
				sig = SIGSEGV;
				break;
			default:
				code = SEGV_MAPERR;
				sig = SIGSEGV;
				break;
			}
		} else {
			trapsig = false;
			/* Successful fault within the stack region may
			 * extend the recorded stack size. */
			if (map != kernel_map && addr > 0
			    && (void *)addr >= vm->vm_maxsaddr)
				uvm_grow(p, addr);
		}
		break;

	case T_BPTFLT|T_USER:
		sig = SIGTRAP;
		code = TRAP_BRKPT;
		break;

	case T_TRCTRAP|T_USER:
		sig = SIGTRAP;
		code = TRAP_TRACE;
		/* Clear the trace bit so we do not re-trap immediately. */
		tf->tf_psl &= ~PSL_T;
		break;

	case T_PRIVINFLT|T_USER:
		sig = SIGILL;
		code = ILL_PRVOPC;
		break;

	case T_RESADFLT|T_USER:
		sig = SIGILL;
		code = ILL_ILLADR;
		break;

	case T_RESOPFLT|T_USER:
		sig = SIGILL;
		code = ILL_ILLOPC;
		break;

	case T_XFCFLT|T_USER:
		sig = SIGEMT;
		break;

	case T_ARITHFLT|T_USER:
		/* Map the hardware arithmetic trap/fault code to an
		 * FPE_* siginfo code. */
		sig = SIGFPE;
		switch (tf->tf_code) {
		case ATRP_INTOVF: code = FPE_INTOVF; break;
		case ATRP_INTDIV: code = FPE_INTDIV; break;
		case ATRP_FLTOVF: code = FPE_FLTOVF; break;
		case ATRP_FLTDIV: code = FPE_FLTDIV; break;
		case ATRP_FLTUND: code = FPE_FLTUND; break;
		case ATRP_DECOVF: code = FPE_INTOVF; break;
		case ATRP_FLTSUB: code = FPE_FLTSUB; break;
		case AFLT_FLTDIV: code = FPE_FLTDIV; break;
		case AFLT_FLTUND: code = FPE_FLTUND; break;
		case AFLT_FLTOVF: code = FPE_FLTOVF; break;
		default:	  code = FPE_FLTINV; break;
		}
		break;

	case T_ASTFLT|T_USER:
		/* Acknowledge the AST and reschedule if requested. */
		mtpr(AST_NO,PR_ASTLVL);
		trapsig = false;
		if (curcpu()->ci_want_resched)
			preempt();
		break;

#ifdef DDB
	case T_BPTFLT:	/* Kernel breakpoint */
	case T_KDBTRAP:
	case T_KDBTRAP|T_USER:
	case T_TRCTRAP:
		kdb_trap(tf);
		return;
#endif
	}

	if (trapsig) {
		ksiginfo_t ksi;

		if ((sig == SIGSEGV || sig == SIGILL)
		    && cpu_printfataltraps
		    && (p->p_slflag & PSL_TRACED) == 0
		    && !sigismember(&p->p_sigctx.ps_sigcatch, sig))
			printf("pid %d.%d (%s): sig %d: type %lx, code %lx, pc %lx, psl %lx\n",
			       p->p_pid, l->l_lid, p->p_comm, sig, tf->tf_trap,
			       tf->tf_code, tf->tf_pc, tf->tf_psl);
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_trap = tf->tf_trap;
		ksi.ksi_addr = (void *)tf->tf_code;
		ksi.ksi_code = code;

		/*
		 * Arithmetic exceptions can be of two kinds:
		 * - traps (codes 1..7), where pc points to the
		 *   next instruction to execute.
		 * - faults (codes 8..10), where pc points to the
		 *   faulting instruction.
		 * In the latter case, we need to advance pc by ourselves
		 * to prevent a signal loop.
		 *
		 * XXX this is gross -- miod
		 */
		if (type == (T_ARITHFLT | T_USER) && (tf->tf_code & 8))
			tf->tf_pc = skip_opcode(tf->tf_pc);

		trapsignal(l, &ksi);
	}

	if (!usermode)
		return;

	userret(l, tf, oticks);
}
/*
 * Trap is called from locore to handle most types of processor traps.
 *
 * 'status' and 'cause' are the system registers saved at exception time,
 * 'vaddr' the faulting virtual address, 'opc' the exception PC, and
 * 'frame' the saved register context.
 *
 * NOTE(review): this port is largely scaffolding -- most cases still
 * panic("... notyet") and carry their intended implementation inside
 * "#if notyet" regions that are compiled out.
 */
void
trap(unsigned int status, unsigned int cause, vaddr_t vaddr, vaddr_t opc,
    struct trapframe *frame)
{
	int type;
	struct lwp *l = curlwp;
	struct proc *p = curproc;
	vm_prot_t ftype;
	ksiginfo_t ksi;
	struct frame *fp;
	extern void fswintrberr(void);

	KSI_INIT_TRAP(&ksi);

	uvmexp.traps++;

	/* Reject trap codes beyond the trap_type[] name table. */
	if ((type = TRAPTYPE(cause)) >= LENGTH(trap_type))
		panic("trap: unknown trap type %d", type);

	if (USERMODE(status)) {
		type |= T_USER;
		LWP_CACHE_CREDS(l, p);
	}

	/* Enable interrupts just at it was before the trap. */
	_splset(status & AVR32_STATUS_IMx);

	switch (type) {
	default:
	dopanic:
		(void)splhigh();

		printf("trap: %s in %s mode\n",
			trap_type[TRAPTYPE(cause)],
			USERMODE(status) ? "user" : "kernel");
		printf("status=0x%x, cause=0x%x, epc=%#lx, vaddr=%#lx\n",
			status, cause, opc, vaddr);
		if (curlwp != NULL) {
			fp = (struct frame *)l->l_md.md_regs;
			printf("pid=%d cmd=%s usp=0x%x ",
			    p->p_pid, p->p_comm, (int)fp->f_regs[_R_SP]);
		} else
			printf("curlwp == NULL ");
		printf("ksp=%p\n", &status);
#if defined(DDB)
		kdb_trap(type, (mips_reg_t *) frame);
		/* XXX force halt XXX */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (mips_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (kgdb_trap(type, &ddb_regs)) {
				((mips_reg_t *)frame)[21] = f->f_regs[_R_PC];
				return;
			}
		}
#else
		panic("trap: dopanic: notyet");
#endif
		/*NOTREACHED*/
	case T_TLB_MOD:
		panic("trap: T_TLB_MOD: notyet");
#if notyet
		if (KERNLAND(vaddr)) {
			pt_entry_t *pte;
			unsigned entry;
			paddr_t pa;

			pte = kvtopte(vaddr);
			entry = pte->pt_entry;
			if (!avr32_pte_v(entry) /*|| (entry & mips_pg_m_bit())*/) {
				panic("ktlbmod: invalid pte");
			}
			if (entry & avr32_pte_ropage_bit()) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernelfault;
			}
			entry |= mips_pg_m_bit(); /* XXXAVR32 Do it on tlbarlo/ tlbarhi? */
			pte->pt_entry = entry;
			vaddr &= ~PGOFSET;
			MachTLBUpdate(vaddr, entry);
			pa = avr32_tlbpfn_to_paddr(entry);
			if (!IS_VM_PHYSADDR(pa)) {
				printf("ktlbmod: va %#lx pa %#llx\n",
				    vaddr, (long long)pa);
				panic("ktlbmod: unmanaged page");
			}
			pmap_set_modified(pa);
			return; /* KERN */
		}
		/*FALLTHROUGH*/
#endif
	case T_TLB_MOD+T_USER:
		panic("trap: T_TLB_MOD+T_USER: notyet");
#if notyet
		{
			pt_entry_t *pte;
			unsigned entry;
			paddr_t pa;
			pmap_t pmap;

			pmap = p->p_vmspace->vm_map.pmap;
			if (!(pte = pmap_segmap(pmap, vaddr)))
				panic("utlbmod: invalid segmap");
			pte += (vaddr >> PGSHIFT) & (NPTEPG - 1);
			entry = pte->pt_entry;
			if (!avr32_pte_v(entry))
				panic("utlbmod: invalid pte");
			if (entry & avr32_pte_ropage_bit()) {
				/* write to read only page */
				ftype = VM_PROT_WRITE;
				goto pagefault;
			}
			/* entry |= mips_pg_m_bit(); XXXAVR32 Do it on tlbarlo/ tlbarhi? */
			pte->pt_entry = entry;
			vaddr = (vaddr & ~PGOFSET) |
			    (pmap->pm_asid << AVR32_TLB_PID_SHIFT);
			MachTLBUpdate(vaddr, entry);
			pa = avr32_tlbpfn_to_paddr(entry);
			if (!IS_VM_PHYSADDR(pa)) {
				printf("utlbmod: va %#lx pa %#llx\n",
				    vaddr, (long long)pa);
				panic("utlbmod: unmanaged page");
			}
			pmap_set_modified(pa);
			if (type & T_USER)
				userret(l);
			return; /* GEN */
		}
#endif
	case T_TLB_LD_MISS:
		panic("trap: T_TLB_LD_MISS: notyet");
	case T_TLB_ST_MISS:
		/*
		 * NOTE(review): the T_TLB_LD_MISS comparison below is
		 * currently dead code because that case still panics above;
		 * revisit when the load-miss path is wired up.
		 */
		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
		if (KERNLAND(vaddr))
			goto kernelfault;
		panic("trap: T_TLB_ST_MISS: notyet");
#if notyet
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (l->l_addr->u_pcb.pcb_onfault == (void *)fswintrberr) {
			frame->tf_regs[TF_EPC] = (int)fswintrberr;
			return; /* KERN */
		}
		goto pagefault;
#endif
	case T_TLB_LD_MISS+T_USER:
		panic("trap: T_TLB_LD_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_READ;
		goto pagefault;
#endif
	case T_TLB_ST_MISS+T_USER:
		panic("trap: T_TLB_ST_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_WRITE;
#endif
	pagefault: ;
	    {
		vaddr_t va;
		struct vmspace *vm;
		struct vm_map *map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page(vaddr);
		/*
		 * Scheduler-activations LWPs record the fault address so a
		 * blocked upcall can report it.
		 */
		if ((l->l_flag & LW_SA)
		    && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)vaddr;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
		/* Emulations may install their own fault handler. */
		if (p->p_emul->e_fault)
			rv = (*p->p_emul->e_fault)(p, va, ftype);
		else
			rv = uvm_fault(map, va, ftype);
#ifdef VMFAULT_TRACE
		printf(
		    "uvm_fault(%p (pmap %p), %lx (0x%x), %d) -> %d at pc %p\n",
		    map, vm->vm_map.pmap, va, vaddr, ftype, rv, (void*)opc);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size. Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((void *)va >= vm->vm_maxsaddr) {
			if (rv == 0){
				uvm_grow(p, va);
			}
			else if (rv == EACCES)
				rv = EFAULT;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		if (rv == 0) {
			if (type & T_USER) {
				userret(l);
			}
			return; /* GEN */
		}
		if ((type & T_USER) == 0)
			goto copyfault;
		/* Translate the fault errno into a signal for the process. */
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       l->l_cred ?
			       kauth_cred_geteuid(l->l_cred) : (uid_t) -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = 0;
		} else {
			if (rv == EACCES) {
				ksi.ksi_signo = SIGBUS;
				ksi.ksi_code = BUS_OBJERR;
			} else {
				ksi.ksi_signo = SIGSEGV;
				ksi.ksi_code = SEGV_MAPERR;
			}
		}
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		break; /* SIGNAL */
	    }
	kernelfault: ;
	    {
		vaddr_t va;
		int rv;

		va = trunc_page(vaddr);
		rv = uvm_fault(kernel_map, va, ftype);
		if (rv == 0)
			return; /* KERN */
		/*FALLTHROUGH*/
	    }
	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
	copyfault:
		panic("trap: copyfault: notyet");
#if notyet
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		frame->tf_regs[TF_EPC] = (intptr_t)l->l_addr->u_pcb.pcb_onfault;
		return; /* KERN */
#endif
#if notyet
	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGSEGV; /* XXX */
		ksi.ksi_addr = (void *)vaddr;
		ksi.ksi_code = SEGV_MAPERR; /* XXX */
		break; /* SIGNAL */

	case T_BREAK:
		panic("trap: T_BREAK: notyet");
#if defined(DDB)
		kdb_trap(type, (avr32_reg_t *) frame);
		return; /* KERN */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern avr32_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (avr32_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (!kgdb_trap(type, &ddb_regs))
				printf("kgdb: ignored %s\n",
				    trap_type[TRAPTYPE(cause)]);
			else
				((avr32_reg_t *)frame)[21] = f->f_regs[_R_PC];
			return;
		}
#else
		goto dopanic;
#endif
	case T_BREAK+T_USER:
	    {
		vaddr_t va;
		uint32_t instr;
		int rv;

		/* compute address of break instruction */
		va = (DELAYBRANCH(cause)) ? opc + sizeof(int) : opc;

		/* read break instruction */
		instr = fuiword((void *)va);

		/* Not our single-step breakpoint: deliver a trace trap. */
		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
			ksi.ksi_trap = type & ~T_USER;
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_addr = (void *)va;
			ksi.ksi_code = TRAP_TRACE;
			break;
		}
		/*
		 * Restore original instruction and clear BP
		 */
		rv = suiword((void *)va, l->l_md.md_ss_instr);
		if (rv < 0) {
			/* Page may be write-protected: open it, retry,
			 * then re-protect. */
			vaddr_t sa, ea;
			sa = trunc_page(va);
			ea = round_page(va + sizeof(int) - 1);
			rv = uvm_map_protect(&p->p_vmspace->vm_map,
			    sa, ea, VM_PROT_ALL, false);
			if (rv == 0) {
				rv = suiword((void *)va, l->l_md.md_ss_instr);
				(void)uvm_map_protect(&p->p_vmspace->vm_map,
				    sa, ea,
				    VM_PROT_READ|VM_PROT_EXECUTE, false);
			}
		}
		mips_icache_sync_all();		/* XXXJRT -- necessary? */
		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */
		if (rv < 0)
			printf("Warning: can't restore instruction at 0x%lx: 0x%x\n",
			    l->l_md.md_ss_addr, l->l_md.md_ss_instr);
		l->l_md.md_ss_addr = 0;
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_addr = (void *)va;
		ksi.ksi_code = TRAP_BRKPT;
		break; /* SIGNAL */
	    }
	case T_RES_INST+T_USER:
	case T_COP_UNUSABLE+T_USER:
#if !defined(SOFTFLOAT) && !defined(NOFPU)
		if ((cause & MIPS_CR_COP_ERR) == 0x10000000) {
			struct frame *f;

			f = (struct frame *)l->l_md.md_regs;
			savefpregs(fpcurlwp);		/* yield FPA */
			loadfpregs(l);			/* load FPA */
			fpcurlwp = l;
			l->l_md.md_flags |= MDP_FPUSED;
			f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
		} else
#endif
		{
			MachEmulateInst(status, cause, opc, l->l_md.md_regs);
		}
		userret(l);
		return; /* GEN */
	case T_FPE+T_USER:
		panic ("trap: T_FPE+T_USER: notyet");
#if defined(SOFTFLOAT)
		MachEmulateInst(status, cause, opc, l->l_md.md_regs);
#elif !defined(NOFPU)
		MachFPTrap(status, cause, opc, l->l_md.md_regs);
#endif
		userret(l);
		return; /* GEN */
	case T_OVFLOW+T_USER:
	case T_TRAP+T_USER:
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGFPE;
		fp = (struct frame *)l->l_md.md_regs;
		ksi.ksi_addr = (void *)fp->f_regs[_R_PC];
		ksi.ksi_code = FPE_FLTOVF; /* XXX */
		break; /* SIGNAL */
#endif
	}
	/* Common signal-delivery tail -- also still scaffolding. */
	panic("trap: post-switch: notyet");
#if notyet
	fp = (struct frame *)l->l_md.md_regs;
	fp->f_regs[_R_CAUSE] = cause;
	fp->f_regs[_R_BADVADDR] = vaddr;
	(*p->p_emul->e_trapsignal)(l, &ksi);
	if ((type & T_USER) == 0)
		panic("trapsignal");
	userret(l);
#endif
	return;
}
/*
 * trap() - m68k trap and fault handler, called from locore with the saved
 * frame 'fp', the trap 'type', the hardware-supplied 'code' (SSW/FPSR
 * depending on the trap), and the faulting value/address 'v'.
 *
 * Kernel faults are recovered through pcb_onfault (the 'copyfault' path)
 * where a handler is registered; everything else either panics or is
 * turned into a signal delivered via trapsignal() at the bottom.
 */
/*ARGSUSED*/
void
trap(struct frame *fp, int type, unsigned code, unsigned v)
{
	extern char fubail[], subail[];
	struct lwp *l;
	struct proc *p;
	struct pcb *pcb;
	void *onfault;
	ksiginfo_t ksi;
	int s;
	int rv;
	u_quad_t sticks = 0 /* XXX initialiser works around compiler bug */;
	static int panicking __diagused;

	curcpu()->ci_data.cpu_ntrap++;
	l = curlwp;
	p = l->l_proc;
	pcb = lwp_getpcb(l);

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_trap = type & ~T_USER;

	if (USERMODE(fp->f_sr)) {
		type |= T_USER;
		sticks = p->p_sticks;
		l->l_md.md_regs = fp->f_regs;
		LWP_CACHE_CREDS(l, p);
	}
	switch (type) {

	default:
	dopanic:
		/*
		 * Let the kernel debugger see the trap frame that
		 * caused us to panic.  This is a convenience so
		 * one can see registers at the point of failure.
		 */
		s = splhigh();
		panicking = 1;
		printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v);
		printf("%s program counter = 0x%x\n",
		    (type & T_USER) ? "user" : "kernel", fp->f_pc);
#ifdef KGDB
		/* If connected, step or cont returns 1 */
		if (kgdb_trap(type, (db_regs_t *)fp))
			goto kgdb_cont;
#endif
#ifdef DDB
		(void)kdb_trap(type, (db_regs_t *)fp);
#endif
#ifdef KGDB
	kgdb_cont:
#endif
		splx(s);
		if (panicstr) {
			printf("trap during panic!\n");
#ifdef DEBUG
			/* XXX should be a machine-dependent hook */
			printf("(press a key)\n");
			(void)cngetc();
#endif
		}
		regdump((struct trapframe *)fp, 128);
		type &= ~T_USER;
		if ((u_int)type < trap_types)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* kernel bus error */
		onfault = pcb->pcb_onfault;
		if (onfault == NULL)
			goto dopanic;
		rv = EFAULT;
		/* FALLTHROUGH */

	copyfault:
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
		fp->f_stackadj = exframesize[fp->f_format];
		fp->f_format = fp->f_vector = 0;
		fp->f_pc = (int)onfault;
		fp->f_regs[D0] = rv;
		return;

	case T_BUSERR|T_USER:	/* bus error */
	case T_ADDRERR|T_USER:	/* address error */
		ksi.ksi_addr = (void *)v;
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
			BUS_OBJERR : BUS_ADRERR;
		break;

	case T_COPERR:		/* kernel coprocessor violation */
	case T_FMTERR|T_USER:	/* do all RTE errors come in as T_USER? */
	case T_FMTERR:		/* ...just in case... */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", p->p_pid,
		    type==T_COPERR ? "coprocessor" : "format");
		type |= T_USER;
		/* Force SIGILL back to its default so it is fatal. */
		mutex_enter(p->p_lock);
		SIGACTION(p, SIGILL).sa_handler = SIG_DFL;
		sigdelset(&p->p_sigctx.ps_sigignore, SIGILL);
		sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL);
		sigdelset(&l->l_sigmask, SIGILL);
		mutex_exit(p->p_lock);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_RESAD_FAULT */
		ksi.ksi_code = (type == T_COPERR) ?
			ILL_COPROC : ILL_ILLOPC;
		break;

	case T_COPERR|T_USER:	/* user coprocessor violation */
		/* What is a proper response here? */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;

	case T_FPERR|T_USER:	/* 68881 exceptions */
		/*
		 * We pass along the 68881 status register which locore stashed
		 * in code for us.
		 */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = fpsr2siginfocode(code);
		break;

#ifdef M68040
	case T_FPEMULI|T_USER:	/* unimplemented FP instruction */
	case T_FPEMULD|T_USER:	/* unimplemented FP data type */
		/* XXX need to FSAVE */
		printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n",
		       p->p_pid, p->p_comm,
		       fp->f_format == 2 ? "instruction" : "data type",
		       fp->f_pc, fp->f_fmt2.f_iaddr);
		/* XXX need to FRESTORE */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;
#endif

	case T_ILLINST|T_USER:	/* illegal instruction fault */
	case T_PRIVINST|T_USER:	/* privileged instruction fault */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_PRIVIN_FAULT */
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
			ILL_PRVOPC : ILL_ILLOPC;
		break;

	case T_ZERODIV|T_USER:	/* Divide by zero */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_INTDIV_TRAP */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTDIV;
		break;

	case T_CHKINST|T_USER:	/* CHK instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_SUBRNG_TRAP */
		ksi.ksi_signo = SIGFPE;
		break;

	case T_TRAPVINST|T_USER:	/* TRAPV instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_INTOVF_TRAP */
		ksi.ksi_signo = SIGFPE;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 *	HP-UX uses trap #1 for breakpoints,
	 *	NetBSD/m68k uses trap #2,
	 *	SUN 3.x uses trap #15,
	 *	DDB and KGDB uses trap #15 (for kernel breakpoints;
	 *	handled elsewhere).
	 *
	 * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 *
	 * XXX: We should never get kernel-mode T_TRAP15
	 * XXX: because locore.s now gives them special treatment.
	 */
	case T_TRAP15:		/* kernel breakpoint */
#ifdef DEBUG
		printf("unexpected kernel trace trap, type = %d\n", type);
		printf("program counter = 0x%x\n", fp->f_pc);
#endif
		fp->f_sr &= ~PSL_T;
		return;

	case T_TRACE|T_USER:	/* user trace trap */
#ifdef COMPAT_SUNOS
		/*
		 * SunOS uses Trap #2 for a "CPU cache flush".
		 * Just flush the on-chip caches and return.
		 */
		if (p->p_emul == &emul_sunos) {
			ICIA();
			DCIU();
			return;
		}
#endif
		/* FALLTHROUGH */
	case T_TRACE:		/* tracing a trap instruction */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		/* Clear the trace bit to avoid an immediate re-trap. */
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_ASTFLT:		/* system async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* user async trap */
		astpending = 0;
		/*
		 * We check for software interrupts first.  This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST.  We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check.  Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */

	case T_SSIR:		/* software interrupt */
	case T_SSIR|T_USER:
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != (T_ASTFLT|T_USER)) {
			curcpu()->ci_data.cpu_ntrap--;
			return;
		}
		spl0();
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		if (curcpu()->ci_want_resched)
			preempt();
		goto out;

	case T_MMUFLT:		/* kernel mode page fault */
		/*
		 * If we were doing profiling ticks or other user mode
		 * stuff from interrupt code, Just Say No.
		 */
		onfault = pcb->pcb_onfault;
		if (onfault == fubail || onfault == subail) {
			rv = EFAULT;
			goto copyfault;
		}
		/* fall into ... */

	case T_MMUFLT|T_USER:	/* page fault */
	    {
		vaddr_t va;
		struct vmspace *vm = p->p_vmspace;
		struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

		onfault = pcb->pcb_onfault;

#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
		printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
		       p->p_pid, code, v, fp->f_pc, fp->f_sr);
#endif
		/*
		 * It is only a kernel address space fault iff:
		 * 	1. (type & T_USER) == 0  and
		 * 	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if ((type & T_USER) == 0 &&
		    (onfault == NULL || KDFAULT(code)))
			map = kernel_map;
		else {
			map = vm ? &vm->vm_map : kernel_map;
		}

		if (WRFAULT(code))
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		va = trunc_page((vaddr_t)v);

		/* A kernel fault on page zero is a NULL dereference. */
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel %s access at 0x%x\n",
			    (ftype & VM_PROT_WRITE) ? "read/write" :
			    "read", v);
			goto dopanic;
		}

#ifdef DIAGNOSTIC
		if (interrupt_depth && !panicking) {
			printf("trap: calling uvm_fault() from interrupt!\n");
			goto dopanic;
		}
#endif

		/* Clear pcb_onfault around uvm_fault() so a recursive
		 * fault is not misdirected to the recovery address. */
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, va, ftype);
		pcb->pcb_onfault = onfault;
#ifdef DEBUG
		if (rv && MDB_ISPID(p->p_pid))
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (rv == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);

			if (type == T_MMUFLT) {
				/* Restartable atomic sequence? Resume it. */
				if (ucas_ras_check(&fp->F_t)) {
					return;
				}
#ifdef M68040
				if (cputype == CPU_68040)
					(void) writeback(fp, 1);
#endif
				return;
			}
			goto out;
		}
		if (rv == EACCES) {
			ksi.ksi_code = SEGV_ACCERR;
			rv = EFAULT;
		} else
			ksi.ksi_code = SEGV_MAPERR;
		if (type == T_MMUFLT) {
			if (onfault)
				goto copyfault;
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			printf(" type %x, code [mmu,,ssw]: %x\n",
			       type, code);
			goto dopanic;
		}
		ksi.ksi_addr = (void *)v;
		switch (rv) {
		case ENOMEM:
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       l->l_cred ?
			       kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			break;
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		break;
	    }
	}
	trapsignal(l, &ksi);
	if ((type & T_USER) == 0)
		return;
out:
	userret(l, fp, sticks, v, 1);
}
/* * Handle a single exception. */ void itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p, int type) { int i; unsigned ucode = 0; vm_prot_t ftype; extern vaddr_t onfault_table[]; int onfault; int typ = 0; union sigval sv; struct pcb *pcb; switch (type) { case T_TLB_MOD: /* check for kernel address */ if (trapframe->badvaddr < 0) { pt_entry_t *pte, entry; paddr_t pa; vm_page_t pg; pte = kvtopte(trapframe->badvaddr); entry = *pte; #ifdef DIAGNOSTIC if (!(entry & PG_V) || (entry & PG_M)) panic("trap: ktlbmod: invalid pte"); #endif if (pmap_is_page_ro(pmap_kernel(), trunc_page(trapframe->badvaddr), entry)) { /* write to read only page in the kernel */ ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; goto kernel_fault; } entry |= PG_M; *pte = entry; KERNEL_LOCK(); pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET, entry); pa = pfn_to_pad(entry); pg = PHYS_TO_VM_PAGE(pa); if (pg == NULL) panic("trap: ktlbmod: unmanaged page"); pmap_set_modify(pg); KERNEL_UNLOCK(); return; } /* FALLTHROUGH */ case T_TLB_MOD+T_USER: { pt_entry_t *pte, entry; paddr_t pa; vm_page_t pg; pmap_t pmap = p->p_vmspace->vm_map.pmap; if (!(pte = pmap_segmap(pmap, trapframe->badvaddr))) panic("trap: utlbmod: invalid segmap"); pte += uvtopte(trapframe->badvaddr); entry = *pte; #ifdef DIAGNOSTIC if (!(entry & PG_V) || (entry & PG_M)) panic("trap: utlbmod: invalid pte"); #endif if (pmap_is_page_ro(pmap, trunc_page(trapframe->badvaddr), entry)) { /* write to read only page */ ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; goto fault_common_no_miss; } entry |= PG_M; *pte = entry; KERNEL_LOCK(); pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), entry); pa = pfn_to_pad(entry); pg = PHYS_TO_VM_PAGE(pa); if (pg == NULL) panic("trap: utlbmod: unmanaged page"); pmap_set_modify(pg); KERNEL_UNLOCK(); return; } case T_TLB_LD_MISS: case T_TLB_ST_MISS: ftype = (type == T_TLB_ST_MISS) ? 
VM_PROT_WRITE : VM_PROT_READ; pcb = &p->p_addr->u_pcb; /* check for kernel address */ if (trapframe->badvaddr < 0) { vaddr_t va; int rv; kernel_fault: va = trunc_page((vaddr_t)trapframe->badvaddr); onfault = pcb->pcb_onfault; pcb->pcb_onfault = 0; KERNEL_LOCK(); rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype); KERNEL_UNLOCK(); pcb->pcb_onfault = onfault; if (rv == 0) return; if (onfault != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; } /* * It is an error for the kernel to access user space except * through the copyin/copyout routines. */ if (pcb->pcb_onfault != 0) { /* * We want to resolve the TLB fault before invoking * pcb_onfault if necessary. */ goto fault_common; } else { goto err; } case T_TLB_LD_MISS+T_USER: ftype = VM_PROT_READ; pcb = &p->p_addr->u_pcb; goto fault_common; case T_TLB_ST_MISS+T_USER: ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; fault_common: #ifdef CPU_R4000 if (r4000_errata != 0) { if (eop_tlb_miss_handler(trapframe, ci, p) != 0) return; } #endif fault_common_no_miss: #ifdef CPU_R4000 if (r4000_errata != 0) { eop_cleanup(trapframe, p); } #endif { vaddr_t va; struct vmspace *vm; vm_map_t map; int rv; vm = p->p_vmspace; map = &vm->vm_map; va = trunc_page((vaddr_t)trapframe->badvaddr); onfault = pcb->pcb_onfault; pcb->pcb_onfault = 0; KERNEL_LOCK(); rv = uvm_fault(map, va, 0, ftype); pcb->pcb_onfault = onfault; /* * If this was a stack access we keep track of the maximum * accessed stack size. Also, if vm_fault gets a protection * failure it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error. 
*/ if ((caddr_t)va >= vm->vm_maxsaddr) { if (rv == 0) uvm_grow(p, va); else if (rv == EACCES) rv = EFAULT; } KERNEL_UNLOCK(); if (rv == 0) return; if (!USERMODE(trapframe->sr)) { if (onfault != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; } ucode = ftype; i = SIGSEGV; typ = SEGV_MAPERR; break; } case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ ucode = 0; /* XXX should be VM_PROT_something */ i = SIGBUS; typ = BUS_ADRALN; break; case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */ case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */ ucode = 0; /* XXX should be VM_PROT_something */ i = SIGBUS; typ = BUS_OBJERR; break; case T_SYSCALL+T_USER: { struct trap_frame *locr0 = p->p_md.md_regs; struct sysent *callp; unsigned int code; register_t tpc; int numsys, error; struct args { register_t i[8]; } args; register_t rval[2]; atomic_add_int(&uvmexp.syscalls, 1); /* compute next PC after syscall instruction */ tpc = trapframe->pc; /* Remember if restart */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; callp = p->p_p->ps_emul->e_sysent; numsys = p->p_p->ps_emul->e_nsysent; code = locr0->v0; switch (code) { case SYS_syscall: case SYS___syscall: /* * Code is first argument, followed by actual args. * __syscall provides the code as a quad to maintain * proper alignment of 64-bit arguments on 32-bit * platforms, which doesn't change anything here. 
*/ code = locr0->a0; if (code >= numsys) callp += p->p_p->ps_emul->e_nosys; /* (illegal) */ else callp += code; i = callp->sy_argsize / sizeof(register_t); args.i[0] = locr0->a1; args.i[1] = locr0->a2; args.i[2] = locr0->a3; if (i > 3) { args.i[3] = locr0->a4; args.i[4] = locr0->a5; args.i[5] = locr0->a6; args.i[6] = locr0->a7; if (i > 7) if ((error = copyin((void *)locr0->sp, &args.i[7], sizeof(register_t)))) goto bad; } break; default: if (code >= numsys) callp += p->p_p->ps_emul->e_nosys; /* (illegal) */ else callp += code; i = callp->sy_narg; args.i[0] = locr0->a0; args.i[1] = locr0->a1; args.i[2] = locr0->a2; args.i[3] = locr0->a3; if (i > 4) { args.i[4] = locr0->a4; args.i[5] = locr0->a5; args.i[6] = locr0->a6; args.i[7] = locr0->a7; } } rval[0] = 0; rval[1] = locr0->v1; #if defined(DDB) || defined(DEBUG) trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ? TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code; #endif error = mi_syscall(p, code, callp, args.i, rval); switch (error) { case 0: locr0->v0 = rval[0]; locr0->v1 = rval[1]; locr0->a3 = 0; break; case ERESTART: locr0->pc = tpc; break; case EJUSTRETURN: break; /* nothing to do */ default: bad: locr0->v0 = error; locr0->a3 = 1; } mi_syscall_return(p, code, error, rval); return; } case T_BREAK: #ifdef DDB kdb_trap(type, trapframe); #endif /* Reenable interrupts if necessary */ if (trapframe->sr & SR_INT_ENAB) { enableintr(); } return; case T_BREAK+T_USER: { caddr_t va; u_int32_t instr; struct trap_frame *locr0 = p->p_md.md_regs; /* compute address of break instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; /* read break instruction */ copyin(va, &instr, sizeof(int32_t)); switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) { case 6: /* gcc range error */ i = SIGFPE; typ = FPE_FLTSUB; /* skip instruction */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; break; case 7: /* gcc3 divide by zero 
*/ i = SIGFPE; typ = FPE_INTDIV; /* skip instruction */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; break; #ifdef PTRACE case BREAK_SSTEP_VAL: if (p->p_md.md_ss_addr == (long)va) { #ifdef DEBUG printf("trap: %s (%d): breakpoint at %p " "(insn %08x)\n", p->p_comm, p->p_pid, (void *)p->p_md.md_ss_addr, p->p_md.md_ss_instr); #endif /* Restore original instruction and clear BP */ process_sstep(p, 0); typ = TRAP_BRKPT; } else { typ = TRAP_TRACE; } i = SIGTRAP; break; #endif #ifdef FPUEMUL case BREAK_FPUEMUL_VAL: /* * If this is a genuine FP emulation break, * resume execution to our branch destination. */ if ((p->p_md.md_flags & MDP_FPUSED) != 0 && p->p_md.md_fppgva + 4 == (vaddr_t)va) { struct vm_map *map = &p->p_vmspace->vm_map; p->p_md.md_flags &= ~MDP_FPUSED; locr0->pc = p->p_md.md_fpbranchva; /* * Prevent access to the relocation page. * XXX needs to be fixed to work with rthreads */ uvm_fault_unwire(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE); (void)uvm_map_protect(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_NONE, FALSE); return; } /* FALLTHROUGH */ #endif default: typ = TRAP_TRACE; i = SIGTRAP; break; } break; } case T_IWATCH+T_USER: case T_DWATCH+T_USER: { caddr_t va; /* compute address of trapped instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; printf("watch exception @ %p\n", va); #ifdef RM7K_PERFCNTR if (rm7k_watchintr(trapframe)) { /* Return to user, don't add any more overhead */ return; } #endif i = SIGTRAP; typ = TRAP_BRKPT; break; } case T_TRAP+T_USER: { caddr_t va; u_int32_t instr; struct trap_frame *locr0 = p->p_md.md_regs; /* compute address of trap instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; /* read break instruction */ copyin(va, &instr, sizeof(int32_t)); if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc 
+= 4; #ifdef RM7K_PERFCNTR if (instr == 0x040c0000) { /* Performance cntr trap */ int result; result = rm7k_perfcntr(trapframe->a0, trapframe->a1, trapframe->a2, trapframe->a3); locr0->v0 = -result; /* Return to user, don't add any more overhead */ return; } else #endif /* * GCC 4 uses teq with code 7 to signal divide by * zero at runtime. This is one instruction shorter * than the BEQ + BREAK combination used by gcc 3. */ if ((instr & 0xfc00003f) == 0x00000034 /* teq */ && (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) { i = SIGFPE; typ = FPE_INTDIV; } else { i = SIGEMT; /* Stuff it with something for now */ typ = 0; } break; } case T_RES_INST+T_USER: i = SIGILL; typ = ILL_ILLOPC; break; case T_COP_UNUSABLE+T_USER: /* * Note MIPS IV COP1X instructions issued with FPU * disabled correctly report coprocessor 1 as the * unusable coprocessor number. */ if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) { i = SIGILL; /* only FPU instructions allowed */ typ = ILL_ILLOPC; break; } #ifdef FPUEMUL MipsFPTrap(trapframe); #else enable_fpu(p); #endif return; case T_FPE: printf("FPU Trap: PC %lx CR %lx SR %lx\n", trapframe->pc, trapframe->cause, trapframe->sr); goto err; case T_FPE+T_USER: MipsFPTrap(trapframe); return; case T_OVFLOW+T_USER: i = SIGFPE; typ = FPE_FLTOVF; break; case T_ADDR_ERR_LD: /* misaligned access */ case T_ADDR_ERR_ST: /* misaligned access */ case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */ pcb = &p->p_addr->u_pcb; if ((onfault = pcb->pcb_onfault) != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; default: err: disableintr(); #if !defined(DDB) && defined(DEBUG) trapDump("trap", printf); #endif printf("\nTrap cause = %d Frame %p\n", type, trapframe); printf("Trap PC %p RA %p fault %p\n", (void *)trapframe->pc, (void *)trapframe->ra, (void *)trapframe->badvaddr); #ifdef DDB stacktrace(!USERMODE(trapframe->sr) ? 
trapframe : p->p_md.md_regs); kdb_trap(type, trapframe); #endif panic("trap"); } #ifdef FPUEMUL /* * If a relocated delay slot causes an exception, blame the * original delay slot address - userland is not supposed to * know anything about emulation bowels. */ if ((p->p_md.md_flags & MDP_FPUSED) != 0 && trapframe->badvaddr == p->p_md.md_fppgva) trapframe->badvaddr = p->p_md.md_fpslotva; #endif p->p_md.md_regs->pc = trapframe->pc; p->p_md.md_regs->cause = trapframe->cause; p->p_md.md_regs->badvaddr = trapframe->badvaddr; sv.sival_ptr = (void *)trapframe->badvaddr; KERNEL_LOCK(); trapsignal(p, i, ucode, typ, sv); KERNEL_UNLOCK(); }
/*
 * trap_pfault: resolve a page fault taken in user or kernel mode.
 *
 * Returns 0 when the fault was resolved (or recovered via pcb_onfault),
 * -1 when a fatal kernel fault was reported via trap_fatal(), or a
 * signal number (SIGBUS/SIGSEGV) to deliver to the faulting process.
 *
 * frame->tf_addr holds the faulting virtual address; frame->tf_err holds
 * the hardware page-fault error code (PGEX_* bits).
 */
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging: try to catch kernel faults on the user address
		 * space when not inside an onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if JG
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * Kernel-mode fault: recover through the registered onfault
		 * handler (copyin/copyout style) when not nested inside an
		 * interrupt; otherwise the fault is fatal.
		 */
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
#ifdef DDB
		if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) {
#else
		if (bootverbose) {
#endif
			kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
			    "pid=%d cpu=%d p_comm=%s\n",
			    ftype, fault_flags,
			    (void *)frame->tf_addr,
			    (void *)frame->tf_rip,
			    p->p_pid, mycpu->gd_cpuid, p->p_comm);
		}
#ifdef DDB
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

/*
 * trap_fatal: report an unrecoverable trap and panic (or drop into the
 * kernel debugger when it accepts the trap).
 *
 * 'eva' is the faulting virtual address, meaningful only for T_PAGEFLT.
 * This function does not return unless the debugger handles the trap.
 */
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	/* Decode the faulting code-segment descriptor for the report below. */
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
	    frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/*
		 * For kernel-mode traps the trapframe itself is the stack
		 * context; report its address as the stack pointer.
		 */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32,
	    softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example: hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to potentially
 *	  blocking, including blocking on any token.
 *
 * NOTE!  NMI and kernel DBG traps remain on their respective pcpu IST
 *	  stacks if taken from a kernel RPL.  trap() cannot block in this
 *	  situation.  DDB entry or a direct report-and-return is ok.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from panicing
 * if an attempt is made to switch from a fast interrupt or IPI.
 */
void
trap(struct trapframe *frame)
{
	static struct krate sscpubugrate = { 1 };
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	/* i = signal to deliver, ucode = si_code for that signal. */
	int i = 0, ucode = 0, type, code;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if ((type == T_STKFLT || type == T_PROTFLT ||
			    type == T_SEGNPFLT) &&
			   frame->tf_rip == (long)doreti_iret) {
			/*
			 * iretq fault from kernel mode during return to
			 * userland.
			 *
			 * This situation is expected, don't complain.
			 */
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			kprintf("kernel trap %d (%s @ 0x%016jx) with "
				"interrupts disabled\n",
				type, td->td_comm, frame->tf_rip);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
			frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
			("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
						 RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
					    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
#ifdef DDB
			if (frame->tf_rip == 0) {
				/* used for kernel debugging only */
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
			}
#endif
			if (i == -1 || i == 0)
				goto out;
			if (i == SIGSEGV) {
				ucode = SEGV_MAPERR;
			} else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process then the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out;
			}
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out2;
			}
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
						td->td_pcb->pcb_onfault;
					goto out2;
				}

				/*
				 * If the iretq in doreti faults during
				 * return to user, it will be special-cased
				 * in IDTVEC(prot) to get here.  We want
				 * to 'return' to doreti_iret_fault in
				 * ipl.s in approximately the same state we
				 * were in at the iretq.
				 */
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip = (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
#if 0
				/* do we need this? */
				if (frame->tf_rip == (long)doreti_iret)
					frame->tf_rip = (long)doreti_iret_fault;
#endif
				goto out2;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			/*
			 * Detect historical CPU artifact on syscall or int $3
			 * entry (if not shortcutted in exception.s via
			 * DIRECT_DISALLOW_SS_CPUBUG).
			 */
			gd->gd_cnt.v_trap++;
			if (frame->tf_rip == (register_t)IDTVEC(fast_syscall)) {
				krateprintf(&sscpubugrate,
					"Caught #DB at syscall cpu artifact\n");
				goto out2;
			}
			if (frame->tf_rip == (register_t)IDTVEC(bpt)) {
				krateprintf(&sscpubugrate,
					"Caught #DB at int $N cpu artifact\n");
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & ~0xf);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Fault from user mode, virtual kernel interecept.
	 *
	 * If the fault is directly related to a VM context managed by a
	 * virtual kernel then let the virtual kernel handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	gd->gd_cnt.v_trap++;
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
		("trap: critical section count mismatch! %d/%d",
		crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
		("trap: extra tokens held after trap! %ld/%ld",
		curstop - &td->td_toks_base,
		td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * trap: C-level handler for processor exceptions and faults.
 *
 * Called from the assembly IDT gate stubs with a prepared trapframe.
 * User-mode traps are translated into signals delivered via trapsignal();
 * kernel-mode traps are either recovered (onfault, doreti/segment-load
 * special cases, debug-register artifacts) or reported via trap_fatal().
 */
void
trap(struct trapframe *frame)
{
#ifdef KDTRACE_HOOKS
	struct reg regs;
#endif
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	/* i = signal to deliver, ucode = si_code for that signal. */
	int i = 0, ucode = 0, code;
	u_int type;
	register_t addr = 0;
	ksiginfo_t ksi;

	PCPU_INC(cnt.v_trap);
	type = frame->tf_trapno;

#ifdef SMP
	/* Handler for NMI IPIs used for stopping CPUs. */
	if (type == T_NMI) {
		if (ipi_nmi_handler() == 0)
			goto out;
	}
#endif /* SMP */

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif

	if (type == T_RESERVED) {
		trap_fatal(frame, 0);
		goto out;
	}

#ifdef HWPMC_HOOKS
	/*
	 * CPU PMCs interrupt using an NMI.  If the PMC module is
	 * active, pass the 'rip' value to the PMC module's interrupt
	 * handler.  A return value of '1' from the handler means that
	 * the NMI was handled by it and we can return immediately.
	 */
	if (type == T_NMI && pmc_intr &&
	    (*pmc_intr)(PCPU_GET(cpuid), frame))
		goto out;
#endif

	if (type == T_MCHK) {
		mca_intr();
		goto out;
	}

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe.  Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault.  On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
		goto out;
#endif

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.
		 */
		if (ISPL(frame->tf_cs) == SEL_UPL)
			uprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curthread->td_name, type);
		else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);

			/*
			 * We shouldn't enable interrupts while holding a
			 * spin lock.
			 */
			if (td->td_md.md_spinlock_count == 0)
				enable_intr();
		}
	}

	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		td->td_pticks = 0;
		td->td_frame = frame;
		addr = frame->tf_rip;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			enable_intr();
#ifdef KDTRACE_HOOKS
			if (type == T_BPTFLT) {
				fill_frame_regs(frame, &regs);
				if (dtrace_pid_probe_ptr != NULL &&
				    dtrace_pid_probe_ptr(&regs) == 0)
					goto out;
			}
#endif
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = fputrap_x87();
			if (ucode == -1)
				goto userout;
			i = SIGFPE;
			break;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			addr = frame->tf_addr;
			i = trap_pfault(frame, TRUE);
			if (i == -1)
				goto userout;
			if (i == 0)
				goto user;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				if (prot_fault_translation == 0) {
					/*
					 * Autodetect.
					 * This check also covers the images
					 * without the ABI-tag ELF note.
					 */
					if (SV_CURPROC_ABI() == SV_ABI_FREEBSD
					    && p->p_osrel >= P_OSREL_SIGSEGV) {
						i = SIGSEGV;
						ucode = SEGV_ACCERR;
					} else {
						i = SIGBUS;
						ucode = BUS_PAGE_FAULT;
					}
				} else if (prot_fault_translation == 1) {
					/*
					 * Always compat mode.
					 */
					i = SIGBUS;
					ucode = BUS_PAGE_FAULT;
				} else {
					/*
					 * Always SIGSEGV mode.
					 */
					i = SIGSEGV;
					ucode = SEGV_ACCERR;
				}
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#ifdef DEV_ISA
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef KDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (kdb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* KDB */
				goto userout;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* DEV_ISA */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/* transparent fault (due to context switch "late") */
			KASSERT(PCB_USER_FPU(td->td_pcb),
			    ("kernel FPU ctx has leaked"));
			fpudna();
			goto userout;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = fputrap_sse();
			if (ucode == -1)
				goto userout;
			i = SIGFPE;
			break;
#ifdef KDTRACE_HOOKS
		case T_DTRACE_RET:
			enable_intr();
			fill_frame_regs(frame, &regs);
			if (dtrace_return_probe_ptr != NULL &&
			    dtrace_return_probe_ptr(&regs) == 0)
				goto out;
			break;
#endif
		}
	} else {
		/* kernel trap */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(frame, FALSE);
			goto out;

		case T_DNA:
			KASSERT(!PCB_USER_FPU(td->td_pcb),
			    ("Unregistered use of FPU in kernel"));
			fpudna();
			goto out;

		case T_ARITHTRAP:	/* arithmetic trap */
		case T_XMMFLT:		/* SIMD floating-point exception */
		case T_FPOPFLT:		/* FPU operand fetch fault */
			/*
			 * XXXKIB for now disable any FPU traps in kernel
			 * handler registration seems to be overkill
			 */
			trap_fatal(frame, 0);
			goto out;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			if (td->td_intr_nesting_level != 0)
				break;

			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame->tf_rip == (long)doreti_iret) {
				frame->tf_rip = (long)doreti_iret_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_ds) {
				frame->tf_rip = (long)ds_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_es) {
				frame->tf_rip = (long)es_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_fs) {
				frame->tf_rip = (long)fs_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_gs) {
				frame->tf_rip = (long)gs_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_gsbase) {
				frame->tf_rip = (long)gsbase_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_fsbase) {
				frame->tf_rip = (long)fsbase_load_fault;
				goto out;
			}
			if (curpcb->pcb_onfault != NULL) {
				frame->tf_rip = (long)curpcb->pcb_onfault;
				goto out;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If KDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef KDB
			if (kdb_trap(type, 0, frame))
				goto out;
#endif
			break;

#ifdef DEV_ISA
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef KDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (kdb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* KDB */
				goto out;
			} else if (panic_on_nmi == 0)
				goto out;
			/* FALLTHROUGH */
#endif /* DEV_ISA */
		}

		trap_fatal(frame, 0);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = i;
	ksi.ksi_code = ucode;
	ksi.ksi_trapno = type;
	ksi.ksi_addr = (void *)addr;
	if (uprintf_signal) {
		uprintf("pid %d comm %s: signal %d err %lx code %d type %d "
		    "addr 0x%lx rsp 0x%lx rip 0x%lx "
		    "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
		    p->p_pid, p->p_comm, i, frame->tf_err, ucode, type, addr,
		    frame->tf_rsp, frame->tf_rip,
		    fubyte((void *)(frame->tf_rip + 0)),
		    fubyte((void *)(frame->tf_rip + 1)),
		    fubyte((void *)(frame->tf_rip + 2)),
		    fubyte((void *)(frame->tf_rip + 3)),
		    fubyte((void *)(frame->tf_rip + 4)),
		    fubyte((void *)(frame->tf_rip + 5)),
		    fubyte((void *)(frame->tf_rip + 6)),
		    fubyte((void *)(frame->tf_rip + 7)));
	}
	KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
	trapsignal(td, &ksi);

user:
	userret(td, frame);
	KASSERT(PCB_USER_FPU(td->td_pcb),
	    ("Return from trap with kernel FPU ctx leaked"));
userout:
out:
	return;
}
/*
 * Common trap/fault/exception dispatcher for amd64.
 *
 * Called (with a fully built trapframe) for every processor trap.  The
 * handler first services the special cases that must short-circuit normal
 * processing (NMI stop IPIs, KDB re-entry, PMC NMIs, machine checks,
 * DTrace probe faults), then splits on the privilege level of the
 * interrupted context:
 *
 *   - user mode:   translate the trap into a signal number 'i' and
 *                  si_code 'ucode', deliver it via trapsignal(), and
 *                  return to user mode through userret().
 *   - kernel mode: either fix up the known-recoverable cases (page
 *                  faults, segment-load faults via pcb_onfault, stale
 *                  PSL_NT, debug traps, ...) or fall through to
 *                  trap_fatal().
 *
 * NOTE(review): 'frame' is live machine state; several cases repair it
 * in place (tf_rip, tf_rflags) before returning.
 */
void
trap(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int i = 0, ucode = 0, code;
	u_int type;
	register_t addr = 0;
	ksiginfo_t ksi;

	PCPU_INC(cnt.v_trap);
	type = frame->tf_trapno;

#ifdef SMP
#ifdef STOP_NMI
	/* Handler for NMI IPIs used for stopping CPUs. */
	if (type == T_NMI) {
		if (ipi_nmi_handler() == 0)
			goto out;
	}
#endif /* STOP_NMI */
#endif /* SMP */

#ifdef KDB
	/* Re-entering while the debugger is active: hand control back to KDB. */
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif

#ifdef HWPMC_HOOKS
	/*
	 * CPU PMCs interrupt using an NMI.  If the PMC module is
	 * active, pass the 'rip' value to the PMC module's interrupt
	 * handler.  A return value of '1' from the handler means that
	 * the NMI was handled by it and we can return immediately.
	 */
	if (type == T_NMI && pmc_intr &&
	    (*pmc_intr)(PCPU_GET(cpuid), frame))
		goto out;
#endif

	/* Machine check: fatal unless the MCA handler claims it. */
	if (type == T_MCHK) {
		if (!mca_intr())
			trap_fatal(frame, 0);
		goto out;
	}

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe.  Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault.  On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL)
		if ((*dtrace_trap_func)(frame, type))
			goto out;
#endif

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.
		 */
		if (ISPL(frame->tf_cs) == SEL_UPL)
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
			/*
			 * We shouldn't enable interrupts while holding a
			 * spin lock or servicing an NMI.
			 */
			if (type != T_NMI && td->td_md.md_spinlock_count == 0)
				enable_intr();
		}
	}

	code = frame->tf_err;
	if (type == T_PAGEFLT) {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0)
			trap_fatal(frame, frame->tf_addr);
	}

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		td->td_pticks = 0;
		td->td_frame = frame;
		addr = frame->tf_rip;
		/* Refresh the thread's cached credentials if stale. */
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* Map the trap type to a signal (i) and si_code (ucode). */
		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			enable_intr();
			/* Clear single-step so the process can make progress. */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = fputrap();
			if (ucode == -1)
				goto userout;
			i = SIGFPE;
			break;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			/* Report the faulting address, not the faulting rip. */
			addr = frame->tf_addr;
#ifdef KSE
			if (td->td_pflags & TDP_SA)
				thread_user_enter(td);
#endif
			i = trap_pfault(frame, TRUE);
			if (i == -1)
				goto userout;
			if (i == 0)
				goto user;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				/*
				 * Protection fault: choose between the
				 * historical SIGBUS and POSIX SIGSEGV
				 * according to the sysctl-selected policy.
				 */
				if (prot_fault_translation == 0) {
					/*
					 * Autodetect.
					 * This check also covers the images
					 * without the ABI-tag ELF note.
					 */
					if ((curproc->p_sysent ==
					    &elf64_freebsd_sysvec
#ifdef COMPAT_IA32
					    || curproc->p_sysent ==
					    &ia32_freebsd_sysvec
#endif
					    ) && p->p_osrel >= 700004) {
						i = SIGSEGV;
						ucode = SEGV_ACCERR;
					} else {
						i = SIGBUS;
						ucode = BUS_PAGE_FAULT;
					}
				} else if (prot_fault_translation == 1) {
					/*
					 * Always compat mode.
					 */
					i = SIGBUS;
					ucode = BUS_PAGE_FAULT;
				} else {
					/*
					 * Always SIGSEGV mode.
					 */
					i = SIGSEGV;
					ucode = SEGV_ACCERR;
				}
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#ifdef DEV_ISA
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			/* XXX Giant */
			if (isa_nmi(code) == 0) {
#ifdef KDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (kdb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* KDB */
				goto userout;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* DEV_ISA */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/* transparent fault (due to context switch "late") */
			fpudna();
			goto userout;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(frame, FALSE);
			goto out;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			fpudna();
			printf("fpudna in kernel mode!\n");
			goto out;

		case T_STKFLT:		/* stack fault */
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/* No fixups from within interrupt handlers. */
			if (td->td_intr_nesting_level != 0)
				break;

			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame->tf_rip == (long)doreti_iret) {
				frame->tf_rip = (long)doreti_iret_fault;
				goto out;
			}
			/* Generic copyin/copyout-style fault recovery. */
			if (PCPU_GET(curpcb)->pcb_onfault != NULL) {
				frame->tf_rip =
				    (long)PCPU_GET(curpcb)->pcb_onfault;
				goto out;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't clear them itself.
				 */
				/* XXX check upper bits here */
				load_dr6(rdr6() & 0xfffffff0);
				goto out;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If KDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef KDB
			if (kdb_trap(type, 0, frame))
				goto out;
#endif
			break;

#ifdef DEV_ISA
		case T_NMI:
			/* XXX Giant */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef KDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (kdb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* KDB */
				goto out;
			} else if (panic_on_nmi == 0)
				goto out;
			/* FALLTHROUGH */
#endif /* DEV_ISA */
		}

		/* Unrecoverable kernel trap. */
		trap_fatal(frame, 0);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = i;
	ksi.ksi_code = ucode;
	ksi.ksi_trapno = type;
	ksi.ksi_addr = (void *)addr;
	trapsignal(td, &ksi);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

user:
	userret(td, frame);
	mtx_assert(&Giant, MA_NOTOWNED);
userout:
out:
	return;
}