/* * General page fault handler. */ void do_fault(struct trapframe *tf, struct proc *p, struct vm_map *map, vaddr_t va, vm_prot_t atype) { int error; struct pcb *curpcb; if (pmap_fault(map->pmap, va, atype)) return; KASSERT(current_intr_depth == 0); for (;;) { error = uvm_fault(map, va, 0, atype); if (error != ENOMEM) break; log(LOG_WARNING, "pid %d: VM shortage, sleeping\n", p->p_pid); tsleep(&lbolt, PVM, "abtretry", 0); } if (error != 0) { curpcb = &p->p_addr->u_pcb; if (curpcb->pcb_onfault != NULL) { tf->tf_r0 = error; tf->tf_r15 = (tf->tf_r15 & ~R15_PC) | (register_t)curpcb->pcb_onfault; return; } trapsignal(p, SIGSEGV, va); } }
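/*
 * A minimal sketch of the consumer side of the pcb_onfault protocol used by
 * do_fault() above.  A copyin-style routine records a recovery address before
 * touching user memory; if the fault cannot be resolved, do_fault() stores the
 * errno in r0 and rewrites the saved PC to that address instead of posting
 * SIGSEGV.  This outline is illustrative only: the real routines are written
 * in assembly so the recovery point is a stable code address, and the sketch
 * leans on the GNU computed-label extension to express that in C.
 */
int
copyin_sketch(const void *uaddr, void *kaddr, size_t len)
{
	struct pcb *pcb = &curproc->p_addr->u_pcb;

	pcb->pcb_onfault = &&recover;	/* arm fault recovery */
	memcpy(kaddr, uaddr, len);	/* may fault on user pages */
	pcb->pcb_onfault = NULL;	/* disarm */
	return 0;

recover:
	/* do_fault() redirected us here; it left the errno in r0 */
	pcb->pcb_onfault = NULL;
	return EFAULT;
}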
/* * Handle a single exception. */ void itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p, int type) { int i; unsigned ucode = 0; vm_prot_t ftype; extern vaddr_t onfault_table[]; int onfault; int typ = 0; union sigval sv; struct pcb *pcb; switch (type) { case T_TLB_MOD: /* check for kernel address */ if (trapframe->badvaddr < 0) { pt_entry_t *pte, entry; paddr_t pa; vm_page_t pg; pte = kvtopte(trapframe->badvaddr); entry = *pte; #ifdef DIAGNOSTIC if (!(entry & PG_V) || (entry & PG_M)) panic("trap: ktlbmod: invalid pte"); #endif if (pmap_is_page_ro(pmap_kernel(), trunc_page(trapframe->badvaddr), entry)) { /* write to read only page in the kernel */ ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; goto kernel_fault; } entry |= PG_M; *pte = entry; KERNEL_LOCK(); pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET, entry); pa = pfn_to_pad(entry); pg = PHYS_TO_VM_PAGE(pa); if (pg == NULL) panic("trap: ktlbmod: unmanaged page"); pmap_set_modify(pg); KERNEL_UNLOCK(); return; } /* FALLTHROUGH */ case T_TLB_MOD+T_USER: { pt_entry_t *pte, entry; paddr_t pa; vm_page_t pg; pmap_t pmap = p->p_vmspace->vm_map.pmap; if (!(pte = pmap_segmap(pmap, trapframe->badvaddr))) panic("trap: utlbmod: invalid segmap"); pte += uvtopte(trapframe->badvaddr); entry = *pte; #ifdef DIAGNOSTIC if (!(entry & PG_V) || (entry & PG_M)) panic("trap: utlbmod: invalid pte"); #endif if (pmap_is_page_ro(pmap, trunc_page(trapframe->badvaddr), entry)) { /* write to read only page */ ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; goto fault_common_no_miss; } entry |= PG_M; *pte = entry; KERNEL_LOCK(); pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), entry); pa = pfn_to_pad(entry); pg = PHYS_TO_VM_PAGE(pa); if (pg == NULL) panic("trap: utlbmod: unmanaged page"); pmap_set_modify(pg); KERNEL_UNLOCK(); return; } case T_TLB_LD_MISS: case T_TLB_ST_MISS: ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ; pcb = &p->p_addr->u_pcb; /* check for kernel address */ if (trapframe->badvaddr < 0) { vaddr_t va; int rv; kernel_fault: va = trunc_page((vaddr_t)trapframe->badvaddr); onfault = pcb->pcb_onfault; pcb->pcb_onfault = 0; KERNEL_LOCK(); rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype); KERNEL_UNLOCK(); pcb->pcb_onfault = onfault; if (rv == 0) return; if (onfault != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; } /* * It is an error for the kernel to access user space except * through the copyin/copyout routines. */ if (pcb->pcb_onfault != 0) { /* * We want to resolve the TLB fault before invoking * pcb_onfault if necessary. */ goto fault_common; } else { goto err; } case T_TLB_LD_MISS+T_USER: ftype = VM_PROT_READ; pcb = &p->p_addr->u_pcb; goto fault_common; case T_TLB_ST_MISS+T_USER: ftype = VM_PROT_WRITE; pcb = &p->p_addr->u_pcb; fault_common: #ifdef CPU_R4000 if (r4000_errata != 0) { if (eop_tlb_miss_handler(trapframe, ci, p) != 0) return; } #endif fault_common_no_miss: #ifdef CPU_R4000 if (r4000_errata != 0) { eop_cleanup(trapframe, p); } #endif { vaddr_t va; struct vmspace *vm; vm_map_t map; int rv; vm = p->p_vmspace; map = &vm->vm_map; va = trunc_page((vaddr_t)trapframe->badvaddr); onfault = pcb->pcb_onfault; pcb->pcb_onfault = 0; KERNEL_LOCK(); rv = uvm_fault(map, va, 0, ftype); pcb->pcb_onfault = onfault; /* * If this was a stack access we keep track of the maximum * accessed stack size. 
Also, if vm_fault gets a protection * failure it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error. */ if ((caddr_t)va >= vm->vm_maxsaddr) { if (rv == 0) uvm_grow(p, va); else if (rv == EACCES) rv = EFAULT; } KERNEL_UNLOCK(); if (rv == 0) return; if (!USERMODE(trapframe->sr)) { if (onfault != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; } ucode = ftype; i = SIGSEGV; typ = SEGV_MAPERR; break; } case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ ucode = 0; /* XXX should be VM_PROT_something */ i = SIGBUS; typ = BUS_ADRALN; break; case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */ case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */ ucode = 0; /* XXX should be VM_PROT_something */ i = SIGBUS; typ = BUS_OBJERR; break; case T_SYSCALL+T_USER: { struct trap_frame *locr0 = p->p_md.md_regs; struct sysent *callp; unsigned int code; register_t tpc; int numsys, error; struct args { register_t i[8]; } args; register_t rval[2]; atomic_add_int(&uvmexp.syscalls, 1); /* compute next PC after syscall instruction */ tpc = trapframe->pc; /* Remember if restart */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; callp = p->p_p->ps_emul->e_sysent; numsys = p->p_p->ps_emul->e_nsysent; code = locr0->v0; switch (code) { case SYS_syscall: case SYS___syscall: /* * Code is first argument, followed by actual args. * __syscall provides the code as a quad to maintain * proper alignment of 64-bit arguments on 32-bit * platforms, which doesn't change anything here. */ code = locr0->a0; if (code >= numsys) callp += p->p_p->ps_emul->e_nosys; /* (illegal) */ else callp += code; i = callp->sy_argsize / sizeof(register_t); args.i[0] = locr0->a1; args.i[1] = locr0->a2; args.i[2] = locr0->a3; if (i > 3) { args.i[3] = locr0->a4; args.i[4] = locr0->a5; args.i[5] = locr0->a6; args.i[6] = locr0->a7; if (i > 7) if ((error = copyin((void *)locr0->sp, &args.i[7], sizeof(register_t)))) goto bad; } break; default: if (code >= numsys) callp += p->p_p->ps_emul->e_nosys; /* (illegal) */ else callp += code; i = callp->sy_narg; args.i[0] = locr0->a0; args.i[1] = locr0->a1; args.i[2] = locr0->a2; args.i[3] = locr0->a3; if (i > 4) { args.i[4] = locr0->a4; args.i[5] = locr0->a5; args.i[6] = locr0->a6; args.i[7] = locr0->a7; } } rval[0] = 0; rval[1] = locr0->v1; #if defined(DDB) || defined(DEBUG) trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ? 
TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code; #endif error = mi_syscall(p, code, callp, args.i, rval); switch (error) { case 0: locr0->v0 = rval[0]; locr0->v1 = rval[1]; locr0->a3 = 0; break; case ERESTART: locr0->pc = tpc; break; case EJUSTRETURN: break; /* nothing to do */ default: bad: locr0->v0 = error; locr0->a3 = 1; } mi_syscall_return(p, code, error, rval); return; } case T_BREAK: #ifdef DDB kdb_trap(type, trapframe); #endif /* Reenable interrupts if necessary */ if (trapframe->sr & SR_INT_ENAB) { enableintr(); } return; case T_BREAK+T_USER: { caddr_t va; u_int32_t instr; struct trap_frame *locr0 = p->p_md.md_regs; /* compute address of break instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; /* read break instruction */ copyin(va, &instr, sizeof(int32_t)); switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) { case 6: /* gcc range error */ i = SIGFPE; typ = FPE_FLTSUB; /* skip instruction */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; break; case 7: /* gcc3 divide by zero */ i = SIGFPE; typ = FPE_INTDIV; /* skip instruction */ if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; break; #ifdef PTRACE case BREAK_SSTEP_VAL: if (p->p_md.md_ss_addr == (long)va) { #ifdef DEBUG printf("trap: %s (%d): breakpoint at %p " "(insn %08x)\n", p->p_comm, p->p_pid, (void *)p->p_md.md_ss_addr, p->p_md.md_ss_instr); #endif /* Restore original instruction and clear BP */ process_sstep(p, 0); typ = TRAP_BRKPT; } else { typ = TRAP_TRACE; } i = SIGTRAP; break; #endif #ifdef FPUEMUL case BREAK_FPUEMUL_VAL: /* * If this is a genuine FP emulation break, * resume execution to our branch destination. */ if ((p->p_md.md_flags & MDP_FPUSED) != 0 && p->p_md.md_fppgva + 4 == (vaddr_t)va) { struct vm_map *map = &p->p_vmspace->vm_map; p->p_md.md_flags &= ~MDP_FPUSED; locr0->pc = p->p_md.md_fpbranchva; /* * Prevent access to the relocation page. * XXX needs to be fixed to work with rthreads */ uvm_fault_unwire(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE); (void)uvm_map_protect(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_NONE, FALSE); return; } /* FALLTHROUGH */ #endif default: typ = TRAP_TRACE; i = SIGTRAP; break; } break; } case T_IWATCH+T_USER: case T_DWATCH+T_USER: { caddr_t va; /* compute address of trapped instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; printf("watch exception @ %p\n", va); #ifdef RM7K_PERFCNTR if (rm7k_watchintr(trapframe)) { /* Return to user, don't add any more overhead */ return; } #endif i = SIGTRAP; typ = TRAP_BRKPT; break; } case T_TRAP+T_USER: { caddr_t va; u_int32_t instr; struct trap_frame *locr0 = p->p_md.md_regs; /* compute address of trap instruction */ va = (caddr_t)trapframe->pc; if (trapframe->cause & CR_BR_DELAY) va += 4; /* read break instruction */ copyin(va, &instr, sizeof(int32_t)); if (trapframe->cause & CR_BR_DELAY) locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0); else locr0->pc += 4; #ifdef RM7K_PERFCNTR if (instr == 0x040c0000) { /* Performance cntr trap */ int result; result = rm7k_perfcntr(trapframe->a0, trapframe->a1, trapframe->a2, trapframe->a3); locr0->v0 = -result; /* Return to user, don't add any more overhead */ return; } else #endif /* * GCC 4 uses teq with code 7 to signal divide by * zero at runtime. This is one instruction shorter * than the BEQ + BREAK combination used by gcc 3. 
*/ if ((instr & 0xfc00003f) == 0x00000034 /* teq */ && (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) { i = SIGFPE; typ = FPE_INTDIV; } else { i = SIGEMT; /* Stuff it with something for now */ typ = 0; } break; } case T_RES_INST+T_USER: i = SIGILL; typ = ILL_ILLOPC; break; case T_COP_UNUSABLE+T_USER: /* * Note MIPS IV COP1X instructions issued with FPU * disabled correctly report coprocessor 1 as the * unusable coprocessor number. */ if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) { i = SIGILL; /* only FPU instructions allowed */ typ = ILL_ILLOPC; break; } #ifdef FPUEMUL MipsFPTrap(trapframe); #else enable_fpu(p); #endif return; case T_FPE: printf("FPU Trap: PC %lx CR %lx SR %lx\n", trapframe->pc, trapframe->cause, trapframe->sr); goto err; case T_FPE+T_USER: MipsFPTrap(trapframe); return; case T_OVFLOW+T_USER: i = SIGFPE; typ = FPE_FLTOVF; break; case T_ADDR_ERR_LD: /* misaligned access */ case T_ADDR_ERR_ST: /* misaligned access */ case T_BUS_ERR_LD_ST: /* BERR asserted to cpu */ pcb = &p->p_addr->u_pcb; if ((onfault = pcb->pcb_onfault) != 0) { pcb->pcb_onfault = 0; trapframe->pc = onfault_table[onfault]; return; } goto err; default: err: disableintr(); #if !defined(DDB) && defined(DEBUG) trapDump("trap", printf); #endif printf("\nTrap cause = %d Frame %p\n", type, trapframe); printf("Trap PC %p RA %p fault %p\n", (void *)trapframe->pc, (void *)trapframe->ra, (void *)trapframe->badvaddr); #ifdef DDB stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs); kdb_trap(type, trapframe); #endif panic("trap"); } #ifdef FPUEMUL /* * If a relocated delay slot causes an exception, blame the * original delay slot address - userland is not supposed to * know anything about emulation bowels. */ if ((p->p_md.md_flags & MDP_FPUSED) != 0 && trapframe->badvaddr == p->p_md.md_fppgva) trapframe->badvaddr = p->p_md.md_fpslotva; #endif p->p_md.md_regs->pc = trapframe->pc; p->p_md.md_regs->cause = trapframe->cause; p->p_md.md_regs->badvaddr = trapframe->badvaddr; sv.sival_ptr = (void *)trapframe->badvaddr; KERNEL_LOCK(); trapsignal(p, i, ucode, typ, sv); KERNEL_UNLOCK(); }
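/*
 * Minimal sketch of the instruction decoding done in the T_TRAP+T_USER case
 * above.  Per the MIPS encoding, "teq rs, rt, code" is SPECIAL (opcode 0)
 * with function 0x34, rt in bits 20:16 and a 10-bit trap code in bits 15:6;
 * gcc 4 emits "teq rs, $zero" with code 7 for integer division by zero.
 * The helper name is illustrative, not part of the source above.
 */
static int
mips_is_divzero_trap(uint32_t instr)
{
	/* opcode (bits 31:26) must be SPECIAL, function (bits 5:0) TEQ */
	if ((instr & 0xfc00003f) != 0x00000034)
		return 0;
	return ((instr >> 16) & 0x1f) == 0 &&	/* rt is $zero */
	    ((instr >> 6) & 0x3ff) == 7;	/* trap code 7 */
}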
/* * trap(frame): exception, fault, and trap interface to BSD kernel. * * This common code is called from assembly language IDT gate entry routines * that prepare a suitable stack frame, and restore this frame after the * exception has been processed. Note that the effect is as if the arguments * were passed call by reference. */ void trap(struct trapframe *frame) { struct lwp *l = curlwp; struct proc *p; struct pcb *pcb; extern char fusubail[], kcopy_fault[], return_address_fault[], IDTVEC(osyscall)[]; struct trapframe *vframe; ksiginfo_t ksi; void *onfault; int type, error; uint32_t cr2; bool pfail; if (__predict_true(l != NULL)) { pcb = lwp_getpcb(l); p = l->l_proc; } else { /* * this can happen, e.g., on breakpoints early in boot. */ pcb = NULL; p = NULL; } type = frame->tf_trapno; #ifdef DEBUG if (trapdebug) { trap_print(frame, l); } #endif if (type != T_NMI && !KERNELMODE(frame->tf_cs, frame->tf_eflags)) { type |= T_USER; l->l_md.md_regs = frame; pcb->pcb_cr2 = 0; LWP_CACHE_CREDS(l, p); } #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in its per-cpu flags to indicate that it doesn't * want to fault. On returning from the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. */ if ((type == T_PROTFLT || type == T_PAGEFLT) && dtrace_trap_func != NULL) { if ((*dtrace_trap_func)(frame, type)) { return; } } #endif switch (type) { case T_ASTFLT: /*FALLTHROUGH*/ default: we_re_toast: if (type == T_TRCTRAP) check_dr0(); else trap_print(frame, l); if (kdb_trap(type, 0, frame)) return; if (kgdb_trap(type, frame)) return; /* * If this is a breakpoint, don't panic if we're not connected. */ if (type == T_BPTFLT && kgdb_disconnected()) { printf("kgdb: ignored %s\n", trap_type[type]); return; } panic("trap"); /*NOTREACHED*/ case T_PROTFLT: case T_SEGNPFLT: case T_ALIGNFLT: case T_TSSFLT: if (p == NULL) goto we_re_toast; /* Check for copyin/copyout fault. */ onfault = onfault_handler(pcb, frame); if (onfault != NULL) { copyefault: error = EFAULT; copyfault: frame->tf_eip = (uintptr_t)onfault; frame->tf_eax = error; return; } /* * Check for failure during return to user mode. * This can happen loading invalid values into the segment * registers, or during the 'iret' itself. * * We do this by looking at the instruction we faulted on. * The specific instructions we recognize only happen when * returning from a trap, syscall, or interrupt. */ kernelfault: KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_trap = type; switch (*(u_char *)frame->tf_eip) { case 0xcf: /* iret */ /* * The 'iret' instruction faulted, so we have the * 'user' registers saved after the kernel %eip:%cs:%fl * of the 'iret' and below that the user %eip:%cs:%fl * the 'iret' was processing. * We must delete the 3 words of kernel return address * from the stack to generate a normal stack frame * (eg for sending a SIGSEGV).
*/ vframe = (void *)((int *)frame + 3); if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags)) goto we_re_toast; memmove(vframe, frame, offsetof(struct trapframe, tf_eip)); /* Set the faulting address to the user %eip */ ksi.ksi_addr = (void *)vframe->tf_eip; break; case 0x8e: switch (*(uint32_t *)frame->tf_eip) { case 0x8e242c8e: /* mov (%esp,%gs), then */ case 0x0424648e: /* mov 0x4(%esp),%fs */ case 0x0824448e: /* mov 0x8(%esp),%es */ case 0x0c245c8e: /* mov 0xc(%esp),%ds */ break; default: goto we_re_toast; } /* * We faulted loading one of the user segment registers. * The stack frame containing the user registers is * still valid and is just below the %eip:%cs:%fl of * the kernel fault frame. */ vframe = (void *)(&frame->tf_eflags + 1); if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags)) goto we_re_toast; /* There is no valid address for the fault */ break; default: goto we_re_toast; } /* * We might have faulted trying to execute the * trampoline for a local (nested) signal handler. * Only generate SIGSEGV if the user %cs isn't changed. * (This is only strictly necessary in the 'iret' case.) */ if (!pmap_exec_fixup(&p->p_vmspace->vm_map, vframe, pcb)) { /* Save outer frame for any signal return */ l->l_md.md_regs = vframe; (*p->p_emul->e_trapsignal)(l, &ksi); } /* Return to user by reloading the user frame */ trap_return_fault_return(vframe); /* NOTREACHED */ case T_PROTFLT|T_USER: /* protection fault */ case T_TSSFLT|T_USER: case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: case T_ALIGNFLT|T_USER: KSI_INIT_TRAP(&ksi); ksi.ksi_addr = (void *)rcr2(); switch (type) { case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRERR; break; case T_TSSFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; break; case T_ALIGNFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRALN; break; case T_PROTFLT|T_USER: #ifdef VM86 if (frame->tf_eflags & PSL_VM) { vm86_gpfault(l, type & ~T_USER); goto out; } #endif /* * If pmap_exec_fixup does something, * let's retry the trap. */ if (pmap_exec_fixup(&p->p_vmspace->vm_map, frame, pcb)){ goto out; } ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; break; default: KASSERT(0); break; } goto trapsignal; case T_PRIVINFLT|T_USER: /* privileged instruction fault */ case T_FPOPFLT|T_USER: /* coprocessor operand fault */ KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGILL; ksi.ksi_addr = (void *) frame->tf_eip; switch (type) { case T_PRIVINFLT|T_USER: ksi.ksi_code = ILL_PRVOPC; break; case T_FPOPFLT|T_USER: ksi.ksi_code = ILL_COPROC; break; default: ksi.ksi_code = 0; break; } goto trapsignal; case T_ASTFLT|T_USER: /* Allow process switch. */ //curcpu()->ci_data.cpu_nast++; if (l->l_pflag & LP_OWEUPC) { l->l_pflag &= ~LP_OWEUPC; ADDUPROF(l); } /* Allow a forced task switch. */ if (curcpu()->ci_want_resched) { preempt(); } goto out; case T_BOUND|T_USER: case T_OFLOW|T_USER: case T_DIVIDE|T_USER: KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGFPE; ksi.ksi_addr = (void *)frame->tf_eip; switch (type) { case T_BOUND|T_USER: ksi.ksi_code = FPE_FLTSUB; break; case T_OFLOW|T_USER: ksi.ksi_code = FPE_INTOVF; break; case T_DIVIDE|T_USER: ksi.ksi_code = FPE_INTDIV; break; default: ksi.ksi_code = 0; break; } goto trapsignal; case T_PAGEFLT: /* Allow page faults in kernel mode. */ if (__predict_false(l == NULL)) goto we_re_toast; /* * fusubail is used by [fs]uswintr() to prevent page faulting * from inside the profiling interrupt.
*/ onfault = pcb->pcb_onfault; if (onfault == fusubail || onfault == return_address_fault) { goto copyefault; } if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) { goto we_re_toast; } cr2 = rcr2(); goto faultcommon; case T_PAGEFLT|T_USER: { /* page fault */ register vaddr_t va; register struct vmspace *vm; register struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; cr2 = rcr2(); faultcommon: vm = p->p_vmspace; if (__predict_false(vm == NULL)) { goto we_re_toast; } pcb->pcb_cr2 = cr2; va = trunc_page((vaddr_t)cr2); /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. */ if (type == T_PAGEFLT && va >= KERNBASE) map = kernel_map; else map = &vm->vm_map; if (frame->tf_err & PGEX_W) ftype = VM_PROT_WRITE; else if (frame->tf_err & PGEX_X) ftype = VM_PROT_EXECUTE; else ftype = VM_PROT_READ; #ifdef DIAGNOSTIC if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %lx\n", va); goto we_re_toast; } #endif /* Fault the original page in. */ onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = uvm_fault(map, va, ftype); pcb->pcb_onfault = onfault; if (error == 0) { if (map != kernel_map && (void *)va >= vm->vm_maxsaddr) uvm_grow(p, va); pfail = false; while (type == T_PAGEFLT) { /* * we need to switch pmap now if we're in * the middle of copyin/out. * * but we don't need to do so for kcopy as * it never touches userspace. */ kpreempt_disable(); if (curcpu()->ci_want_pmapload) { onfault = onfault_handler(pcb, frame); if (onfault != kcopy_fault) { pmap_load(); } } /* * We need to keep the pmap loaded and * so avoid being preempted until back * into the copy functions. Disable * interrupts at the hardware level before * re-enabling preemption. Interrupts * will be re-enabled by 'iret' when * returning back out of the trap stub. * They'll only be re-enabled when the * program counter is once again in * the copy functions, and so visible * to cpu_kpreempt_exit(). */ #ifndef XEN x86_disable_intr(); #endif l->l_nopreempt--; if (l->l_nopreempt > 0 || !l->l_dopreempt || pfail) { return; } #ifndef XEN x86_enable_intr(); #endif /* * If preemption fails for some reason, * don't retry it. The conditions won't * change under our nose. */ pfail = kpreempt(0); } goto out; } if (type == T_PAGEFLT) { onfault = onfault_handler(pcb, frame); if (onfault != NULL) goto copyfault; printf("uvm_fault(%p, %#lx, %d) -> %#x\n", map, va, ftype, error); goto kernelfault; } KSI_INIT_TRAP(&ksi); ksi.ksi_trap = type & ~T_USER; ksi.ksi_addr = (void *)cr2; switch (error) { case EINVAL: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRERR; break; case EACCES: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; error = EFAULT; break; case ENOMEM: ksi.ksi_signo = SIGKILL; printf("UVM: pid %d.%d (%s), uid %d killed: " "out of swap\n", p->p_pid, l->l_lid, p->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); break; default: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; break; } #ifdef TRAP_SIGDEBUG printf("pid %d.%d (%s): signal %d at eip %x addr %lx " "error %d\n", p->p_pid, l->l_lid, p->p_comm, ksi.ksi_signo, frame->tf_eip, va, error); #endif (*p->p_emul->e_trapsignal)(l, &ksi); break; } case T_TRCTRAP: /* Check whether they single-stepped into a lcall.
*/ if (frame->tf_eip == (int)IDTVEC(osyscall)) return; if (frame->tf_eip == (int)IDTVEC(osyscall) + 1) { frame->tf_eflags &= ~PSL_T; return; } goto we_re_toast; case T_BPTFLT|T_USER: /* bpt instruction fault */ case T_TRCTRAP|T_USER: /* trace trap */ /* * Don't go single-stepping into a RAS. */ if (p->p_raslist == NULL || (ras_lookup(p, (void *)frame->tf_eip) == (void *)-1)) { KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGTRAP; ksi.ksi_trap = type & ~T_USER; if (type == (T_BPTFLT|T_USER)) ksi.ksi_code = TRAP_BRKPT; else ksi.ksi_code = TRAP_TRACE; ksi.ksi_addr = (void *)frame->tf_eip; (*p->p_emul->e_trapsignal)(l, &ksi); } break; case T_NMI: if (nmi_dispatch(frame)) return; /* NMI can be hooked up to a pushbutton for debugging */ if (kgdb_trap(type, frame)) return; if (kdb_trap(type, 0, frame)) return; /* machine/parity/power fail/"kitchen sink" faults */ #if NMCA > 0 mca_nmi(); #endif x86_nmi(); } if ((type & T_USER) == 0) return; out: userret(l); return; trapsignal: ksi.ksi_trap = type & ~T_USER; (*p->p_emul->e_trapsignal)(l, &ksi); userret(l); }
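/*
 * The uvm_fault() error decoding in the page fault path above recurs, with
 * small variations, in every handler in this file.  A condensed sketch of
 * the mapping (the helper itself is hypothetical):
 *
 *	EINVAL -> SIGBUS,  BUS_ADRERR	(bad address for the object)
 *	EACCES -> SIGSEGV, SEGV_ACCERR	(mapping exists, access denied)
 *	ENOMEM -> SIGKILL		(out of swap; the process dies)
 *	other  -> SIGSEGV, SEGV_MAPERR	(no mapping at all)
 */
static void
pagefault_to_ksiginfo(int error, ksiginfo_t *ksi)
{
	switch (error) {
	case EINVAL:
		ksi->ksi_signo = SIGBUS;
		ksi->ksi_code = BUS_ADRERR;
		break;
	case EACCES:
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_code = SEGV_ACCERR;
		break;
	case ENOMEM:
		ksi->ksi_signo = SIGKILL;	/* no memory to page in */
		break;
	default:
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_code = SEGV_MAPERR;
		break;
	}
}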
void trap(struct trapframe *tf) { u_int sig = 0, type = tf->tf_trap, code = 0; u_int rv, addr; bool trapsig = true; const bool usermode = USERMODE_P(tf); struct lwp * const l = curlwp; struct proc * const p = l->l_proc; struct pcb * const pcb = lwp_getpcb(l); u_quad_t oticks = 0; struct vmspace *vm; struct vm_map *map; vm_prot_t ftype; void *onfault = pcb->pcb_onfault; KASSERT(p != NULL); curcpu()->ci_data.cpu_ntrap++; if (usermode) { type |= T_USER; oticks = p->p_sticks; l->l_md.md_utf = tf; LWP_CACHE_CREDS(l, p); } type &= ~(T_WRITE|T_PTEFETCH); #ifdef TRAPDEBUG if(tf->tf_trap==7) goto fram; if(faultdebug)printf("Trap: type %lx, code %lx, pc %lx, psl %lx\n", tf->tf_trap, tf->tf_code, tf->tf_pc, tf->tf_psl); fram: #endif switch (type) { default: #ifdef DDB kdb_trap(tf); #endif panic("trap: type %x, code %x, pc %x, psl %x", (u_int)tf->tf_trap, (u_int)tf->tf_code, (u_int)tf->tf_pc, (u_int)tf->tf_psl); case T_KSPNOTVAL: panic("%d.%d (%s): KSP invalid %#x@%#x pcb %p fp %#x psl %#x)", p->p_pid, l->l_lid, l->l_name ? l->l_name : "??", mfpr(PR_KSP), (u_int)tf->tf_pc, pcb, (u_int)tf->tf_fp, (u_int)tf->tf_psl); case T_TRANSFLT|T_USER: case T_TRANSFLT: /* * BUG! BUG! BUG! BUG! BUG! * Due to a hardware bug (in at least KA65x CPUs) a double * page table fetch trap will cause a translation fault * even if access in the SPT PTE entry specifies 'no access'. * Section 6.4.2 of the VAX Architecture Reference Manual, * for example, states that if a page both is invalid and * has no access set, an 'access violation fault' occurs. * Therefore, we must fall through here... */ #ifdef nohwbug panic("translation fault"); #endif case T_PTELEN|T_USER: /* Page table length exceeded */ case T_ACCFLT|T_USER: if (tf->tf_code < 0) { /* Check for kernel space */ sig = SIGSEGV; code = SEGV_ACCERR; break; } case T_PTELEN: #ifndef MULTIPROCESSOR /* * If we referred to an address beyond the end of the system * page table, it may be due to a failed CAS * restartable-atomic-sequence. If it is, restart it from * the beginning. */ { extern const uint8_t cas32_ras_start[], cas32_ras_end[]; if (tf->tf_code == CASMAGIC && tf->tf_pc >= (uintptr_t) cas32_ras_start && tf->tf_pc < (uintptr_t) cas32_ras_end) { tf->tf_pc = (uintptr_t) cas32_ras_start; trapsig = false; break; } } /* FALLTHROUGH */ #endif case T_ACCFLT: #ifdef TRAPDEBUG if(faultdebug)printf("trap accflt type %lx, code %lx, pc %lx, psl %lx\n", tf->tf_trap, tf->tf_code, tf->tf_pc, tf->tf_psl); #endif #ifdef DIAGNOSTIC if (p == 0) panic("trap: access fault: addr %lx code %lx", tf->tf_pc, tf->tf_code); if (tf->tf_psl & PSL_IS) panic("trap: pflt on IS"); #endif /* * Page tables are allocated in pmap_enter(). We get * info from below if it is a page table fault, but * UVM may want to map in pages without faults, so * because we must check for PTE pages anyway we don't * bother doing it here.
*/ addr = trunc_page(tf->tf_code); if (!usermode && (tf->tf_code < 0)) { vm = NULL; map = kernel_map; } else { vm = p->p_vmspace; map = &vm->vm_map; } if (tf->tf_trap & T_WRITE) ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; pcb->pcb_onfault = NULL; rv = uvm_fault(map, addr, ftype); pcb->pcb_onfault = onfault; if (rv != 0) { if (!usermode) { if (onfault) { pcb->pcb_onfault = NULL; tf->tf_pc = (unsigned)onfault; tf->tf_psl &= ~PSL_FPD; tf->tf_r0 = rv; return; } printf("r0=%08lx r1=%08lx r2=%08lx r3=%08lx ", tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); printf("r4=%08lx r5=%08lx r6=%08lx r7=%08lx\n", tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); printf( "r8=%08lx r9=%08lx r10=%08lx r11=%08lx\n", tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); printf("ap=%08lx fp=%08lx sp=%08lx pc=%08lx\n", tf->tf_ap, tf->tf_fp, tf->tf_sp, tf->tf_pc); panic("SEGV in kernel mode: pc %#lx addr %#lx", tf->tf_pc, tf->tf_code); } switch (rv) { case ENOMEM: printf("UVM: pid %d (%s), uid %d killed: " "out of swap\n", p->p_pid, p->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); sig = SIGKILL; code = SI_NOINFO; break; case EINVAL: code = BUS_ADRERR; sig = SIGBUS; break; case EACCES: code = SEGV_ACCERR; sig = SIGSEGV; break; default: code = SEGV_MAPERR; sig = SIGSEGV; break; } } else { trapsig = false; if (map != kernel_map && addr > 0 && (void *)addr >= vm->vm_maxsaddr) uvm_grow(p, addr); } break; case T_BPTFLT|T_USER: sig = SIGTRAP; code = TRAP_BRKPT; break; case T_TRCTRAP|T_USER: sig = SIGTRAP; code = TRAP_TRACE; tf->tf_psl &= ~PSL_T; break; case T_PRIVINFLT|T_USER: sig = SIGILL; code = ILL_PRVOPC; break; case T_RESADFLT|T_USER: sig = SIGILL; code = ILL_ILLADR; break; case T_RESOPFLT|T_USER: sig = SIGILL; code = ILL_ILLOPC; break; case T_XFCFLT|T_USER: sig = SIGEMT; break; case T_ARITHFLT|T_USER: sig = SIGFPE; switch (tf->tf_code) { case ATRP_INTOVF: code = FPE_INTOVF; break; case ATRP_INTDIV: code = FPE_INTDIV; break; case ATRP_FLTOVF: code = FPE_FLTOVF; break; case ATRP_FLTDIV: code = FPE_FLTDIV; break; case ATRP_FLTUND: code = FPE_FLTUND; break; case ATRP_DECOVF: code = FPE_INTOVF; break; case ATRP_FLTSUB: code = FPE_FLTSUB; break; case AFLT_FLTDIV: code = FPE_FLTDIV; break; case AFLT_FLTUND: code = FPE_FLTUND; break; case AFLT_FLTOVF: code = FPE_FLTOVF; break; default: code = FPE_FLTINV; break; } break; case T_ASTFLT|T_USER: mtpr(AST_NO,PR_ASTLVL); trapsig = false; if (curcpu()->ci_want_resched) preempt(); break; #ifdef DDB case T_BPTFLT: /* Kernel breakpoint */ case T_KDBTRAP: case T_KDBTRAP|T_USER: case T_TRCTRAP: kdb_trap(tf); return; #endif } if (trapsig) { ksiginfo_t ksi; if ((sig == SIGSEGV || sig == SIGILL) && cpu_printfataltraps && (p->p_slflag & PSL_TRACED) == 0 && !sigismember(&p->p_sigctx.ps_sigcatch, sig)) printf("pid %d.%d (%s): sig %d: type %lx, code %lx, pc %lx, psl %lx\n", p->p_pid, l->l_lid, p->p_comm, sig, tf->tf_trap, tf->tf_code, tf->tf_pc, tf->tf_psl); KSI_INIT_TRAP(&ksi); ksi.ksi_signo = sig; ksi.ksi_trap = tf->tf_trap; ksi.ksi_addr = (void *)tf->tf_code; ksi.ksi_code = code; /* * Arithmetic exceptions can be of two kinds: * - traps (codes 1..7), where pc points to the * next instruction to execute. * - faults (codes 8..10), where pc points to the * faulting instruction. * In the latter case, we need to advance pc by ourselves * to prevent a signal loop. * * XXX this is gross -- miod */ if (type == (T_ARITHFLT | T_USER) && (tf->tf_code & 8)) tf->tf_pc = skip_opcode(tf->tf_pc); trapsignal(l, &ksi); } if (!usermode) return; userret(l, tf, oticks); }
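/*
 * Sketch of the restartable-atomic-sequence (RAS) recovery in the T_PTELEN
 * case above.  The kernel's 32-bit compare-and-swap is a short instruction
 * sequence bracketed by the cas32_ras_start/cas32_ras_end labels; if a trap
 * interrupts it, the handler rewinds the PC to the start of the sequence
 * instead of delivering a signal, so the CAS appears atomic.  Only the
 * wrapper function name is invented here.
 */
static bool
cas_ras_restart(struct trapframe *tf)
{
	extern const uint8_t cas32_ras_start[], cas32_ras_end[];

	if (tf->tf_code == CASMAGIC &&
	    tf->tf_pc >= (uintptr_t)cas32_ras_start &&
	    tf->tf_pc < (uintptr_t)cas32_ras_end) {
		tf->tf_pc = (uintptr_t)cas32_ras_start;	/* rerun it */
		return true;
	}
	return false;
}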
void m88110_trap(unsigned type, struct trapframe *frame) { struct proc *p; u_quad_t sticks = 0; struct vm_map *map; vaddr_t va, pcb_onfault; vm_prot_t ftype; int fault_type; u_long fault_code; unsigned nss, fault_addr; struct vmspace *vm; union sigval sv; int result; #ifdef DDB int s; #endif int sig = 0; pt_entry_t *pte; extern struct vm_map *kernel_map; extern unsigned guarded_access_start; extern unsigned guarded_access_end; extern unsigned guarded_access_bad; extern pt_entry_t *pmap_pte(pmap_t, vaddr_t); uvmexp.traps++; if ((p = curproc) == NULL) p = &proc0; if (USERMODE(frame->tf_epsr)) { sticks = p->p_sticks; type += T_USER; p->p_md.md_tf = frame; /* for ptrace/signals */ } fault_type = 0; fault_code = 0; fault_addr = frame->tf_exip & XIP_ADDR; switch (type) { default: panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ case T_197_READ+T_USER: case T_197_READ: printf("DMMU read miss: Hardware Table Searches should be enabled!\n"); panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ case T_197_WRITE+T_USER: case T_197_WRITE: printf("DMMU write miss: Hardware Table Searches should be enabled!\n"); panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ case T_197_INST+T_USER: case T_197_INST: printf("IMMU miss: Hardware Table Searches should be enabled!\n"); panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ #ifdef DDB case T_KDB_TRACE: s = splhigh(); db_enable_interrupt(); ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame); db_disable_interrupt(); splx(s); return; case T_KDB_BREAK: s = splhigh(); db_enable_interrupt(); ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); db_disable_interrupt(); splx(s); return; case T_KDB_ENTRY: s = splhigh(); db_enable_interrupt(); ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); db_disable_interrupt(); /* skip one instruction */ if (frame->tf_exip & 1) frame->tf_exip = frame->tf_enip; else frame->tf_exip += 4; splx(s); return; #if 0 case T_ILLFLT: s = splhigh(); db_enable_interrupt(); ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" : "error fault", (db_regs_t*)frame); db_disable_interrupt(); splx(s); return; #endif /* 0 */ #endif /* DDB */ case T_ILLFLT: printf("Unimplemented opcode!\n"); panictrap(frame->tf_vector, frame); break; case T_NON_MASK: case T_NON_MASK+T_USER: (*md.interrupt_func)(T_NON_MASK, frame); return; case T_INT: case T_INT+T_USER: (*md.interrupt_func)(T_INT, frame); return; case T_MISALGNFLT: printf("kernel mode misaligned access exception @ 0x%08x\n", frame->tf_exip); panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ case T_INSTFLT: /* kernel mode instruction access fault. * Should never, never happen for a non-paged kernel. */ #ifdef TRAPDEBUG printf("Kernel Instruction fault exip %x isr %x ilar %x\n", frame->tf_exip, frame->tf_isr, frame->tf_ilar); #endif panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ case T_DATAFLT: /* kernel mode data fault */ /* data fault on the user address? 
*/ if ((frame->tf_dsr & CMMU_DSR_SU) == 0) { type = T_DATAFLT + T_USER; goto m88110_user_fault; } #ifdef TRAPDEBUG printf("Kernel Data access fault exip %x dsr %x dlar %x\n", frame->tf_exip, frame->tf_dsr, frame->tf_dlar); #endif fault_addr = frame->tf_dlar; if (frame->tf_dsr & CMMU_DSR_RW) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } else { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } va = trunc_page((vaddr_t)fault_addr); if (va == 0) { panic("trap: bad kernel access at %x", fault_addr); } vm = p->p_vmspace; map = kernel_map; if (frame->tf_dsr & CMMU_DSR_BE) { /* * If it is a guarded access, bus error is OK. */ if ((frame->tf_exip & XIP_ADDR) >= (unsigned)&guarded_access_start && (frame->tf_exip & XIP_ADDR) <= (unsigned)&guarded_access_end) { frame->tf_exip = (unsigned)&guarded_access_bad; return; } } if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { frame->tf_dsr &= ~CMMU_DSR_WE; /* undefined */ /* * On a segment or a page fault, call uvm_fault() to * resolve the fault. */ if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == 0) return; } if (frame->tf_dsr & CMMU_DSR_WE) { /* write fault */ /* * This could be a write protection fault or an * exception to set the used and modified bits * in the pte. Basically, if we got a write error, * then we already have a pte entry that faulted * in from a previous seg fault or page fault. * Get the pte and check the status of the * modified and valid bits to determine if this * is indeed a real write fault. XXX smurph */ pte = pmap_pte(map->pmap, va); #ifdef DEBUG if (pte == PT_ENTRY_NULL) panic("NULL pte on write fault??"); #endif if (!(*pte & PG_M) && !(*pte & PG_RO)) { /* Set modified bit and try the write again. */ #ifdef TRAPDEBUG printf("Corrected kernel write fault, map %x pte %x\n", map->pmap, *pte); #endif *pte |= PG_M; return; #if 1 /* shouldn't happen */ } else { /* must be a real wp fault */ #ifdef TRAPDEBUG printf("Uncorrected kernel write fault, map %x pte %x\n", map->pmap, *pte); #endif if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == 0) return; #endif } } panictrap(frame->tf_vector, frame); /* NOTREACHED */ case T_INSTFLT+T_USER: /* User mode instruction access fault */ /* FALLTHROUGH */ case T_DATAFLT+T_USER: m88110_user_fault: if (type == T_INSTFLT+T_USER) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; #ifdef TRAPDEBUG printf("User Instruction fault exip %x isr %x ilar %x\n", frame->tf_exip, frame->tf_isr, frame->tf_ilar); #endif } else { fault_addr = frame->tf_dlar; if (frame->tf_dsr & CMMU_DSR_RW) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } else { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } #ifdef TRAPDEBUG printf("User Data access fault exip %x dsr %x dlar %x\n", frame->tf_exip, frame->tf_dsr, frame->tf_dlar); #endif } va = trunc_page((vaddr_t)fault_addr); vm = p->p_vmspace; map = &vm->vm_map; if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; /* * Call uvm_fault() to resolve non-bus error faults * whenever possible.
*/ if (type == T_DATAFLT+T_USER) { /* data faults */ if (frame->tf_dsr & CMMU_DSR_BE) { /* bus error */ result = EACCES; } else if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { /* segment or page fault */ result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == EACCES) result = EFAULT; } else if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) { /* copyback or write allocate error */ result = EACCES; } else if (frame->tf_dsr & CMMU_DSR_WE) { /* write fault */ /* This could be a write protection fault or an * exception to set the used and modified bits * in the pte. Basically, if we got a write * error, then we already have a pte entry that * faulted in from a previous seg fault or page * fault. * Get the pte and check the status of the * modified and valid bits to determine if this * is indeed a real write fault. XXX smurph */ pte = pmap_pte(vm_map_pmap(map), va); #ifdef DEBUG if (pte == PT_ENTRY_NULL) panic("NULL pte on write fault??"); #endif if (!(*pte & PG_M) && !(*pte & PG_RO)) { /* * Set modified bit and try the * write again. */ #ifdef TRAPDEBUG printf("Corrected userland write fault, map %x pte %x\n", map->pmap, *pte); #endif *pte |= PG_M; /* * invalidate ATCs to force * table search */ set_dcmd(CMMU_DCMD_INV_UATC); return; } else { /* must be a real wp fault */ #ifdef TRAPDEBUG printf("Uncorrected userland write fault, map %x pte %x\n", map->pmap, *pte); #endif result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == EACCES) result = EFAULT; } } else { #ifdef TRAPDEBUG printf("Unexpected Data access fault dsr %x\n", frame->tf_dsr); #endif panictrap(frame->tf_vector, frame); } } else { /* instruction faults */ if (frame->tf_isr & (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) { /* bus error, supervisor protection */ result = EACCES; } else if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) { /* segment or page fault */ result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == EACCES) result = EFAULT; } else { #ifdef TRAPDEBUG printf("Unexpected Instruction fault isr %x\n", frame->tf_isr); #endif panictrap(frame->tf_vector, frame); } } if ((caddr_t)va >= vm->vm_maxsaddr) { if (result == 0) { nss = btoc(USRSTACK - va);/* XXX check this */ if (nss > vm->vm_ssize) vm->vm_ssize = nss; } } /* * This could be a fault caused in copyin*() * while accessing user space. */ if (result != 0 && pcb_onfault != 0) { frame->tf_exip = pcb_onfault; /* * Continue as if the fault had been resolved. */ result = 0; } if (result != 0) { sig = result == EACCES ? SIGBUS : SIGSEGV; fault_type = result == EACCES ?
BUS_ADRERR : SEGV_MAPERR; } break; case T_MISALGNFLT+T_USER: /* Fix any misaligned ld.d or st.d instructions */ sig = double_reg_fixup(frame); fault_type = BUS_ADRALN; break; case T_PRIVINFLT+T_USER: case T_ILLFLT+T_USER: #ifndef DDB case T_KDB_BREAK: case T_KDB_ENTRY: case T_KDB_TRACE: #endif case T_KDB_BREAK+T_USER: case T_KDB_ENTRY+T_USER: case T_KDB_TRACE+T_USER: sig = SIGILL; break; case T_BNDFLT+T_USER: sig = SIGFPE; break; case T_ZERODIV+T_USER: sig = SIGFPE; fault_type = FPE_INTDIV; break; case T_OVFFLT+T_USER: sig = SIGFPE; fault_type = FPE_INTOVF; break; case T_FPEPFLT+T_USER: case T_FPEIFLT+T_USER: sig = SIGFPE; break; case T_SIGSYS+T_USER: sig = SIGSYS; break; case T_SIGTRAP+T_USER: sig = SIGTRAP; fault_type = TRAP_TRACE; break; case T_STEPBPT+T_USER: #ifdef PTRACE /* * This trap is used by the kernel to support single-step * debugging (although any user could generate this trap * which should probably be handled differently). When a * process is continued by a debugger with the PT_STEP * function of ptrace (single step), the kernel inserts * one or two breakpoints in the user process so that only * one instruction (or two in the case of a delayed branch) * is executed. When this breakpoint is hit, we get the * T_STEPBPT trap. */ { unsigned instr; unsigned pc = PC_REGS(&frame->tf_regs); /* read break instruction */ copyin((caddr_t)pc, &instr, sizeof(unsigned)); #if 0 printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n", p->p_comm, p->p_pid, instr, pc, p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */ #endif /* check and see if we got here by accident */ #ifdef notyet if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) { sig = SIGTRAP; fault_type = TRAP_TRACE; break; } #endif /* restore original instruction and clear BP */ instr = p->p_md.md_ss_instr; if (instr != 0) ss_put_value(p, pc, instr, sizeof(instr)); p->p_md.md_ss_addr = 0; p->p_md.md_ss_instr = 0; sig = SIGTRAP; fault_type = TRAP_BRKPT; } #else sig = SIGTRAP; fault_type = TRAP_TRACE; #endif break; case T_USERBPT+T_USER: /* * This trap is meant to be used by debuggers to implement * breakpoint debugging. When we get this trap, we just * return a signal which gets caught by the debugger. */ sig = SIGTRAP; fault_type = TRAP_BRKPT; break; case T_ASTFLT+T_USER: uvmexp.softs++; want_ast = 0; if (p->p_flag & P_OWEUPC) { p->p_flag &= ~P_OWEUPC; ADDUPROF(p); } break; } /* * If trap from supervisor mode, just return */ if (type < T_USER) return; if (sig) { sv.sival_int = fault_addr; trapsignal(p, sig, fault_code, fault_type, sv); } userret(p, frame, sticks); }
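/*
 * Conceptual sketch of the PT_STEP setup that produces the T_STEPBPT traps
 * handled above (and in the m88100 handler below).  m88k has no hardware
 * trace bit, so the kernel plants breakpoints: one after the current
 * instruction, and a second at the branch target when stepping over a
 * (possibly delayed) branch.  copyin()/ss_put_value() are the calls the
 * handlers themselves use; the function below and its insertion points are
 * assumptions for illustration.
 */
static int
sstep_install_sketch(struct proc *p, vaddr_t next_va, vaddr_t taken_va)
{
	unsigned bp = SSBREAKPOINT;

	/* save the original instructions so the trap can restore them */
	copyin((caddr_t)next_va, &p->p_md.md_ss_instr, sizeof(unsigned));
	p->p_md.md_ss_addr = next_va;
	ss_put_value(p, next_va, bp, sizeof(unsigned));

	if (taken_va != 0) {	/* branch: also trap the taken path */
		copyin((caddr_t)taken_va, &p->p_md.md_ss_taken_instr,
		    sizeof(unsigned));
		p->p_md.md_ss_taken_addr = taken_va;
		ss_put_value(p, taken_va, bp, sizeof(unsigned));
	}
	return 0;
}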
void m88100_trap(unsigned type, struct trapframe *frame) { struct proc *p; u_quad_t sticks = 0; struct vm_map *map; vaddr_t va, pcb_onfault; vm_prot_t ftype; int fault_type, pbus_type; u_long fault_code; unsigned nss, fault_addr; struct vmspace *vm; union sigval sv; int result; #ifdef DDB int s; #endif int sig = 0; extern struct vm_map *kernel_map; extern caddr_t guarded_access_start; extern caddr_t guarded_access_end; extern caddr_t guarded_access_bad; uvmexp.traps++; if ((p = curproc) == NULL) p = &proc0; if (USERMODE(frame->tf_epsr)) { sticks = p->p_sticks; type += T_USER; p->p_md.md_tf = frame; /* for ptrace/signals */ } fault_type = 0; fault_code = 0; fault_addr = frame->tf_sxip & XIP_ADDR; switch (type) { default: panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ #if defined(DDB) case T_KDB_BREAK: s = splhigh(); db_enable_interrupt(); ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); db_disable_interrupt(); splx(s); return; case T_KDB_ENTRY: s = splhigh(); db_enable_interrupt(); ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); db_disable_interrupt(); splx(s); return; #endif /* DDB */ case T_ILLFLT: printf("Unimplemented opcode!\n"); panictrap(frame->tf_vector, frame); break; case T_INT: case T_INT+T_USER: /* This function pointer is set in machdep.c. It calls m188_ext_int or sbc_ext_int depending on the value of brdtyp - smurph */ (*md.interrupt_func)(T_INT, frame); return; case T_MISALGNFLT: printf("kernel misaligned access exception @ 0x%08x\n", frame->tf_sxip); panictrap(frame->tf_vector, frame); break; case T_INSTFLT: /* kernel mode instruction access fault. * Should never, never happen for a non-paged kernel. */ #ifdef TRAPDEBUG pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif panictrap(frame->tf_vector, frame); break; case T_DATAFLT: /* kernel mode data fault */ /* data fault on the user address? */ if ((frame->tf_dmt0 & DMT_DAS) == 0) { type = T_DATAFLT + T_USER; goto user_fault; } fault_addr = frame->tf_dma0; if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } else { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } va = trunc_page((vaddr_t)fault_addr); if (va == 0) { panic("trap: bad kernel access at %x", fault_addr); } vm = p->p_vmspace; map = kernel_map; pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); #ifdef TRAPDEBUG printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif switch (pbus_type) { case CMMU_PFSR_BERROR: /* * If it is a guarded access, bus error is OK. */ if ((frame->tf_sxip & XIP_ADDR) >= (unsigned)&guarded_access_start && (frame->tf_sxip & XIP_ADDR) <= (unsigned)&guarded_access_end) { frame->tf_snip = ((unsigned)&guarded_access_bad ) | NIP_V; frame->tf_sfip = ((unsigned)&guarded_access_bad + 4) | FIP_V; frame->tf_sxip = 0; /* We sort of resolved the fault ourselves * because we know where it came from * [guarded_access()]. But we must still think * about the other possible transactions in * dmt1 & dmt2. Mark dmt0 so that * data_access_emulation skips it. XXX smurph */ frame->tf_dmt0 |= DMT_SKIP; data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; return; } break; case CMMU_PFSR_SUCCESS: /* * The fault was resolved.
Call data_access_emulation * to drain the data unit pipe line and reset dmt0 * so that trap won't get called again. */ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; return; case CMMU_PFSR_SFAULT: case CMMU_PFSR_PFAULT: if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == 0) { /* * We could resolve the fault. Call * data_access_emulation to drain the data * unit pipe line and reset dmt0 so that trap * won't get called again. */ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; return; } break; } #ifdef TRAPDEBUG printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type, pbus_exception_type[pbus_type], va); #endif panictrap(frame->tf_vector, frame); /* NOTREACHED */ case T_INSTFLT+T_USER: /* User mode instruction access fault */ /* FALLTHROUGH */ case T_DATAFLT+T_USER: user_fault: if (type == T_INSTFLT + T_USER) { pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); #ifdef TRAPDEBUG printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif } else { fault_addr = frame->tf_dma0; pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); #ifdef TRAPDEBUG printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif } if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) { ftype = VM_PROT_READ | VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } else { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } va = trunc_page((vaddr_t)fault_addr); vm = p->p_vmspace; map = &vm->vm_map; if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; /* Call uvm_fault() to resolve non-bus error faults */ switch (pbus_type) { case CMMU_PFSR_SUCCESS: result = 0; break; case CMMU_PFSR_BERROR: result = EACCES; break; default: result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); if (result == EACCES) result = EFAULT; break; } p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if ((caddr_t)va >= vm->vm_maxsaddr) { if (result == 0) { nss = btoc(USRSTACK - va);/* XXX check this */ if (nss > vm->vm_ssize) vm->vm_ssize = nss; } } /* * This could be a fault caused in copyin*() * while accessing user space. */ if (result != 0 && pcb_onfault != 0) { frame->tf_snip = pcb_onfault | NIP_V; frame->tf_sfip = (pcb_onfault + 4) | FIP_V; frame->tf_sxip = 0; /* * Continue as if the fault had been resolved, but * do not try to complete the faulting access. */ frame->tf_dmt0 |= DMT_SKIP; result = 0; } if (result == 0) { if (type == T_DATAFLT+T_USER) { /* * We could resolve the fault. Call * data_access_emulation to drain the data unit * pipe line and reset dmt0 so that trap won't * get called again. */ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; } else { /* * back up SXIP, SNIP, * clearing the Error bit */ frame->tf_sfip = frame->tf_snip & ~FIP_E; frame->tf_snip = frame->tf_sxip & ~NIP_E; frame->tf_ipfsr = 0; } } else { sig = result == EACCES ? SIGBUS : SIGSEGV; fault_type = result == EACCES ? 
BUS_ADRERR : SEGV_MAPERR; } break; case T_MISALGNFLT+T_USER: /* Fix any misaligned ld.d or st.d instructions */ sig = double_reg_fixup(frame); fault_type = BUS_ADRALN; break; case T_PRIVINFLT+T_USER: case T_ILLFLT+T_USER: #ifndef DDB case T_KDB_BREAK: case T_KDB_ENTRY: #endif case T_KDB_BREAK+T_USER: case T_KDB_ENTRY+T_USER: case T_KDB_TRACE: case T_KDB_TRACE+T_USER: sig = SIGILL; break; case T_BNDFLT+T_USER: sig = SIGFPE; break; case T_ZERODIV+T_USER: sig = SIGFPE; fault_type = FPE_INTDIV; break; case T_OVFFLT+T_USER: sig = SIGFPE; fault_type = FPE_INTOVF; break; case T_FPEPFLT+T_USER: case T_FPEIFLT+T_USER: sig = SIGFPE; break; case T_SIGSYS+T_USER: sig = SIGSYS; break; case T_SIGTRAP+T_USER: sig = SIGTRAP; fault_type = TRAP_TRACE; break; case T_STEPBPT+T_USER: #ifdef PTRACE /* * This trap is used by the kernel to support single-step * debugging (although any user could generate this trap * which should probably be handled differently). When a * process is continued by a debugger with the PT_STEP * function of ptrace (single step), the kernel inserts * one or two breakpoints in the user process so that only * one instruction (or two in the case of a delayed branch) * is executed. When this breakpoint is hit, we get the * T_STEPBPT trap. */ { unsigned va; unsigned instr; unsigned pc = PC_REGS(&frame->tf_regs); /* read break instruction */ copyin((caddr_t)pc, &instr, sizeof(unsigned)); #if 0 printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n", p->p_comm, p->p_pid, instr, pc, p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */ #endif /* check and see if we got here by accident */ if ((p->p_md.md_ss_addr != pc && p->p_md.md_ss_taken_addr != pc) || instr != SSBREAKPOINT) { sig = SIGTRAP; fault_type = TRAP_TRACE; break; } /* restore original instruction and clear BP */ va = p->p_md.md_ss_addr; if (va != 0) { instr = p->p_md.md_ss_instr; ss_put_value(p, va, instr, sizeof(instr)); } /* branch taken instruction */ instr = p->p_md.md_ss_taken_instr; if (instr != 0) { va = p->p_md.md_ss_taken_addr; ss_put_value(p, va, instr, sizeof(instr)); } #if 1 frame->tf_sfip = frame->tf_snip; frame->tf_snip = pc | NIP_V; #endif p->p_md.md_ss_addr = 0; p->p_md.md_ss_instr = 0; p->p_md.md_ss_taken_addr = 0; p->p_md.md_ss_taken_instr = 0; sig = SIGTRAP; fault_type = TRAP_BRKPT; } #else sig = SIGTRAP; fault_type = TRAP_TRACE; #endif break; case T_USERBPT+T_USER: /* * This trap is meant to be used by debuggers to implement * breakpoint debugging. When we get this trap, we just * return a signal which gets caught by the debugger. */ frame->tf_sfip = frame->tf_snip; frame->tf_snip = frame->tf_sxip; sig = SIGTRAP; fault_type = TRAP_BRKPT; break; case T_ASTFLT+T_USER: uvmexp.softs++; want_ast = 0; if (p->p_flag & P_OWEUPC) { p->p_flag &= ~P_OWEUPC; ADDUPROF(p); } break; } /* * If trap from supervisor mode, just return */ if (type < T_USER) return; if (sig) { sv.sival_int = fault_addr; trapsignal(p, sig, fault_code, fault_type, sv); /* * don't want multiple faults - we are going to * deliver signal. */ frame->tf_dmt0 = 0; frame->tf_ipfsr = frame->tf_dpfsr = 0; } userret(p, frame, sticks); }
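/*
 * Sketch of the pcb_onfault redirection used by the m88100 handler above.
 * The 88100 front end exposes two pipelined instruction pointers, so
 * recovery must seed both snip and sfip (with their valid bits) and clear
 * the faulted sxip so the aborted instruction is not retried; DMT_SKIP
 * tells data_access_emulation() not to replay the faulted transaction.
 * Only the helper name is invented; the assignments appear verbatim above.
 */
static void
m88100_redirect_onfault(struct trapframe *frame, vaddr_t pcb_onfault)
{
	frame->tf_snip = pcb_onfault | NIP_V;	    /* next instruction */
	frame->tf_sfip = (pcb_onfault + 4) | FIP_V; /* and the one after */
	frame->tf_sxip = 0;			    /* drop faulted insn */
	frame->tf_dmt0 |= DMT_SKIP;	/* don't replay the data access */
}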
/* * Trap is called from locore to handle most types of processor traps. */ void trap(unsigned int status, unsigned int cause, vaddr_t vaddr, vaddr_t opc, struct trapframe *frame) { int type; struct lwp *l = curlwp; struct proc *p = curproc; vm_prot_t ftype; ksiginfo_t ksi; struct frame *fp; extern void fswintrberr(void); KSI_INIT_TRAP(&ksi); uvmexp.traps++; if ((type = TRAPTYPE(cause)) >= LENGTH(trap_type)) panic("trap: unknown trap type %d", type); if (USERMODE(status)) { type |= T_USER; LWP_CACHE_CREDS(l, p); } /* Enable interrupts just as they were before the trap. */ _splset(status & AVR32_STATUS_IMx); switch (type) { default: dopanic: (void)splhigh(); printf("trap: %s in %s mode\n", trap_type[TRAPTYPE(cause)], USERMODE(status) ? "user" : "kernel"); printf("status=0x%x, cause=0x%x, epc=%#lx, vaddr=%#lx\n", status, cause, opc, vaddr); if (curlwp != NULL) { fp = (struct frame *)l->l_md.md_regs; printf("pid=%d cmd=%s usp=0x%x ", p->p_pid, p->p_comm, (int)fp->f_regs[_R_SP]); } else printf("curlwp == NULL "); printf("ksp=%p\n", &status); #if defined(DDB) kdb_trap(type, (mips_reg_t *) frame); /* XXX force halt XXX */ #elif defined(KGDB) { struct frame *f = (struct frame *)&ddb_regs; extern mips_reg_t kgdb_cause, kgdb_vaddr; kgdb_cause = cause; kgdb_vaddr = vaddr; /* * init global ddb_regs, used in db_interface.c routines * shared between ddb and gdb. Send ddb_regs to gdb so * that db_machdep.h macros will work with it, and * allow gdb to alter the PC. */ db_set_ddb_regs(type, (mips_reg_t *) frame); PC_BREAK_ADVANCE(f); if (kgdb_trap(type, &ddb_regs)) { ((mips_reg_t *)frame)[21] = f->f_regs[_R_PC]; return; } } #else panic("trap: dopanic: notyet"); #endif /*NOTREACHED*/ case T_TLB_MOD: panic("trap: T_TLB_MOD: notyet"); #if notyet if (KERNLAND(vaddr)) { pt_entry_t *pte; unsigned entry; paddr_t pa; pte = kvtopte(vaddr); entry = pte->pt_entry; if (!avr32_pte_v(entry) /*|| (entry & mips_pg_m_bit())*/) { panic("ktlbmod: invalid pte"); } if (entry & avr32_pte_ropage_bit()) { /* write to read only page in the kernel */ ftype = VM_PROT_WRITE; goto kernelfault; } entry |= mips_pg_m_bit(); /* XXXAVR32 Do it on tlbarlo/ tlbarhi? */ pte->pt_entry = entry; vaddr &= ~PGOFSET; MachTLBUpdate(vaddr, entry); pa = avr32_tlbpfn_to_paddr(entry); if (!IS_VM_PHYSADDR(pa)) { printf("ktlbmod: va %#lx pa %#llx\n", vaddr, (long long)pa); panic("ktlbmod: unmanaged page"); } pmap_set_modified(pa); return; /* KERN */ } /*FALLTHROUGH*/ #endif case T_TLB_MOD+T_USER: panic("trap: T_TLB_MOD+T_USER: notyet"); #if notyet { pt_entry_t *pte; unsigned entry; paddr_t pa; pmap_t pmap; pmap = p->p_vmspace->vm_map.pmap; if (!(pte = pmap_segmap(pmap, vaddr))) panic("utlbmod: invalid segmap"); pte += (vaddr >> PGSHIFT) & (NPTEPG - 1); entry = pte->pt_entry; if (!avr32_pte_v(entry)) panic("utlbmod: invalid pte"); if (entry & avr32_pte_ropage_bit()) { /* write to read only page */ ftype = VM_PROT_WRITE; goto pagefault; } /* entry |= mips_pg_m_bit(); XXXAVR32 Do it on tlbarlo/ tlbarhi? */ pte->pt_entry = entry; vaddr = (vaddr & ~PGOFSET) | (pmap->pm_asid << AVR32_TLB_PID_SHIFT); MachTLBUpdate(vaddr, entry); pa = avr32_tlbpfn_to_paddr(entry); if (!IS_VM_PHYSADDR(pa)) { printf("utlbmod: va %#lx pa %#llx\n", vaddr, (long long)pa); panic("utlbmod: unmanaged page"); } pmap_set_modified(pa); if (type & T_USER) userret(l); return; /* GEN */ } #endif case T_TLB_LD_MISS: panic("trap: T_TLB_LD_MISS: notyet"); case T_TLB_ST_MISS: ftype = (type == T_TLB_LD_MISS) ?
VM_PROT_READ : VM_PROT_WRITE; if (KERNLAND(vaddr)) goto kernelfault; panic("trap: T_TLB_ST_MISS: notyet"); #if notyet /* * It is an error for the kernel to access user space except * through the copyin/copyout routines. */ if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL) goto dopanic; /* check for fuswintr() or suswintr() getting a page fault */ if (l->l_addr->u_pcb.pcb_onfault == (void *)fswintrberr) { frame->tf_regs[TF_EPC] = (int)fswintrberr; return; /* KERN */ } goto pagefault; #endif case T_TLB_LD_MISS+T_USER: panic("trap: T_TLB_LD_MISS+T_USER: notyet"); #if notyet ftype = VM_PROT_READ; goto pagefault; #endif case T_TLB_ST_MISS+T_USER: panic("trap: T_TLB_ST_MISS+T_USER: notyet"); #if notyet ftype = VM_PROT_WRITE; #endif pagefault: ; { vaddr_t va; struct vmspace *vm; struct vm_map *map; int rv; vm = p->p_vmspace; map = &vm->vm_map; va = trunc_page(vaddr); if ((l->l_flag & LW_SA) && (~l->l_pflag & LP_SA_NOBLOCK)) { l->l_savp->savp_faultaddr = (vaddr_t)vaddr; l->l_pflag |= LP_SA_PAGEFAULT; } if (p->p_emul->e_fault) rv = (*p->p_emul->e_fault)(p, va, ftype); else rv = uvm_fault(map, va, ftype); #ifdef VMFAULT_TRACE printf( "uvm_fault(%p (pmap %p), %lx (0x%x), %d) -> %d at pc %p\n", map, vm->vm_map.pmap, va, vaddr, ftype, rv, (void*)opc); #endif /* * If this was a stack access we keep track of the maximum * accessed stack size. Also, if vm_fault gets a protection * failure it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error. */ if ((void *)va >= vm->vm_maxsaddr) { if (rv == 0){ uvm_grow(p, va); } else if (rv == EACCES) rv = EFAULT; } l->l_pflag &= ~LP_SA_PAGEFAULT; if (rv == 0) { if (type & T_USER) { userret(l); } return; /* GEN */ } if ((type & T_USER) == 0) goto copyfault; if (rv == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: out of swap\n", p->p_pid, p->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : (uid_t) -1); ksi.ksi_signo = SIGKILL; ksi.ksi_code = 0; } else { if (rv == EACCES) { ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; } else { ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; } } ksi.ksi_trap = type & ~T_USER; ksi.ksi_addr = (void *)vaddr; break; /* SIGNAL */ } kernelfault: ; { vaddr_t va; int rv; va = trunc_page(vaddr); rv = uvm_fault(kernel_map, va, ftype); if (rv == 0) return; /* KERN */ /*FALLTHROUGH*/ } case T_ADDR_ERR_LD: /* misaligned access */ case T_ADDR_ERR_ST: /* misaligned access */ case T_BUS_ERR_LD_ST: /* BERR asserted to CPU */ copyfault: panic("trap: copyfault: notyet"); #if notyet if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL) goto dopanic; frame->tf_regs[TF_EPC] = (intptr_t)l->l_addr->u_pcb.pcb_onfault; return; /* KERN */ #endif #if notyet case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */ case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */ case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to CPU */ case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to CPU */ ksi.ksi_trap = type & ~T_USER; ksi.ksi_signo = SIGSEGV; /* XXX */ ksi.ksi_addr = (void *)vaddr; ksi.ksi_code = SEGV_MAPERR; /* XXX */ break; /* SIGNAL */ case T_BREAK: panic("trap: T_BREAK: notyet"); #if defined(DDB) kdb_trap(type, (avr32_reg_t *) frame); return; /* KERN */ #elif defined(KGDB) { struct frame *f = (struct frame *)&ddb_regs; extern avr32_reg_t kgdb_cause, kgdb_vaddr; kgdb_cause = cause; kgdb_vaddr = vaddr; /* * init global ddb_regs, used in db_interface.c routines * shared between ddb and gdb. 
Send ddb_regs to gdb so * that db_machdep.h macros will work with it, and * allow gdb to alter the PC. */ db_set_ddb_regs(type, (avr32_reg_t *) frame); PC_BREAK_ADVANCE(f); if (!kgdb_trap(type, &ddb_regs)) printf("kgdb: ignored %s\n", trap_type[TRAPTYPE(cause)]); else ((avr32_reg_t *)frame)[21] = f->f_regs[_R_PC]; return; } #else goto dopanic; #endif case T_BREAK+T_USER: { vaddr_t va; uint32_t instr; int rv; /* compute address of break instruction */ va = (DELAYBRANCH(cause)) ? opc + sizeof(int) : opc; /* read break instruction */ instr = fuiword((void *)va); if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) { ksi.ksi_trap = type & ~T_USER; ksi.ksi_signo = SIGTRAP; ksi.ksi_addr = (void *)va; ksi.ksi_code = TRAP_TRACE; break; } /* * Restore original instruction and clear BP */ rv = suiword((void *)va, l->l_md.md_ss_instr); if (rv < 0) { vaddr_t sa, ea; sa = trunc_page(va); ea = round_page(va + sizeof(int) - 1); rv = uvm_map_protect(&p->p_vmspace->vm_map, sa, ea, VM_PROT_ALL, false); if (rv == 0) { rv = suiword((void *)va, l->l_md.md_ss_instr); (void)uvm_map_protect(&p->p_vmspace->vm_map, sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false); } } mips_icache_sync_all(); /* XXXJRT -- necessary? */ mips_dcache_wbinv_all(); /* XXXJRT -- necessary? */ if (rv < 0) printf("Warning: can't restore instruction at 0x%lx: 0x%x\n", l->l_md.md_ss_addr, l->l_md.md_ss_instr); l->l_md.md_ss_addr = 0; ksi.ksi_trap = type & ~T_USER; ksi.ksi_signo = SIGTRAP; ksi.ksi_addr = (void *)va; ksi.ksi_code = TRAP_BRKPT; break; /* SIGNAL */ } case T_RES_INST+T_USER: case T_COP_UNUSABLE+T_USER: #if !defined(SOFTFLOAT) && !defined(NOFPU) if ((cause & MIPS_CR_COP_ERR) == 0x10000000) { struct frame *f; f = (struct frame *)l->l_md.md_regs; savefpregs(fpcurlwp); /* yield FPA */ loadfpregs(l); /* load FPA */ fpcurlwp = l; l->l_md.md_flags |= MDP_FPUSED; f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT; } else #endif { MachEmulateInst(status, cause, opc, l->l_md.md_regs); } userret(l); return; /* GEN */ case T_FPE+T_USER: panic ("trap: T_FPE+T_USER: notyet"); #if defined(SOFTFLOAT) MachEmulateInst(status, cause, opc, l->l_md.md_regs); #elif !defined(NOFPU) MachFPTrap(status, cause, opc, l->l_md.md_regs); #endif userret(l); return; /* GEN */ case T_OVFLOW+T_USER: case T_TRAP+T_USER: ksi.ksi_trap = type & ~T_USER; ksi.ksi_signo = SIGFPE; fp = (struct frame *)l->l_md.md_regs; ksi.ksi_addr = (void *)fp->f_regs[_R_PC]; ksi.ksi_code = FPE_FLTOVF; /* XXX */ break; /* SIGNAL */ #endif } panic("trap: post-switch: notyet"); #if notyet fp = (struct frame *)l->l_md.md_regs; fp->f_regs[_R_CAUSE] = cause; fp->f_regs[_R_BADVADDR] = vaddr; (*p->p_emul->e_trapsignal)(l, &ksi); if ((type & T_USER) == 0) panic("trapsignal"); userret(l); #endif return; }
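/*
 * Editor's sketch (illustration only, not part of this kernel): the
 * T_TLB_MOD paths above implement modified-bit ("dirty") emulation.
 * A writable page is first entered into the TLB read-only; the first
 * store traps, and the handler either records the page as modified and
 * re-enters the mapping writable, or, if the page really is read-only,
 * falls through to the general protection-fault path.  The pte bit
 * assignments below are invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define PTE_V	0x1u	/* valid (hypothetical bit) */
#define PTE_RO	0x2u	/* page is genuinely read-only (hypothetical bit) */
#define PTE_M	0x4u	/* software "modified" bit (hypothetical bit) */

/* Returns 1 if the write may be replayed, 0 if it is a real fault. */
static int
tlb_mod_emulate(uint32_t *pte)
{
	if ((*pte & PTE_V) == 0)
		return 0;	/* invalid pte: real fault */
	if (*pte & PTE_RO)
		return 0;	/* write to a read-only page: real fault */
	*pte |= PTE_M;		/* record the page as dirty... */
	return 1;		/* ...and let the store proceed */
}

int
main(void)
{
	uint32_t writable = PTE_V, readonly = PTE_V | PTE_RO;

	printf("writable: %d, read-only: %d\n",
	    tlb_mod_emulate(&writable), tlb_mod_emulate(&readonly));
	return 0;
}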
/* * General page fault handler. */ void do_fault(struct trapframe *tf, struct lwp *l, struct vm_map *map, vaddr_t va, vm_prot_t atype) { int error; if (pmap_fault(map->pmap, va, atype)) return; struct pcb * const pcb = lwp_getpcb(l); void * const onfault = pcb->pcb_onfault; const bool user = TRAP_USERMODE(tf); if (cpu_intr_p()) { KASSERT(!user); error = EFAULT; } else { pcb->pcb_onfault = NULL; error = uvm_fault(map, va, atype); pcb->pcb_onfault = onfault; } if (error != 0) { ksiginfo_t ksi; if (onfault != NULL) { tf->tf_r0 = error; tf->tf_r15 = (tf->tf_r15 & ~R15_PC) | (register_t)onfault; return; } #ifdef DDB if (db_validating) { db_faulted = true; tf->tf_r15 += INSN_SIZE; return; } #endif if (!user) { #ifdef DDB db_printf("Unhandled data abort in kernel mode\n"); kdb_trap(T_FAULT, tf); #else #ifdef DEBUG printf("Unhandled data abort:\n"); printregs(tf); #endif panic("unhandled data abort in kernel mode"); #endif } KSI_INIT_TRAP(&ksi); if (error == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: " "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); ksi.ksi_signo = SIGKILL; } else ksi.ksi_signo = SIGSEGV; ksi.ksi_code = (error == EACCES) ? SEGV_ACCERR : SEGV_MAPERR; ksi.ksi_addr = (void *) va; trapsignal(l, &ksi); } else if (!user) { ucas_ras_check(tf); } }
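/*
 * Editor's sketch (userland analogy, with invented names): the
 * pcb_onfault dance in do_fault() above, i.e. save a recovery address,
 * clear it around uvm_fault(), and on an unresolved kernel fault rewrite
 * the trapframe PC so the faulting copy routine returns an error instead
 * of panicking, behaves much like the sigsetjmp()/siglongjmp() fault
 * fence below.  Jumping out of a SIGSEGV handler is only portable enough
 * for a demo; the kernel mechanism exists precisely because it has no
 * clean userland equivalent.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf onfault_env;		/* plays the role of pcb_onfault */

static void
segv_handler(int sig)
{
	siglongjmp(onfault_env, 1);	/* "rewrite the PC" to the fence */
}

/* Returns 0 on success, -1 if the copy faulted. */
static int
careful_copy(void *dst, const void *src, size_t len)
{
	if (sigsetjmp(onfault_env, 1) != 0)
		return -1;		/* arrived here via the handler */
	memcpy(dst, src, len);
	return 0;
}

int
main(void)
{
	char buf[16];

	signal(SIGSEGV, segv_handler);
	printf("good copy: %d\n", careful_copy(buf, "hello", 6));
	printf("bad copy: %d\n", careful_copy(buf, (const char *)0x1, 1));
	return 0;
}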
void data_abort_handler(trapframe_t *tf) { struct vm_map *map; struct lwp * const l = curlwp; struct cpu_info * const ci = curcpu(); u_int far, fsr; vm_prot_t ftype; void *onfault; vaddr_t va; int error; ksiginfo_t ksi; UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); /* Grab FAR/FSR before enabling interrupts */ far = cpu_faultaddress(); fsr = cpu_faultstatus(); /* Update vmmeter statistics */ ci->ci_data.cpu_ntrap++; /* Re-enable interrupts if they were enabled previously */ KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0); if (__predict_true((tf->tf_spsr & IF32_bits) != IF32_bits)) restore_interrupts(tf->tf_spsr & IF32_bits); /* Get the current lwp structure */ UVMHIST_LOG(maphist, " (l=%#x, far=%#x, fsr=%#x", l, far, fsr, 0); UVMHIST_LOG(maphist, " tf=%#x, pc=%#x)", tf, tf->tf_pc, 0, 0); /* Data abort came from user mode? */ bool user = (TRAP_USERMODE(tf) != 0); if (user) LWP_CACHE_CREDS(l, l->l_proc); /* Grab the current pcb */ struct pcb * const pcb = lwp_getpcb(l); curcpu()->ci_abt_evs[fsr & FAULT_TYPE_MASK].ev_count++; /* Invoke the appropriate handler, if necessary */ if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { #ifdef DIAGNOSTIC printf("%s: data_aborts fsr=0x%x far=0x%x\n", __func__, fsr, far); #endif if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, l, &ksi)) goto do_trapsignal; goto out; } /* * At this point, we're dealing with one of the following data aborts: * * FAULT_TRANS_S - Translation -- Section * FAULT_TRANS_P - Translation -- Page * FAULT_DOMAIN_S - Domain -- Section * FAULT_DOMAIN_P - Domain -- Page * FAULT_PERM_S - Permission -- Section * FAULT_PERM_P - Permission -- Page * * These are the main virtual memory-related faults signalled by * the MMU. */ /* fusubailout is used by [fs]uswintr to avoid page faulting */ if (__predict_false(pcb->pcb_onfault == fusubailout)) { tf->tf_r0 = EFAULT; tf->tf_pc = (intptr_t) pcb->pcb_onfault; return; } if (user) { lwp_settrapframe(l, tf); } /* * Make sure the Program Counter is sane. We could fall foul of * someone executing Thumb code, in which case the PC might not * be word-aligned. This would cause a kernel alignment fault * further down if we have to decode the current instruction. */ #ifdef THUMB_CODE /* * XXX: It would be nice to be able to support Thumb in the kernel * at some point. */ if (__predict_false(!user && (tf->tf_pc & 3) != 0)) { printf("\n%s: Misaligned Kernel-mode Program Counter\n", __func__); dab_fatal(tf, fsr, far, l, NULL); } #else if (__predict_false((tf->tf_pc & 3) != 0)) { if (user) { /* * Give the user an illegal instruction signal. */ /* Deliver a SIGILL to the process */ KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGILL; ksi.ksi_code = ILL_ILLOPC; ksi.ksi_addr = (uint32_t *)(intptr_t) far; ksi.ksi_trap = fsr; goto do_trapsignal; } /* * The kernel never executes Thumb code. */ printf("\n%s: Misaligned Kernel-mode Program Counter\n", __func__); dab_fatal(tf, fsr, far, l, NULL); } #endif /* See if the CPU state needs to be fixed up */ switch (data_abort_fixup(tf, fsr, far, l)) { case ABORT_FIXUP_RETURN: return; case ABORT_FIXUP_FAILED: /* Deliver a SIGILL to the process */ KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGILL; ksi.ksi_code = ILL_ILLOPC; ksi.ksi_addr = (uint32_t *)(intptr_t) far; ksi.ksi_trap = fsr; goto do_trapsignal; default: break; } va = trunc_page((vaddr_t)far); /* * It is only a kernel address space fault iff: * 1. user == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. 
*/ if (!user && (va >= VM_MIN_KERNEL_ADDRESS || (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && __predict_true((pcb->pcb_onfault == NULL || (read_insn(tf->tf_pc, false) & 0x05200000) != 0x04200000))) { map = kernel_map; /* Was the fault due to the FPE/IPKDB ? */ if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_addr = (uint32_t *)(intptr_t) far; ksi.ksi_trap = fsr; /* * Force exit via userret() * This is necessary as the FPE is an extension to * userland that actually runs in a privileged mode * but uses USR mode permissions for its accesses. */ user = true; goto do_trapsignal; } } else { map = &l->l_proc->p_vmspace->vm_map; } /* * We need to know whether the page should be mapped as R or R/W. * Before ARMv6, the MMU did not give us the info as to whether the * fault was caused by a read or a write. * * However, we know that a permission fault can only be the result of * a write to a read-only location, so we can deal with those quickly. * * Otherwise we need to disassemble the instruction responsible to * determine if it was a write. */ if (CPU_IS_ARMV6_P() || CPU_IS_ARMV7_P()) { ftype = (fsr & FAULT_WRITE) ? VM_PROT_WRITE : VM_PROT_READ; } else if (IS_PERMISSION_FAULT(fsr)) { ftype = VM_PROT_WRITE; } else { #ifdef THUMB_CODE /* Fast track the ARM case. */ if (__predict_false(tf->tf_spsr & PSR_T_bit)) { u_int insn = read_thumb_insn(tf->tf_pc, user); u_int insn_f8 = insn & 0xf800; u_int insn_fe = insn & 0xfe00; if (insn_f8 == 0x6000 || /* STR(1) */ insn_f8 == 0x7000 || /* STRB(1) */ insn_f8 == 0x8000 || /* STRH(1) */ insn_f8 == 0x9000 || /* STR(3) */ insn_f8 == 0xc000 || /* STM */ insn_fe == 0x5000 || /* STR(2) */ insn_fe == 0x5200 || /* STRH(2) */ insn_fe == 0x5400) /* STRB(2) */ ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; } else #endif { u_int insn = read_insn(tf->tf_pc, user); if (((insn & 0x0c100000) == 0x04000000) || /* STR[B] */ ((insn & 0x0e1000b0) == 0x000000b0) || /* STR[HD]*/ ((insn & 0x0a100000) == 0x08000000) || /* STM/CDT*/ ((insn & 0x0f9000f0) == 0x01800090)) /* STREX[BDH] */ ftype = VM_PROT_WRITE; else if ((insn & 0x0fb00ff0) == 0x01000090)/* SWP */ ftype = VM_PROT_READ | VM_PROT_WRITE; else ftype = VM_PROT_READ; } } /* * See if the fault is as a result of ref/mod emulation, * or domain mismatch. */ #ifdef DEBUG last_fault_code = fsr; #endif if (pmap_fault_fixup(map->pmap, va, ftype, user)) { UVMHIST_LOG(maphist, " <- ref/mod emul", 0, 0, 0, 0); goto out; } if (__predict_false(curcpu()->ci_intr_depth > 0)) { if (pcb->pcb_onfault) { tf->tf_r0 = EINVAL; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return; } printf("\nNon-emulated page fault with intr_depth > 0\n"); dab_fatal(tf, fsr, far, l, NULL); } onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = uvm_fault(map, va, ftype); pcb->pcb_onfault = onfault; if (__predict_true(error == 0)) { if (user) uvm_grow(l->l_proc, va); /* Record any stack growth */ else ucas_ras_check(tf); UVMHIST_LOG(maphist, " <- uvm", 0, 0, 0, 0); goto out; } if (user == 0) { if (pcb->pcb_onfault) { tf->tf_r0 = error; tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; return; } printf("\nuvm_fault(%p, %lx, %x) -> %x\n", map, va, ftype, error); dab_fatal(tf, fsr, far, l, NULL); } KSI_INIT_TRAP(&ksi); if (error == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: " "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm, l->l_cred ?
kauth_cred_geteuid(l->l_cred) : -1); ksi.ksi_signo = SIGKILL; } else ksi.ksi_signo = SIGSEGV; ksi.ksi_code = (error == EACCES) ? SEGV_ACCERR : SEGV_MAPERR; ksi.ksi_addr = (uint32_t *)(intptr_t) far; ksi.ksi_trap = fsr; UVMHIST_LOG(maphist, " <- error (%d)", error, 0, 0, 0); do_trapsignal: call_trapsignal(l, tf, &ksi); out: /* If returning to user mode, make sure to invoke userret() */ if (user) userret(l); }
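/*
 * Editor's sketch (standalone, compile-and-run): pre-ARMv6 CPUs do not
 * tell the data-abort handler whether the access was a read or a write,
 * so data_abort_handler() above disassembles the faulting instruction.
 * This decoder reuses exactly the mask/value tests from the ARM branch
 * of that code; only the surrounding scaffolding is new.
 */
#include <stdio.h>
#include <stdint.h>

static int
arm_insn_is_store(uint32_t insn)
{
	if ((insn & 0x0c100000) == 0x04000000)	/* STR[B] */
		return 1;
	if ((insn & 0x0e1000b0) == 0x000000b0)	/* STR[HD] */
		return 1;
	if ((insn & 0x0a100000) == 0x08000000)	/* STM/CDT */
		return 1;
	if ((insn & 0x0f9000f0) == 0x01800090)	/* STREX[BDH] */
		return 1;
	return 0;
}

int
main(void)
{
	/* STR r0, [r1] encodes as 0xe5810000; LDR r0, [r1] as 0xe5910000. */
	printf("0xe5810000: %s\n", arm_insn_is_store(0xe5810000) ? "write" : "read");
	printf("0xe5910000: %s\n", arm_insn_is_store(0xe5910000) ? "write" : "read");
	return 0;
}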
/* * void prefetch_abort_handler(trapframe_t *tf) * * Abort handler called when instruction execution occurs at * a non-existent or restricted (access permissions) memory page. * If the address is invalid and we were in SVC mode then panic as * the kernel should never prefetch abort. * If the address is invalid and the page is mapped then the user process * does not have read permission so send it a signal. * Otherwise fault the page in and try again. */ void prefetch_abort_handler(trapframe_t *tf) { struct lwp *l; struct pcb *pcb __diagused; struct vm_map *map; vaddr_t fault_pc, va; ksiginfo_t ksi; int error, user; UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); /* Update vmmeter statistics */ curcpu()->ci_data.cpu_ntrap++; l = curlwp; pcb = lwp_getpcb(l); if ((user = TRAP_USERMODE(tf)) != 0) LWP_CACHE_CREDS(l, l->l_proc); /* * Enable IRQs (disabled by the abort). This always comes * from user mode so we know interrupts were not disabled. * But we check anyway. */ KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0); if (__predict_true((tf->tf_spsr & I32_bit) != I32_bit)) restore_interrupts(tf->tf_spsr & IF32_bits); /* See if the CPU state needs to be fixed up */ switch (prefetch_abort_fixup(tf)) { case ABORT_FIXUP_RETURN: KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0); return; case ABORT_FIXUP_FAILED: /* Deliver a SIGILL to the process */ KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGILL; ksi.ksi_code = ILL_ILLOPC; ksi.ksi_addr = (uint32_t *)(intptr_t) tf->tf_pc; lwp_settrapframe(l, tf); goto do_trapsignal; default: break; } /* Prefetch aborts cannot happen in kernel mode */ if (__predict_false(!user)) dab_fatal(tf, 0, tf->tf_pc, NULL, NULL); /* Get fault address */ fault_pc = tf->tf_pc; lwp_settrapframe(l, tf); UVMHIST_LOG(maphist, " (pc=0x%x, l=0x%x, tf=0x%x)", fault_pc, l, tf, 0); /* Ok validate the address, can only execute in USER space */ if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc; ksi.ksi_trap = fault_pc; goto do_trapsignal; } map = &l->l_proc->p_vmspace->vm_map; va = trunc_page(fault_pc); /* * See if the pmap can handle this fault on its own... */ #ifdef DEBUG last_fault_code = -1; #endif if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ|VM_PROT_EXECUTE, 1)) { UVMHIST_LOG (maphist, " <- emulated", 0, 0, 0, 0); goto out; } #ifdef DIAGNOSTIC if (__predict_false(curcpu()->ci_intr_depth > 0)) { printf("\nNon-emulated prefetch abort with intr_depth > 0\n"); dab_fatal(tf, 0, tf->tf_pc, NULL, NULL); } #endif KASSERT(pcb->pcb_onfault == NULL); error = uvm_fault(map, va, VM_PROT_READ|VM_PROT_EXECUTE); if (__predict_true(error == 0)) { UVMHIST_LOG (maphist, " <- uvm", 0, 0, 0, 0); goto out; } KSI_INIT_TRAP(&ksi); UVMHIST_LOG (maphist, " <- fatal (%d)", error, 0, 0, 0); if (error == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: " "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm, l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); ksi.ksi_signo = SIGKILL; } else ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc; ksi.ksi_trap = fault_pc; do_trapsignal: call_trapsignal(l, tf, &ksi); out: KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0); userret(l); }
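/*
 * Editor's sketch: prefetch_abort_handler() above only services faults
 * on user text, so it first range-checks the faulting PC against the
 * user address space and the (possibly low) vector page.  The constants
 * here are stand-ins for the demo, not the kernel's real values.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_VM_MIN_ADDRESS	0x00001000UL	/* hypothetical */
#define DEMO_VM_MAXUSER_ADDRESS	0x7f000000UL	/* hypothetical */

/* vectors_low: nonzero when the exception vector page sits at VA 0. */
static int
user_text_fault_ok(uintptr_t pc, int vectors_low)
{
	if (pc >= DEMO_VM_MAXUSER_ADDRESS)
		return 0;	/* kernel range: deliver SIGSEGV instead */
	if (pc < DEMO_VM_MIN_ADDRESS && vectors_low)
		return 0;	/* vector page is off limits to user code */
	return 1;		/* plausible user text: try uvm_fault() */
}

int
main(void)
{
	printf("%d %d %d\n",
	    user_text_fault_ok(0x00010000UL, 1),	/* 1: user text */
	    user_text_fault_ok(0x00000000UL, 1),	/* 0: vector page */
	    user_text_fault_ok(0x80000000UL, 0));	/* 0: kernel VA */
	return 0;
}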
void m88100_trap(unsigned type, struct trapframe *frame) { struct proc *p; struct vm_map *map; vaddr_t va, pcb_onfault; vm_prot_t ftype; int fault_type, pbus_type; u_long fault_code; unsigned fault_addr; struct vmspace *vm; union sigval sv; int result; #ifdef DDB int s; u_int psr; #endif int sig = 0; extern struct vm_map *kernel_map; uvmexp.traps++; if ((p = curproc) == NULL) p = &proc0; if (USERMODE(frame->tf_epsr)) { type += T_USER; p->p_md.md_tf = frame; /* for ptrace/signals */ } fault_type = 0; fault_code = 0; fault_addr = frame->tf_sxip & XIP_ADDR; switch (type) { default: panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ #if defined(DDB) case T_KDB_BREAK: s = splhigh(); set_psr((psr = get_psr()) & ~PSR_IND); ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); set_psr(psr); splx(s); return; case T_KDB_ENTRY: s = splhigh(); set_psr((psr = get_psr()) & ~PSR_IND); ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); set_psr(psr); splx(s); return; #endif /* DDB */ case T_ILLFLT: printf("Unimplemented opcode!\n"); panictrap(frame->tf_vector, frame); break; case T_INT: case T_INT+T_USER: curcpu()->ci_intrdepth++; md_interrupt_func(T_INT, frame); curcpu()->ci_intrdepth--; return; case T_MISALGNFLT: printf("kernel misaligned access exception @ 0x%08x\n", frame->tf_sxip); panictrap(frame->tf_vector, frame); break; case T_INSTFLT: /* kernel mode instruction access fault. * Should never, never happen for a non-paged kernel. */ #ifdef TRAPDEBUG pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif panictrap(frame->tf_vector, frame); break; case T_DATAFLT: /* kernel mode data fault */ /* data fault on the user address? */ if ((frame->tf_dmt0 & DMT_DAS) == 0) { type = T_DATAFLT + T_USER; goto user_fault; } fault_addr = frame->tf_dma0; if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } else { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } va = trunc_page((vaddr_t)fault_addr); if (va == 0) { panic("trap: bad kernel access at %x", fault_addr); } KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE); vm = p->p_vmspace; map = kernel_map; pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); #ifdef TRAPDEBUG printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif switch (pbus_type) { case CMMU_PFSR_SUCCESS: /* * The fault was resolved. Call data_access_emulation * to drain the data unit pipe line and reset dmt0 * so that trap won't get called again. */ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; KERNEL_UNLOCK(); return; case CMMU_PFSR_SFAULT: case CMMU_PFSR_PFAULT: if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if (result == 0) { /* * We could resolve the fault. Call * data_access_emulation to drain the data * unit pipe line and reset dmt0 so that trap * won't get called again. 
*/ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; KERNEL_UNLOCK(); return; } break; } #ifdef TRAPDEBUG printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type, pbus_exception_type[pbus_type], va); #endif KERNEL_UNLOCK(); panictrap(frame->tf_vector, frame); /* NOTREACHED */ case T_INSTFLT+T_USER: /* User mode instruction access fault */ /* FALLTHROUGH */ case T_DATAFLT+T_USER: user_fault: if (type == T_INSTFLT + T_USER) { pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); #ifdef TRAPDEBUG printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif } else { fault_addr = frame->tf_dma0; pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); #ifdef TRAPDEBUG printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", pbus_type, pbus_exception_type[pbus_type], fault_addr, frame, frame->tf_cpu); #endif } if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) { ftype = VM_PROT_READ | VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } else { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } va = trunc_page((vaddr_t)fault_addr); KERNEL_PROC_LOCK(p); vm = p->p_vmspace; map = &vm->vm_map; if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; /* Call uvm_fault() to resolve non-bus error faults */ switch (pbus_type) { case CMMU_PFSR_SUCCESS: result = 0; break; case CMMU_PFSR_BERROR: result = EACCES; break; default: result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); break; } p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if ((caddr_t)va >= vm->vm_maxsaddr) { if (result == 0) uvm_grow(p, va); else if (result == EACCES) result = EFAULT; } KERNEL_PROC_UNLOCK(p); /* * This could be a fault caused in copyin*() * while accessing user space. */ if (result != 0 && pcb_onfault != 0) { frame->tf_snip = pcb_onfault | NIP_V; frame->tf_sfip = (pcb_onfault + 4) | FIP_V; frame->tf_sxip = 0; /* * Continue as if the fault had been resolved, but * do not try to complete the faulting access. */ frame->tf_dmt0 |= DMT_SKIP; result = 0; } if (result == 0) { if (type == T_DATAFLT+T_USER) { /* * We could resolve the fault. Call * data_access_emulation to drain the data unit * pipe line and reset dmt0 so that trap won't * get called again. */ data_access_emulation((unsigned *)frame); frame->tf_dpfsr = 0; frame->tf_dmt0 = 0; } else { /* * back up SXIP, SNIP, * clearing the Error bit */ frame->tf_sfip = frame->tf_snip & ~FIP_E; frame->tf_snip = frame->tf_sxip & ~NIP_E; frame->tf_ipfsr = 0; } } else { sig = result == EACCES ? SIGBUS : SIGSEGV; fault_type = result == EACCES ? BUS_ADRERR : SEGV_MAPERR; } break; case T_MISALGNFLT+T_USER: /* Fix any misaligned ld.d or st.d instructions */ sig = double_reg_fixup(frame); fault_type = BUS_ADRALN; break; case T_PRIVINFLT+T_USER: case T_ILLFLT+T_USER: #ifndef DDB case T_KDB_BREAK: case T_KDB_ENTRY: #endif case T_KDB_BREAK+T_USER: case T_KDB_ENTRY+T_USER: case T_KDB_TRACE: case T_KDB_TRACE+T_USER: sig = SIGILL; break; case T_BNDFLT+T_USER: sig = SIGFPE; break; case T_ZERODIV+T_USER: sig = SIGFPE; fault_type = FPE_INTDIV; break; case T_OVFFLT+T_USER: sig = SIGFPE; fault_type = FPE_INTOVF; break; case T_FPEPFLT+T_USER: sig = SIGFPE; break; case T_SIGSYS+T_USER: sig = SIGSYS; break; case T_STEPBPT+T_USER: #ifdef PTRACE /* * This trap is used by the kernel to support single-step * debugging (although any user could generate this trap * which should probably be handled differently). 
When a * process is continued by a debugger with the PT_STEP * function of ptrace (single step), the kernel inserts * one or two breakpoints in the user process so that only * one instruction (or two in the case of a delayed branch) * is executed. When this breakpoint is hit, we get the * T_STEPBPT trap. */ { u_int instr; vaddr_t pc = PC_REGS(&frame->tf_regs); /* read break instruction */ copyin((caddr_t)pc, &instr, sizeof(u_int)); /* check and see if we got here by accident */ if ((p->p_md.md_bp0va != pc && p->p_md.md_bp1va != pc) || instr != SSBREAKPOINT) { sig = SIGTRAP; fault_type = TRAP_TRACE; break; } /* restore original instruction and clear breakpoint */ if (p->p_md.md_bp0va == pc) { ss_put_value(p, pc, p->p_md.md_bp0save); p->p_md.md_bp0va = 0; } if (p->p_md.md_bp1va == pc) { ss_put_value(p, pc, p->p_md.md_bp1save); p->p_md.md_bp1va = 0; } #if 1 frame->tf_sfip = frame->tf_snip; frame->tf_snip = pc | NIP_V; #endif sig = SIGTRAP; fault_type = TRAP_BRKPT; } #else sig = SIGTRAP; fault_type = TRAP_TRACE; #endif break; case T_USERBPT+T_USER: /* * This trap is meant to be used by debuggers to implement * breakpoint debugging. When we get this trap, we just * return a signal which gets caught by the debugger. */ frame->tf_sfip = frame->tf_snip; frame->tf_snip = frame->tf_sxip; sig = SIGTRAP; fault_type = TRAP_BRKPT; break; case T_ASTFLT+T_USER: uvmexp.softs++; p->p_md.md_astpending = 0; if (p->p_flag & P_OWEUPC) { KERNEL_PROC_LOCK(p); ADDUPROF(p); KERNEL_PROC_UNLOCK(p); } if (curcpu()->ci_want_resched) preempt(NULL); break; } /* * If trap from supervisor mode, just return */ if (type < T_USER) return; if (sig) { sv.sival_int = fault_addr; KERNEL_PROC_LOCK(p); trapsignal(p, sig, fault_code, fault_type, sv); KERNEL_PROC_UNLOCK(p); /* * don't want multiple faults - we are going to * deliver signal. */ frame->tf_dmt0 = 0; frame->tf_ipfsr = frame->tf_dpfsr = 0; } userret(p); }
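/*
 * Editor's sketch: the T_STEPBPT code above decides whether a break was
 * one of the (at most two) PT_STEP breakpoints the kernel planted, in
 * which case the saved instruction is put back and TRAP_BRKPT reported,
 * or a stray break, reported as TRAP_TRACE.  The two-slot table mirrors
 * md_bp0va/md_bp1va; the opcode value and everything else are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define SSBREAKPOINT	0xf000d1f8u	/* stand-in break opcode */

struct ss_state {
	uintptr_t bpva[2];	/* addresses of planted breakpoints */
	uint32_t bpsave[2];	/* original instructions */
};

/* Returns 1 for TRAP_BRKPT (our breakpoint), 0 for TRAP_TRACE. */
static int
ss_hit(struct ss_state *ss, uintptr_t pc, uint32_t insn, uint32_t *text)
{
	int i, ours = 0;

	if (insn != SSBREAKPOINT)
		return 0;
	for (i = 0; i < 2; i++) {
		if (ss->bpva[i] == pc) {
			*text = ss->bpsave[i];	/* restore the real insn */
			ss->bpva[i] = 0;
			ours = 1;
		}
	}
	return ours;
}

int
main(void)
{
	uint32_t text = SSBREAKPOINT;
	struct ss_state ss = { { (uintptr_t)&text, 0 }, { 0x12345678u, 0 } };
	int ours = ss_hit(&ss, (uintptr_t)&text, text, &text);

	printf("ours: %d, restored: %#x\n", ours, text);
	return 0;
}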
void m88110_trap(u_int type, struct trapframe *frame) { struct proc *p; struct vm_map *map; vaddr_t va, pcb_onfault; vm_prot_t ftype; int fault_type; u_long fault_code; vaddr_t fault_addr; struct vmspace *vm; union sigval sv; int result; #ifdef DDB int s; u_int psr; #endif int sig = 0; uvmexp.traps++; if ((p = curproc) == NULL) p = &proc0; fault_type = SI_NOINFO; fault_code = 0; fault_addr = frame->tf_exip & XIP_ADDR; /* * 88110 errata #16 (4.2) or #3 (5.1.1): * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension * can cause the enip value to be incremented by 4 incorrectly * if the instruction in the delay slot is the first word of a * page which misses in the mmu and results in a hardware * tablewalk which encounters an exception or an invalid * descriptor. The exip value in this case will point to the * first word of the page, and the D bit will be set. * * Note: if the instruction is a jsr.n r1, r1 will be overwritten * with erroneous data. Therefore, no recovery is possible. Do * not allow this instruction to occupy the last word of a page. * * Suggested fix: recover in general by backing up the exip by 4 * and clearing the delay bit before an rte when the lower 3 hex * digits of the exip are 001.'' */ if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) { u_int instr; /* * Note that we have initialized fault_addr above, so that * signals provide the correct address if necessary. */ frame->tf_exip = (frame->tf_exip & ~1) - 4; /* * Check the instruction at the (backed up) exip. * If it is a jsr.n, abort. */ if (!USERMODE(frame->tf_epsr)) { instr = *(u_int *)fault_addr; if (instr == 0xf400cc01) panic("mc88110 errata #16, exip %p enip %p", (frame->tf_exip + 4) | 1, frame->tf_enip); } else { /* copyin here should not fail */ if (copyin((const void *)frame->tf_exip, &instr, sizeof instr) == 0 && instr == 0xf400cc01) { uprintf("mc88110 errata #16, exip %p enip %p", (frame->tf_exip + 4) | 1, frame->tf_enip); sig = SIGILL; } } } if (USERMODE(frame->tf_epsr)) { type += T_USER; p->p_md.md_tf = frame; /* for ptrace/signals */ } if (sig != 0) goto deliver; switch (type) { default: lose: panictrap(frame->tf_vector, frame); break; /*NOTREACHED*/ #ifdef DEBUG case T_110_DRM+T_USER: case T_110_DRM: printf("DMMU read miss: Hardware Table Searches should be enabled!\n"); goto lose; case T_110_DWM+T_USER: case T_110_DWM: printf("DMMU write miss: Hardware Table Searches should be enabled!\n"); goto lose; case T_110_IAM+T_USER: case T_110_IAM: printf("IMMU miss: Hardware Table Searches should be enabled!\n"); goto lose; #endif #ifdef DDB case T_KDB_TRACE: s = splhigh(); set_psr((psr = get_psr()) & ~PSR_IND); ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame); set_psr(psr); splx(s); return; case T_KDB_BREAK: s = splhigh(); set_psr((psr = get_psr()) & ~PSR_IND); ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); set_psr(psr); splx(s); return; case T_KDB_ENTRY: s = splhigh(); set_psr((psr = get_psr()) & ~PSR_IND); ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); set_psr(psr); /* skip trap instruction */ m88110_skip_insn(frame); splx(s); return; #endif /* DDB */ case T_ILLFLT: /* * The 88110 seems to trigger an instruction fault in * supervisor mode when running the following sequence: * * bcnd.n cond, reg, 1f * arithmetic insn * ... * the same exact arithmetic insn * 1: another arithmetic insn stalled by the previous one * ... * * The exception is reported with exip pointing to the * branch address. 
I don't know, at this point, if there * is any better workaround than the aggressive one * implemented below; I don't see how this could relate to * any of the 88110 errata (although it might be related to * branch prediction). * * For the record, the exact sequence triggering the * spurious exception is: * * bcnd.n eq0, r2, 1f * or r25, r0, r22 * bsr somewhere * or r25, r0, r22 * 1: cmp r13, r25, r20 * * within the same cache line. * * Simply ignoring the exception and returning does not * cause the exception to disappear. Clearing the * instruction cache works, but on 88110+88410 systems, * the 88410 needs to be invalidated as well. (note that * the size passed to the flush routines does not matter * since there is no way to flush a subset of the 88110 * I$ anyway) */ { extern void *kernel_text, *etext; if (fault_addr >= (vaddr_t)&kernel_text && fault_addr < (vaddr_t)&etext) { cmmu_icache_inv(curcpu()->ci_cpuid, trunc_page(fault_addr), PAGE_SIZE); cmmu_cache_wbinv(curcpu()->ci_cpuid, trunc_page(fault_addr), PAGE_SIZE); return; } } goto lose; case T_MISALGNFLT: printf("kernel misaligned access exception @%p\n", frame->tf_exip); goto lose; case T_INSTFLT: /* kernel mode instruction access fault. * Should never, never happen for a non-paged kernel. */ #ifdef TRAPDEBUG printf("Kernel Instruction fault exip %x isr %x ilar %x\n", frame->tf_exip, frame->tf_isr, frame->tf_ilar); #endif goto lose; case T_DATAFLT: /* kernel mode data fault */ /* data fault on the user address? */ if ((frame->tf_dsr & CMMU_DSR_SU) == 0) { KERNEL_LOCK(); goto m88110_user_fault; } #ifdef TRAPDEBUG printf("Kernel Data access fault exip %x dsr %x dlar %x\n", frame->tf_exip, frame->tf_dsr, frame->tf_dlar); #endif fault_addr = frame->tf_dlar; if (frame->tf_dsr & CMMU_DSR_RW) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } else { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } va = trunc_page((vaddr_t)fault_addr); KERNEL_LOCK(); vm = p->p_vmspace; map = kernel_map; if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { /* * On a segment or a page fault, call uvm_fault() to * resolve the fault. */ if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); p->p_addr->u_pcb.pcb_onfault = pcb_onfault; /* * This could be a fault caused in copyout*() * while accessing kernel space. */ if (result != 0 && pcb_onfault != 0) { frame->tf_exip = pcb_onfault; /* * Continue as if the fault had been resolved. */ result = 0; } if (result == 0) { KERNEL_UNLOCK(); return; } } KERNEL_UNLOCK(); goto lose; case T_INSTFLT+T_USER: /* User mode instruction access fault */ /* FALLTHROUGH */ case T_DATAFLT+T_USER: KERNEL_LOCK(); m88110_user_fault: if (type == T_INSTFLT+T_USER) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; #ifdef TRAPDEBUG printf("User Instruction fault exip %x isr %x ilar %x\n", frame->tf_exip, frame->tf_isr, frame->tf_ilar); #endif } else { fault_addr = frame->tf_dlar; if (frame->tf_dsr & CMMU_DSR_RW) { ftype = VM_PROT_READ; fault_code = VM_PROT_READ; } else { ftype = VM_PROT_READ|VM_PROT_WRITE; fault_code = VM_PROT_WRITE; } #ifdef TRAPDEBUG printf("User Data access fault exip %x dsr %x dlar %x\n", frame->tf_exip, frame->tf_dsr, frame->tf_dlar); #endif } va = trunc_page((vaddr_t)fault_addr); vm = p->p_vmspace; map = &vm->vm_map; if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) p->p_addr->u_pcb.pcb_onfault = 0; /* * Call uvm_fault() to resolve non-bus error faults * whenever possible. 
*/ if (type == T_INSTFLT+T_USER) { /* instruction faults */ if (frame->tf_isr & (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) { /* bus error, supervisor protection */ result = EACCES; } else if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) { /* segment or page fault */ result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); } else { #ifdef TRAPDEBUG printf("Unexpected Instruction fault isr %x\n", frame->tf_isr); #endif KERNEL_UNLOCK(); goto lose; } } else { /* data faults */ if (frame->tf_dsr & CMMU_DSR_BE) { /* bus error */ result = EACCES; } else if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { /* segment or page fault */ result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); } else if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) { /* copyback or write allocate error */ result = EACCES; } else if (frame->tf_dsr & CMMU_DSR_WE) { /* write fault */ /* This could be a write protection fault or an * exception to set the used and modified bits * in the pte. Basically, if we got a write * error, then we already have a pte entry that * faulted in from a previous seg fault or page * fault. * Get the pte and check the status of the * modified and valid bits to determine if this * is indeed a real write fault. XXX smurph */ if (pmap_set_modify(map->pmap, va)) { #ifdef TRAPDEBUG printf("Corrected userland write fault, pmap %p va %p\n", map->pmap, va); #endif result = 0; } else { /* must be a real wp fault */ #ifdef TRAPDEBUG printf("Uncorrected userland write fault, pmap %p va %p\n", map->pmap, va); #endif result = uvm_fault(map, va, VM_FAULT_INVALID, ftype); } } else { #ifdef TRAPDEBUG printf("Unexpected Data access fault dsr %x\n", frame->tf_dsr); #endif KERNEL_UNLOCK(); goto lose; } } p->p_addr->u_pcb.pcb_onfault = pcb_onfault; if ((caddr_t)va >= vm->vm_maxsaddr) { if (result == 0) uvm_grow(p, va); else if (result == EACCES) result = EFAULT; } KERNEL_UNLOCK(); /* * This could be a fault caused in copyin*() * while accessing user space. */ if (result != 0 && pcb_onfault != 0) { frame->tf_exip = pcb_onfault; /* * Continue as if the fault had been resolved. */ result = 0; } if (result != 0) { sig = result == EACCES ? SIGBUS : SIGSEGV; fault_type = result == EACCES ? BUS_ADRERR : SEGV_MAPERR; } break; case T_MISALGNFLT+T_USER: /* Fix any misaligned ld.d or st.d instructions */ sig = double_reg_fixup(frame); fault_type = BUS_ADRALN; if (sig == 0) { /* skip recovered instruction */ m88110_skip_insn(frame); goto userexit; } break; case T_PRIVINFLT+T_USER: fault_type = ILL_PRVREG; /* FALLTHROUGH */ case T_ILLFLT+T_USER: #ifndef DDB case T_KDB_BREAK: case T_KDB_ENTRY: case T_KDB_TRACE: #endif case T_KDB_BREAK+T_USER: case T_KDB_ENTRY+T_USER: case T_KDB_TRACE+T_USER: sig = SIGILL; break; case T_BNDFLT+T_USER: sig = SIGFPE; /* skip trap instruction */ m88110_skip_insn(frame); break; case T_ZERODIV+T_USER: sig = SIGFPE; fault_type = FPE_INTDIV; /* skip trap instruction */ m88110_skip_insn(frame); break; case T_OVFFLT+T_USER: sig = SIGFPE; fault_type = FPE_INTOVF; /* skip trap instruction */ m88110_skip_insn(frame); break; case T_FPEPFLT+T_USER: m88110_fpu_exception(frame); goto userexit; case T_SIGSYS+T_USER: sig = SIGSYS; break; case T_STEPBPT+T_USER: #ifdef PTRACE /* * This trap is used by the kernel to support single-step * debugging (although any user could generate this trap * which should probably be handled differently).
When a * process is continued by a debugger with the PT_STEP * function of ptrace (single step), the kernel inserts * one or two breakpoints in the user process so that only * one instruction (or two in the case of a delayed branch) * is executed. When this breakpoint is hit, we get the * T_STEPBPT trap. */ { u_int instr; vaddr_t pc = PC_REGS(&frame->tf_regs); /* read break instruction */ copyin((caddr_t)pc, &instr, sizeof(u_int)); /* check and see if we got here by accident */ if ((p->p_md.md_bp0va != pc && p->p_md.md_bp1va != pc) || instr != SSBREAKPOINT) { sig = SIGTRAP; fault_type = TRAP_TRACE; break; } /* restore original instruction and clear breakpoint */ if (p->p_md.md_bp0va == pc) { ss_put_value(p, pc, p->p_md.md_bp0save); p->p_md.md_bp0va = 0; } if (p->p_md.md_bp1va == pc) { ss_put_value(p, pc, p->p_md.md_bp1save); p->p_md.md_bp1va = 0; } sig = SIGTRAP; fault_type = TRAP_BRKPT; } #else sig = SIGTRAP; fault_type = TRAP_TRACE; #endif break; case T_USERBPT+T_USER: /* * This trap is meant to be used by debuggers to implement * breakpoint debugging. When we get this trap, we just * return a signal which gets caught by the debugger. */ sig = SIGTRAP; fault_type = TRAP_BRKPT; break; } /* * If trap from supervisor mode, just return */ if (type < T_USER) return; if (sig) { deliver: sv.sival_ptr = (void *)fault_addr; KERNEL_LOCK(); trapsignal(p, sig, fault_code, fault_type, sv); KERNEL_UNLOCK(); } userexit: userret(p); }
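/*
 * Editor's sketch of the mc88110 errata #16 recovery arithmetic used at
 * the top of m88110_trap(): when an instruction fault reports an exip
 * whose low twelve bits are 0x001, the saved PC has been pushed one word
 * too far (onto the next page) with the delay bit set; the suggested
 * recovery is to clear the delay bit and back up one word.  Plain
 * integer demo with 4KB pages, no kernel types.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_MASK	0x0fffu

static int
errata16_applies(uint32_t exip)
{
	return (exip & DEMO_PAGE_MASK) == 0x001;
}

static uint32_t
errata16_fixup(uint32_t exip)
{
	return (exip & ~1u) - 4;	/* clear delay bit, back up a word */
}

int
main(void)
{
	uint32_t exip = 0x00402001;	/* first word of a page, delay bit set */

	if (errata16_applies(exip))
		printf("exip %#x backs up to %#x\n", exip, errata16_fixup(exip));
	return 0;
}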
void trap(int type, struct trapframe *frame) { struct lwp *l; struct proc *p; struct pcb *pcbp; vaddr_t va; struct vm_map *map; struct vmspace *vm; vm_prot_t vftype; pa_space_t space; u_int opcode; int ret; const char *tts; int type_raw; #ifdef DIAGNOSTIC extern int emergency_stack_start, emergency_stack_end; #endif type_raw = type & ~T_USER; opcode = frame->tf_iir; if (type_raw == T_ITLBMISS || type_raw == T_ITLBMISSNA) { va = frame->tf_iioq_head; space = frame->tf_iisq_head; vftype = VM_PROT_READ; /* XXX VM_PROT_EXECUTE ??? */ } else { va = frame->tf_ior; space = frame->tf_isr; vftype = inst_store(opcode) ? VM_PROT_WRITE : VM_PROT_READ; } if ((l = curlwp) == NULL) l = &lwp0; p = l->l_proc; #ifdef DIAGNOSTIC /* * If we are on the emergency stack, then we either got * a fault on the kernel stack, or we're just handling * a trap for the machine check handler (which also * runs on the emergency stack). * * We *very crudely* differentiate between the two cases * by checking the faulting instruction: if it is the * function prologue instruction that stores the old * frame pointer and updates the stack pointer, we assume * that we faulted on the kernel stack. * * In this case, not completing that instruction will * probably confuse backtraces in kgdb/ddb. Completing * it would be difficult, because we already faulted on * that part of the stack, so instead we fix up the * frame as if the function called has just returned. * This has peculiar knowledge about what values are in * what registers during the "normal gcc -g" prologue. */ if (&type >= &emergency_stack_start && &type < &emergency_stack_end && type != T_IBREAK && STWM_R1_D_SR0_SP(opcode)) { /* Restore the caller's frame pointer. */ frame->tf_r3 = frame->tf_r1; /* Restore the caller's instruction offsets. */ frame->tf_iioq_head = frame->tf_rp; frame->tf_iioq_tail = frame->tf_iioq_head + 4; goto dead_end; } #endif /* DIAGNOSTIC */ #ifdef DEBUG frame_sanity_check(frame, l); #endif /* DEBUG */ /* If this is a trap, not an interrupt, reenable interrupts. */ if (type_raw != T_INTERRUPT) mtctl(frame->tf_eiem, CR_EIEM); if (frame->tf_flags & TFF_LAST) l->l_md.md_regs = frame; if ((type & ~T_USER) > trap_types) tts = "reserved"; else tts = trap_type[type & ~T_USER]; #ifdef TRAPDEBUG if (type_raw != T_INTERRUPT && type_raw != T_IBREAK) printf("trap: %d, %s for %x:%x at %x:%x, fp=%p, rp=%x\n", type, tts, space, (u_int)va, frame->tf_iisq_head, frame->tf_iioq_head, frame, frame->tf_rp); else if (type_raw == T_IBREAK) printf("trap: break instruction %x:%x at %x:%x, fp=%p\n", break5(opcode), break13(opcode), frame->tf_iisq_head, frame->tf_iioq_head, frame); { extern int etext; if (frame < (struct trapframe *)&etext) { printf("trap: bogus frame ptr %p\n", frame); goto dead_end; } } #endif switch (type) { case T_NONEXIST: case T_NONEXIST|T_USER: #if !defined(DDB) && !defined(KGDB) /* we've got screwed up by the central scrutinizer */ panic ("trap: elvis has just left the building!"); break; #else goto dead_end; #endif case T_RECOVERY|T_USER: #ifdef USERTRACE for(;;) { if (frame->tf_iioq_head != rctr_next_iioq) printf("-%08x\nr %08x", rctr_next_iioq - 4, frame->tf_iioq_head); rctr_next_iioq = frame->tf_iioq_head + 4; if (frame->tf_ipsw & PSW_N) { /* Advance the program counter. */ frame->tf_iioq_head = frame->tf_iioq_tail; frame->tf_iioq_tail = frame->tf_iioq_head + 4; /* Clear flags. */ frame->tf_ipsw &= ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L); /* Simulate another trap. 
*/ continue; } break; } frame->tf_rctr = 0; break; #endif /* USERTRACE */ case T_RECOVERY: #if !defined(DDB) && !defined(KGDB) /* XXX will implement later */ printf ("trap: handicapped"); break; #else goto dead_end; #endif case T_EMULATION | T_USER: #ifdef FPEMUL hppa_fpu_emulate(frame, l); #else /* !FPEMUL */ /* * We don't have FPU emulation, so signal the * process with a SIGFPE. */ hppa_trapsignal_hack(l, SIGFPE, frame->tf_iioq_head); #endif /* !FPEMUL */ break; #ifdef DIAGNOSTIC case T_EXCEPTION: panic("FPU/SFU emulation botch"); /* these just can't happen ever */ case T_PRIV_OP: case T_PRIV_REG: /* these just can't make it to the trap() ever */ case T_HPMC: case T_HPMC | T_USER: case T_EMULATION: #endif case T_IBREAK: case T_DATALIGN: case T_DBREAK: dead_end: if (type & T_USER) { #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGILL, frame->tf_iioq_head); break; } if (trap_kdebug(type, va, frame)) return; else if (type == T_DATALIGN) panic ("trap: %s at 0x%x", tts, (u_int) va); else panic ("trap: no debugger for \"%s\" (%d)", tts, type); break; case T_IBREAK | T_USER: case T_DBREAK | T_USER: /* pass to user debugger */ break; case T_EXCEPTION | T_USER: /* co-proc assist trap */ hppa_trapsignal_hack(l, SIGFPE, va); break; case T_OVERFLOW | T_USER: hppa_trapsignal_hack(l, SIGFPE, va); break; case T_CONDITION | T_USER: break; case T_ILLEGAL | T_USER: #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGILL, va); break; case T_PRIV_OP | T_USER: #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGILL, va); break; case T_PRIV_REG | T_USER: #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGILL, va); break; /* these should never get here */ case T_HIGHERPL | T_USER: case T_LOWERPL | T_USER: hppa_trapsignal_hack(l, SIGSEGV, va); break; case T_IPROT | T_USER: case T_DPROT | T_USER: hppa_trapsignal_hack(l, SIGSEGV, va); break; case T_DATACC: case T_USER | T_DATACC: case T_ITLBMISS: case T_USER | T_ITLBMISS: case T_DTLBMISS: case T_USER | T_DTLBMISS: case T_ITLBMISSNA: case T_USER | T_ITLBMISSNA: case T_DTLBMISSNA: case T_USER | T_DTLBMISSNA: case T_TLB_DIRTY: case T_USER | T_TLB_DIRTY: vm = p->p_vmspace; if (!vm) { #ifdef TRAPDEBUG printf("trap: no vm, p=%p\n", p); #endif goto dead_end; } /* * it could be a kernel map for exec_map faults */ if (!(type & T_USER) && space == HPPA_SID_KERNEL) map = kernel_map; else { map = &vm->vm_map; if (l->l_flag & L_SA) { l->l_savp->savp_faultaddr = va; l->l_flag |= L_SA_PAGEFAULT; } } va = hppa_trunc_page(va); if (map->pmap->pmap_space != space) { #ifdef TRAPDEBUG printf("trap: space mismatch %d != %d\n", space, map->pmap->pmap_space); #endif /* actually dump the user, crap the kernel */ goto dead_end; } /* Never call uvm_fault in interrupt context. */ KASSERT(hppa_intr_depth == 0); ret = uvm_fault(map, va, 0, vftype); #ifdef TRAPDEBUG printf("uvm_fault(%p, %x, %d, %d)=%d\n", map, (u_int)va, 0, vftype, ret); #endif if (map != kernel_map) l->l_flag &= ~L_SA_PAGEFAULT; /* * If this was a stack access we keep track of the maximum * accessed stack size. Also, if uvm_fault gets a protection * failure it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error.
*/ if (va >= (vaddr_t)vm->vm_maxsaddr + vm->vm_ssize) { if (ret == 0) { vsize_t nss = btoc(va - USRSTACK + PAGE_SIZE); if (nss > vm->vm_ssize) vm->vm_ssize = nss; } else if (ret == EACCES) ret = EFAULT; } if (ret != 0) { if (type & T_USER) { printf("trapsignal: uvm_fault(%p, %x, %d, %d)=%d\n", map, (u_int)va, 0, vftype, ret); #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGSEGV, frame->tf_ior); } else { if (l && l->l_addr->u_pcb.pcb_onfault) { #ifdef PMAPDEBUG printf("trap: copyin/out %d\n",ret); #endif pcbp = &l->l_addr->u_pcb; frame->tf_iioq_tail = 4 + (frame->tf_iioq_head = pcbp->pcb_onfault); pcbp->pcb_onfault = 0; break; } #if 1 if (trap_kdebug (type, va, frame)) return; #else panic("trap: uvm_fault(%p, %x, %d, %d): %d", map, va, 0, vftype, ret); #endif } } break; case T_DATALIGN | T_USER: #ifdef DEBUG user_backtrace(frame, l, type); #endif hppa_trapsignal_hack(l, SIGBUS, va); break; case T_INTERRUPT: case T_INTERRUPT|T_USER: hppa_intr(frame); mtctl(frame->tf_eiem, CR_EIEM); #if 0 if (trap_kdebug (type, va, frame)) return; #endif break; case T_LOWERPL: case T_DPROT: case T_IPROT: case T_OVERFLOW: case T_CONDITION: case T_ILLEGAL: case T_HIGHERPL: case T_TAKENBR: case T_POWERFAIL: case T_LPMC: case T_PAGEREF: case T_DATAPID: case T_DATAPID | T_USER: if (0 /* T-chip */) { break; } /* FALLTHROUGH to unimplemented */ default: #if 1 if (trap_kdebug (type, va, frame)) return; #endif panic ("trap: unimplemented \'%s\' (%d)", tts, type); } if (type & T_USER) userret(l, l->l_md.md_regs->tf_iioq_head, 0); #ifdef DEBUG frame_sanity_check(frame, l); if (frame->tf_flags & TFF_LAST && curlwp != NULL) frame_sanity_check(curlwp->l_md.md_regs, curlwp); #endif /* DEBUG */ }
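/*
 * Editor's sketch: the stack-growth bookkeeping above ("nss = btoc(va -
 * USRSTACK + PAGE_SIZE)") converts a faulting address into a stack size
 * in pages and ratchets vm_ssize up to the high-water mark.  On hppa the
 * stack grows upward from USRSTACK; the constants below are illustrative
 * stand-ins and btoc() is modelled as a round-up division.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_USRSTACK	0x68000000UL	/* hypothetical stack base */

#define demo_btoc(x)	(((x) + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE)

static unsigned long
grow_ssize(unsigned long ssize, uintptr_t va)
{
	unsigned long nss = demo_btoc(va - DEMO_USRSTACK + DEMO_PAGE_SIZE);

	return nss > ssize ? nss : ssize;	/* keep the high-water mark */
}

int
main(void)
{
	unsigned long ssize = 1;	/* one page of stack so far */

	ssize = grow_ssize(ssize, DEMO_USRSTACK + 5 * DEMO_PAGE_SIZE);
	printf("stack is now %lu pages\n", ssize);	/* prints 6 */
	return 0;
}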
/*ARGSUSED*/ void trap(struct trapframe *frame) { struct proc *p = curproc; int type = (int)frame->tf_trapno; struct pcb *pcb; extern char doreti_iret[], resume_iret[]; caddr_t onfault; int error; uint64_t cr2; union sigval sv; uvmexp.traps++; pcb = (p != NULL && p->p_addr != NULL) ? &p->p_addr->u_pcb : NULL; #ifdef DEBUG if (trapdebug) { printf("trap %d code %lx rip %lx cs %lx rflags %lx cr2 %lx " "cpl %x\n", type, frame->tf_err, frame->tf_rip, frame->tf_cs, frame->tf_rflags, rcr2(), curcpu()->ci_ilevel); printf("curproc %p\n", curproc); if (curproc) printf("pid %d\n", p->p_pid); } #endif if (!KERNELMODE(frame->tf_cs, frame->tf_rflags)) { type |= T_USER; p->p_md.md_regs = frame; } switch (type) { default: we_re_toast: #ifdef KGDB if (kgdb_trap(type, frame)) return; else { /* * If this is a breakpoint, don't panic * if we're not connected. */ if (type == T_BPTFLT) { printf("kgdb: ignored %s\n", trap_type[type]); return; } } #endif #ifdef DDB if (kdb_trap(type, 0, frame)) return; #endif if (frame->tf_trapno < trap_types) printf("fatal %s", trap_type[frame->tf_trapno]); else printf("unknown trap %ld", (u_long)frame->tf_trapno); printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor"); printf("trap type %d code %lx rip %lx cs %lx rflags %lx cr2 " " %lx cpl %x rsp %lx\n", type, frame->tf_err, (u_long)frame->tf_rip, frame->tf_cs, frame->tf_rflags, rcr2(), curcpu()->ci_ilevel, frame->tf_rsp); panic("trap type %d, code=%lx, pc=%lx", type, frame->tf_err, frame->tf_rip); /*NOTREACHED*/ case T_PROTFLT: case T_SEGNPFLT: case T_ALIGNFLT: case T_TSSFLT: if (p == NULL) goto we_re_toast; /* Check for copyin/copyout fault. */ if (pcb->pcb_onfault != 0) { error = EFAULT; copyfault: frame->tf_rip = (u_int64_t)pcb->pcb_onfault; frame->tf_rax = error; return; } /* * Check for failure during return to user mode. * We do this by looking at the address of the * instruction that faulted. */ if (frame->tf_rip == (u_int64_t)doreti_iret) { frame->tf_rip = (u_int64_t)resume_iret; return; } goto we_re_toast; case T_PROTFLT|T_USER: /* protection fault */ case T_TSSFLT|T_USER: case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: case T_NMI|T_USER: #ifdef TRAP_SIGDEBUG printf("pid %d (%s): BUS at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, rcr2()); frame_dump(frame); #endif sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGBUS, type & ~T_USER, BUS_OBJERR, sv); KERNEL_UNLOCK(); goto out; case T_ALIGNFLT|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGBUS, type & ~T_USER, BUS_ADRALN, sv); KERNEL_UNLOCK(); goto out; case T_PRIVINFLT|T_USER: /* privileged instruction fault */ sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv); KERNEL_UNLOCK(); goto out; case T_FPOPFLT|T_USER: /* coprocessor operand fault */ #ifdef TRAP_SIGDEBUG printf("pid %d (%s): ILL at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, rcr2()); frame_dump(frame); #endif sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv); KERNEL_UNLOCK(); goto out; case T_ASTFLT|T_USER: /* Allow process switch */ uvmexp.softs++; if (p->p_flag & P_OWEUPC) { KERNEL_LOCK(); ADDUPROF(p); KERNEL_UNLOCK(); } /* Allow a forced task switch. 
*/ if (curcpu()->ci_want_resched) preempt(NULL); goto out; case T_BOUND|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_FLTSUB, sv); KERNEL_UNLOCK(); goto out; case T_OFLOW|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTOVF, sv); KERNEL_UNLOCK(); goto out; case T_DIVIDE|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTDIV, sv); KERNEL_UNLOCK(); goto out; case T_ARITHTRAP|T_USER: case T_XMM|T_USER: fputrap(frame); goto out; case T_PAGEFLT: /* allow page faults in kernel mode */ if (p == NULL) goto we_re_toast; cr2 = rcr2(); KERNEL_LOCK(); goto faultcommon; case T_PAGEFLT|T_USER: { /* page fault */ vaddr_t va, fa; struct vmspace *vm; struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; cr2 = rcr2(); KERNEL_LOCK(); faultcommon: vm = p->p_vmspace; if (vm == NULL) goto we_re_toast; fa = cr2; va = trunc_page((vaddr_t)cr2); /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. */ if (type == T_PAGEFLT && va >= VM_MIN_KERNEL_ADDRESS) map = kernel_map; else map = &vm->vm_map; if (frame->tf_err & PGEX_W) ftype = VM_PROT_WRITE; else if (frame->tf_err & PGEX_I) ftype = VM_PROT_EXECUTE; else ftype = VM_PROT_READ; #ifdef DIAGNOSTIC if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %lx\n", va); goto we_re_toast; } #endif /* Fault the original page in. */ onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = uvm_fault(map, va, frame->tf_err & PGEX_P? VM_FAULT_PROTECT : VM_FAULT_INVALID, ftype); pcb->pcb_onfault = onfault; if (error == 0) { if (map != kernel_map) uvm_grow(p, va); if (type == T_PAGEFLT) { KERNEL_UNLOCK(); return; } KERNEL_UNLOCK(); goto out; } if (error == EACCES) { error = EFAULT; } if (type == T_PAGEFLT) { if (pcb->pcb_onfault != 0) { KERNEL_UNLOCK(); goto copyfault; } printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n", map, va, ftype, error); goto we_re_toast; } if (error == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: out of swap\n", p->p_pid, p->p_comm, p->p_cred && p->p_ucred ? (int)p->p_ucred->cr_uid : -1); sv.sival_ptr = (void *)fa; trapsignal(p, SIGKILL, T_PAGEFLT, SEGV_MAPERR, sv); } else { #ifdef TRAP_SIGDEBUG printf("pid %d (%s): SEGV at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, va); frame_dump(frame); #endif sv.sival_ptr = (void *)fa; trapsignal(p, SIGSEGV, T_PAGEFLT, SEGV_MAPERR, sv); } KERNEL_UNLOCK(); break; } case T_TRCTRAP: goto we_re_toast; case T_BPTFLT|T_USER: /* bpt instruction fault */ case T_TRCTRAP|T_USER: /* trace trap */ #ifdef MATH_EMULATE trace: #endif KERNEL_LOCK(); trapsignal(p, SIGTRAP, type &~ T_USER, TRAP_BRKPT, sv); KERNEL_UNLOCK(); break; #if NISA > 0 case T_NMI: #if defined(KGDB) || defined(DDB) /* NMI can be hooked up to a pushbutton for debugging */ printf ("NMI ... going to debugger\n"); #ifdef KGDB if (kgdb_trap(type, frame)) return; #endif #ifdef DDB if (kdb_trap(type, 0, frame)) return; #endif #endif /* KGDB || DDB */ /* machine/parity/power fail/"kitchen sink" faults */ if (x86_nmi() != 0) goto we_re_toast; else return; #endif /* NISA > 0 */ } if ((type & T_USER) == 0) return; out: userret(p); }
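/*
 * Editor's sketch: the T_PAGEFLT cases above turn the error code the CPU
 * pushes on a page fault into a uvm_fault() access type (PGEX_W selects
 * write, PGEX_I execute, otherwise read) and use PGEX_P to distinguish a
 * protection violation from an unmapped page.  The bit values follow the
 * x86 architecture: bit 0 present, bit 1 write, bit 4 instruction fetch.
 */
#include <stdio.h>
#include <stdint.h>

#define PGEX_P 0x01u	/* page was present: protection violation */
#define PGEX_W 0x02u	/* access was a write */
#define PGEX_I 0x10u	/* access was an instruction fetch */

static void
decode_pgex(uint32_t err)
{
	printf("%s %s\n",
	    (err & PGEX_W) ? "write" : (err & PGEX_I) ? "exec" : "read",
	    (err & PGEX_P) ? "protection fault" : "on unmapped page");
}

int
main(void)
{
	decode_pgex(PGEX_W | PGEX_P);	/* e.g. a copy-on-write store */
	decode_pgex(0);			/* demand page-in of a read */
	return 0;
}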
/* * Handle a TLB miss exception for a page marked as able to trigger the * end-of-page errata. * Returns nonzero if the exception has been completely serviced, and no * further processing in the trap handler is necessary. */ int eop_tlb_miss_handler(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p) { struct pcb *pcb; vaddr_t va, faultva; struct vmspace *vm; vm_map_t map; pmap_t pmap; pt_entry_t *pte, entry; int onfault; u_long asid; uint i, npairs; int64_t tlbidx; /* * Check for a valid pte with the `special' bit set (PG_SP) * in order to apply the end-of-page errata workaround. */ vm = p->p_vmspace; map = &vm->vm_map; faultva = trunc_page((vaddr_t)trapframe->badvaddr); pmap = map->pmap; pte = pmap_segmap(pmap, faultva); if (pte == NULL) return 0; pte += uvtopte(faultva); entry = *pte; if ((entry & PG_SP) == 0) return 0; pcb = &p->p_addr->u_pcb; asid = pmap->pm_asid[ci->ci_cpuid].pma_asid << PG_ASID_SHIFT; /* * For now, only allow one EOP vulnerable page to get a wired TLB * entry. We will aggressively attempt to recycle the wired TLB * entries created for that purpose, as soon as we no longer * need the EOP page resident in the TLB. */ /* * Figure out how many pages to wire in the TLB. */ if ((faultva & PG_ODDPG) != 0) { /* odd page: need two pairs */ npairs = 2; } else { /* even page: only need one pair */ npairs = 1; } /* * Fault-in the next page. */ va = faultva + PAGE_SIZE; pte = pmap_segmap(pmap, va); if (pte != NULL) pte += uvtopte(va); if (pte == NULL || (*pte & PG_V) == 0) { onfault = pcb->pcb_onfault; pcb->pcb_onfault = 0; KERNEL_LOCK(); (void)uvm_fault(map, va, 0, PROT_READ | PROT_EXEC); KERNEL_UNLOCK(); pcb->pcb_onfault = onfault; } /* * Clear possible TLB entries for the pages we're about to wire. */ for (i = npairs, va = faultva & PG_HVPN; i != 0; i--, va += 2 * PAGE_SIZE) { tlbidx = tlb_probe(va | asid); if (tlbidx >= 0) tlb_update_indexed(CKSEG0_BASE, PG_NV, PG_NV, tlbidx); } /* * Reserve the extra wired TLB entries, and fill them with the existing ptes. */ tlb_set_wired((UPAGES / 2) + npairs); for (i = 0, va = faultva & PG_HVPN; i != npairs; i++, va += 2 * PAGE_SIZE) { pte = pmap_segmap(pmap, va); if (pte == NULL) tlb_update_indexed(va | asid, PG_NV, PG_NV, (UPAGES / 2) + i); else { pte += uvtopte(va); tlb_update_indexed(va | asid, pte[0], pte[1], (UPAGES / 2) + i); } } /* * Save the base address of the EOP vulnerable page, to be able to * figure out when the wired entry is no longer necessary. */ pcb->pcb_nwired = npairs; pcb->pcb_wiredva = faultva & PG_HVPN; pcb->pcb_wiredpc = faultva; return 1; }
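/*
 * Editor's sketch: a MIPS TLB entry maps an even/odd pair of pages, which
 * is why eop_tlb_miss_handler() wires one pair when the vulnerable page
 * is the even page of its pair and two pairs when it is the odd page (its
 * successor then lives in the following pair).  PG_ODDPG and PG_HVPN are
 * modelled here for 4KB pages; the values are stand-ins for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PG_ODDPG	DEMO_PAGE_SIZE			/* low bit of the VPN */
#define DEMO_PG_HVPN	(~(2 * DEMO_PAGE_SIZE - 1))	/* pair base mask */

static unsigned
eop_wired_pairs(uintptr_t faultva, uintptr_t *base)
{
	*base = faultva & DEMO_PG_HVPN;	/* base va of the first pair */
	return (faultva & DEMO_PG_ODDPG) ? 2 : 1;
}

int
main(void)
{
	uintptr_t base;
	unsigned n = eop_wired_pairs(0x00403000UL, &base);	/* odd page */

	printf("wire %u pair(s) starting at %#lx\n", n, (unsigned long)base);
	return 0;
}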
/*ARGSUSED*/
void
trap(struct frame *fp, int type, unsigned code, unsigned v)
{
	extern char fubail[], subail[];
	struct lwp *l;
	struct proc *p;
	struct pcb *pcb;
	void *onfault;
	ksiginfo_t ksi;
	int s;
	int rv;
	u_quad_t sticks = 0 /* XXX initialiser works around compiler bug */;
	static int panicking __diagused;

	curcpu()->ci_data.cpu_ntrap++;
	l = curlwp;
	p = l->l_proc;
	pcb = lwp_getpcb(l);

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_trap = type & ~T_USER;

	if (USERMODE(fp->f_sr)) {
		type |= T_USER;
		sticks = p->p_sticks;
		l->l_md.md_regs = fp->f_regs;
		LWP_CACHE_CREDS(l, p);
	}

	switch (type) {
	default:
	dopanic:
		/*
		 * Let the kernel debugger see the trap frame that
		 * caused us to panic.  This is a convenience so
		 * one can see registers at the point of failure.
		 */
		s = splhigh();
		panicking = 1;
		printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v);
		printf("%s program counter = 0x%x\n",
		    (type & T_USER) ? "user" : "kernel", fp->f_pc);
#ifdef KGDB
		/* If connected, step or cont returns 1 */
		if (kgdb_trap(type, (db_regs_t *)fp))
			goto kgdb_cont;
#endif
#ifdef DDB
		(void)kdb_trap(type, (db_regs_t *)fp);
#endif
#ifdef KGDB
	kgdb_cont:
#endif
		splx(s);
		if (panicstr) {
			printf("trap during panic!\n");
#ifdef DEBUG
			/* XXX should be a machine-dependent hook */
			printf("(press a key)\n");
			(void)cngetc();
#endif
		}
		regdump((struct trapframe *)fp, 128);
		type &= ~T_USER;
		if ((u_int)type < trap_types)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* kernel bus error */
		onfault = pcb->pcb_onfault;
		if (onfault == NULL)
			goto dopanic;
		rv = EFAULT;
		/* FALLTHROUGH */

	copyfault:
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
		fp->f_stackadj = exframesize[fp->f_format];
		fp->f_format = fp->f_vector = 0;
		fp->f_pc = (int)onfault;
		fp->f_regs[D0] = rv;
		return;

	case T_BUSERR|T_USER:	/* bus error */
	case T_ADDRERR|T_USER:	/* address error */
		ksi.ksi_addr = (void *)v;
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
		    BUS_OBJERR : BUS_ADRERR;
		break;

	case T_COPERR:		/* kernel coprocessor violation */
	case T_FMTERR|T_USER:	/* do all RTE errors come in as T_USER? */
	case T_FMTERR:		/* ...just in case... */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", p->p_pid,
		    type == T_COPERR ? "coprocessor" : "format");
		type |= T_USER;
		mutex_enter(p->p_lock);
		SIGACTION(p, SIGILL).sa_handler = SIG_DFL;
		sigdelset(&p->p_sigctx.ps_sigignore, SIGILL);
		sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL);
		sigdelset(&l->l_sigmask, SIGILL);
		mutex_exit(p->p_lock);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_RESAD_FAULT */
		ksi.ksi_code = (type == T_COPERR) ?
		    ILL_COPROC : ILL_ILLOPC;
		break;

	case T_COPERR|T_USER:	/* user coprocessor violation */
		/* What is a proper response here? */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;

	case T_FPERR|T_USER:	/* 68881 exceptions */
		/*
		 * We pass along the 68881 status register which locore
		 * stashed in code for us.
		 */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = fpsr2siginfocode(code);
		break;

#ifdef M68040
	case T_FPEMULI|T_USER:	/* unimplemented FP instruction */
	case T_FPEMULD|T_USER:	/* unimplemented FP data type */
		/* XXX need to FSAVE */
		printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n",
		    p->p_pid, p->p_comm,
		    fp->f_format == 2 ? "instruction" : "data type",
		    fp->f_pc, fp->f_fmt2.f_iaddr);
		/* XXX need to FRESTORE */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;
#endif

	case T_ILLINST|T_USER:	/* illegal instruction fault */
	case T_PRIVINST|T_USER:	/* privileged instruction fault */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_PRIVIN_FAULT */
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
		    ILL_PRVOPC : ILL_ILLOPC;
		break;

	case T_ZERODIV|T_USER:	/* Divide by zero */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_INTDIV_TRAP */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTDIV;
		break;

	case T_CHKINST|T_USER:	/* CHK instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_SUBRNG_TRAP */
		ksi.ksi_signo = SIGFPE;
		break;

	case T_TRAPVINST|T_USER: /* TRAPV instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was FPE_INTOVF_TRAP */
		ksi.ksi_signo = SIGFPE;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 * HP-UX uses trap #1 for breakpoints,
	 * NetBSD/m68k uses trap #2,
	 * SUN 3.x uses trap #15,
	 * DDB and KGDB use trap #15 (for kernel breakpoints;
	 * handled elsewhere).
	 *
	 * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 *
	 * XXX: We should never get kernel-mode T_TRAP15
	 * XXX: because locore.s now gives them special treatment.
	 */
	case T_TRAP15:		/* kernel breakpoint */
#ifdef DEBUG
		printf("unexpected kernel trace trap, type = %d\n", type);
		printf("program counter = 0x%x\n", fp->f_pc);
#endif
		fp->f_sr &= ~PSL_T;
		return;

	case T_TRACE|T_USER:	/* user trace trap */
#ifdef COMPAT_SUNOS
		/*
		 * SunOS uses Trap #2 for a "CPU cache flush".
		 * Just flush the on-chip caches and return.
		 */
		if (p->p_emul == &emul_sunos) {
			ICIA();
			DCIU();
			return;
		}
#endif
		/* FALLTHROUGH */

	case T_TRACE:		/* tracing a trap instruction */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_ASTFLT:		/* system async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* user async trap */
		astpending = 0;
		/*
		 * We check for software interrupts first.  This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST.  We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check.  Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */

	case T_SSIR:		/* software interrupt */
	case T_SSIR|T_USER:
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != (T_ASTFLT|T_USER)) {
			curcpu()->ci_data.cpu_ntrap--;
			return;
		}
		spl0();
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		if (curcpu()->ci_want_resched)
			preempt();
		goto out;

	case T_MMUFLT:		/* kernel mode page fault */
		/*
		 * If we were doing profiling ticks or other user mode
		 * stuff from interrupt code, Just Say No.
		 */
		onfault = pcb->pcb_onfault;
		if (onfault == fubail || onfault == subail) {
			rv = EFAULT;
			goto copyfault;
		}
		/* fall into... */

	case T_MMUFLT|T_USER:	/* page fault */
	    {
		vaddr_t va;
		struct vmspace *vm = p->p_vmspace;
		struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

		onfault = pcb->pcb_onfault;
#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
			printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
			    p->p_pid, code, v, fp->f_pc, fp->f_sr);
#endif
		/*
		 * It is only a kernel address space fault iff:
		 *	1. (type & T_USER) == 0 and
		 *	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor space data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if ((type & T_USER) == 0 &&
		    (onfault == NULL || KDFAULT(code)))
			map = kernel_map;
		else
			map = vm ? &vm->vm_map : kernel_map;

		if (WRFAULT(code))
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		va = trunc_page((vaddr_t)v);

		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel %s access at 0x%x\n",
			    (ftype & VM_PROT_WRITE) ? "read/write" :
			    "read", v);
			goto dopanic;
		}

#ifdef DIAGNOSTIC
		if (interrupt_depth && !panicking) {
			printf("trap: calling uvm_fault() from interrupt!\n");
			goto dopanic;
		}
#endif

		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, va, ftype);
		pcb->pcb_onfault = onfault;
#ifdef DEBUG
		if (rv && MDB_ISPID(p->p_pid))
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (rv == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);

			if (type == T_MMUFLT) {
				if (ucas_ras_check(&fp->F_t))
					return;
#ifdef M68040
				if (cputype == CPU_68040)
					(void)writeback(fp, 1);
#endif
				return;
			}
			goto out;
		}
		if (rv == EACCES) {
			ksi.ksi_code = SEGV_ACCERR;
			rv = EFAULT;
		} else
			ksi.ksi_code = SEGV_MAPERR;
		if (type == T_MMUFLT) {
			if (onfault)
				goto copyfault;
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			printf(" type %x, code [mmu,,ssw]: %x\n",
			    type, code);
			goto dopanic;
		}
		ksi.ksi_addr = (void *)v;
		switch (rv) {
		case ENOMEM:
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			break;
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		break;
	    }
	}
	trapsignal(l, &ksi);
	if ((type & T_USER) == 0)
		return;
out:
	userret(l, fp, sticks, v, 1);
}
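/*
 * The copyfault path above is the consumer side of the pcb_onfault
 * convention: a copyin()/copyout()-style routine records a recovery
 * address before touching user memory, and on a fault the trap handler
 * rewrites the saved PC to that address with the error in D0 instead
 * of panicking.  The sketch below is a userspace analogue of that
 * convention, not the kernel code: sigsetjmp() stands in for
 * pcb_onfault, SIGSEGV stands in for the bus/MMU fault, and
 * copy_checked() is a hypothetical name.  (longjmp'ing out of a
 * SIGSEGV handler is the classic trick here, though POSIX only
 * loosely blesses it.)
 */
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static sigjmp_buf onfault;		/* analogue of pcb_onfault */
static volatile sig_atomic_t onfault_armed;

static void
segv_handler(int sig)
{
	(void)sig;
	if (onfault_armed)
		siglongjmp(onfault, 1);	/* "copyfault": jump to recovery */
	_exit(1);			/* nothing armed: would be dopanic */
}

/* copyin-style routine: 0 on success, EFAULT if the source faults. */
static int
copy_checked(void *dst, const void *src, size_t len)
{
	if (sigsetjmp(onfault, 1) != 0) {
		onfault_armed = 0;	/* fault was taken; report EFAULT */
		return EFAULT;
	}
	onfault_armed = 1;
	memcpy(dst, src, len);
	onfault_armed = 0;
	return 0;
}

int
main(void)
{
	char buf[16];

	signal(SIGSEGV, segv_handler);
	printf("good copy -> %d\n", copy_checked(buf, "hello", 6));
	/* Page zero is unmapped on most systems, so this should fault. */
	printf("bad copy  -> %d\n", copy_checked(buf, (const void *)4, 8));
	return 0;
}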
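/*
 * Both handlers finish a failed user-mode fault by translating the
 * uvm_fault() error into a (signal, si_code) pair for trapsignal().
 * Below is a table-style restatement of that mapping as a sketch;
 * fault_error_to_signal() is a hypothetical helper name (the handlers
 * above open-code the same logic in their switch/if on rv):
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>

static void
fault_error_to_signal(int error, int *signo, int *code)
{
	switch (error) {
	case ENOMEM:	/* out of swap: the process is killed outright */
		*signo = SIGKILL;
		*code = 0;
		break;
	case EINVAL:	/* address not usable by the hardware: bus error */
		*signo = SIGBUS;
		*code = BUS_ADRERR;
		break;
	case EACCES:	/* mapping exists but its protection forbids this */
		*signo = SIGSEGV;
		*code = SEGV_ACCERR;
		break;
	default:	/* no mapping at all */
		*signo = SIGSEGV;
		*code = SEGV_MAPERR;
		break;
	}
}

int
main(void)
{
	int sig, code;

	fault_error_to_signal(EACCES, &sig, &code);
	printf("EACCES -> signal %d, si_code %d\n", sig, code);
	return 0;
}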
/*ARGSUSED*/
void
trap(struct frame *fp, int type, u_int code, u_int v)
{
	extern char fubail[], subail[];
	struct lwp *l;
	struct proc *p;
	ksiginfo_t ksi;
	int s;
	u_quad_t sticks;

	uvmexp.traps++;
	l = curlwp;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_trap = type & ~T_USER;

	p = l->l_proc;

	if (USERMODE(fp->f_sr)) {
		type |= T_USER;
		sticks = p->p_sticks;
		l->l_md.md_regs = fp->f_regs;
		LWP_CACHE_CREDS(l, p);
	} else
		sticks = 0;

#ifdef DIAGNOSTIC
	if (l->l_addr == NULL)
		panic("trap: type 0x%x, code 0x%x, v 0x%x -- no pcb",
		    type, code, v);
#endif

	switch (type) {
	default:
	dopanic:
		printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v);
		printf("%s program counter = 0x%x\n",
		    (type & T_USER) ? "user" : "kernel", fp->f_pc);
		/*
		 * Let the kernel debugger see the trap frame that
		 * caused us to panic.  This is a convenience so
		 * one can see registers at the point of failure.
		 */
		s = splhigh();
#ifdef KGDB
		/* If connected, step or cont returns 1 */
		if (kgdb_trap(type, (db_regs_t *)fp))
			goto kgdb_cont;
#endif
#ifdef DDB
		(void)kdb_trap(type, (db_regs_t *)fp);
#endif
#ifdef KGDB
	kgdb_cont:
#endif
		splx(s);
		if (panicstr) {
			printf("trap during panic!\n");
#ifdef DEBUG
			/* XXX should be a machine-dependent hook */
			printf("(press a key)\n");
			(void)cngetc();
#endif
		}
		regdump((struct trapframe *)fp, 128);
		type &= ~T_USER;
		if ((u_int)type < trap_types)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* Kernel bus error */
		if (!l->l_addr->u_pcb.pcb_onfault)
			goto dopanic;
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
	copyfault:
		fp->f_stackadj = exframesize[fp->f_format];
		fp->f_format = fp->f_vector = 0;
		fp->f_pc = (int)l->l_addr->u_pcb.pcb_onfault;
		return;

	case T_BUSERR|T_USER:	/* Bus error */
	case T_ADDRERR|T_USER:	/* Address error */
		ksi.ksi_addr = (void *)v;
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
		    BUS_OBJERR : BUS_ADRERR;
		break;

	case T_ILLINST|T_USER:	/* Illegal instruction fault */
	case T_PRIVINST|T_USER:	/* Privileged instruction fault */
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_PRIVIN_FAULT */
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
		    ILL_PRVOPC : ILL_ILLOPC;
		break;

	/*
	 * Divide by zero, CHK/TRAPV inst
	 */
	case T_ZERODIV|T_USER:	/* Divide by zero trap */
		ksi.ksi_code = FPE_FLTDIV;
		/* FALLTHROUGH */
	case T_CHKINST|T_USER:	/* CHK instruction trap */
	case T_TRAPVINST|T_USER: /* TRAPV instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
		ksi.ksi_signo = SIGFPE;
		break;

	/*
	 * User coprocessor violation
	 */
	case T_COPERR|T_USER:
		/* XXX What is a proper response here? */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;

	/*
	 * 6888x exceptions
	 */
	case T_FPERR|T_USER:
		/*
		 * We pass along the 68881 status register which locore
		 * stashed in code for us.  Note that there is a
		 * possibility that the bit pattern of this register
		 * will conflict with one of the FPE_* codes defined
		 * in signal.h.  Fortunately for us, the only such
		 * codes we use are all in the range 1-7 and the low
		 * 3 bits of the status register are defined as 0 so
		 * there is no clash.
		 */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_addr = (void *)code;
		break;

	/*
	 * FPU faults in supervisor mode.
	 */
	case T_ILLINST:		/* fnop generates this, apparently. */
	case T_FPEMULI:
	case T_FPEMULD:
	    {
		extern label_t *nofault;

		if (nofault)	/* If we're probing. */
			longjmp(nofault);
		if (type == T_ILLINST)
			printf("Kernel Illegal Instruction trap.\n");
		else
			printf("Kernel FPU trap.\n");
		goto dopanic;
	    }

	/*
	 * Unimplemented FPU instructions/datatypes.
	 */
	case T_FPEMULI|T_USER:
	case T_FPEMULD|T_USER:
#ifdef FPU_EMULATE
		if (fpu_emulate(fp, &l->l_addr->u_pcb.pcb_fpregs, &ksi) == 0)
			; /* XXX - Deal with tracing? (fp->f_sr & PSL_T) */
#else
		uprintf("pid %d killed: no floating point support.\n",
		    p->p_pid);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
#endif
		break;

	case T_COPERR:		/* Kernel coprocessor violation */
	case T_FMTERR:		/* Kernel format error */
	case T_FMTERR|T_USER:	/* User format error */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", p->p_pid,
		    type == T_COPERR ? "coprocessor" : "format");
		type |= T_USER;
		mutex_enter(p->p_lock);
		SIGACTION(p, SIGILL).sa_handler = SIG_DFL;
		sigdelset(&p->p_sigctx.ps_sigignore, SIGILL);
		sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL);
		sigdelset(&l->l_sigmask, SIGILL);
		mutex_exit(p->p_lock);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *)(int)fp->f_format;
				/* XXX was ILL_RESAD_FAULT */
		ksi.ksi_code = (type == T_COPERR) ?
		    ILL_COPROC : ILL_ILLOPC;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 * HP-UX uses trap #1 for breakpoints,
	 * NetBSD/m68k uses trap #2,
	 * SUN 3.x uses trap #15,
	 * DDB and KGDB use trap #15 (for kernel breakpoints;
	 * handled elsewhere).
	 *
	 * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 *
	 * XXX: We should never get kernel-mode T_TRAP15 because
	 * XXX: locore.s now gives it special treatment.
	 */
	case T_TRAP15:		/* SUN trace trap */
#ifdef DEBUG
		printf("unexpected kernel trace trap, type = %d\n", type);
		printf("program counter = 0x%x\n", fp->f_pc);
#endif
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_TRACE|T_USER:	/* user trace trap */
#ifdef COMPAT_SUNOS
		/*
		 * SunOS uses Trap #2 for a "CPU cache flush".
		 * Just flush the on-chip caches and return.
		 */
		if (p->p_emul == &emul_sunos) {
			ICIA();
			DCIU();
			return;
		}
#endif
		/* FALLTHROUGH */

	case T_TRACE:		/* tracing a trap instruction */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_ASTFLT:		/* System async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* User async trap. */
		astpending = 0;
		/*
		 * We check for software interrupts first.  This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST.  We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check.  Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */

	case T_SSIR:		/* Software interrupt */
	case T_SSIR|T_USER:
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != (T_ASTFLT|T_USER)) {
			uvmexp.traps--;
			return;
		}
		spl0();
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		if (curcpu()->ci_want_resched)
			preempt();
		goto out;

	case T_MMUFLT:		/* Kernel mode page fault */
		/*
		 * If we were doing profiling ticks or other user mode
		 * stuff from interrupt code, Just Say No.
		 */
		if (l->l_addr->u_pcb.pcb_onfault == fubail ||
		    l->l_addr->u_pcb.pcb_onfault == subail)
			goto copyfault;
		/* fall into... */

	case T_MMUFLT|T_USER:	/* page fault */
	    {
		vaddr_t va;
		struct vmspace *vm = p->p_vmspace;
		struct vm_map *map;
		int rv;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
			printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
			    p->p_pid, code, v, fp->f_pc, fp->f_sr);
#endif
		/*
		 * It is only a kernel address space fault iff:
		 *	1. (type & T_USER) == 0 and
		 *	2. pcb_onfault not set or
		 *	3. pcb_onfault set but supervisor data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_MMUFLT &&
		    (!l->l_addr->u_pcb.pcb_onfault || KDFAULT(code)))
			map = kernel_map;
		else {
			map = vm ? &vm->vm_map : kernel_map;
			if ((l->l_flag & LW_SA) &&
			    (~l->l_pflag & LP_SA_NOBLOCK)) {
				l->l_savp->savp_faultaddr = (vaddr_t)v;
				l->l_pflag |= LP_SA_PAGEFAULT;
			}
		}

		if (WRFAULT(code))
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		va = trunc_page((vaddr_t)v);

#ifdef DEBUG
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %x\n", v);
			goto dopanic;
		}
#endif
		rv = uvm_fault(map, va, ftype);
#ifdef DEBUG
		if (rv && MDB_ISPID(p->p_pid))
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
#endif
		/*
		 * If this was a stack access, we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure, it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (rv == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);

			if (type == T_MMUFLT) {
#if defined(M68040)
				if (mmutype == MMU_68040)
					(void)writeback(fp, 1);
#endif
				return;
			}
			l->l_pflag &= ~LP_SA_PAGEFAULT;
			goto out;
		}
		if (rv == EACCES) {
			ksi.ksi_code = SEGV_ACCERR;
			rv = EFAULT;
		} else
			ksi.ksi_code = SEGV_MAPERR;
		if (type == T_MMUFLT) {
			if (l->l_addr->u_pcb.pcb_onfault)
				goto copyfault;
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			printf(" type %x, code [mmu,,ssw]: %x\n",
			    type, code);
			goto dopanic;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		ksi.ksi_addr = (void *)v;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		} else {
			ksi.ksi_signo = SIGSEGV;
		}
		break;
	    }
	}

	if (ksi.ksi_signo)
		trapsignal(l, &ksi);
	if ((type & T_USER) == 0)
		return;
out:
	userret(l, fp, sticks, v, 1);
}
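/*
 * On a successful user fault at or above vm_maxsaddr, both handlers
 * call uvm_grow() so the recorded stack size only ever ratchets
 * upward.  Below is a self-contained sketch of that ratchet, assuming
 * a downward-growing stack; USRSTACK_SK, PAGE_SZ and struct vmspace_sk
 * are illustrative stand-ins for the kernel's definitions, not the
 * real ones.
 */
#include <stdio.h>

#define USRSTACK_SK	0x80000000UL	/* hypothetical top of user stack */
#define PAGE_SZ		4096UL

struct vmspace_sk {
	unsigned long vm_maxsaddr;	/* lowest possible stack address */
	unsigned long vm_ssize;		/* current stack size, in pages */
};

static void
grow_sk(struct vmspace_sk *vm, unsigned long va)
{
	unsigned long nss;

	if (va < vm->vm_maxsaddr || va >= USRSTACK_SK)
		return;				/* not a stack access */
	/* Pages between the top of the stack and the faulting page. */
	nss = (USRSTACK_SK - (va & ~(PAGE_SZ - 1))) / PAGE_SZ;
	if (nss > vm->vm_ssize)
		vm->vm_ssize = nss;		/* ratchet up, never down */
}

int
main(void)
{
	struct vmspace_sk vm = { 0x7f000000UL, 1 };

	grow_sk(&vm, 0x7fffe123UL);	/* fault two pages below the top */
	printf("stack is now %lu pages\n", vm.vm_ssize);
	return 0;
}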
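/*
 * KDFAULT() and WRFAULT() in both handlers decode the 680x0 special
 * status word (passed in "code") to decide whether the fault hit
 * supervisor data space and whether it was a write.  The sketch below
 * only illustrates the shape of that decoding: every bit position and
 * name here is invented for the example, and the real masks differ
 * per CPU model and live in each port's machine headers.
 */
#include <stdio.h>

#define SSW_DF		0x0100	/* illustrative: a data fault occurred */
#define SSW_RW		0x0040	/* illustrative: set on reads, clear on writes */
#define SSW_FCMASK	0x0007	/* illustrative: address-space function code */
#define FC_SUPERD	5	/* illustrative: supervisor data space */

#define KDFAULT_SK(c)	(((c) & SSW_FCMASK) == FC_SUPERD)
#define WRFAULT_SK(c)	(((c) & (SSW_DF | SSW_RW)) == SSW_DF)

int
main(void)
{
	unsigned code = SSW_DF | FC_SUPERD;	/* supervisor data write */

	printf("kernel-space fault: %d, write fault: %d\n",
	    KDFAULT_SK(code), WRFAULT_SK(code));
	return 0;
}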