void ast(struct trapframe *tf) { struct proc *p = curproc;; #ifdef acorn26 /* Enable interrupts if they were enabled before the trap. */ if ((tf->tf_r15 & R15_IRQ_DISABLE) == 0) int_on(); #else /* Interrupts were restored by exception_exit. */ #endif uvmexp.traps++; uvmexp.softs++; #ifdef DEBUG if (p == NULL) panic("ast: no curproc!"); if (&p->p_addr->u_pcb == 0) panic("ast: no pcb!"); #endif if (p->p_flag & P_OWEUPC) { p->p_flag &= ~P_OWEUPC; ADDUPROF(p); } /* Allow a forced task switch. */ if (want_resched) preempt(0); userret(p, tf->tf_pc, p->p_sticks); /* XXX */ }
/*
 * Handle asynchronous software traps.
 *
 * Called on return to user mode when md_astpending has been set.
 * Charges deferred profiling ticks (under the kernel lock, as ADDUPROF
 * is invoked here with KERNEL_LOCK held) and honours a pending
 * reschedule request before userret().
 */
void
ast(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;

	uvmexp.softs++;

	/* Acknowledge the AST before acting on its causes. */
	p->p_md.md_astpending = 0;

	if (p->p_flag & P_OWEUPC) {
		KERNEL_LOCK();
		ADDUPROF(p);
		KERNEL_UNLOCK();
	}

	/* Allow a forced task switch. */
	if (ci->ci_want_resched)
		preempt(NULL);

	userret(p);
}
/*
 * Handle an AST for the current process.
 *
 * Sanity-checks that an AST was actually pending, clears the pending
 * flag, charges deferred profiling ticks, and allows a forced task
 * switch before returning to user mode via userret().
 */
void
ast()
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;

	uvmexp.softs++;

	/* Reaching here with no AST pending indicates lost/corrupt state. */
	if (p->p_md.md_astpending == 0)
		panic("unexpected ast p %p astpending %p\n",
		    p, &p->p_md.md_astpending);
	p->p_md.md_astpending = 0;

	if (p->p_flag & P_OWEUPC) {
		ADDUPROF(p);
	}
	if (ci->ci_want_resched)
		preempt(NULL);
	userret(p);
}
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 *
 * p      - current process
 * pc     - trapped user program counter (for profiling charge)
 * oticks - p_sticks snapshot taken at trap entry
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		psig(sig);
	p->p_pri = p->p_usrpri;
	/* A pending AST means profiling ticks may be owed. */
	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & SOWEUPC) {
			p->p_flag &= ~SOWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrq ourselves but
		 * before we swtch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrq(p);
		p->p_stats->p_ru.ru_nivcsw++;
		swtch();
		(void) spl0();
		/* Signals may have arrived while we were switched out. */
		while ((sig = CURSIG(p)) != 0)
			psig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & SPROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));
	curpri = p->p_pri;
}
/*
 * Define the code needed before returning to user mode, for
 * trap, mem_access_fault, and syscall.
 *
 * p      - current process
 * pc     - trapped user program counter (for profiling charge)
 * oticks - p_sticks snapshot taken at trap entry
 */
static inline void
userret(struct proc *p, int pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);
	p->p_priority = p->p_usrpri;
	/* A pending AST means profiling ticks may be owed. */
	if (want_ast) {
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we put ourselves on the run queue
		 * but before we switched, we might not be on the queue
		 * indicated by our priority.
		 */
		(void) splstatclock();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		(void) spl0();
		/* Signals may have arrived while we were switched out. */
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(p, pc, (int)(p->p_sticks - oticks));
	curpriority = p->p_priority;
}
/*
 * Handle an AST for the current LWP: charge deferred profiling ticks
 * and honour a pending reschedule request before returning to user
 * mode via userret().
 */
void
ast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

#ifdef acorn26
	/* Enable interrupts if they were enabled before the trap. */
	if ((tf->tf_r15 & R15_IRQ_DISABLE) == 0)
		int_on();
#else
	/* Interrupts were restored by exception_exit. */
#endif

#ifdef __PROG32
	KASSERT(VALID_R15_PSR(tf->tf_pc, tf->tf_spsr));
#endif

#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
	/* Sample the reschedule flag inside the preemption-disabled window. */
	const int want_resched = ci->ci_want_resched;
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	/* Allow a forced task switch. */
	if (want_resched)
		preempt();
	userret(l);
}
/*
 * Work done on every return to user mode: deliver pending signals,
 * handle a pending AST (deferred profiling charge), allow preemption,
 * and charge recent system time to the trapped pc when profiling.
 *
 * p      - current process
 * pc     - trapped user program counter
 * oticks - p_sticks snapshot taken at trap entry
 */
void
userret(struct proc *p, register_t pc, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	p->p_priority = p->p_usrpri;
	if (astpending) {
		astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			ADDUPROF(p);
		}
	}
	if (want_resched) {
		/*
		 * We're being preempted.
		 */
		preempt(NULL);
		/* Signals may have arrived while we were switched out. */
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_flag & P_PROFIL) {
		extern int psratio;

		addupc_task(p, pc, (int)(p->p_sticks - oticks) * psratio);
	}

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority;
}
/*
 * NOTE(review): m68k trap() — central dispatcher for hardware traps,
 * faults and ASTs.  User-mode traps are tagged with T_USER and end by
 * delivering a signal (trapsignal) and/or running userret(); kernel
 * faults either resolve via pcb_onfault ("copyfault"), fall into
 * uvm_fault() for T_MMUFLT, or panic ("dopanic").  The goto-based
 * control flow and exact statement order are load-bearing, so the code
 * below is left byte-identical (formatting preserved as found); only
 * this header comment was added.  The embedded #ifdef KGDB/DDB/M68040
 * regions select debugger hooks and 68040 writeback handling.
 */
/*ARGSUSED*/ void trap(struct frame *fp, int type, unsigned code, unsigned v) { extern char fubail[], subail[]; struct lwp *l; struct proc *p; struct pcb *pcb; void *onfault; ksiginfo_t ksi; int s; int rv; u_quad_t sticks = 0 /* XXX initialiser works around compiler bug */; static int panicking __diagused; curcpu()->ci_data.cpu_ntrap++; l = curlwp; p = l->l_proc; pcb = lwp_getpcb(l); KSI_INIT_TRAP(&ksi); ksi.ksi_trap = type & ~T_USER; if (USERMODE(fp->f_sr)) { type |= T_USER; sticks = p->p_sticks; l->l_md.md_regs = fp->f_regs; LWP_CACHE_CREDS(l, p); } switch (type) { default: dopanic: /* * Let the kernel debugger see the trap frame that * caused us to panic. This is a convenience so * one can see registers at the point of failure. */ s = splhigh(); panicking = 1; printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v); printf("%s program counter = 0x%x\n", (type & T_USER) ? "user" : "kernel", fp->f_pc); #ifdef KGDB /* If connected, step or cont returns 1 */ if (kgdb_trap(type, (db_regs_t *)fp)) goto kgdb_cont; #endif #ifdef DDB (void)kdb_trap(type, (db_regs_t *)fp); #endif #ifdef KGDB kgdb_cont: #endif splx(s); if (panicstr) { printf("trap during panic!\n"); #ifdef DEBUG /* XXX should be a machine-dependent hook */ printf("(press a key)\n"); (void)cngetc(); #endif } regdump((struct trapframe *)fp, 128); type &= ~T_USER; if ((u_int)type < trap_types) panic(trap_type[type]); panic("trap"); case T_BUSERR: /* kernel bus error */ onfault = pcb->pcb_onfault; if (onfault == NULL) goto dopanic; rv = EFAULT; /* FALLTHROUGH */ copyfault: /* * If we have arranged to catch this fault in any of the * copy to/from user space routines, set PC to return to * indicated location and set flag informing buserror code * that it may need to clean up stack frame. 
*/ fp->f_stackadj = exframesize[fp->f_format]; fp->f_format = fp->f_vector = 0; fp->f_pc = (int)onfault; fp->f_regs[D0] = rv; return; case T_BUSERR|T_USER: /* bus error */ case T_ADDRERR|T_USER: /* address error */ ksi.ksi_addr = (void *)v; ksi.ksi_signo = SIGBUS; ksi.ksi_code = (type == (T_BUSERR|T_USER)) ? BUS_OBJERR : BUS_ADRERR; break; case T_COPERR: /* kernel coprocessor violation */ case T_FMTERR|T_USER: /* do all RTE errors come in as T_USER? */ case T_FMTERR: /* ...just in case... */ /* * The user has most likely trashed the RTE or FP state info * in the stack frame of a signal handler. */ printf("pid %d: kernel %s exception\n", p->p_pid, type==T_COPERR ? "coprocessor" : "format"); type |= T_USER; mutex_enter(p->p_lock); SIGACTION(p, SIGILL).sa_handler = SIG_DFL; sigdelset(&p->p_sigctx.ps_sigignore, SIGILL); sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL); sigdelset(&l->l_sigmask, SIGILL); mutex_exit(p->p_lock); ksi.ksi_signo = SIGILL; ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was ILL_RESAD_FAULT */ ksi.ksi_code = (type == T_COPERR) ? ILL_COPROC : ILL_ILLOPC; break; case T_COPERR|T_USER: /* user coprocessor violation */ /* What is a proper response here? */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = FPE_FLTINV; break; case T_FPERR|T_USER: /* 68881 exceptions */ /* * We pass along the 68881 status register which locore stashed * in code for us. */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = fpsr2siginfocode(code); break; #ifdef M68040 case T_FPEMULI|T_USER: /* unimplemented FP instruction */ case T_FPEMULD|T_USER: /* unimplemented FP data type */ /* XXX need to FSAVE */ printf("pid %d(%s): unimplemented FP %s at %x (EA %x)\n", p->p_pid, p->p_comm, fp->f_format == 2 ? 
"instruction" : "data type", fp->f_pc, fp->f_fmt2.f_iaddr); /* XXX need to FRESTORE */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = FPE_FLTINV; break; #endif case T_ILLINST|T_USER: /* illegal instruction fault */ case T_PRIVINST|T_USER: /* privileged instruction fault */ ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was ILL_PRIVIN_FAULT */ ksi.ksi_signo = SIGILL; ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ? ILL_PRVOPC : ILL_ILLOPC; break; case T_ZERODIV|T_USER: /* Divide by zero */ ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was FPE_INTDIV_TRAP */ ksi.ksi_signo = SIGFPE; ksi.ksi_code = FPE_FLTDIV; break; case T_CHKINST|T_USER: /* CHK instruction trap */ ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was FPE_SUBRNG_TRAP */ ksi.ksi_signo = SIGFPE; break; case T_TRAPVINST|T_USER: /* TRAPV instruction trap */ ksi.ksi_addr = (void *)(int)fp->f_format; /* XXX was FPE_INTOVF_TRAP */ ksi.ksi_signo = SIGFPE; break; /* * XXX: Trace traps are a nightmare. * * HP-UX uses trap #1 for breakpoints, * NetBSD/m68k uses trap #2, * SUN 3.x uses trap #15, * DDB and KGDB uses trap #15 (for kernel breakpoints; * handled elsewhere). * * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE. * SUN 3.x traps get passed through as T_TRAP15 and are not really * supported yet. * * XXX: We should never get kernel-mode T_TRAP15 * XXX: because locore.s now gives them special treatment. */ case T_TRAP15: /* kernel breakpoint */ #ifdef DEBUG printf("unexpected kernel trace trap, type = %d\n", type); printf("program counter = 0x%x\n", fp->f_pc); #endif fp->f_sr &= ~PSL_T; return; case T_TRACE|T_USER: /* user trace trap */ #ifdef COMPAT_SUNOS /* * SunOS uses Trap #2 for a "CPU cache flush". * Just flush the on-chip caches and return. 
*/ if (p->p_emul == &emul_sunos) { ICIA(); DCIU(); return; } #endif /* FALLTHROUGH */ case T_TRACE: /* tracing a trap instruction */ case T_TRAP15|T_USER: /* SUN user trace trap */ fp->f_sr &= ~PSL_T; ksi.ksi_signo = SIGTRAP; break; case T_ASTFLT: /* system async trap, cannot happen */ goto dopanic; case T_ASTFLT|T_USER: /* user async trap */ astpending = 0; /* * We check for software interrupts first. This is because * they are at a higher level than ASTs, and on a VAX would * interrupt the AST. We assume that if we are processing * an AST that we must be at IPL0 so we don't bother to * check. Note that we ensure that we are at least at SIR * IPL while processing the SIR. */ spl1(); /* fall into... */ case T_SSIR: /* software interrupt */ case T_SSIR|T_USER: /* * If this was not an AST trap, we are all done. */ if (type != (T_ASTFLT|T_USER)) { curcpu()->ci_data.cpu_ntrap--; return; } spl0(); if (l->l_pflag & LP_OWEUPC) { l->l_pflag &= ~LP_OWEUPC; ADDUPROF(l); } if (curcpu()->ci_want_resched) preempt(); goto out; case T_MMUFLT: /* kernel mode page fault */ /* * If we were doing profiling ticks or other user mode * stuff from interrupt code, Just Say No. */ onfault = pcb->pcb_onfault; if (onfault == fubail || onfault == subail) { rv = EFAULT; goto copyfault; } /* fall into ... */ case T_MMUFLT|T_USER: /* page fault */ { vaddr_t va; struct vmspace *vm = p->p_vmspace; struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; onfault = pcb->pcb_onfault; #ifdef DEBUG if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n", p->p_pid, code, v, fp->f_pc, fp->f_sr); #endif /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space data fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. 
*/ if ((type & T_USER) == 0 && (onfault == NULL || KDFAULT(code))) map = kernel_map; else { map = vm ? &vm->vm_map : kernel_map; } if (WRFAULT(code)) ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; va = trunc_page((vaddr_t)v); if (map == kernel_map && va == 0) { printf("trap: bad kernel %s access at 0x%x\n", (ftype & VM_PROT_WRITE) ? "read/write" : "read", v); goto dopanic; } #ifdef DIAGNOSTIC if (interrupt_depth && !panicking) { printf("trap: calling uvm_fault() from interrupt!\n"); goto dopanic; } #endif pcb->pcb_onfault = NULL; rv = uvm_fault(map, va, ftype); pcb->pcb_onfault = onfault; #ifdef DEBUG if (rv && MDB_ISPID(p->p_pid)) printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n", map, va, ftype, rv); #endif /* * If this was a stack access we keep track of the maximum * accessed stack size. Also, if vm_fault gets a protection * failure it is due to accessing the stack region outside * the current limit and we need to reflect that as an access * error. */ if (rv == 0) { if (map != kernel_map && (void *)va >= vm->vm_maxsaddr) uvm_grow(p, va); if (type == T_MMUFLT) { if (ucas_ras_check(&fp->F_t)) { return; } #ifdef M68040 if (cputype == CPU_68040) (void) writeback(fp, 1); #endif return; } goto out; } if (rv == EACCES) { ksi.ksi_code = SEGV_ACCERR; rv = EFAULT; } else ksi.ksi_code = SEGV_MAPERR; if (type == T_MMUFLT) { if (onfault) goto copyfault; printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n", map, va, ftype, rv); printf(" type %x, code [mmu,,ssw]: %x\n", type, code); goto dopanic; } ksi.ksi_addr = (void *)v; switch (rv) { case ENOMEM: printf("UVM: pid %d (%s), uid %d killed: out of swap\n", p->p_pid, p->p_comm, l->l_cred ? 
kauth_cred_geteuid(l->l_cred) : -1); ksi.ksi_signo = SIGKILL; break; case EINVAL: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRERR; break; case EACCES: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; break; default: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; break; } break; } } trapsignal(l, &ksi); if ((type & T_USER) == 0) return; out: userret(l, fp, sticks, v, 1); }
/*
 * NOTE(review): amd64 trap() — handles processor exceptions and faults.
 * Kernel-mode faults resolve via pcb_onfault ("copyfault"), page-fault
 * handling via uvm_fault() ("faultcommon", shared by kernel and user
 * page faults), or panic ("we_re_toast"); user-mode traps deliver a
 * signal under KERNEL_LOCK and return through userret() at "out".
 * The goto-based control flow and lock pairing (KERNEL_LOCK taken
 * before faultcommon, released on every exit path) are load-bearing,
 * so the code below is left byte-identical (formatting preserved as
 * found); only this header comment was added.
 */
/*ARGSUSED*/ void trap(struct trapframe *frame) { struct proc *p = curproc; int type = (int)frame->tf_trapno; struct pcb *pcb; extern char doreti_iret[], resume_iret[]; caddr_t onfault; int error; uint64_t cr2; union sigval sv; uvmexp.traps++; pcb = (p != NULL && p->p_addr != NULL) ? &p->p_addr->u_pcb : NULL; #ifdef DEBUG if (trapdebug) { printf("trap %d code %lx rip %lx cs %lx rflags %lx cr2 %lx " "cpl %x\n", type, frame->tf_err, frame->tf_rip, frame->tf_cs, frame->tf_rflags, rcr2(), curcpu()->ci_ilevel); printf("curproc %p\n", curproc); if (curproc) printf("pid %d\n", p->p_pid); } #endif if (!KERNELMODE(frame->tf_cs, frame->tf_rflags)) { type |= T_USER; p->p_md.md_regs = frame; } switch (type) { default: we_re_toast: #ifdef KGDB if (kgdb_trap(type, frame)) return; else { /* * If this is a breakpoint, don't panic * if we're not connected. */ if (type == T_BPTFLT) { printf("kgdb: ignored %s\n", trap_type[type]); return; } } #endif #ifdef DDB if (kdb_trap(type, 0, frame)) return; #endif if (frame->tf_trapno < trap_types) printf("fatal %s", trap_type[frame->tf_trapno]); else printf("unknown trap %ld", (u_long)frame->tf_trapno); printf(" in %s mode\n", (type & T_USER) ? "user" : "supervisor"); printf("trap type %d code %lx rip %lx cs %lx rflags %lx cr2 " " %lx cpl %x rsp %lx\n", type, frame->tf_err, (u_long)frame->tf_rip, frame->tf_cs, frame->tf_rflags, rcr2(), curcpu()->ci_ilevel, frame->tf_rsp); panic("trap type %d, code=%lx, pc=%lx", type, frame->tf_err, frame->tf_rip); /*NOTREACHED*/ case T_PROTFLT: case T_SEGNPFLT: case T_ALIGNFLT: case T_TSSFLT: if (p == NULL) goto we_re_toast; /* Check for copyin/copyout fault. */ if (pcb->pcb_onfault != 0) { error = EFAULT; copyfault: frame->tf_rip = (u_int64_t)pcb->pcb_onfault; frame->tf_rax = error; return; } /* * Check for failure during return to user mode. * We do this by looking at the address of the * instruction that faulted. 
*/ if (frame->tf_rip == (u_int64_t)doreti_iret) { frame->tf_rip = (u_int64_t)resume_iret; return; } goto we_re_toast; case T_PROTFLT|T_USER: /* protection fault */ case T_TSSFLT|T_USER: case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: case T_NMI|T_USER: #ifdef TRAP_SIGDEBUG printf("pid %d (%s): BUS at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, rcr2()); frame_dump(frame); #endif sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGBUS, type & ~T_USER, BUS_OBJERR, sv); KERNEL_UNLOCK(); goto out; case T_ALIGNFLT|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGBUS, type & ~T_USER, BUS_ADRALN, sv); KERNEL_UNLOCK(); goto out; case T_PRIVINFLT|T_USER: /* privileged instruction fault */ sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGILL, type & ~T_USER, ILL_PRVOPC, sv); KERNEL_UNLOCK(); goto out; case T_FPOPFLT|T_USER: /* coprocessor operand fault */ #ifdef TRAP_SIGDEBUG printf("pid %d (%s): ILL at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, rcr2()); frame_dump(frame); #endif sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGILL, type & ~T_USER, ILL_COPROC, sv); KERNEL_UNLOCK(); goto out; case T_ASTFLT|T_USER: /* Allow process switch */ uvmexp.softs++; if (p->p_flag & P_OWEUPC) { KERNEL_LOCK(); ADDUPROF(p); KERNEL_UNLOCK(); } /* Allow a forced task switch. 
*/ if (curcpu()->ci_want_resched) preempt(NULL); goto out; case T_BOUND|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_FLTSUB, sv); KERNEL_UNLOCK(); goto out; case T_OFLOW|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTOVF, sv); KERNEL_UNLOCK(); goto out; case T_DIVIDE|T_USER: sv.sival_ptr = (void *)frame->tf_rip; KERNEL_LOCK(); trapsignal(p, SIGFPE, type &~ T_USER, FPE_INTDIV, sv); KERNEL_UNLOCK(); goto out; case T_ARITHTRAP|T_USER: case T_XMM|T_USER: fputrap(frame); goto out; case T_PAGEFLT: /* allow page faults in kernel mode */ if (p == NULL) goto we_re_toast; cr2 = rcr2(); KERNEL_LOCK(); goto faultcommon; case T_PAGEFLT|T_USER: { /* page fault */ vaddr_t va, fa; struct vmspace *vm; struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; cr2 = rcr2(); KERNEL_LOCK(); faultcommon: vm = p->p_vmspace; if (vm == NULL) goto we_re_toast; fa = cr2; va = trunc_page((vaddr_t)cr2); /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. */ if (type == T_PAGEFLT && va >= VM_MIN_KERNEL_ADDRESS) map = kernel_map; else map = &vm->vm_map; if (frame->tf_err & PGEX_W) ftype = VM_PROT_WRITE; else if (frame->tf_err & PGEX_I) ftype = VM_PROT_EXECUTE; else ftype = VM_PROT_READ; #ifdef DIAGNOSTIC if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %lx\n", va); goto we_re_toast; } #endif /* Fault the original page in. */ onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = uvm_fault(map, va, frame->tf_err & PGEX_P? 
VM_FAULT_PROTECT : VM_FAULT_INVALID, ftype); pcb->pcb_onfault = onfault; if (error == 0) { if (map != kernel_map) uvm_grow(p, va); if (type == T_PAGEFLT) { KERNEL_UNLOCK(); return; } KERNEL_UNLOCK(); goto out; } if (error == EACCES) { error = EFAULT; } if (type == T_PAGEFLT) { if (pcb->pcb_onfault != 0) { KERNEL_UNLOCK(); goto copyfault; } printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n", map, va, ftype, error); goto we_re_toast; } if (error == ENOMEM) { printf("UVM: pid %d (%s), uid %d killed: out of swap\n", p->p_pid, p->p_comm, p->p_cred && p->p_ucred ? (int)p->p_ucred->cr_uid : -1); sv.sival_ptr = (void *)fa; trapsignal(p, SIGKILL, T_PAGEFLT, SEGV_MAPERR, sv); } else { #ifdef TRAP_SIGDEBUG printf("pid %d (%s): SEGV at rip %lx addr %lx\n", p->p_pid, p->p_comm, frame->tf_rip, va); frame_dump(frame); #endif sv.sival_ptr = (void *)fa; trapsignal(p, SIGSEGV, T_PAGEFLT, SEGV_MAPERR, sv); } KERNEL_UNLOCK(); break; } case T_TRCTRAP: goto we_re_toast; case T_BPTFLT|T_USER: /* bpt instruction fault */ case T_TRCTRAP|T_USER: /* trace trap */ #ifdef MATH_EMULATE trace: #endif KERNEL_LOCK(); trapsignal(p, SIGTRAP, type &~ T_USER, TRAP_BRKPT, sv); KERNEL_UNLOCK(); break; #if NISA > 0 case T_NMI: #if defined(KGDB) || defined(DDB) /* NMI can be hooked up to a pushbutton for debugging */ printf ("NMI ... going to debugger\n"); #ifdef KGDB if (kgdb_trap(type, frame)) return; #endif #ifdef DDB if (kdb_trap(type, 0, frame)) return; #endif #endif /* KGDB || DDB */ /* machine/parity/power fail/"kitchen sink" faults */ if (x86_nmi() != 0) goto we_re_toast; else return; #endif /* NISA > 0 */ } if ((type & T_USER) == 0) return; out: userret(p); }
/*
 * NOTE(review): i386 trap() — exception, fault, and trap interface to
 * the BSD kernel, entered from assembly IDT gate stubs.  Kernel faults
 * resolve via onfault handlers ("copyfault"/"copyefault"), via the
 * return-to-user repair path ("kernelfault", which rebuilds the user
 * frame around a faulted 'iret' or segment-register load), via
 * uvm_fault() at "faultcommon", or panic ("we_re_toast"); user traps
 * deliver a signal ("trapsignal") and return through userret() at
 * "out".  The pagefault-retry/kpreempt loop and interrupt-disable
 * ordering near pmap_load() are load-bearing, so the code below is
 * left byte-identical (formatting preserved as found); only this
 * header comment was added.
 */
/* * trap(frame): exception, fault, and trap interface to BSD kernel. * * This common code is called from assembly language IDT gate entry routines * that prepare a suitable stack frame, and restore this frame after the * exception has been processed. Note that the effect is as if the arguments * were passed call by reference. */ void trap(struct trapframe *frame) { struct lwp *l = curlwp; struct proc *p; struct pcb *pcb; extern char fusubail[], kcopy_fault[], return_address_fault[], IDTVEC(osyscall)[]; struct trapframe *vframe; ksiginfo_t ksi; void *onfault; int type, error; uint32_t cr2; bool pfail; if (__predict_true(l != NULL)) { pcb = lwp_getpcb(l); p = l->l_proc; } else { /* * this can happen eg. on break points in early on boot. */ pcb = NULL; p = NULL; } type = frame->tf_trapno; #ifdef DEBUG if (trapdebug) { trap_print(frame, l); } #endif if (type != T_NMI && !KERNELMODE(frame->tf_cs, frame->tf_eflags)) { type |= T_USER; l->l_md.md_regs = frame; pcb->pcb_cr2 = 0; LWP_CACHE_CREDS(l, p); } #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in its per-cpu flags to indicate that it doesn't * want to fault. On returning from the the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. */ if ((type == T_PROTFLT || type == T_PAGEFLT) && dtrace_trap_func != NULL) { if ((*dtrace_trap_func)(frame, type)) { return; } } #endif switch (type) { case T_ASTFLT: /*FALLTHROUGH*/ default: we_re_toast: if (type == T_TRCTRAP) check_dr0(); else trap_print(frame, l); if (kdb_trap(type, 0, frame)) return; if (kgdb_trap(type, frame)) return; /* * If this is a breakpoint, don't panic if we're not connected. 
*/ if (type == T_BPTFLT && kgdb_disconnected()) { printf("kgdb: ignored %s\n", trap_type[type]); return; } panic("trap"); /*NOTREACHED*/ case T_PROTFLT: case T_SEGNPFLT: case T_ALIGNFLT: case T_TSSFLT: if (p == NULL) goto we_re_toast; /* Check for copyin/copyout fault. */ onfault = onfault_handler(pcb, frame); if (onfault != NULL) { copyefault: error = EFAULT; copyfault: frame->tf_eip = (uintptr_t)onfault; frame->tf_eax = error; return; } /* * Check for failure during return to user mode. * This can happen loading invalid values into the segment * registers, or during the 'iret' itself. * * We do this by looking at the instruction we faulted on. * The specific instructions we recognize only happen when * returning from a trap, syscall, or interrupt. */ kernelfault: KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; ksi.ksi_trap = type; switch (*(u_char *)frame->tf_eip) { case 0xcf: /* iret */ /* * The 'iret' instruction faulted, so we have the * 'user' registers saved after the kernel %eip:%cs:%fl * of the 'iret' and below that the user %eip:%cs:%fl * the 'iret' was processing. * We must delete the 3 words of kernel return address * from the stack to generate a normal stack frame * (eg for sending a SIGSEGV). */ vframe = (void *)((int *)frame + 3); if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags)) goto we_re_toast; memmove(vframe, frame, offsetof(struct trapframe, tf_eip)); /* Set the faulting address to the user %eip */ ksi.ksi_addr = (void *)vframe->tf_eip; break; case 0x8e: switch (*(uint32_t *)frame->tf_eip) { case 0x8e242c8e: /* mov (%esp,%gs), then */ case 0x0424648e: /* mov 0x4(%esp),%fs */ case 0x0824448e: /* mov 0x8(%esp),%es */ case 0x0c245c8e: /* mov 0xc(%esp),%ds */ break; default: goto we_re_toast; } /* * We faulted loading one if the user segment registers. * The stack frame containing the user registers is * still valid and is just below the %eip:%cs:%fl of * the kernel fault frame. 
*/ vframe = (void *)(&frame->tf_eflags + 1); if (KERNELMODE(vframe->tf_cs, vframe->tf_eflags)) goto we_re_toast; /* There is no valid address for the fault */ break; default: goto we_re_toast; } /* * We might have faulted trying to execute the * trampoline for a local (nested) signal handler. * Only generate SIGSEGV if the user %cs isn't changed. * (This is only strictly necessary in the 'iret' case.) */ if (!pmap_exec_fixup(&p->p_vmspace->vm_map, vframe, pcb)) { /* Save outer frame for any signal return */ l->l_md.md_regs = vframe; (*p->p_emul->e_trapsignal)(l, &ksi); } /* Return to user by reloading the user frame */ trap_return_fault_return(vframe); /* NOTREACHED */ case T_PROTFLT|T_USER: /* protection fault */ case T_TSSFLT|T_USER: case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: case T_ALIGNFLT|T_USER: KSI_INIT_TRAP(&ksi); ksi.ksi_addr = (void *)rcr2(); switch (type) { case T_SEGNPFLT|T_USER: case T_STKFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRERR; break; case T_TSSFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_OBJERR; break; case T_ALIGNFLT|T_USER: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRALN; break; case T_PROTFLT|T_USER: #ifdef VM86 if (frame->tf_eflags & PSL_VM) { vm86_gpfault(l, type & ~T_USER); goto out; } #endif /* * If pmap_exec_fixup does something, * let's retry the trap. */ if (pmap_exec_fixup(&p->p_vmspace->vm_map, frame, pcb)){ goto out; } ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; break; default: KASSERT(0); break; } goto trapsignal; case T_PRIVINFLT|T_USER: /* privileged instruction fault */ case T_FPOPFLT|T_USER: /* coprocessor operand fault */ KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGILL; ksi.ksi_addr = (void *) frame->tf_eip; switch (type) { case T_PRIVINFLT|T_USER: ksi.ksi_code = ILL_PRVOPC; break; case T_FPOPFLT|T_USER: ksi.ksi_code = ILL_COPROC; break; default: ksi.ksi_code = 0; break; } goto trapsignal; case T_ASTFLT|T_USER: /* Allow process switch. 
*/ //curcpu()->ci_data.cpu_nast++; if (l->l_pflag & LP_OWEUPC) { l->l_pflag &= ~LP_OWEUPC; ADDUPROF(l); } /* Allow a forced task switch. */ if (curcpu()->ci_want_resched) { preempt(); } goto out; case T_BOUND|T_USER: case T_OFLOW|T_USER: case T_DIVIDE|T_USER: KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGFPE; ksi.ksi_addr = (void *)frame->tf_eip; switch (type) { case T_BOUND|T_USER: ksi.ksi_code = FPE_FLTSUB; break; case T_OFLOW|T_USER: ksi.ksi_code = FPE_INTOVF; break; case T_DIVIDE|T_USER: ksi.ksi_code = FPE_INTDIV; break; default: ksi.ksi_code = 0; break; } goto trapsignal; case T_PAGEFLT: /* Allow page faults in kernel mode. */ if (__predict_false(l == NULL)) goto we_re_toast; /* * fusubail is used by [fs]uswintr() to prevent page faulting * from inside the profiling interrupt. */ onfault = pcb->pcb_onfault; if (onfault == fusubail || onfault == return_address_fault) { goto copyefault; } if (cpu_intr_p() || (l->l_pflag & LP_INTR) != 0) { goto we_re_toast; } cr2 = rcr2(); goto faultcommon; case T_PAGEFLT|T_USER: { /* page fault */ register vaddr_t va; register struct vmspace *vm; register struct vm_map *map; vm_prot_t ftype; extern struct vm_map *kernel_map; cr2 = rcr2(); faultcommon: vm = p->p_vmspace; if (__predict_false(vm == NULL)) { goto we_re_toast; } pcb->pcb_cr2 = cr2; va = trunc_page((vaddr_t)cr2); /* * It is only a kernel address space fault iff: * 1. (type & T_USER) == 0 and * 2. pcb_onfault not set or * 3. pcb_onfault set but supervisor space fault * The last can occur during an exec() copyin where the * argument space is lazy-allocated. */ if (type == T_PAGEFLT && va >= KERNBASE) map = kernel_map; else map = &vm->vm_map; if (frame->tf_err & PGEX_W) ftype = VM_PROT_WRITE; else if (frame->tf_err & PGEX_X) ftype = VM_PROT_EXECUTE; else ftype = VM_PROT_READ; #ifdef DIAGNOSTIC if (map == kernel_map && va == 0) { printf("trap: bad kernel access at %lx\n", va); goto we_re_toast; } #endif /* Fault the original page in. 
*/ onfault = pcb->pcb_onfault; pcb->pcb_onfault = NULL; error = uvm_fault(map, va, ftype); pcb->pcb_onfault = onfault; if (error == 0) { if (map != kernel_map && (void *)va >= vm->vm_maxsaddr) uvm_grow(p, va); pfail = false; while (type == T_PAGEFLT) { /* * we need to switch pmap now if we're in * the middle of copyin/out. * * but we don't need to do so for kcopy as * it never touch userspace. */ kpreempt_disable(); if (curcpu()->ci_want_pmapload) { onfault = onfault_handler(pcb, frame); if (onfault != kcopy_fault) { pmap_load(); } } /* * We need to keep the pmap loaded and * so avoid being preempted until back * into the copy functions. Disable * interrupts at the hardware level before * re-enabling preemption. Interrupts * will be re-enabled by 'iret' when * returning back out of the trap stub. * They'll only be re-enabled when the * program counter is once again in * the copy functions, and so visible * to cpu_kpreempt_exit(). */ #ifndef XEN x86_disable_intr(); #endif l->l_nopreempt--; if (l->l_nopreempt > 0 || !l->l_dopreempt || pfail) { return; } #ifndef XEN x86_enable_intr(); #endif /* * If preemption fails for some reason, * don't retry it. The conditions won't * change under our nose. */ pfail = kpreempt(0); } goto out; } if (type == T_PAGEFLT) { onfault = onfault_handler(pcb, frame); if (onfault != NULL) goto copyfault; printf("uvm_fault(%p, %#lx, %d) -> %#x\n", map, va, ftype, error); goto kernelfault; } KSI_INIT_TRAP(&ksi); ksi.ksi_trap = type & ~T_USER; ksi.ksi_addr = (void *)cr2; switch (error) { case EINVAL: ksi.ksi_signo = SIGBUS; ksi.ksi_code = BUS_ADRERR; break; case EACCES: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_ACCERR; error = EFAULT; break; case ENOMEM: ksi.ksi_signo = SIGKILL; printf("UVM: pid %d.%d (%s), uid %d killed: " "out of swap\n", p->p_pid, l->l_lid, p->p_comm, l->l_cred ? 
kauth_cred_geteuid(l->l_cred) : -1); break; default: ksi.ksi_signo = SIGSEGV; ksi.ksi_code = SEGV_MAPERR; break; } #ifdef TRAP_SIGDEBUG printf("pid %d.%d (%s): signal %d at eip %x addr %lx " "error %d\n", p->p_pid, l->l_lid, p->p_comm, ksi.ksi_signo, frame->tf_eip, va, error); #endif (*p->p_emul->e_trapsignal)(l, &ksi); break; } case T_TRCTRAP: /* Check whether they single-stepped into a lcall. */ if (frame->tf_eip == (int)IDTVEC(osyscall)) return; if (frame->tf_eip == (int)IDTVEC(osyscall) + 1) { frame->tf_eflags &= ~PSL_T; return; } goto we_re_toast; case T_BPTFLT|T_USER: /* bpt instruction fault */ case T_TRCTRAP|T_USER: /* trace trap */ /* * Don't go single-stepping into a RAS. */ if (p->p_raslist == NULL || (ras_lookup(p, (void *)frame->tf_eip) == (void *)-1)) { KSI_INIT_TRAP(&ksi); ksi.ksi_signo = SIGTRAP; ksi.ksi_trap = type & ~T_USER; if (type == (T_BPTFLT|T_USER)) ksi.ksi_code = TRAP_BRKPT; else ksi.ksi_code = TRAP_TRACE; ksi.ksi_addr = (void *)frame->tf_eip; (*p->p_emul->e_trapsignal)(l, &ksi); } break; case T_NMI: if (nmi_dispatch(frame)) return; /* NMI can be hooked up to a pushbutton for debugging */ if (kgdb_trap(type, frame)) return; if (kdb_trap(type, 0, frame)) return; /* machine/parity/power fail/"kitchen sink" faults */ #if NMCA > 0 mca_nmi(); #endif x86_nmi(); } if ((type & T_USER) == 0) return; out: userret(l); return; trapsignal: ksi.ksi_trap = type & ~T_USER; (*p->p_emul->e_trapsignal)(l, &ksi); userret(l); }
/*
 * m88110_trap: exception dispatcher for the MC88110 ("197") CPU.
 *
 * Decodes the trap vector in `type` (biased by T_USER when the trap came
 * from user mode), resolves MMU faults through uvm_fault(), emulates or
 * corrects what it can (modified-bit write faults, guarded bus accesses,
 * misaligned ld.d/st.d), and converts everything else into a signal for
 * the current process.  Kernel-mode faults that cannot be resolved end
 * in panictrap().  Returns to the caller (the locore trap stub); user
 * traps pass through userret() on the way out.
 */
void
m88110_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	u_quad_t sticks = 0;		/* process system time at entry */
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;		/* access type passed to uvm_fault() */
	int fault_type;			/* siginfo code delivered with sig */
	u_long fault_code;
	unsigned nss, fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
#endif
	int sig = 0;			/* signal to post, 0 = none */
	pt_entry_t *pte;
	extern struct vm_map *kernel_map;
	extern unsigned guarded_access_start;
	extern unsigned guarded_access_end;
	extern unsigned guarded_access_bad;
	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		sticks = p->p_sticks;
		/* Bias the type so user cases in the switch are distinct. */
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	/*
	 * The 88110 is expected to run with hardware table walking on;
	 * software TLB-miss traps indicate a misconfiguration.
	 */
	case T_197_READ+T_USER:
	case T_197_READ:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_197_WRITE+T_USER:
	case T_197_WRITE:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_197_INST+T_USER:
	case T_197_INST:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		db_enable_interrupt();
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		db_disable_interrupt();
		/* skip one instruction */
		if (frame->tf_exip & 1)
			frame->tf_exip = frame->tf_enip;
		else
			frame->tf_exip += 4;
		splx(s);
		return;
#if 0
	case T_ILLFLT:
		s = splhigh();
		db_enable_interrupt();
		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
		    "error fault", (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
#endif /* 0 */
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_NON_MASK:
	case T_NON_MASK+T_USER:
		(*md.interrupt_func)(T_NON_MASK, frame);
		return;
	case T_INT:
	case T_INT+T_USER:
		(*md.interrupt_func)(T_INT, frame);
		return;
	case T_MISALGNFLT:
		printf("kernel mode misaligned access exception @ 0x%08x\n",
		    frame->tf_exip);
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_DATAFLT:
		/* kernel mode data fault */
		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			/* Treat it as a user fault below. */
			type = T_DATAFLT + T_USER;
			goto m88110_user_fault;
		}
#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}
		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}
		vm = p->p_vmspace;
		map = kernel_map;
		if (frame->tf_dsr & CMMU_DSR_BE) {
			/*
			 * If it is a guarded access, bus error is OK.
			 */
			if ((frame->tf_exip & XIP_ADDR) >=
			      (unsigned)&guarded_access_start &&
			    (frame->tf_exip & XIP_ADDR) <=
			      (unsigned)&guarded_access_end) {
				/* Resume at the designated recovery point. */
				frame->tf_exip = (unsigned)&guarded_access_bad;
				return;
			}
		}
		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.  pcb_onfault is cleared around
			 * the call so a recursive fault panics instead of
			 * looping.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0)
				return;
		}
		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault */
			/*
			 * This could be a write protection fault or an
			 * exception to set the used and modified bits
			 * in the pte. Basically, if we got a write error,
			 * then we already have a pte entry that faulted
			 * in from a previous seg fault or page fault.
			 * Get the pte and check the status of the
			 * modified and valid bits to determine if this
			 * indeed a real write fault.  XXX smurph
			 */
			pte = pmap_pte(map->pmap, va);
#ifdef DEBUG
			if (pte == PT_ENTRY_NULL)
				panic("NULL pte on write fault??");
#endif
			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
				/* Set modified bit and try the write again. */
#ifdef TRAPDEBUG
				printf("Corrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				*pte |= PG_M;
				return;
#if 1	/* shouldn't happen */
			} else {
				/* must be a real wp fault */
#ifdef TRAPDEBUG
				printf("Uncorrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
					p->p_addr->u_pcb.pcb_onfault = 0;
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == 0)
					return;
#endif
			}
		}
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
m88110_user_fault:
		/* Shared path for user instruction and data faults. */
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}
		va = trunc_page((vaddr_t)fault_addr);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;
		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_DATAFLT+T_USER) {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == EACCES)
					result = EFAULT;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				pte = pmap_pte(vm_map_pmap(map), va);
#ifdef DEBUG
				if (pte == PT_ENTRY_NULL)
					panic("NULL pte on write fault??");
#endif
				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
					/*
					 * Set modified bit and try the
					 * write again.
					 */
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					*pte |= PG_M;
					/*
					 * invalidate ATCs to force
					 * table search
					 */
					set_dcmd(CMMU_DCMD_INV_UATC);
					return;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
					if (result == EACCES)
						result = EFAULT;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				panictrap(frame->tf_vector, frame);
			}
		} else {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == EACCES)
					result = EFAULT;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				panictrap(frame->tf_vector, frame);
			}
		}
		/* Grow the recorded stack size on successful stack faults. */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0) {
				nss = btoc(USRSTACK - va);/* XXX check this */
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			}
		}
		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}
		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
	case T_FPEIFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_SIGTRAP+T_USER:
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			unsigned instr;
			unsigned pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(unsigned));
#if 0
			printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
			    p->p_comm, p->p_pid, instr, pc,
			    p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
			/* check and see if we got here by accident */
#ifdef notyet
			if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}
#endif
			/* restore original instruction and clear BP */
			instr = p->p_md.md_ss_instr;
			if (instr != 0)
				ss_put_value(p, pc, instr, sizeof(instr));
			p->p_md.md_ss_addr = 0;
			p->p_md.md_ss_instr = 0;
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	case T_ASTFLT+T_USER:
		/* Asynchronous software trap: post pending profiling ticks. */
		uvmexp.softs++;
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		trapsignal(p, sig, fault_code, fault_type, sv);
	}

	userret(p, frame, sticks);
}
/*
 * m88100_trap: exception dispatcher for the MC88100 CPU.
 *
 * Like m88110_trap() but for the 88100/88200 CMMU pair, which reports
 * data faults through the dmt0/dma0/dpfsr pipeline registers.  Resolved
 * data faults must be completed by data_access_emulation(), which drains
 * the stalled data-unit pipeline; dmt0/dpfsr are then cleared so the
 * same transaction does not fault again.  Unresolvable user faults turn
 * into signals; unresolvable kernel faults end in panictrap().
 */
void
m88100_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	u_quad_t sticks = 0;		/* process system time at entry */
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;		/* access type passed to uvm_fault() */
	int fault_type, pbus_type;	/* siginfo code / P-bus fault code */
	u_long fault_code;
	unsigned nss, fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
#endif
	int sig = 0;			/* signal to post, 0 = none */
	extern struct vm_map *kernel_map;
	extern caddr_t guarded_access_start;
	extern caddr_t guarded_access_end;
	extern caddr_t guarded_access_bad;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		sticks = p->p_sticks;
		/* Bias the type so user cases in the switch are distinct. */
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		db_enable_interrupt();
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_INT:
	case T_INT+T_USER:
		/* This function pointer is set in machdep.c
		   It calls m188_ext_int or sbc_ext_int depending
		   on the value of brdtyp - smurph */
		(*md.interrupt_func)(T_INT, frame);
		return;
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;
	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			/* Treat it as a user fault below. */
			type = T_DATAFLT + T_USER;
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		vm = p->p_vmspace;
		map = kernel_map;

		/* Classify the fault from the data P-bus fault status reg. */
		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_BERROR:
			/*
			 * If it is a guarded access, bus error is OK.
			 */
			if ((frame->tf_sxip & XIP_ADDR) >=
			      (unsigned)&guarded_access_start &&
			    (frame->tf_sxip & XIP_ADDR) <=
			      (unsigned)&guarded_access_end) {
				/* Resume at the designated recovery point. */
				frame->tf_snip =
				    ((unsigned)&guarded_access_bad    ) | NIP_V;
				frame->tf_sfip =
				    ((unsigned)&guarded_access_bad + 4) | FIP_V;
				frame->tf_sxip = 0;
				/* We sort of resolved the fault ourselves
				 * because we know where it came from
				 * [guarded_access()]. But we must still think
				 * about the other possible transactions in
				 * dmt1 & dmt2.  Mark dmt0 so that
				 * data_access_emulation skips it.  XXX smurph
				 */
				frame->tf_dmt0 |= DMT_SKIP;
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				return;
			}
			break;
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((unsigned *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/* Clear pcb_onfault so a recursive fault panics. */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
user_fault:
		/* Shared path for user instruction and data faults. */
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			if (result == EACCES)
				result = EFAULT;
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* Grow the recorded stack size on successful stack faults. */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0) {
				nss = btoc(USRSTACK - va);/* XXX check this */
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			}
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_DATAFLT+T_USER) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			} else {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
	case T_FPEIFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_SIGTRAP+T_USER:
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			unsigned va;
			unsigned instr;
			unsigned pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(unsigned));
#if 0
			printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
			    p->p_comm, p->p_pid, instr, pc,
			    p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
			/* check and see if we got here by accident */
			if ((p->p_md.md_ss_addr != pc &&
			     p->p_md.md_ss_taken_addr != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}
			/* restore original instruction and clear BP */
			va = p->p_md.md_ss_addr;
			if (va != 0) {
				instr = p->p_md.md_ss_instr;
				ss_put_value(p, va, instr, sizeof(instr));
			}

			/* branch taken instruction */
			instr = p->p_md.md_ss_taken_instr;
			if (instr != 0) {
				va = p->p_md.md_ss_taken_addr;
				ss_put_value(p, va, instr, sizeof(instr));
			}
#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			p->p_md.md_ss_addr = 0;
			p->p_md.md_ss_instr = 0;
			p->p_md.md_ss_taken_addr = 0;
			p->p_md.md_ss_taken_instr = 0;
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	case T_ASTFLT+T_USER:
		/* Asynchronous software trap: post pending profiling ticks. */
		uvmexp.softs++;
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		trapsignal(p, sig, fault_code, fault_type, sv);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p, frame, sticks);
}
/*
 * trap: m68k exception dispatcher.
 *
 * Called from locore with the saved trapframe, the trap type, the
 * hardware fault code and the faulting virtual address `v`.  Kernel
 * faults covered by pcb_onfault are redirected via `copyfault`; MMU
 * faults are resolved through _pmap_fault()/uvm; everything else is
 * converted into a ksiginfo and posted to the current lwp.  User traps
 * leave through userret().
 */
/*ARGSUSED*/
void
trap(struct trapframe *tf, int type, u_int code, u_int v)
{
	struct lwp *l;
	struct proc *p;
	struct pcb *pcb;
	ksiginfo_t ksi;		/* signal info to deliver, if any */
	int tmp;
	int rv;			/* fault resolution result / errno */
	u_quad_t sticks;	/* process system time at entry */
	void *onfault;		/* saved copyin/copyout recovery pc */

	curcpu()->ci_data.cpu_ntrap++;
	l = curlwp;
	p = l->l_proc;
	pcb = lwp_getpcb(l);
	onfault = pcb->pcb_onfault;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_trap = type & ~T_USER;

	KASSERT(pcb != NULL);

	if (USERMODE(tf->tf_sr)) {
		/* Trap came from user mode; tag the type accordingly. */
		type |= T_USER;
		sticks = p->p_sticks;
		l->l_md.md_regs = tf->tf_regs;
		LWP_CACHE_CREDS(l, p);
	} else {
		sticks = 0;
		/* XXX: Detect trap recursion? */
	}

	switch (type) {
	default:
	dopanic:
		printf("trap type=0x%x, code=0x%x, v=0x%x\n", type, code, v);
		/*
		 * Let the kernel debugger see the trap frame that
		 * caused us to panic.  This is a convenience so
		 * one can see registers at the point of failure.
		 */
		tmp = splhigh();
#ifdef KGDB
		/* If connected, step or cont returns 1 */
		if (kgdb_trap(type, tf))
			goto kgdb_cont;
#endif
#ifdef DDB
		(void) kdb_trap(type, (db_regs_t *) tf);
#endif
#ifdef KGDB
	kgdb_cont:
#endif
		splx(tmp);
		if (panicstr) {
			/*
			 * Note: panic is smart enough to do:
			 *   boot(RB_AUTOBOOT | RB_NOSYNC, NULL)
			 * if we call it again.
			 */
			panic("trap during panic!");
		}
		regdump(tf, 128);
		type &= ~T_USER;
		if ((u_int)type < trap_types)
			panic(trap_type[type]);
		panic("trap type 0x%x", type);

	case T_BUSERR:		/* kernel bus error */
		if (onfault == NULL)
			goto dopanic;
		rv = EFAULT;
		/*FALLTHROUGH*/

	copyfault:
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
		tf->tf_stackadj = exframesize[tf->tf_format];
		tf->tf_format = tf->tf_vector = 0;
		tf->tf_pc = (int)onfault;
		tf->tf_regs[D0] = rv;	/* error code returned to the caller */
		goto done;

	case T_BUSERR|T_USER:	/* bus error */
	case T_ADDRERR|T_USER:	/* address error */
		ksi.ksi_addr = (void *)v;
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
			BUS_OBJERR : BUS_ADRERR;
		break;

	case T_COPERR:		/* kernel coprocessor violation */
	case T_FMTERR|T_USER:	/* do all RTE errors come in as T_USER? */
	case T_FMTERR:		/* ...just in case... */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", p->p_pid,
		    type==T_COPERR ? "coprocessor" : "format");
		type |= T_USER;

		/* Force default SIGILL disposition so the process dies. */
		mutex_enter(p->p_lock);
		SIGACTION(p, SIGILL).sa_handler = SIG_DFL;
		sigdelset(&p->p_sigctx.ps_sigignore, SIGILL);
		sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL);
		sigdelset(&l->l_sigmask, SIGILL);
		mutex_exit(p->p_lock);

		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *)(int)tf->tf_format;
		ksi.ksi_code = (type == T_COPERR) ?
			ILL_COPROC : ILL_ILLOPC;
		break;

	case T_COPERR|T_USER:	/* user coprocessor violation */
		/* What is a proper response here? */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;

	case T_FPERR|T_USER:	/* 68881 exceptions */
		/*
		 * We pass along the 68881 status register which locore stashed
		 * in code for us.
		 */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = fpsr2siginfocode(code);
		break;

	case T_FPEMULI:		/* FPU faults in supervisor mode */
	case T_FPEMULD:
		if (nofault)	/* Doing FPU probe? */
			longjmp(nofault);
		goto dopanic;

	case T_FPEMULI|T_USER:	/* unimplemented FP instruction */
	case T_FPEMULD|T_USER:	/* unimplemented FP data type */
#ifdef FPU_EMULATE
		if (fpu_emulate(tf, &pcb->pcb_fpregs, &ksi) == 0)
			;	/* XXX - Deal with tracing? (tf->tf_sr & PSL_T) */
#else
		uprintf("pid %d killed: no floating point support\n", p->p_pid);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
#endif
		break;

	case T_ILLINST|T_USER:	/* illegal instruction fault */
	case T_PRIVINST|T_USER:	/* privileged instruction fault */
		ksi.ksi_addr = (void *)(int)tf->tf_format;
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
			ILL_PRVOPC : ILL_ILLOPC;
		break;

	case T_ZERODIV|T_USER:	/* Divide by zero */
		ksi.ksi_code = FPE_FLTDIV;
		/* FALLTHROUGH: shares SIGFPE delivery below */
	case T_CHKINST|T_USER:	/* CHK instruction trap */
	case T_TRAPVINST|T_USER:	/* TRAPV instruction trap */
		ksi.ksi_addr = (void *)(int)tf->tf_format;
		ksi.ksi_signo = SIGFPE;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 *	HP-UX uses trap #1 for breakpoints,
	 *	NetBSD/m68k uses trap #2,
	 *	SUN 3.x uses trap #15,
	 *	DDB and KGDB uses trap #15 (for kernel breakpoints;
	 *	handled elsewhere).
	 *
	 * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 *
	 * XXX: We should never get kernel-mode T_TRAP15
	 * XXX: because locore.s now gives them special treatment.
	 */
	case T_TRAP15:		/* kernel breakpoint */
		tf->tf_sr &= ~PSL_T;
		goto done;

	case T_TRACE|T_USER:	/* user trace trap */
#ifdef COMPAT_SUNOS
		/*
		 * SunOS uses Trap #2 for a "CPU cache flush"
		 * Just flush the on-chip caches and return.
		 * XXX - Too bad NetBSD uses trap 2...
		 */
		if (p->p_emul == &emul_sunos) {
			/* get out fast */
			goto done;
		}
#endif
		/* FALLTHROUGH */
	case T_TRACE:		/* tracing a trap instruction */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		tf->tf_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_ASTFLT:		/* system async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* user async trap */
		astpending = 0;
		/* T_SSIR is not used on a Sun2. */
		if (l->l_pflag & LP_OWEUPC) {
			/* Post the profiling tick deferred by the clock. */
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		if (curcpu()->ci_want_resched)
			preempt();
		goto douret;

	case T_MMUFLT:		/* kernel mode page fault */
		/* Hacks to avoid calling VM code from debugger. */
#ifdef DDB
		if (db_recover != 0)
			goto dopanic;
#endif
#ifdef KGDB
		if (kgdb_recover != 0)
			goto dopanic;
#endif
		/*
		 * If we were doing profiling ticks or other user mode
		 * stuff from interrupt code, Just Say No.
		 */
		if (onfault == (void *)fubail || onfault == (void *)subail) {
#ifdef DEBUG
			if (mmudebug & MDB_CPFAULT) {
				printf("trap: copyfault fu/su bail\n");
				Debugger();
			}
#endif
			rv = EFAULT;
			goto copyfault;
		}
		/*FALLTHROUGH*/

	case T_MMUFLT|T_USER: {	/* page fault */
		vaddr_t va;
		struct vmspace *vm = p->p_vmspace;
		struct vm_map *map;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
			printf("trap: T_MMUFLT pid=%d, code=0x%x, v=0x%x, pc=0x%x, sr=0x%x\n",
			    p->p_pid, code, v, tf->tf_pc, tf->tf_sr);
#endif

		/*
		 * It is only a kernel address space fault iff:
		 * 	1. (type & T_USER) == 0  and: (2 or 3)
		 * 	2. pcb_onfault not set or
		 * 	3. pcb_onfault set but supervisor space data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		map = &vm->vm_map;
		if ((type & T_USER) == 0) {
			/* supervisor mode fault */
			if (onfault == NULL || KDFAULT(code))
				map = kernel_map;
		}

		if (WRFAULT(code))
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
		va = m68k_trunc_page((vaddr_t)v);

		/*
		 * Need to resolve the fault.
		 *
		 * We give the pmap code a chance to resolve faults by
		 * reloading translations that it was forced to unload.
		 * This function does that, and calls vm_fault if it
		 * could not resolve the fault by reloading the MMU.
		 * This function may also, for example, disallow any
		 * faults in the kernel text segment, etc.
		 */
		pcb->pcb_onfault = NULL;	/* no recursion through uvm */
		rv = _pmap_fault(map, va, ftype);
		pcb->pcb_onfault = onfault;

#ifdef DEBUG
		if (rv && MDB_ISPID(p->p_pid)) {
			printf("vm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			if (mmudebug & MDB_WBFAILED)
				Debugger();
		}
#endif /* DEBUG */

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (rv == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);
			if ((type & T_USER) == 0 &&
			    ucas_ras_check(tf)) {
				return;
			}
			goto finish;
		}
		if (rv == EACCES) {
			ksi.ksi_code = SEGV_ACCERR;
			rv = EFAULT;
		} else
			ksi.ksi_code = SEGV_MAPERR;
		if ((type & T_USER) == 0) {
			/* supervisor mode fault */
			if (onfault) {
#ifdef DEBUG
				if (mmudebug & MDB_CPFAULT) {
					printf("trap: copyfault pcb_onfault\n");
					Debugger();
				}
#endif
				goto copyfault;
			}
			printf("vm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			goto dopanic;
		}
		ksi.ksi_addr = (void *)v;
		switch (rv) {
		case ENOMEM:
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
			break;
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		break;
		} /* T_MMUFLT */
	} /* switch */

finish:
	/* If trap was from supervisor mode, just return. */
	if ((type & T_USER) == 0)
		goto done;
	/* Post a signal if necessary. */
	if (ksi.ksi_signo)
		trapsignal(l, &ksi);
douret:
	userret(l, tf, sticks);

done:;
	/* XXX: Detect trap recursion? */
}
/*
 * m88100_trap: machine-dependent trap handler for the MC88100 CPU.
 *
 * Dispatches on the trap `type` (biased by T_USER when the trap came from
 * user mode), resolves page faults via uvm_fault(), emulates pending data
 * accesses to drain the 88100 data unit pipeline, and posts signals for
 * user-mode exceptions before returning via userret().
 *
 * NOTE(review): `frame` is both read and written here (snip/sfip/sxip and
 * dmt0/dpfsr/ipfsr are rewritten to restart or skip faulting accesses) —
 * callers must pass the live exception frame.
 */
void
m88100_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	unsigned fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;
	extern struct vm_map *kernel_map;

	uvmexp.traps++;
	/* Early traps may arrive before curproc is set; fall back to proc0. */
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		/* Bias the trap type so user/kernel cases dispatch apart. */
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	/* Default fault address: the saved execution instruction pointer. */
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		/* Enter the kernel debugger with interrupts masked. */
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */

	case T_ILLFLT:
		/* Illegal instruction in kernel mode is fatal. */
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;

	case T_INT:
	case T_INT+T_USER:
		/* Hardware interrupt: track nesting depth around dispatch. */
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_INT, frame);
		curcpu()->ci_intrdepth--;
		return;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;

	case T_INSTFLT:
		/*
		 * Kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;

	case T_DATAFLT:
		/* Kernel mode data fault. */

		/*
		 * Data fault on a user address (DAS clear)? If so, handle
		 * it as a user fault — this covers copyin/copyout.
		 */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			type = T_DATAFLT + T_USER;
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		/* Derive the access type from the data transaction register. */
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((unsigned *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/*
			 * Clear pcb_onfault around uvm_fault() so a nested
			 * fault does not take the copy-fault shortcut.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n",
		    pbus_type, pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		/* Unresolvable kernel data fault: give up. */
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */

	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_PROC_LOCK(p);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		/* Same pcb_onfault dance as the kernel-mode case above. */
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/*
		 * Stack region: grow the stack on success; a protection
		 * failure here means access beyond the current limit and
		 * is reported as EFAULT.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			/* Redirect execution to the registered recovery PC. */
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_DATAFLT+T_USER) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			} else {
				/*
				 * Back up SXIP, SNIP,
				 * clearing the Error bit.
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;

	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;

	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		/* Debugger traps with no debugger compiled in: SIGILL. */
		sig = SIGILL;
		break;

	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;

	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;

	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed. When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			    p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging. When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		/* Asynchronous software trap: profiling and reschedule. */
		uvmexp.softs++;
		p->p_md.md_astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_PROC_LOCK(p);
			ADDUPROF(p);
			KERNEL_PROC_UNLOCK(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
/*
 * trap: m68k machine-dependent trap handler.
 *
 * Handles bus/address errors, illegal/privileged instructions, FP traps,
 * trace traps, ASTs/software interrupts, and MMU page faults. User-mode
 * traps are turned into signals via trapsignal(); unrecoverable kernel
 * traps fall into `dopanic`. Kernel faults covered by pcb_onfault resume
 * at the registered recovery address via `copyfault`.
 */
/*ARGSUSED*/
void
trap(struct frame *fp, int type, u_int code, u_int v)
{
	extern char fubail[], subail[];
	struct lwp *l;
	struct proc *p;
	ksiginfo_t ksi;
	int s;
	u_quad_t sticks;

	uvmexp.traps++;
	l = curlwp;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_trap = type & ~T_USER;

	p = l->l_proc;

	if (USERMODE(fp->f_sr)) {
		type |= T_USER;
		/* Snapshot system time for profiling in userret(). */
		sticks = p->p_sticks;
		l->l_md.md_regs = fp->f_regs;
		LWP_CACHE_CREDS(l, p);
	} else
		sticks = 0;

#ifdef DIAGNOSTIC
	if (l->l_addr == NULL)
		panic("trap: type 0x%x, code 0x%x, v 0x%x -- no pcb",
		    type, code, v);
#endif

	switch (type) {
	default:
	dopanic:
		printf("trap type %d, code = 0x%x, v = 0x%x\n", type, code, v);
		printf("%s program counter = 0x%x\n",
		    (type & T_USER) ? "user" : "kernel", fp->f_pc);
		/*
		 * Let the kernel debugger see the trap frame that
		 * caused us to panic. This is a convenience so
		 * one can see registers at the point of failure.
		 */
		s = splhigh();
#ifdef KGDB
		/* If connected, step or cont returns 1 */
		if (kgdb_trap(type, (db_regs_t *)fp))
			goto kgdb_cont;
#endif
#ifdef DDB
		(void)kdb_trap(type, (db_regs_t *)fp);
#endif
#ifdef KGDB
	kgdb_cont:
#endif
		splx(s);
		if (panicstr) {
			printf("trap during panic!\n");
#ifdef DEBUG
			/* XXX should be a machine-dependent hook */
			printf("(press a key)\n");
			(void)cngetc();
#endif
		}
		regdump((struct trapframe *)fp, 128);
		type &= ~T_USER;
		if ((u_int)type < trap_types)
			panic(trap_type[type]);
		panic("trap");

	case T_BUSERR:		/* Kernel bus error */
		if (!l->l_addr->u_pcb.pcb_onfault)
			goto dopanic;
		/*
		 * If we have arranged to catch this fault in any of the
		 * copy to/from user space routines, set PC to return to
		 * indicated location and set flag informing buserror code
		 * that it may need to clean up stack frame.
		 */
	copyfault:
		fp->f_stackadj = exframesize[fp->f_format];
		fp->f_format = fp->f_vector = 0;
		fp->f_pc = (int)l->l_addr->u_pcb.pcb_onfault;
		return;

	case T_BUSERR|T_USER:	/* Bus error */
	case T_ADDRERR|T_USER:	/* Address error */
		ksi.ksi_addr = (void *)v;
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = (type == (T_BUSERR|T_USER)) ?
		    BUS_OBJERR : BUS_ADRERR;
		break;

	case T_ILLINST|T_USER:	/* Illegal instruction fault */
	case T_PRIVINST|T_USER:	/* Privileged instruction fault */
		ksi.ksi_addr = (void *)(int)fp->f_format;
		/* XXX was ILL_PRIVIN_FAULT */
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = (type == (T_PRIVINST|T_USER)) ?
		    ILL_PRVOPC : ILL_ILLOPC;
		break;

	/*
	 * divde by zero, CHK/TRAPV inst
	 */
	case T_ZERODIV|T_USER:	/* Divide by zero trap */
		ksi.ksi_code = FPE_FLTDIV;
		/* FALLTHROUGH: shares the SIGFPE setup below */
	case T_CHKINST|T_USER:	/* CHK instruction trap */
	case T_TRAPVINST|T_USER: /* TRAPV instruction trap */
		ksi.ksi_addr = (void *)(int)fp->f_format;
		ksi.ksi_signo = SIGFPE;
		break;

	/*
	 * User coprocessor violation
	 */
	case T_COPERR|T_USER:
		/* XXX What is a proper response here? */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_code = FPE_FLTINV;
		break;

	/*
	 * 6888x exceptions
	 */
	case T_FPERR|T_USER:
		/*
		 * We pass along the 68881 status register which locore
		 * stashed in code for us. Note that there is a
		 * possibility that the bit pattern of this register
		 * will conflict with one of the FPE_* codes defined
		 * in signal.h. Fortunately for us, the only such
		 * codes we use are all in the range 1-7 and the low
		 * 3 bits of the status register are defined as 0 so
		 * there is no clash.
		 */
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_addr = (void *)code;
		break;

	/*
	 * FPU faults in supervisor mode.
	 */
	case T_ILLINST:	/* fnop generates this, apparently. */
	case T_FPEMULI:
	case T_FPEMULD:
	{
		extern label_t *nofault;

		if (nofault)	/* If we're probing. */
			longjmp(nofault);
		if (type == T_ILLINST)
			printf("Kernel Illegal Instruction trap.\n");
		else
			printf("Kernel FPU trap.\n");
		goto dopanic;
	}

	/*
	 * Unimplemented FPU instructions/datatypes.
	 */
	case T_FPEMULI|T_USER:
	case T_FPEMULD|T_USER:
#ifdef FPU_EMULATE
		if (fpu_emulate(fp, &l->l_addr->u_pcb.pcb_fpregs, &ksi) == 0)
			;	/* XXX - Deal with tracing? (fp->f_sr & PSL_T) */
#else
		uprintf("pid %d killed: no floating point support.\n",
		    p->p_pid);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
#endif
		break;

	case T_COPERR:		/* Kernel coprocessor violation */
	case T_FMTERR:		/* Kernel format error */
	case T_FMTERR|T_USER:	/* User format error */
		/*
		 * The user has most likely trashed the RTE or FP state info
		 * in the stack frame of a signal handler.
		 */
		printf("pid %d: kernel %s exception\n", p->p_pid,
		    type==T_COPERR ? "coprocessor" : "format");
		type |= T_USER;
		/* Force default SIGILL disposition so the process dies. */
		mutex_enter(p->p_lock);
		SIGACTION(p, SIGILL).sa_handler = SIG_DFL;
		sigdelset(&p->p_sigctx.ps_sigignore, SIGILL);
		sigdelset(&p->p_sigctx.ps_sigcatch, SIGILL);
		sigdelset(&l->l_sigmask, SIGILL);
		mutex_exit(p->p_lock);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_addr = (void *)(int)fp->f_format;
		/* XXX was ILL_RESAD_FAULT */
		ksi.ksi_code = (type == T_COPERR) ?
		    ILL_COPROC : ILL_ILLOPC;
		break;

	/*
	 * XXX: Trace traps are a nightmare.
	 *
	 * HP-UX uses trap #1 for breakpoints,
	 * NetBSD/m68k uses trap #2,
	 * SUN 3.x uses trap #15,
	 * DDB and KGDB uses trap #15 (for kernel breakpoints;
	 * handled elsewhere).
	 *
	 * NetBSD and HP-UX traps both get mapped by locore.s into T_TRACE.
	 * SUN 3.x traps get passed through as T_TRAP15 and are not really
	 * supported yet.
	 *
	 * XXX: We should never get kernel-mode T_TRAP15 because
	 * XXX: locore.s now gives it special treatment.
	 */
	case T_TRAP15:		/* SUN trace trap */
#ifdef DEBUG
		printf("unexpected kernel trace trap, type = %d\n", type);
		printf("program counter = 0x%x\n", fp->f_pc);
#endif
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_TRACE|T_USER:	/* user trace trap */
#ifdef COMPAT_SUNOS
		/*
		 * SunOS uses Trap #2 for a "CPU cache flush".
		 * Just flush the on-chip caches and return.
		 */
		if (p->p_emul == &emul_sunos) {
			ICIA();
			DCIU();
			return;
		}
#endif
		/* FALLTHROUGH */
	case T_TRACE:		/* tracing a trap instruction */
	case T_TRAP15|T_USER:	/* SUN user trace trap */
		fp->f_sr &= ~PSL_T;
		ksi.ksi_signo = SIGTRAP;
		break;

	case T_ASTFLT:		/* System async trap, cannot happen */
		goto dopanic;

	case T_ASTFLT|T_USER:	/* User async trap. */
		astpending = 0;
		/*
		 * We check for software interrupts first. This is because
		 * they are at a higher level than ASTs, and on a VAX would
		 * interrupt the AST. We assume that if we are processing
		 * an AST that we must be at IPL0 so we don't bother to
		 * check. Note that we ensure that we are at least at SIR
		 * IPL while processing the SIR.
		 */
		spl1();
		/* fall into... */
	case T_SSIR:		/* Software interrupt */
	case T_SSIR|T_USER:
		/*
		 * If this was not an AST trap, we are all done.
		 */
		if (type != (T_ASTFLT|T_USER)) {
			uvmexp.traps--;
			return;
		}
		spl0();
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}
		if (curcpu()->ci_want_resched)
			preempt();
		goto out;

	case T_MMUFLT:		/* Kernel mode page fault */
		/*
		 * If we were doing profiling ticks or other user mode
		 * stuff from interrupt code, Just Say No.
		 */
		if (l->l_addr->u_pcb.pcb_onfault == fubail ||
		    l->l_addr->u_pcb.pcb_onfault == subail)
			goto copyfault;
		/* fall into... */

	case T_MMUFLT|T_USER:	/* page fault */
	    {
		vaddr_t va;
		struct vmspace *vm = p->p_vmspace;
		struct vm_map *map;
		int rv;
		vm_prot_t ftype;
		extern struct vm_map *kernel_map;

#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid))
		printf("trap: T_MMUFLT pid=%d, code=%x, v=%x, pc=%x, sr=%x\n",
		    p->p_pid, code, v, fp->f_pc, fp->f_sr);
#endif
		/*
		 * It is only a kernel address space fault iff:
		 * 1. (type & T_USER) == 0 and
		 * 2. pcb_onfault not set or
		 * 3. pcb_onfault set but supervisor data fault
		 * The last can occur during an exec() copyin where the
		 * argument space is lazy-allocated.
		 */
		if (type == T_MMUFLT &&
		    (!l->l_addr->u_pcb.pcb_onfault || KDFAULT(code)))
			map = kernel_map;
		else {
			map = vm ? &vm->vm_map : kernel_map;
			if ((l->l_flag & LW_SA)
			    && (~l->l_pflag & LP_SA_NOBLOCK)) {
				l->l_savp->savp_faultaddr = (vaddr_t)v;
				l->l_pflag |= LP_SA_PAGEFAULT;
			}
		}

		if (WRFAULT(code))
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;

		va = trunc_page((vaddr_t)v);

#ifdef DEBUG
		if (map == kernel_map && va == 0) {
			printf("trap: bad kernel access at %x\n", v);
			goto dopanic;
		}
#endif
		rv = uvm_fault(map, va, ftype);
#ifdef DEBUG
		if (rv && MDB_ISPID(p->p_pid))
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
#endif
		/*
		 * If this was a stack access, we keep track of the maximum
		 * accessed stack size. Also, if vm_fault gets a protection
		 * failure, it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (rv == 0) {
			if (map != kernel_map && (void *)va >= vm->vm_maxsaddr)
				uvm_grow(p, va);

			if (type == T_MMUFLT) {
#if defined(M68040)
				/* Complete any pending 68040 writebacks. */
				if (mmutype == MMU_68040)
					(void)writeback(fp, 1);
#endif
				return;
			}
			l->l_pflag &= ~LP_SA_PAGEFAULT;
			goto out;
		}
		if (rv == EACCES) {
			ksi.ksi_code = SEGV_ACCERR;
			rv = EFAULT;
		} else
			ksi.ksi_code = SEGV_MAPERR;
		if (type == T_MMUFLT) {
			if (l->l_addr->u_pcb.pcb_onfault)
				goto copyfault;
			printf("uvm_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
			    map, va, ftype, rv);
			printf(" type %x, code [mmu,,ssw]: %x\n",
			    type, code);
			goto dopanic;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		ksi.ksi_addr = (void *)v;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_pid, p->p_comm,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		} else {
			ksi.ksi_signo = SIGSEGV;
		}
		break;
	    }
	}
	if (ksi.ksi_signo)
		trapsignal(l, &ksi);
	if ((type & T_USER) == 0)
		return;
out:
	userret(l, fp, sticks, v, 1);
}