void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

/*
 * Handle a single exception.
 */
void
itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
    int type)
{
	int i;
	unsigned ucode = 0;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;
	struct pcb *pcb;

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				pcb = &p->p_addr->u_pcb;
				goto kernel_fault;
			}
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
			    entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			pcb = &p->p_addr->u_pcb;
			goto fault_common_no_miss;
		}
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET),
		    entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		return;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = pcb->pcb_onfault;
			pcb->pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			pcb->pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (pcb->pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
		pcb = &p->p_addr->u_pcb;

fault_common:
#ifdef CPU_R4000
		if (r4000_errata != 0) {
			if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
				return;
		}
#endif

fault_common_no_miss:
#ifdef CPU_R4000
		if (r4000_errata != 0) {
			eop_cleanup(trapframe, p);
		}
#endif

	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, va, 0, ftype);
		pcb->pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0)
			return;
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}

		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
		break;

	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		register_t tpc;
		int numsys, error;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		atomic_add_int(&uvmexp.syscalls, 1);

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc;	/* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_p->ps_emul->e_sysent;
		numsys = p->p_p->ps_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
		case SYS___syscall:
			/*
			 * Code is first argument, followed by actual args.
			 * __syscall provides the code as a quad to maintain
			 * proper alignment of 64-bit arguments on 32-bit
			 * platforms, which doesn't change anything here.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				if (i > 7)
					if ((error = copyin((void *)locr0->sp,
					    &args.i[7], sizeof(register_t))))
						goto bad;
			}
			break;
		default:
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}

		rval[0] = 0;
		rval[1] = locr0->v1;

#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

		error = mi_syscall(p, code, callp, args.i, rval);

		switch (error) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
		bad:
			locr0->v0 = error;
			locr0->a3 = 1;
		}

		mi_syscall_return(p, code, error, rval);
		return;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    (void *)p->p_md.md_ss_addr,
				    p->p_md.md_ss_instr);
#endif
				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				return;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}

		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;

		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			return;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
			    trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			return;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		return;

	case T_FPE:
		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
		    trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		return;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		pcb = &p->p_addr->u_pcb;
		if ((onfault = pcb->pcb_onfault) != 0) {
			pcb->pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap", printf);
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    (void *)trapframe->pc, (void *)trapframe->ra,
		    (void *)trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe :
		    p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}

#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
}
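
/*
 * MC88110 trap handler: dispatches MMU instruction/data faults,
 * alignment, arithmetic and FPU exceptions, and breakpoint/single-step
 * traps, resolving faults through uvm_fault() where possible and
 * delivering a signal to the process otherwise.
 */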
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t *)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t *)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t *)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */

	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address.  I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2, 1f
		 *	or	r25, r0, r22
		 *	bsr	somewhere
		 *	or	r25, r0, r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}

			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;

	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va,
				    VM_FAULT_INVALID, ftype);
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va,
				    VM_FAULT_INVALID, ftype);
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va,
					    VM_FAULT_INVALID, ftype);
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_UNLOCK();

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;

	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;

	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;

	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;

	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;

	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;

	case T_FPEPFLT+T_USER:
		m88110_fpu_exception(frame);
		goto userexit;

	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;

	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently).  When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
	}

userexit:
	userret(p);
}