void
abort_handler(struct trapframe *tf, int type)
{
	struct vm_map *map;
	struct pcb *pcb;
	struct thread *td;
	u_int user, far, fsr;
	vm_prot_t ftype;
	void *onfault;
	vm_offset_t va;
	int error = 0;
	struct ksig ksig;
	struct proc *p;

	if (type == 1)
		return (prefetch_abort_handler(tf));

	/* Grab FAR/FSR before enabling interrupts */
	far = cpu_faultaddress();
	fsr = cpu_faultstatus();
#if 0
	printf("data abort: fault address=%p (from pc=%p lr=%p)\n",
	    (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr);
#endif

	/* Update vmmeter statistics */
#if 0
	vmexp.traps++;
#endif

	td = curthread;
	p = td->td_proc;

	PCPU_INC(cnt.v_trap);

	/* Data abort came from user mode? */
	user = TRAP_USERMODE(tf);

	if (user) {
		td->td_pticks = 0;
		td->td_frame = tf;
		if (td->td_cowgen != td->td_proc->p_cowgen)
			thread_cow_update(td);
	}

	/* Grab the current pcb */
	pcb = td->td_pcb;

	/* Re-enable interrupts if they were enabled previously */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    td, &ksig)) {
			goto do_trapsignal;
		}
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 * XXX: It would be nice to be able to support Thumb at some point.
	 */
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			ksig.signb = SIGILL;
			ksig.code = 0;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\ndata_abort_fault: Misaligned Kernel-mode "
		    "Program Counter\n");
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	va = trunc_page((vm_offset_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0  and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
	if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	     (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE) ==
		    PSR_UND32_MODE)) {
			/*
			 * Force exit via userret()
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = 1;
			ksig.signb = SIGSEGV;
			ksig.code = 0;
			goto do_trapsignal;
		}
	} else {
		map = &td->td_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped as R or R/W.
	 * On armv4, the fault status register does not indicate whether
	 * the access was a read or a write. We know that a permission
	 * fault can only be the result of a write to a read-only location,
	 * so we can deal with those quickly. Otherwise we need to
	 * disassemble the faulting instruction to determine if it was a
	 * write.
	 */
	if (IS_PERMISSION_FAULT(fsr))
		ftype = VM_PROT_WRITE;
	else {
		u_int insn = ReadWord(tf->tf_pc);

		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
		    ((insn & 0x0a100000) == 0x08000000)) {	/* STM/CDT */
			ftype = VM_PROT_WRITE;
		} else {
			if ((insn & 0x0fb00ff0) == 0x01000090)	/* SWP */
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
	}

	/*
	 * See if the fault is as a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif
	if (td->td_critnest != 0 ||
	    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
	    "Kernel page fault") != 0)
		goto fatal_pagefault;

	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    user)) {
		goto out;
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	pcb->pcb_onfault = onfault;

	if (__predict_true(error == 0))
		goto out;

fatal_pagefault:
	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);

out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(td, tf);
}
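/*
 * The write-detection masks used above are pure bit tests, so they can be
 * exercised outside the kernel. The following is a minimal, hypothetical
 * userland sketch (the helper name arm_fault_prot and the hand-assembled
 * sample encodings are ours, not part of the handler) that applies the same
 * masks to a few ARM instructions; it assumes only the standard C library.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 1 for a store, 2 for SWP (read+write), 0 for a load. */
static int
arm_fault_prot(uint32_t insn)
{
	if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
	    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
	    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
		return (1);
	if ((insn & 0x0fb00ff0) == 0x01000090)		/* SWP */
		return (2);
	return (0);
}

int
main(void)
{
	assert(arm_fault_prot(0xe5810000) == 1);	/* str r0, [r1]   -> write        */
	assert(arm_fault_prot(0xe5910000) == 0);	/* ldr r0, [r1]   -> read         */
	assert(arm_fault_prot(0xe8810005) == 1);	/* stmia r1, {r0,r2} -> write     */
	assert(arm_fault_prot(0xe1010092) == 2);	/* swp r0, r2, [r1] -> read+write */
	printf("mask checks passed\n");
	return (0);
}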
void
data_abort_handler(trapframe_t *tf)
{
	struct vm_map *map;
	struct lwp * const l = curlwp;
	struct cpu_info * const ci = curcpu();
	u_int far, fsr;
	vm_prot_t ftype;
	void *onfault;
	vaddr_t va;
	int error;
	ksiginfo_t ksi;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

	/* Grab FAR/FSR before enabling interrupts */
	far = cpu_faultaddress();
	fsr = cpu_faultstatus();

	/* Update vmmeter statistics */
	ci->ci_data.cpu_ntrap++;

	/* Re-enable interrupts if they were enabled previously */
	KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0);
	if (__predict_true((tf->tf_spsr & IF32_bits) != IF32_bits))
		restore_interrupts(tf->tf_spsr & IF32_bits);

	/* Get the current lwp structure */

	UVMHIST_LOG(maphist, " (l=%#x, far=%#x, fsr=%#x",
	    l, far, fsr, 0);
	UVMHIST_LOG(maphist, " tf=%#x, pc=%#x)",
	    tf, tf->tf_pc, 0, 0);

	/* Data abort came from user mode? */
	bool user = (TRAP_USERMODE(tf) != 0);
	if (user)
		LWP_CACHE_CREDS(l, l->l_proc);

	/* Grab the current pcb */
	struct pcb * const pcb = lwp_getpcb(l);

	curcpu()->ci_abt_evs[fsr & FAULT_TYPE_MASK].ev_count++;

	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
#ifdef DIAGNOSTIC
		printf("%s: data_aborts fsr=0x%x far=0x%x\n",
		    __func__, fsr, far);
#endif
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    l, &ksi))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/* fusubailout is used by [fs]uswintr to avoid page faulting */
	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (intptr_t) pcb->pcb_onfault;
		return;
	}

	if (user) {
		lwp_settrapframe(l, tf);
	}

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 */
#ifdef THUMB_CODE
	/*
	 * XXX: It would be nice to be able to support Thumb in the kernel
	 * at some point.
	 */
	if (__predict_false(!user && (tf->tf_pc & 3) != 0)) {
		printf("\n%s: Misaligned Kernel-mode Program Counter\n",
		    __func__);
		dab_fatal(tf, fsr, far, l, NULL);
	}
#else
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGILL;
			ksi.ksi_code = ILL_ILLOPC;
			ksi.ksi_addr = (uint32_t *)(intptr_t) far;
			ksi.ksi_trap = fsr;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\n%s: Misaligned Kernel-mode Program Counter\n",
		    __func__);
		dab_fatal(tf, fsr, far, l, NULL);
	}
#endif

	/* See if the CPU state needs to be fixed up */
	switch (data_abort_fixup(tf, fsr, far, l)) {
	case ABORT_FIXUP_RETURN:
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
		ksi.ksi_addr = (uint32_t *)(intptr_t) far;
		ksi.ksi_trap = fsr;
		goto do_trapsignal;
	default:
		break;
	}

	va = trunc_page((vaddr_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0  and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
	if (!user && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	     (read_insn(tf->tf_pc, false) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE) ==
		    PSR_UND32_MODE)) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			ksi.ksi_addr = (uint32_t *)(intptr_t) far;
			ksi.ksi_trap = fsr;

			/*
			 * Force exit via userret()
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = true;
			goto do_trapsignal;
		}
	} else {
		map = &l->l_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped as R or R/W.
	 * Before ARMv6, the MMU did not give us the info as to whether the
	 * fault was caused by a read or a write.
	 *
	 * However, we know that a permission fault can only be the result of
	 * a write to a read-only location, so we can deal with those quickly.
	 *
	 * Otherwise we need to disassemble the instruction responsible to
	 * determine if it was a write.
	 */
	if (CPU_IS_ARMV6_P() || CPU_IS_ARMV7_P()) {
		ftype = (fsr & FAULT_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	} else if (IS_PERMISSION_FAULT(fsr)) {
		ftype = VM_PROT_WRITE;
	} else {
#ifdef THUMB_CODE
		/* Fast track the ARM case. */
		if (__predict_false(tf->tf_spsr & PSR_T_bit)) {
			u_int insn = read_thumb_insn(tf->tf_pc, user);
			u_int insn_f8 = insn & 0xf800;
			u_int insn_fe = insn & 0xfe00;

			if (insn_f8 == 0x6000 ||	/* STR(1) */
			    insn_f8 == 0x7000 ||	/* STRB(1) */
			    insn_f8 == 0x8000 ||	/* STRH(1) */
			    insn_f8 == 0x9000 ||	/* STR(3) */
			    insn_f8 == 0xc000 ||	/* STM */
			    insn_fe == 0x5000 ||	/* STR(2) */
			    insn_fe == 0x5200 ||	/* STRH(2) */
			    insn_fe == 0x5400)		/* STRB(2) */
				ftype = VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		} else
#endif
		{
			u_int insn = read_insn(tf->tf_pc, user);

			if (((insn & 0x0c100000) == 0x04000000) || /* STR[B] */
			    ((insn & 0x0e1000b0) == 0x000000b0) || /* STR[HD]*/
			    ((insn & 0x0a100000) == 0x08000000) || /* STM/CDT*/
			    ((insn & 0x0f9000f0) == 0x01800090))   /* STREX[BDH] */
				ftype = VM_PROT_WRITE;
			else if ((insn & 0x0fb00ff0) == 0x01000090)/* SWP */
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
	}

	/*
	 * See if the fault is as a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif
	if (pmap_fault_fixup(map->pmap, va, ftype, user)) {
		UVMHIST_LOG(maphist, " <- ref/mod emul", 0, 0, 0, 0);
		goto out;
	}

	if (__predict_false(curcpu()->ci_intr_depth > 0)) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = EINVAL;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}
		printf("\nNon-emulated page fault with intr_depth > 0\n");
		dab_fatal(tf, fsr, far, l, NULL);
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	error = uvm_fault(map, va, ftype);
	pcb->pcb_onfault = onfault;

	if (__predict_true(error == 0)) {
		if (user)
			uvm_grow(l->l_proc, va); /* Record any stack growth */
		else
			ucas_ras_check(tf);
		UVMHIST_LOG(maphist, " <- uvm", 0, 0, 0, 0);
		goto out;
	}

	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nuvm_fault(%p, %lx, %x) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, l, NULL);
	}

	KSI_INIT_TRAP(&ksi);

	if (error == ENOMEM) {
		printf("UVM: pid %d (%s), uid %d killed: "
		    "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm,
		    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
		ksi.ksi_signo = SIGKILL;
	} else
		ksi.ksi_signo = SIGSEGV;

	ksi.ksi_code = (error == EACCES) ? SEGV_ACCERR : SEGV_MAPERR;
	ksi.ksi_addr = (uint32_t *)(intptr_t) far;
	ksi.ksi_trap = fsr;

	UVMHIST_LOG(maphist, " <- error (%d)", error, 0, 0, 0);

do_trapsignal:
	call_trapsignal(l, tf, &ksi);
out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(l);
}
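/*
 * Under THUMB_CODE, data_abort_handler() classifies 16-bit Thumb encodings
 * with the f8/fe masks shown above. The analogous stand-alone check is
 * sketched below; the helper name thumb_insn_is_store and the sample
 * encodings are hypothetical additions for illustration, not kernel code.
 */
#include <assert.h>
#include <stdint.h>

/* Nonzero if the 16-bit Thumb instruction is one of the recognized stores. */
static int
thumb_insn_is_store(uint16_t insn)
{
	uint16_t insn_f8 = insn & 0xf800;
	uint16_t insn_fe = insn & 0xfe00;

	return (insn_f8 == 0x6000 ||	/* STR(1)  */
	    insn_f8 == 0x7000 ||	/* STRB(1) */
	    insn_f8 == 0x8000 ||	/* STRH(1) */
	    insn_f8 == 0x9000 ||	/* STR(3)  */
	    insn_f8 == 0xc000 ||	/* STM     */
	    insn_fe == 0x5000 ||	/* STR(2)  */
	    insn_fe == 0x5200 ||	/* STRH(2) */
	    insn_fe == 0x5400);		/* STRB(2) */
}

int
main(void)
{
	assert(thumb_insn_is_store(0x6008));	/* str  r0, [r1]     */
	assert(!thumb_insn_is_store(0x6808));	/* ldr  r0, [r1]     */
	assert(thumb_insn_is_store(0x5288));	/* strh r0, [r1, r2] */
	assert(!thumb_insn_is_store(0x5a88));	/* ldrh r0, [r1, r2] */
	return (0);
}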