/*
 * Machine check exception handler.
 *
 * If the faulting instruction (regs->nip) has an entry in the kernel
 * exception table, redirect execution to the registered fixup address,
 * acknowledge the machine-check status register, and resume.  Otherwise
 * report the cause — instruction vs. data, decoded from the ESR — dump
 * the register state and a backtrace, and panic.  Does not return in
 * the unrecoverable case.
 */
void MachineCheckException(struct pt_regs *regs)
{
	unsigned long fixup, val;

	/* Recoverable: a fixup handler is registered for this PC. */
	if ((fixup = search_exception_table(regs->nip)) != 0) {
		regs->nip = fixup;
		val = mfspr(MCSR);
		/* Clear MCSR by writing back the bits that were set. */
		mtspr(SPRN_MCSR, val);
		return;
	}

	/* Unrecoverable: report the source of the machine check and die. */
	rt_kprintf("Machine Check Exception.\n");
	rt_kprintf("Caused by (from msr): ");
	rt_kprintf("regs %p ", regs);
	val = get_esr();
	if (val& ESR_IMCP) {
		rt_kprintf("Instruction");
		/* Acknowledge the instruction machine-check condition. */
		mtspr(ESR, val & ~ESR_IMCP);
	} else {
		rt_kprintf("Data");
	}
	rt_kprintf(" machine check.\n");
	show_regs(regs);
	/* gpr[1] is the stack pointer — walk the frame chain from there. */
	print_backtrace((unsigned long *)regs->gpr[1]);
	panic("machine check");
}
/* * Oops. The kernel tried to access some page that wasn't present. */ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, struct pt_regs *regs) { unsigned long fixup; /* * Are we prepared to handle this kernel fault? */ if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) { #ifdef DEBUG printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n", current->comm, regs->ARM_pc, addr, fixup); #endif regs->ARM_pc = fixup; return; } /* * No handler, we'll have to terminate things with extreme prejudice. */ printk(KERN_ALERT "Unable to handle kernel %s at virtual address %08lx\n", (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request", addr); show_pte(mm, addr); die("Oops", regs, fsr); do_exit(SIGKILL); }
/*
 * instruction or data access exception
 *
 * esr0/ear0/epcr0 are the exception status, effective address and PC
 * registers saved by the low-level entry code.  A kernel-mode fault
 * with an exception-table fixup entry is recovered in place; any other
 * kernel-mode fault dies via die_if_kernel(), and a user-mode fault is
 * answered with SIGSEGV/SEGV_ACCERR.
 */
asmlinkage void memory_access_exception(unsigned long esr0,
					unsigned long ear0,
					unsigned long epcr0)
{
	siginfo_t info;

#ifdef CONFIG_MMU
	unsigned long fixup;

	/* Recover kernel faults that registered an exception-table fixup. */
	fixup = search_exception_table(__frame->pc);
	if (fixup) {
		__frame->pc = fixup;
		return;
	}
#endif

	/* Fatal if this happened in kernel mode with no fixup. */
	die_if_kernel("-- Memory Access Exception --\n"
		      "ESR0 : %08lx\n"
		      "EAR0 : %08lx\n"
		      "EPCR0 : %08lx\n",
		      esr0, ear0, epcr0);

	info.si_signo = SIGSEGV;
	info.si_code = SEGV_ACCERR;
	info.si_errno = 0;
	info.si_addr = NULL;

	/* Report the faulting address only if the ESR marks it valid. */
	if ((esr0 & (ESRx_VALID | ESR0_EAV)) == (ESRx_VALID | ESR0_EAV))
		info.si_addr = (void __user *) ear0;

	force_sig_info(info.si_signo, &info, current);
} /* end memory_access_exception() */
/*
 * Try to recover a fault at 'address' via the kernel exception table.
 * On success the saved instruction pointer in the sigcontext is
 * redirected to the fixup handler and 1 is returned; otherwise 0.
 */
int arch_fixup(unsigned long address, void *sc_ptr)
{
	struct sigcontext *sc = sc_ptr;
	unsigned long fixup = search_exception_table(address);

	if (!fixup)
		return 0;

	sc->eip = fixup;
	return 1;
}
/*
 * Deliver the fault signal recorded in current->thread (signo/code/faddr).
 *
 * Returns 1 after signalling a user-mode fault, -1 when a kernel-mode
 * fault was recovered through the exception table (the caller must
 * resume via the rebuilt stack frame), and does not return when the
 * kernel fault is unrecoverable.
 */
int send_fault_sig(struct pt_regs *regs)
{
	siginfo_t siginfo = { 0, 0, 0, };

	siginfo.si_signo = current->thread.signo;
	siginfo.si_code = current->thread.code;
	siginfo.si_addr = (void *)current->thread.faddr;

#ifdef DEBUG
	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
	       siginfo.si_signo, siginfo.si_code);
#endif

	if (user_mode(regs)) {
		force_sig_info(siginfo.si_signo, &siginfo, current);
	} else {
		unsigned long fixup;

		/* Are we prepared to handle this kernel fault? */
		if ((fixup = search_exception_table(regs->pc))) {
			struct pt_regs *tregs;

			/* Create a new four word stack frame, discarding the
			   old one.  The frame is relocated stkadj bytes above
			   the original exception frame (the extra size depends
			   on the hardware frame format), and the fixup address
			   becomes the new PC. */
			regs->stkadj = frame_extra_sizes[regs->format];
			tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
			tregs->vector = regs->vector;
			tregs->format = 0;
			tregs->pc = fixup;
			tregs->sr = regs->sr;
			return -1;
		}

		//if (siginfo.si_signo == SIGBUS)
		//	force_sig_info(siginfo.si_signo,
		//		       &siginfo, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access");
		printk(" at virtual address %p\n", siginfo.si_addr);
		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}
/*
 * Machine check exception handler (PowerPC).
 *
 * User-mode machine checks become SIGSEGV.  Kernel-mode ones are either
 * treated as a bad page fault (8xx + PCI qspan reads), forwarded to an
 * attached debugger, or — on CONFIG_ALL_PPC — recovered when the fault
 * was caused by an I/O access whose trailing sync instruction has an
 * exception-table entry.
 */
void MachineCheckException(struct pt_regs *regs)
{
#ifdef CONFIG_ALL_PPC
	unsigned long fixup;
#endif /* CONFIG_ALL_PPC */

	if (user_mode(regs)) {
		_exception(SIGSEGV, regs);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar);
	return;
#endif

#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
	if (debugger_fault_handler) {
		debugger_fault_handler(regs);
		return;
	}
#endif

#ifdef CONFIG_ALL_PPC
	/*
	 * I/O accesses can cause machine checks on powermacs.
	 * Check if the NIP corresponds to the address of a sync
	 * instruction for which there is an entry in the exception
	 * table.
	 */
	if (regs->msr & (0x80000 | 0x40000)
	    && (fixup = search_exception_table(regs->nip)) != 0) {
		/*
		 * Check that it's a sync instruction.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 */
		if (*(unsigned int *)regs->nip == 0x7c0004ac) {
			/* The preceding instruction is the indexed I/O
			   load/store; extract its RB field to recover the
			   port address for the diagnostic message. */
			unsigned int lsi = ((unsigned int *)regs->nip)[-1];
			int rb = (lsi >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %lx\n",
			       (lsi & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, regs->nip);
			regs->nip = fixup;
			return;
		}
	}
	/* NOTE(review): the remainder of this function — including the
	   matching #endif and the function's closing brace — is not
	   visible in this chunk; confirm against the full source file. */
/* * bad_page_fault is called when we have a bad access from the kernel. * It is called from do_page_fault above and from some of the procedures * in traps.c. */ void bad_page_fault(struct pt_regs *regs, unsigned long address) { unsigned long fixup; /* Are we prepared to handle this fault? */ if ((fixup = search_exception_table(regs->nip)) != 0) { regs->nip = fixup; return; } /* kernel has accessed a bad area */ show_regs(regs); #if defined(CONFIG_XMON) || defined(CONFIG_KGDB) if (debugger_kernel_faults) debugger(regs); #endif print_backtrace( (unsigned long *)regs->gpr[1] ); panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d", regs->nip,regs->link,address,current->comm,current->pid); }
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   ****0004  Protection            -> Write-Protection (suprression)
 *   ****0010  Segment translation   -> Not present      (nullification)
 *   ****0011  Page translation      -> Not present      (nullification)
 *   ****003B  Region third exception -> Not present     (nullification)
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long fixup;
	int write;
	int si_code = SEGV_MAPERR;	/* default: address not mapped */
	int kernel_address = 0;		/* set when fault is in kernel space */

	tsk = current;
	mm = tsk->mm;

	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if ((error_code & 0xff) == 4 && !(S390_lowcore.trans_exc_code & 4)) {
		/* Low-address protection hit in kernel mode means
		   NULL pointer write access in kernel mode. */
		if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
			address = 0;
			kernel_address = 1;
			goto no_context;
		}

		/* Low-address protection hit in user mode 'cannot happen'. */
		die ("Low-address protection", regs, error_code);
		do_exit(SIGKILL);
	}

	/*
	 * get the failing address
	 * more specific the segment and page table portion of
	 * the address
	 */
	address = S390_lowcore.trans_exc_code&-4096L;

	/*
	 * Check which address space the address belongs to
	 */
	switch (S390_lowcore.trans_exc_code & 3) {
	case 0: /* Primary Segment Table Descriptor */
		kernel_address = 1;
		goto no_context;
	case 1: /* STD determined via access register */
		/* Access register 0 always refers to the kernel. */
		if (S390_lowcore.exc_access_id == 0) {
			kernel_address = 1;
			goto no_context;
		}
		if (regs && S390_lowcore.exc_access_id < NUM_ACRS) {
			if (regs->acrs[S390_lowcore.exc_access_id] == 0) {
				kernel_address = 1;
				goto no_context;
			}
			if (regs->acrs[S390_lowcore.exc_access_id] == 1) {
				/* user space address */
				break;
			}
		}
		die("page fault via unknown access register", regs, error_code);
		do_exit(SIGKILL);
		break;
	case 2: /* Secondary Segment Table Descriptor */
	case 3: /* Home Segment Table Descriptor */
		/* user space address */
		break;
	}

	/*
	 * Check whether we have a user MM in the first place.
	 */
	if (in_interrupt() || !mm || !(regs->psw.mask & _PSW_IO_MASK_BIT))
		goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */
	__sti();

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	write = 0;
	si_code = SEGV_ACCERR;

	switch (error_code & 0xFF) {
	case 0x04:	/* write, present*/
		write = 1;
		break;
	case 0x10:	/* not present*/
	case 0x11:	/* not present*/
	case 0x3B:	/* not present*/
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
		break;
	default:
		printk("code should be 4, 10 or 11 (%lX) \n",error_code&0xFF);
		goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case 1:		/* minor fault */
		tsk->min_flt++;
		break;
	case 2:		/* major fault */
		tsk->maj_flt++;
		break;
	case 0:		/* access error */
		goto do_sigbus;
	default:	/* out of memory */
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_PROBLEM_STATE) {
		tsk->thread.prot_addr = address;
		tsk->thread.trap_no = error_code;
#ifndef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
		printk("User process fault: interruption code 0x%lX\n",error_code);
		printk("failing address: %lX\n",address);
		show_regs(regs);
#endif
#else
		if (sysctl_userprocess_debug) {
			printk("User process fault: interruption code 0x%lX\n",
			       error_code);
			printk("failing address: %lX\n", address);
			show_regs(regs);
		}
#endif
		force_sigsegv(tsk, si_code, (void *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(regs->psw.addr)) != 0) {
		regs->psw.addr = fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (kernel_address)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %016lx\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %016lx\n", address);

	die("Oops", regs, error_code);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (tsk->pid == 1) {
		/* init must not be killed: yield the CPU and retry. */
		tsk->policy |= SCHED_YIELD;
		schedule();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (regs->psw.mask & PSW_PROBLEM_STATE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_PROBLEM_STATE))
		goto no_context;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long address)
{
	struct vm_area_struct * vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned long fixup;
	siginfo_t info;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START)
		goto vmalloc_fault;

	info.si_code = SEGV_MAPERR;
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;
#if 0
	printk("[%s:%d:%08lx:%ld:%08lx]\n", current->comm, current->pid,
	       address, write, regs->cp0_epc);
#endif
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* Verify that the access is permitted by the VMA's protection. */
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case 1:		/* minor fault */
		tsk->min_flt++;
		break;
	case 2:		/* major fault */
		tsk->maj_flt++;
		break;
	case 0:		/* access error */
		goto do_sigbus;
	default:	/* out of memory */
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for illegal %s\n"
		       "%08lx (epc == %08lx, ra == %08lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       address,
		       (unsigned long) regs->cp0_epc,
		       (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_table(exception_epc(regs));
	if (fixup) {
		long new_epc;

		tsk->thread.cp0_baduaddr = address;
		/* Rewrite the EPC to the registered fixup handler. */
		new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc);
		if (development_version)
			printk(KERN_DEBUG "%s: Exception at [<%lx>] (%lx)\n",
			       tsk->comm, regs->cp0_epc, new_epc);
		regs->cp0_epc = new_epc;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx, epc == %08lx, ra == %08lx\n",
	       address, regs->cp0_epc, regs->regs[31]);
	die("Oops", regs);
	/* Game over. */

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (tsk->pid == 1) {
		/* init must not be killed: yield and retry the fault. */
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk(KERN_NOTICE "VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		/* Fault only valid if the master table has the entry
		   and ours does not. */
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);
	}
}
/*
 * Top-level page-fault handler: delegate to __do_page_fault under
 * mm->mmap_sem and translate its result code into a signal, an
 * exception-table fixup, or an oops.  Returns 0 in every survivable
 * case; does not return when the kernel fault is unrecoverable.
 *
 * __do_page_fault result convention (as consumed below):
 *   > 0  handled successfully
 *   == 0 valid mapping but the fault could not be serviced -> SIGBUS
 *   -1   protection fault -> SEGV_ACCERR
 *   -3   out of memory
 *   other negative -> SEGV_MAPERR
 */
static int do_page_fault(unsigned long addr, int mode, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long fixup;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down(&mm->mmap_sem);
	fault = __do_page_fault(mm, addr, mode, tsk);
	up(&mm->mmap_sem);

ret:
	/*
	 * Handle the "normal" case first
	 */
	if (fault > 0)
		return 0;

	/*
	 * We had some memory, but were unable to
	 * successfully fix up this page fault.
	 */
	if (fault == 0)
		goto do_sigbus;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault == -3) {
		/*
		 * We ran out of memory, or some other thing happened to
		 * us that made us unable to handle the page fault gracefully.
		 */
		printk("VM: killing process %s\n", tsk->comm);
		do_exit(SIGKILL);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory map..
		 * User mode accesses just cause a SIGSEGV
		 */
		struct siginfo si;

#ifdef CONFIG_DEBUG_USER
		printk(KERN_DEBUG "%s: unhandled page fault at pc=0x%08lx, "
		       "lr=0x%08lx (bad address=0x%08lx, code %d)\n",
		       tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
#endif

		tsk->thread.address = addr;
		tsk->thread.error_code = mode;
		tsk->thread.trap_no = 14;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		/* -1 marks a protection fault, anything else a bad map. */
		si.si_code = fault == -1 ? SEGV_ACCERR : SEGV_MAPERR;
		si.si_addr = (void *)addr;
		force_sig_info(SIGSEGV, &si, tsk);
	}
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = mode;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (user_mode(regs))
		return 0;

no_context:
	/* Are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
#ifdef DEBUG
		printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
		       tsk->comm, regs->ARM_pc, addr, fixup);
#endif
		regs->ARM_pc = fixup;
		return 0;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel %s at virtual address %08lx\n",
	       (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request",
	       addr);
	show_pte(mm, addr);
	die("Oops", regs, mode);
	do_exit(SIGKILL);

	return 0;

vmalloc_fault:
	/* Kernel-space fault: sync with the reference page table,
	   then interpret the result through the common path above. */
	fault = __do_vmalloc_fault(addr, mm);
	goto ret;
}
/*
 * Emulate a faulting MIPS load/store instruction at 'pc' whose target
 * address is 'addr', updating the saved register state in 'regs'.
 *
 * Halfword/word/doubleword accesses are re-performed byte- or
 * partial-word-wise with lb/lwl/lwr-style instruction pairs; faults in
 * those sequences are caught via __ex_table entries that branch to the
 * local 'fault' label (a GCC computed-goto target).  Opcodes that
 * cannot legitimately take this path raise SIGBUS or SIGILL.
 */
static inline void emulate_load_store_insn(struct pt_regs *regs,
					   unsigned long addr,
					   unsigned long pc)
{
	union mips_instruction insn;
	unsigned long value, fixup;

	/* $0 must always read as zero. */
	regs->regs[0] = 0;
	/*
	 * This load never faults.
	 */
	__get_user(insn.word, (unsigned int *)pc);
	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
	case lh_op:		/* signed halfword load */
		check_axs(pc, addr, 2);
		__asm__(
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0,0(%1)\n"
			"2:\tlbu\t$1,1(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0,1(%1)\n"
			"2:\tlbu\t$1,0(%1)\n\t"
#endif
			"sll\t%0,0x8\n\t"
			"or\t%0,$1\n\t"
			".set\tat\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			:"=&r" (value)
			:"r" (addr), "i" (&&fault)
			:"$1");
		regs->regs[insn.i_format.rt] = value;
		return;

	case lw_op:		/* word load via lwl/lwr pair */
		check_axs(pc, addr, 4);
		__asm__(
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0,(%1)\n"
			"2:\tlwr\t%0,3(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0,3(%1)\n"
			"2:\tlwr\t%0,(%1)\n\t"
#endif
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			:"=&r" (value)
			:"r" (addr), "i" (&&fault));
		regs->regs[insn.i_format.rt] = value;
		return;

	case lhu_op:		/* unsigned halfword load */
		check_axs(pc, addr, 2);
		__asm__(
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0,0(%1)\n"
			"2:\tlbu\t$1,1(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0,1(%1)\n"
			"2:\tlbu\t$1,0(%1)\n\t"
#endif
			"sll\t%0,0x8\n\t"
			"or\t%0,$1\n\t"
			".set\tat\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			:"=&r" (value)
			:"r" (addr), "i" (&&fault)
			:"$1");
		regs->regs[insn.i_format.rt] = value;
		return;

	case lwu_op:		/* unsigned word load (64-bit ISA) */
		check_axs(pc, addr, 4);
		__asm__(
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0,(%1)\n"
			"2:\tlwr\t%0,3(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0,3(%1)\n"
			"2:\tlwr\t%0,(%1)\n\t"
#endif
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			:"=&r" (value)
			:"r" (addr), "i" (&&fault));
		/* zero-extend: lwu must not sign-extend into bits 63..32 */
		value &= 0xffffffff;
		regs->regs[insn.i_format.rt] = value;
		return;

	case ld_op:		/* doubleword load via ldl/ldr pair */
		check_axs(pc, addr, 8);
		__asm__(
			".set\tmips3\n"
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0,(%1)\n"
			"2:\tldr\t%0,7(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0,7(%1)\n"
			"2:\tldr\t%0,(%1)\n\t"
#endif
			".set\tmips0\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			:"=&r" (value)
			:"r" (addr), "i" (&&fault));
		regs->regs[insn.i_format.rt] = value;
		return;

	case sh_op:		/* halfword store, byte-wise */
		check_axs(pc, addr, 2);
		value = regs->regs[insn.i_format.rt];
		__asm__(
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%0,1(%1)\n\t"
			"srl\t$1,%0,0x8\n"
			"2:\tsb\t$1,0(%1)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%0,0(%1)\n\t"
			"srl\t$1,%0,0x8\n"
			"2:\tsb\t$1,1(%1)\n\t"
			".set\tat\n\t"
#endif
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			: /* no outputs */
			:"r" (value), "r" (addr), "i" (&&fault)
			:"$1");
		return;

	case sw_op:		/* word store via swl/swr pair */
		check_axs(pc, addr, 4);
		value = regs->regs[insn.i_format.rt];
		__asm__(
#ifdef __BIG_ENDIAN
			"1:\tswl\t%0,(%1)\n"
			"2:\tswr\t%0,3(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%0,3(%1)\n"
			"2:\tswr\t%0,(%1)\n\t"
#endif
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			: /* no outputs */
			:"r" (value), "r" (addr), "i" (&&fault));
		return;

	case sd_op:		/* doubleword store via sdl/sdr pair */
		check_axs(pc, addr, 8);
		value = regs->regs[insn.i_format.rt];
		__asm__(
			".set\tmips3\n"
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%0,(%1)\n"
			"2:\tsdr\t%0,7(%1)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%0,7(%1)\n"
			"2:\tsdr\t%0,(%1)\n\t"
#endif
			".set\tmips0\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b,%2\n\t"
			STR(PTR)"\t2b,%2\n\t"
			".previous"
			: /* no outputs */
			:"r" (value), "r" (addr), "i" (&&fault));
		return;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
		 * (No longer true: The Sony Praystation uses cp2 for
		 * 3D matrix operations.  Dunno if that thingy has a MMU ...)
		 */
		/* fallthrough */
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}
	return;

fault:
	/* Did we have an exception handler installed? */
	fixup = search_exception_table(regs->cp0_epc);
	if (fixup) {
		long new_epc;
		new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc);
		printk(KERN_DEBUG "%s: Forwarding exception at [<%lx>] (%lx)\n",
		       current->comm, regs->cp0_epc, new_epc);
		regs->cp0_epc = new_epc;
		return;
	}

	/* No fixup — the emulated access itself faulted: SIGSEGV. */
	lock_kernel();
	send_sig(SIGSEGV, current, 1);
	unlock_kernel();
	return;

sigbus:
	lock_kernel();
	send_sig(SIGBUS, current, 1);
	unlock_kernel();
	return;

sigill:
	lock_kernel();
	send_sig(SIGILL, current, 1);
	unlock_kernel();
	return;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   04  Protection           -> Write-Protection (suprression)
 *   10  Segment translation  -> Not present      (nullification)
 *   11  Page translation     -> Not present      (nullification)
 *   3b  Region third trans.  -> Not present      (nullification)
 */
extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	int user_address;	/* nonzero when the address is user space */
	unsigned long fixup;
	int si_code = SEGV_MAPERR;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (error_code == 4 && !(S390_lowcore.trans_exc_code & 4)) {
		/* Low-address protection hit in kernel mode means
		   NULL pointer write access in kernel mode. */
		if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
			address = 0;
			user_address = 0;
			goto no_context;
		}

		/* Low-address protection hit in user mode 'cannot happen'. */
		die ("Low-address protection", regs, error_code);
		do_exit(SIGKILL);
	}

	/*
	 * get the failing address
	 * more specific the segment and page table portion of
	 * the address
	 */
	address = S390_lowcore.trans_exc_code & -4096L;
	user_address = check_user_space(regs, error_code);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	if (user_address == 0 || in_interrupt() || !mm)
		goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */
	__sti();

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (error_code != 4) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	} else {
		/* write-protection fault: the VMA must be writable */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, error_code == 4)) {
	case 1:		/* minor fault */
		tsk->min_flt++;
		break;
	case 2:		/* major fault */
		tsk->maj_flt++;
		break;
	case 0:		/* access error */
		goto do_sigbus;
	default:	/* out of memory */
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_PROBLEM_STATE) {
		tsk->thread.prot_addr = address;
		tsk->thread.trap_no = error_code;
		force_sigsegv(regs, error_code, si_code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(regs->psw.addr)) != 0) {
		regs->psw.addr = fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (user_address == 0)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %016lx\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %016lx\n", address);

	die("Oops", regs, error_code);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (tsk->pid == 1) {
		/* init must not be killed: yield and retry the fault. */
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (regs->psw.mask & PSW_PROBLEM_STATE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_PROBLEM_STATE))
		goto no_context;
}
/*
 * PA-RISC page-fault handler.
 *
 * Resolves the fault against the task's VMAs (stacks grow UP on this
 * architecture, hence VM_GROWSUP/expand_stackup); user faults that
 * cannot be resolved get SIGSEGV, kernel faults are recovered through
 * the exception table's skip-encoded fixups, and anything else
 * terminates via parisc_terminate().
 */
void do_page_fault(struct pt_regs *regs, unsigned long code,
			      unsigned long address)
{
	struct vm_area_struct * vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const struct exception_table_entry *fix;
	unsigned long acc_type;

	/* No user context (or in interrupt): only a fixup can save us. */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = pa_find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (address < vma->vm_end)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSUP) || expand_stackup(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access. We still need to
 * check the access permissions.
 */
good_area:
	/* Decode the required access rights from the trap code and the
	   faulting instruction. */
	acc_type = parisc_acctyp(code,regs->iir);

	if ((vma->vm_flags & acc_type) != acc_type)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
	case 1:
		++current->min_flt;
		break;
	case 2:
		++current->maj_flt;
		break;
	case 0:
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		goto bad_area;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		struct siginfo si;

		printk("\ndo_page_fault() pid=%d command='%s'\n",
		       tsk->pid, tsk->comm);
		show_regs(regs);
		/* FIXME: actually we need to get the signo and code correct */
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &si, current);
		return;
	}

no_context:
	if (!user_mode(regs)) {
		fix = search_exception_table(regs->iaoq[0]);
		if (fix) {
			/* The fixup's low bits encode register poisoning:
			   bit 0 -> gr[8] = -EFAULT, bit 1 -> gr[9] = 0;
			   the remaining bits are the PC advance. */
			if (fix->skip & 1)
				regs->gr[8] = -EFAULT;
			if (fix->skip & 2)
				regs->gr[9] = 0;

			regs->iaoq[0] += ((fix->skip) & ~3);

			/*
			 * NOTE: In some cases the faulting instruction
			 * may be in the delay slot of a branch. We
			 * don't want to take the branch, so we don't
			 * increment iaoq[1], instead we set it to be
			 * iaoq[0]+4, and clear the B bit in the PSW
			 */
			regs->iaoq[1] = regs->iaoq[0] + 4;
			regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */

			return;
		}
	}

	parisc_terminate("Bad Address (null pointer deref?)",regs,code,address);

out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}