/*
 * A fault happened in kernel context: either branch to the registered
 * exception-table fixup for the faulting PSW address, or oops and kill
 * the current task.
 */
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		/* Redirect execution to the fixup, preserving the addressing mode bit. */
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs->int_parm_long))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
/*
 * Report a trap: in user mode deliver @si_signo to the current task; in
 * kernel mode try an exception-table fixup, then the bug table, and die
 * with @str if neither resolves the trap.
 */
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (user_mode(regs)) {
		/* User-mode trap: queue the signal and log the fault. */
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_trap_ip(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;

		/* Kernel-mode trap: is there a registered fixup for this address? */
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				/* WARN_ON()-style trap: reported, continue execution. */
				return;
			die(regs, str);
		}
	}
}
/*
 * Handle a kernel fault the page-fault handler could not resolve: branch
 * to the exception-table fixup for the faulting PC if one exists,
 * otherwise print an oops and terminate with signal @sig.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
		       current->comm, regs->pc, entry->fixup);
#endif
		/* Record the faulting address for the task before resuming at the fixup. */
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
/*
 * A fault happened in kernel context: either branch to the registered
 * exception-table fixup for the faulting PSW address, or oops and kill
 * the current task with SIGKILL.
 */
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		/* Resume at the fixup, preserving the addressing-mode bit. */
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs->int_parm_long))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
/*
 * Handle a BUG() trap: save the register state for later reporting and,
 * when verbose BUG support is configured, redirect the saved PC via the
 * exception table so the BUG arguments can be recovered.
 */
void fixup_BUG(struct pt_regs *regs)
{
	BUG_regs = *regs;

#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Fixup the BUG arguments through exception handlers.
	 */
	{
		const struct exception_table_entry *fixup;

		/*
		 * ERP points at the "break 14" + 2, compensate for the 2
		 * bytes.
		 */
		fixup = search_exception_tables(instruction_pointer(regs) - 2);
		if (fixup) {
			/* Adjust the instruction pointer in the stackframe. */
			instruction_pointer(regs) = fixup->fixup;
			arch_fixup(regs);
		}
	}
#else
	/* Don't try to lookup the filename + line, just dump regs. */
	do_BUG("unknown", 0);
#endif
}
/*
 * General-protection-fault handler: user-mode faults deliver SIGSEGV to
 * the task; kernel-mode faults try the exception table, then the die
 * notifier chain, then oops.
 */
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	conditional_sti(regs);

	if (user_mode(regs)) {
		struct task_struct *tsk = current;

		/* Optionally log unhandled user faults for debugging. */
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
			       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		/* Record trap details on the task, then raise SIGSEGV. */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;
		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			/* Resume at the registered fixup address. */
			regs->rip = fixup->fixup;
			return;
		}
		/* Give registered die handlers (debuggers etc.) a chance first. */
		if (notify_die(DIE_GPF, "general protection fault", regs,
			       error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
/*
 * Try to resolve a kernel fault via the exception table.  Returns 1 when
 * execution was redirected to a fixup, 0 when the fault is unhandled.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

#ifdef CONFIG_PNPBIOS
	/* Faults inside the PNPBIOS code segment get special recovery. */
	if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;

		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		/* Restore the saved stack pointer and jump back past the BIOS call. */
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	fixup = search_exception_tables(regs->ip);
	if (fixup) {
		/* If fixup is less than 16, it means uaccess error */
		if (fixup->fixup < 16) {
			/* Small fixup values encode an IP delta, not an address. */
			current_thread_info()->uaccess_err = -EFAULT;
			regs->ip += fixup->fixup;
			return 1;
		}
		regs->ip = fixup->fixup;
		return 1;
	}

	return 0;
}
/*
 * see if there's a fixup handler available to deal with a kernel fault
 */
unsigned long search_exception_table(unsigned long pc)
{
	const struct exception_table_entry *extab;

	/* determine if the fault lay during a memcpy_user or a memset_user */
	if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
	    (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
	    ) {
		/* the fault occurred in a protected memset
		 * - we search for the return address (in LR) instead of the program counter
		 * - it was probably during a clear_user()
		 */
		return (unsigned long) &__memset_user_error_handler;
	}

	if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
	    (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
	    ) {
		/* the fault occurred in a protected memcpy
		 * - we search for the return address (in LR) instead of the program counter
		 * - it was probably during a copy_to/from_user()
		 */
		return (unsigned long) &__memcpy_user_error_handler;
	}

	/* Fall back to the generic exception tables; 0 means "no fixup". */
	extab = search_exception_tables(pc);
	if (extab)
		return extab->fixup;

	return 0;
} /* end search_exception_table() */
int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; #ifdef CONFIG_PNPBIOS if (unlikely((regs->xcs & ~15) == (GDT_ENTRY_PNPBIOS_BASE << 3))) { extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; extern u32 pnp_bios_is_utter_crap; pnp_bios_is_utter_crap = 1; printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n"); __asm__ volatile( "movl %0, %%esp\n\t" "jmp *%1\n\t" : : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip)); panic("do_trap: can't hit this"); } #endif fixup = search_exception_tables(regs->eip); if (fixup) { regs->eip = fixup->fixup; return 1; } return 0; }
/*
 * A fault happened in kernel context: apply an exception-table fixup if
 * one is registered for the faulting PSW address, otherwise oops and
 * kill the current task.
 */
static void do_no_context(struct pt_regs *regs, unsigned long error_code,
			  unsigned long address)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
	if (fixup) {
		/* Resume at the fixup, preserving the addressing-mode bit. */
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (check_space(current) == 0)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die("Oops", regs, error_code);
	do_exit(SIGKILL);
}
/*
 * A fault happened in kernel context: apply an exception-table fixup if
 * one is registered for the faulting PSW address, otherwise report the
 * fault details, oops, and kill the current task.
 */
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
/*
 * Redirect a faulting kernel instruction to its exception-table fixup.
 * Returns 1 when a fixup was found and applied, 0 otherwise.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(instruction_pointer(regs));
	if (!entry)
		return 0;

	regs->ipc = entry->fixup;
	return 1;
}
/*
 * Returns non-zero if opcode is boostable.
 * RIP relative instructions are adjusted at copying time in 64 bits mode
 *
 * "Boosting" a kprobe means executing the copied instruction followed by
 * a jump, avoiding a second trap; instructions that may fault, trap, or
 * change control flow unpredictably must not be boosted.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	/* Walk over prefix bytes; bail out if the instruction is too long. */
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry;	/* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry;	/* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry;	/* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry;	/* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
/*
 * Redirect a faulting kernel instruction (EPC) to its exception-table
 * fixup.  Returns 1 when fixed up, 0 when no fixup exists.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->epc);
	if (!entry)
		return 0;

	regs->epc = entry->fixup;
	return 1;
}
/* Compare this to arch/i386/mm/extable.c:fixup_exception() */
int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
{
	const struct exception_table_entry *entry;

	/* Redirect the saved IP to the registered fixup, if any. */
	entry = search_exception_tables(address);
	if (!entry)
		return 0;

	UPT_IP(regs) = entry->fixup;
	return 1;
}
int arch_fixup(unsigned long address, void *sc_ptr) { struct sigcontext *sc = sc_ptr; unsigned long fixup; fixup = search_exception_tables(address); if(fixup != 0){ sc->eip = fixup; return(1); } return(0); }
static int kernel_math_error(struct pt_regs *regs, char *str) { const struct exception_table_entry *fixup; fixup = search_exception_tables(regs->rip); if (fixup) { regs->rip = fixup->fixup; return 1; } notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE); /* Illegal floating point operation in the kernel */ die(str, regs, 0); return 0; }
/*
 * Redirect a faulting kernel instruction to its exception-table
 * continuation.  Returns 1 when a fixup was applied, 0 otherwise.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(instruction_pointer(regs));
	if (!entry)
		return 0;

	regs->pc = entry->nextinsn;
	return 1;
}
/*
 * Machine-check handler for the MPC7448: when the faulting NIP has an
 * exception-table entry, clear the PCI config-space error, mark the
 * exception as recoverable and branch to the fixup.  Returns 1 on
 * recovery, 0 when the fault is unhandled.
 */
static int mpc7448_machine_check_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault */
	entry = search_exception_tables(regs->nip);
	if (!entry)
		return 0;

	tsi108_clear_pci_cfg_error();
	regs->msr |= MSR_RI;
	regs->nip = entry->fixup;
	return 1;
}
/*
 * Address-error exception on a data access: branch to the exception-table
 * fixup if one is registered for the faulting EPC; otherwise log the
 * fault, die if it happened in kernel mode, and raise SIGBUS.
 */
asmlinkage void do_adedata(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->cp0_epc);
	if (fixup) {
		regs->cp0_epc = fixup->fixup;
		return;
	}
	printk("do_ADE-data:ema:0x%08lx:epc:0x%08lx\n",
	       regs->cp0_ema, regs->cp0_epc);
	die_if_kernel("do_ade execution Exception\n", regs);
	force_sig(SIGBUS, current);
}
/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 */
void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
	const struct exception_table_entry *entry;

	/* Userspace faults are handled elsewhere; nothing to do here. */
	if (user_mode(regs))
		return;

	entry = search_exception_tables(regs->pc);
	if (entry) {
		regs->pc = entry->fixup;
		return;
	}

	die(str, regs, err);
}
/* Find fixup code. */
int find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(instruction_pointer(regs));
	if (!entry)
		return 0;

	/* Adjust the instruction pointer in the stackframe. */
	instruction_pointer(regs) = entry->fixup;
	arch_fixup(regs);
	return 1;
}
/*
 * Deliver the fault recorded on current->thread: in user mode raise the
 * stored signal; in kernel mode try the exception table (rebuilding the
 * exception frame so the fixup runs with a clean format-0 frame), or
 * oops.  Returns 1 for user-mode delivery, -1 when a kernel fixup was
 * installed; does not return on an unhandled kernel fault.
 */
int send_fault_sig(struct pt_regs *regs)
{
	siginfo_t siginfo = { 0, 0, 0, };

	/* Fault details were stashed on the task by the fault handler. */
	siginfo.si_signo = current->thread.signo;
	siginfo.si_code = current->thread.code;
	siginfo.si_addr = (void *)current->thread.faddr;

#ifdef DEBUG
	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
	       siginfo.si_signo, siginfo.si_code);
#endif

	if (user_mode(regs)) {
		force_sig_info(siginfo.si_signo, &siginfo, current);
	} else {
		const struct exception_table_entry *fixup;

		/* Are we prepared to handle this kernel fault? */
		if ((fixup = search_exception_tables(regs->pc))) {
			struct pt_regs *tregs;
			/* Create a new four word stack frame, discarding the
			 * old one. */
			regs->stkadj = frame_extra_sizes[regs->format];
			tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
			tregs->vector = regs->vector;
			tregs->format = 0;
			tregs->pc = fixup->fixup;
			tregs->sr = regs->sr;
			return -1;
		}

		//if (siginfo.si_signo == SIGBUS)
		//	force_sig_info(siginfo.si_signo,
		//		       &siginfo, current);

		/*
		 * Oops. The kernel tried to access some bad page. We'll have to
		 * terminate things with extreme prejudice.
		 */
		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access");
		printk(" at virtual address %p\n", siginfo.si_addr);

		die_if_kernel("Oops", regs, 0 /*error_code*/);
		do_exit(SIGKILL);
	}

	return 1;
}
/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(addr);
	if (!entry)
		return 0;

	/*
	 * Inside range? Fix g2 and return correct fixup.
	 * A range is encoded as a pair of table entries: the first has
	 * fixup == 0 and marks the start of the range, and the entry that
	 * follows it carries the real fixup address.  *g2 receives the
	 * offset of the fault within the range, in 4-byte units.
	 */
	if (!entry->fixup) {
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}

	return entry->fixup;
}
/* Find fixup code. */
int find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;
	unsigned long ip;

	/* in case of delay slot fault (v32) */
	ip = instruction_pointer(regs) & ~0x1;

	entry = search_exception_tables(ip);
	if (!entry)
		return 0;

	/* Adjust the instruction pointer in the stackframe. */
	instruction_pointer(regs) = entry->fixup;
	arch_fixup(regs);
	return 1;
}
/*
 * Returns non-zero if INSN is boostable.
 * RIP relative instructions are adjusted at copying time in 64 bits mode
 *
 * A "boosted" kprobe executes the copied instruction followed by a jump
 * instead of trapping again; instructions that may fault, trap, or alter
 * control flow unpredictably must not be boosted.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost Address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0;	/* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
/*
 * Redirect a faulting kernel instruction to its exception-table fixup.
 * Returns 1 when fixed up.  When KGDB is handling a fault of its own,
 * an unhandled fault instead long-jumps back into the debugger and
 * never returns.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return 1;
	}
#ifdef CONFIG_KGDB
	if (atomic_read(&debugger_active) && kgdb_may_fault)
		/* Restore our previous state. */
		kgdb_fault_longjmp(kgdb_fault_jmp_regs);
		/* Never reached. */
#endif
	return 0;
}
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	/* Walk the page tables down to the PTE; bail if any level is absent. */
	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	/* A huge page is its own PTE; otherwise descend to the PTE level. */
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		/* See the self-deadlock note in the header comment. */
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;

	/*
	 * The PTE is present: if its permissions already satisfy the access
	 * that faulted, assume the migration resolved itself and retry.
	 */
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
/*
 * Sun4v data access exception: privileged faults try the exception table
 * (uaccess paths) before dying; user faults raise SIGSEGV with the
 * faulting address.
 */
void sun4v_data_access_exception(struct pt_regs *regs,
				 unsigned long addr, unsigned long type_ctx)
{
	/* The trap argument packs the fault type and context. */
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n",
			       regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup; keep tnpc consistent. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	/* 32-bit tasks only keep the low 32 bits of the PC registers. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
/*
 * Spitfire data access exception: privileged faults try the exception
 * table (uaccess paths) before dying; user faults raise SIGSEGV with the
 * synchronous fault address (SFAR).
 */
void spitfire_data_access_exception(struct pt_regs *regs,
				    unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n",
			       regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup; keep tnpc consistent. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			goto out;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
out:
	exception_exit(prev_state);
}