/*
 * Handle a Spitfire instruction access exception trap.
 *
 * @regs: trap-time register state
 * @sfsr: Synchronous Fault Status Register value
 * @sfar: Synchronous Fault Address Register value
 *
 * A fault taken in privileged mode is fatal (die_if_kernel); a
 * user-mode fault is reported to the task as SIGSEGV/SEGV_MAPERR at
 * the faulting PC.  Runs bracketed by exception_enter()/exception_exit()
 * for context tracking.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	/* Give debuggers/notifiers first crack at the trap. */
	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel-mode instruction fetch fault: nothing to fix up. */
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks only see the low 32 bits of PC/NPC. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
out:
	exception_exit(prev_state);
}
void do_page_fault(struct pt_regs *regs, int fault_num, unsigned long address, unsigned long write) { enum ctx_state prev_state = exception_enter(); __do_page_fault(regs, fault_num, address, write); exception_exit(prev_state); }
/*
 * Handle a Spitfire data access exception trap.
 *
 * @regs: trap-time register state
 * @sfsr: Synchronous Fault Status Register value
 * @sfar: Synchronous Fault Address Register (faulting data address)
 *
 * Privileged-mode faults are first checked against the exception
 * table so faults in uaccess code are redirected to their fixup
 * handler; otherwise a kernel fault is fatal.  User-mode faults are
 * delivered as SIGSEGV/SEGV_MAPERR at the faulting data address.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	/* Give debuggers/notifiers first crack at the trap. */
	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n",
			       regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup handler instead of faulting. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			goto out;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
out:
	exception_exit(prev_state);
}
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage void __sched notrace preempt_schedule_context(void)
{
	enum ctx_state prev_ctx;

	/* Nothing to do unless preemption is actually possible here. */
	if (likely(!preemptible()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	/* Leave user context (if we were in it) before scheduling. */
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	/* Re-protect the exception_exit() the same way as the enter. */
	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
	enum ctx_state saved;

	/* Track the user->kernel transition, raise SIGTRAP, restore. */
	saved = exception_enter();
	send_sigtrap(current, regs);
	exception_exit(saved);
}
/* {set, get}context() needed for 64-bit SparcLinux userland. */

/*
 * Restore a user-supplied register context: the kernel side of
 * setcontext() for sparc64 userland.  The ucontext pointer arrives in
 * %o0 (UREG_I0); %o1 (UREG_I1) non-zero requests restoring the signal
 * mask as well.  Any malformed, misaligned or inaccessible ucontext
 * results in SIGSEGV being forced on the task.
 */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	enum ctx_state prev_state = exception_enter();
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	flush_user_windows();
	/* Reject if register windows are still buffered, or the ucontext
	 * pointer is misaligned or not a valid user address.
	 */
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	/* PC/NPC must be 4-byte aligned instruction addresses. */
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	if (regs->u_regs[UREG_I1]) {
		/* Caller asked for the signal mask to be restored too. */
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0],
				       &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask,
					     sizeof(sigset_t)))
				goto do_sigsegv;
		}
		set_current_blocked(&set);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks only see the low 32 bits of PC/NPC. */
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	/* Only let userspace alter the ASI and condition-code fields. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
	/* Skip %g7 as that's the thread register in userspace. */
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	/* Write frame pointer and return address into the user's
	 * register window save area on the stack (note STACK_BIAS).
	 */
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		/* FPU state was saved: restore it into the thread's
		 * software FP save area.
		 */
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs,
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			/* Lower 32 float registers. */
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			/* Upper 32 float registers. */
			err |= copy_from_user(fpregs+16,
					      ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
					      (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		/* Force an FPU reload from the thread save area. */
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;
out:
	exception_exit(prev_state);
	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
asmlinkage void sparc64_get_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; enum ctx_state prev_state = exception_enter(); mc_gregset_t __user *grp; mcontext_t __user *mcp; unsigned long fp, i7; unsigned char fenab; int err; synchronize_user_stack(); if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp))) goto do_sigsegv; #if 1 fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */ #else fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF); #endif mcp = &ucp->uc_mcontext; grp = &mcp->mc_gregs; /* Skip over the trap instruction, first. */ if (test_thread_flag(TIF_32BIT)) { regs->tpc = (regs->tnpc & 0xffffffff); regs->tnpc = (regs->tnpc + 4) & 0xffffffff; } else { regs->tpc = regs->tnpc; regs->tnpc += 4; } err = 0; if (_NSIG_WORDS == 1) err |= __put_user(current->blocked.sig[0], (unsigned long __user *)&ucp->uc_sigmask); else err |= __copy_to_user(&ucp->uc_sigmask, ¤t->blocked, sizeof(sigset_t)); err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE])); err |= __put_user(regs->tpc, &((*grp)[MC_PC])); err |= __put_user(regs->tnpc, &((*grp)[MC_NPC])); err |= __put_user(regs->y, &((*grp)[MC_Y])); err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1])); err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2])); err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3])); err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4])); err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5])); err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6])); err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7])); err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0])); err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1])); err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2])); err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3])); err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4])); err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5])); err |= 
__put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6])); err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7])); err |= __get_user(fp, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6]))); err |= __get_user(i7, (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7]))); err |= __put_user(fp, &(mcp->mc_fp)); err |= __put_user(i7, &(mcp->mc_i7)); err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab)); if (fenab) { unsigned long *fpregs = current_thread_info()->fpregs; unsigned long fprs; fprs = current_thread_info()->fpsaved[0]; if (fprs & FPRS_DL) err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs, (sizeof(unsigned int) * 32)); if (fprs & FPRS_DU) err |= copy_to_user( ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16, (sizeof(unsigned int) * 32)); err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr)); err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr)); err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs)); } if (err) goto do_sigsegv; out: exception_exit(prev_state); return; do_sigsegv: force_sig(SIGSEGV, current); goto out; }
* last 8 bytes since the kernel stores the thread * pointer there. */ memcpy((void *)CVMSEG_BASE, current->thread.cvmseg.cvmseg, cvmseg_size - 8); # else /* Restore the processes CVMSEG data */ memcpy((void *)CVMSEG_BASE, current->thread.cvmseg.cvmseg, cvmseg_size); # endif preempt_enable(); return; } #endif prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? */ if (regs->cp0_badvaddr == regs->cp0_epc) goto sigbus; if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; /* * Do branch emulation only if we didn't forward the exception.
asmlinkage void do_ade(struct pt_regs *regs) { enum ctx_state prev_state; unsigned int __user *pc; mm_segment_t seg; prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? */ if (regs->cp0_badvaddr == regs->cp0_epc) goto sigbus; if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; /* * Do branch emulation only if we didn't forward the exception. * This is all so but ugly ... */ /* * Are we running in microMIPS mode? */ if (get_isa16_mode(regs->cp0_epc)) { /* * Did we catch a fault trying to load an instruction in * 16-bit mode? */ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); if (cpu_has_mmips) { seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_microMIPS(regs, (void __user *)regs->cp0_badvaddr); set_fs(seg); return; } if (cpu_has_mips16) { seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_MIPS16e(regs, (void __user *)regs->cp0_badvaddr); set_fs(seg); return; } goto sigbus; } if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); pc = (unsigned int __user *)exception_epc(regs); seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); set_fs(seg); return; sigbus: die_if_kernel("Kernel unaligned instruction access", regs); force_sig(SIGBUS, current); /* * XXX On return from the signal handler we should advance the epc */ exception_exit(prev_state); }