/*
 * Do a signal return; undo the signal stack.
 *
 * Rebuild the interrupted user register state (@regs) from the sigcontext
 * (@sc) that sys_sigreturn found on the user stack.  The saved user D0 is
 * returned through *_d0 rather than being written into regs so the caller
 * can use it as the syscall return value.
 *
 * Returns 0 on success, non-zero if any user-space access failed or the
 * FPU context pointer was bogus.
 */
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc, long *_d0)
{
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* throw away any live FPU state; it will be reloaded (if saved)
	 * from the sigcontext's fpucontext block below */
	if (is_using_fpu(current))
		fpu_kill_state(current);

	/* pull each saved GP/special register out of the user sigcontext,
	 * OR-accumulating any fault into err */
#define COPY(x) err |= __get_user(regs->x, &sc->x)
	COPY(d1); COPY(d2); COPY(d3);
	COPY(a0); COPY(a1); COPY(a2); COPY(a3);
	COPY(e0); COPY(e1); COPY(e2); COPY(e3);
	COPY(e4); COPY(e5); COPY(e6); COPY(e7);
	COPY(lar); COPY(lir);
	COPY(mdr); COPY(mdrq);
	COPY(mcvf); COPY(mcrl); COPY(mcrh);
	COPY(sp); COPY(pc);
#undef COPY

	{
		unsigned int tmpflags;

		/* only let userspace influence the condition flags, the
		 * Trace bit (absent when the JTAG unit is in use) and the
		 * nAR bit of EPSW; everything else (e.g. IM/IE) is kept
		 * from the kernel's current value */
#ifndef CONFIG_MN10300_USING_JTAG
#define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		   EPSW_T | EPSW_nAR)
#else
#define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \
		   EPSW_nAR)
#endif
		err |= __get_user(tmpflags, &sc->epsw);
		regs->epsw = (regs->epsw & ~USER_EPSW) |
		  (tmpflags & USER_EPSW);
		regs->orig_d0 = -1;		/* disable syscall checks */
	}

	{
		struct fpucontext *buf;

		/* restore the FPU state if the signal frame carried one;
		 * a NULL pointer means no FPU state was saved */
		err |= __get_user(buf, &sc->fpucontext);
		if (buf) {
			if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
				goto badframe;
			err |= fpu_restore_sigcontext(buf);
		}
	}

	/* hand back the saved D0 separately as the syscall return value */
	err |= __get_user(*_d0, &sc->d0);
	return err;

badframe:
	return 1;
}
/*
 * Fill in the FPU structure for a core dump.
 *
 * Copies the task's saved FPU register file into @fpreg if the FPU has
 * actually been used by the task; otherwise leaves @fpreg untouched.
 *
 * Returns non-zero (the is_using_fpu() result) when @fpreg was filled in,
 * 0 when the task has no FPU state to dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
{
	struct task_struct *tsk = current;
	int fpvalid = is_using_fpu(tsk);

	if (!fpvalid)
		return fpvalid;

	/* make sure the latest FPU register contents have been flushed
	 * into tsk->thread.fpu_state before copying them out */
	unlazy_fpu(tsk);
	memcpy(fpreg, &tsk->thread.fpu_state, sizeof(*fpreg));
	return fpvalid;
}
/*
 * Save the FPU state to a signal context.
 *
 * Flushes the task's live FPU register contents into its thread struct and
 * then copies that saved state out to the userspace @fpucontext block of a
 * signal frame.  The task loses its "using FPU" status, so the next FPU
 * instruction it executes will reinitialise the unit.
 *
 * Returns 1 if state was written to userspace, 0 if the task was not using
 * the FPU (nothing to save), or -1 if the copy to userspace faulted.
 */
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
	struct task_struct *tsk = current;

	if (!is_using_fpu(tsk))
		return 0;

	/* transfer the current FPU state to memory and cause fpu_init() to
	 * be triggered by the next attempted FPU operation by the current
	 * process.
	 */
	preempt_disable();

#ifndef CONFIG_LAZY_SAVE_FPU
	/* eager-save mode: the FPU belongs to this thread iff the flag says
	 * so; save the registers and drop the FPU-enable bit in its EPSW */
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
		fpu_save(&tsk->thread.fpu_state);
		tsk->thread.uregs->epsw &= ~EPSW_FE;
		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
	}
#else /* !CONFIG_LAZY_SAVE_FPU */
	/* lazy-save mode: the hardware FPU may still hold this task's
	 * registers; save them and release ownership */
	if (fpu_state_owner == tsk) {
		fpu_save(&tsk->thread.fpu_state);
		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
		fpu_state_owner = NULL;
	}
#endif /* !CONFIG_LAZY_SAVE_FPU */

	preempt_enable();

	/* we no longer have a valid current FPU state */
	clear_using_fpu(tsk);

	/* transfer the saved FPU state onto the userspace stack */
	if (copy_to_user(fpucontext,
			 &tsk->thread.fpu_state,
			 min(sizeof(struct fpu_state_struct),
			     sizeof(struct fpucontext))))
		return -1;

	return 1;
}
/* * handle an FPU operational exception * - there's a possibility that if the FPU is asynchronous, the signal might * be meant for a process other than the current one */ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code) { struct task_struct *tsk = current; siginfo_t info; u32 fpcr; if (!user_mode(regs)) die_if_no_fixup("An FPU Operation exception happened in" " kernel space\n", regs, code); if (!is_using_fpu(tsk)) die_if_no_fixup("An FPU Operation exception happened," " but the FPU is not in use", regs, code); info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void *) tsk->thread.uregs->pc; info.si_code = FPE_FLTINV; unlazy_fpu(tsk); fpcr = tsk->thread.fpu_state.fpcr; if (fpcr & FPCR_EC_Z) info.si_code = FPE_FLTDIV; else if (fpcr & FPCR_EC_O) info.si_code = FPE_FLTOVF; else if (fpcr & FPCR_EC_U) info.si_code = FPE_FLTUND; else if (fpcr & FPCR_EC_I) info.si_code = FPE_FLTRES; force_sig_info(SIGFPE, &info, tsk); }
/*
 * Determine if the FPU registers have actually been used.
 *
 * regset callback: report the full register count when @target has used
 * the FPU, and 0 (no active registers) otherwise.
 */
static int fpuregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (is_using_fpu(target))
		return regset->n;

	return 0;
}