/*
 * restore_fpu_state - reload sparc32 FPU state from a user signal frame.
 * @regs: trapped register state of the task returning from the signal.
 * @fpu:  user-space pointer to the saved FPU area in the signal frame.
 *
 * Marks any live lazy-FPU ownership stale so the freshly copied state is
 * loaded on next FPU use, then copies registers, %fsr and the FP queue in
 * from user space.
 *
 * Returns 0 on success, -EFAULT if the user buffer is not readable, or a
 * non-zero value if any of the copies faulted.
 *
 * FIX: the original text contained the mojibake token "¤t" (a mangled
 * "&current", via the HTML entity "&curren;") in both __copy_from_user
 * destination arguments; restored to &current->... so the code compiles
 * and copies into the task's thread state as intended.
 */
static inline int restore_fpu_state(struct pt_regs *regs,
				    __siginfo_fpu_t __user *fpu)
{
	int err;
#ifdef CONFIG_SMP
	/* On SMP the FPU-in-use flag tracks ownership; force a reload. */
	if (test_tsk_thread_flag(current, TIF_USEDFPU))
		regs->psr &= ~PSR_EF;
#else
	/* UP lazy switching: drop ownership if we are the last FPU user. */
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		regs->psr &= ~PSR_EF;
	}
#endif
	set_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);

	if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
		return -EFAULT;

	err = __copy_from_user(&current->thread.float_regs[0],
			       &fpu->si_float_regs[0],
			       (sizeof(unsigned long) * 32));
	err |= __get_user(current->thread.fsr, &fpu->si_fsr);
	err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	/* Only pull in the FP queue if the saved frame says it is non-empty. */
	if (current->thread.fpqdepth != 0)
		err |= __copy_from_user(&current->thread.fpqueue[0],
					&fpu->si_fpqueue[0],
					((sizeof(unsigned long) +
					  (sizeof(unsigned long *))) * 16));
	return err;
}
/*
 * restore_sigcontext_fpu - reload the task's FPU state from a sigcontext.
 * @regs: trapped register state of the task returning from the signal.
 * @sc:   user-space pointer to the saved sigcontext.
 *
 * Reads the frame's used_math_flag; if the frame carries no FPU state the
 * function leaves used_math cleared and returns 0. Otherwise it invalidates
 * any lazily-owned live FPU context and copies the saved fpu_struct in
 * from user space.
 *
 * Returns 0 on success or the (non-zero) uncopied byte count from
 * __copy_from_user on fault.
 *
 * NOTE(review): 'ret' receives the __get_user_error() fault status but is
 * never examined afterwards — if the read faults, used_math_flag may be
 * uninitialized. Looks like a latent bug; confirm against the arch's other
 * sigframe readers.
 */
static inline int restore_sigcontext_fpu(struct pt_regs *regs,
					 struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	unsigned long used_math_flag;
	int ret = 0;

	/* Assume no FPU state until the frame proves otherwise. */
	clear_used_math();
	__get_user_error(used_math_flag, &sc->used_math_flag, ret);
	if (!used_math_flag)
		return 0;
	set_used_math();

#if IS_ENABLED(CONFIG_LAZY_FPU)
	/*
	 * Lazy FPU switching: if this task still owns the live FPU
	 * context, drop ownership and disable FPU access in @regs so the
	 * restored memory image is loaded on next use. preempt_disable()
	 * keeps last_task_used_math stable across the check-and-clear.
	 */
	preempt_disable();
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		disable_ptreg_fpu(regs);
	}
	preempt_enable();
#else
	/* Eager switching: just invalidate the live FPU context. */
	clear_fpu(regs);
#endif

	return __copy_from_user(&tsk->thread.fpu, &sc->fpu,
				sizeof(struct fpu_struct));
}
/*
 * restore_sigcontext_fpu - copy saved hard-FPU registers back from a
 * user-space sigcontext into the current task's extended state area.
 *
 * On CPUs without an FPU there is nothing to restore and 0 is returned
 * immediately. Otherwise used_math is set and the register image
 * (16 double-width register pairs plus two control words, as longs) is
 * copied in from the signal frame.
 *
 * Returns 0 on success or the non-zero uncopied byte count on fault.
 */
static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	/* 16 register pairs + 2 status/control words, stored as longs. */
	const size_t fpu_bytes = sizeof(long) * (16 * 2 + 2);

	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
		return 0;

	set_used_math();
	return __copy_from_user(&tsk->thread.xstate->hardfpu,
				&sc->sc_fpregs[0], fpu_bytes);
}
/*
 * ckpt_restore_fpu - restore a task's FPU image from a checkpoint stream.
 * @desc: checkpoint descriptor to read from.
 *
 * Reads a flag indicating whether the checkpoint carries FPU state; when it
 * does, ensures the task has an i387/xstate buffer (allocating via
 * init_fpu() if needed), reads the image into it, validates it, and marks
 * the task as using math.
 *
 * Returns 0 on success, -EIO on a short read of the flag, -EFAULT if the
 * state buffer cannot be obtained or the image read falls short, or the
 * error from ckpt_check_fpu_state().
 *
 * FIX: the original returned directly from the three failure paths between
 * kernel_fpu_begin() and kernel_fpu_end(), leaking the begin (preemption
 * left disabled, FPU context unbalanced). All exits now funnel through a
 * single cleanup label so kernel_fpu_end() always runs.
 */
int ckpt_restore_fpu(ckpt_desc_t desc)
{
	int ret = 0;
	int flag;

	log_restore_fpu("restoring fpu ...");
	if (ckpt_read(desc, &flag, sizeof(int)) != sizeof(int)) {
		log_err("failed to get file");
		return -EIO;
	}

	kernel_fpu_begin();
	clear_used_math();
	if (flag) {
		/* Make sure the task owns an i387 state buffer. */
		if (!ckpt_get_i387(current)) {
			init_fpu(current);
			if (!ckpt_get_i387(current)) {
				log_err("failed to get i387");
				ret = -EFAULT;
				goto out;
			}
		}
		if (ckpt_read(desc, ckpt_get_i387(current), xstate_size)
				!= xstate_size) {
			log_err("failed to get i387");
			ret = -EFAULT;
			goto out;
		}
		ret = ckpt_check_fpu_state();
		if (ret) {
			log_err("failed to restore i387");
			goto out;
		}
		set_used_math();
	}
out:
	kernel_fpu_end();
	if (ret)
		return ret;
	log_restore_pos(desc);
	return 0;
}
/*
 * __restore_xstate_sig - restore x86 FPU/extended state from a signal frame.
 * @buf:    user pointer to the (possibly legacy fsave-format) frame start.
 * @buf_fx: user pointer to the fxsave/xsave area; equals @buf for native
 *          64-bit and 32-bit-fsave frames.
 * @size:   total size of the user buffer.
 *
 * A NULL @buf means "no FPU state in the frame": drop to init state and
 * succeed. Otherwise validates the buffer, handles FPU-less CPUs via the
 * soft-FPU emulation regset, probes the frame's software bytes for the
 * xstate layout (falling back to FP/SSE-only on mismatch), and restores
 * either by copying into the thread's state (32-bit fxstate frames) or
 * directly into the registers (64-bit / fsave frames).
 *
 * Returns 0 on success, -EACCES if the buffer is not readable, or -1 on
 * allocation/copy/restore failure.
 */
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	/* Distinct pointers => 32-bit frame with a separate fxstate area. */
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	/* The ia32 path only exists on 32-bit or IA32-emulation kernels. */
	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		/* No saved state in the frame: reset to init FPU state. */
		drop_init_fpu(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	/* Lazily allocate the thread's FPU state buffer if never used. */
	if (!used_math() && init_fpu(tsk))
		return -1;

	/* No hardware FPU: hand the frame to the soft-FPU emulator. */
	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu which clears used_math(). This ensures
		 * that any context-switch during the copy of the new state,
		 * avoids the intermediate state from getting restored/saved.
		 * Thus avoiding the new restored state from getting corrupted.
		 * We will be ready to restore/save the state only after
		 * set_used_math() is again set.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
			set_used_math();
		}

		/* Eager FPU mode keeps registers live: reload them now. */
		if (use_eager_fpu())
			math_state_restore();

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			/* Partial restore: fall back to a clean init state. */
			drop_init_fpu(tsk);
			return -1;
		}
	}
	return 0;
}