Example No. 1
0
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(header->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XFEATURE_MASK_FPSSE;
		else
			header->xfeatures &= (xfeatures_mask & xfeatures);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}
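
The key line above is header->xfeatures &= (xfeatures_mask & xfeatures): whatever feature bits the signal frame claims get intersected with the set the kernel actually enables, so a forged or stale frame cannot turn on state the OS does not manage. A minimal user-space sketch of that masking, with hypothetical bit values in place of the real xfeatures_mask (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: xfeatures_mask is normally derived from XCR0. */
	uint64_t xfeatures_mask = 0x7;		/* kernel-enabled: x87, SSE, AVX */
	uint64_t frame_xfeatures = 0xf;		/* bits claimed by the signal frame */
	uint64_t header_xfeatures = 0xf;	/* value to be written back to the header */

	/* Same operation as: header->xfeatures &= (xfeatures_mask & xfeatures); */
	header_xfeatures &= (xfeatures_mask & frame_xfeatures);

	printf("sanitized xfeatures: %#llx\n",
	       (unsigned long long)header_xfeatures);	/* prints 0x7 */
	return 0;
}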
Example No. 2
0
static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xstate_bv, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
	struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;

	if (use_xsave()) {
		/* These bits must be zero. */
		xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			xsave_hdr->xstate_bv = XSTATE_FPSSE;
		else
			xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}
Example No. 3
0
/*
 * This restores directly out of user space. Exceptions are handled.
 */
int restore_i387_xstate(void __user *buf)
{
	struct task_struct *tsk = current;
	int err = 0;

	if (!buf) {
		if (used_math())
			goto clear;
		return 0;
	} else if (!access_ok(VERIFY_READ, buf, sig_xstate_size))
		return -EACCES;

	if (!used_math()) {
		err = init_fpu(tsk);
		if (err)
			return err;
	}

	user_fpu_begin();
	if (use_xsave())
		err = restore_user_xstate(buf);
	else
		err = fxrstor_checking((__force struct i387_fxsave_struct *)
				       buf);
	if (unlikely(err)) {
		/*
		 * Encountered an error while restoring from the user
		 * buffer; clear the fpu state.
		 */
clear:
		clear_fpu(tsk);
		clear_used_math();
	}
	return err;
}
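
The notable part of this function is the error path: if the restore from user memory faults, it does not keep whatever was partially loaded but jumps to clear: and wipes the task's FPU state so the next FP use starts from a clean init state. A rough user-space sketch of that discipline, where demo_fpu_state, demo_restore and friends are hypothetical stand-ins for the kernel's task state and clear_fpu()/clear_used_math():

#include <string.h>

/* Hypothetical stand-ins for the task's FPU state and the clear helpers. */
struct demo_fpu_state {
	unsigned char regs[512];
	int state_valid;		/* plays the role of used_math() */
};

/* Pretend restore that faults, like fxrstor_checking() hitting a bad buffer. */
static int demo_restore(struct demo_fpu_state *st, const void *user_buf)
{
	(void)st;
	(void)user_buf;
	return -1;
}

static int demo_restore_or_reset(struct demo_fpu_state *st, const void *user_buf)
{
	int err = demo_restore(st, user_buf);

	if (err) {
		/* Mirrors clear_fpu()/clear_used_math(): keep no half-restored state. */
		memset(st->regs, 0, sizeof(st->regs));
		st->state_valid = 0;
	}
	return err;
}

int main(void)
{
	struct demo_fpu_state st = { .state_valid = 1 };
	unsigned char user_buf[64] = { 0 };

	demo_restore_or_reset(&st, user_buf);
	return st.state_valid;	/* 0: state was reset after the failed restore */
}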
Example No. 4
0
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
	int err;

	if (use_xsave())
		err = copy_xregs_to_user(buf);
	else if (use_fxsr())
		err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
	else
		err = copy_fregs_to_user((struct fregs_state __user *) buf);

	if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
		err = -EFAULT;
	return err;
}
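
The fallback deserves a note: if the save to the user buffer faults, the helper zeroes the destination with __clear_user() so the signal frame never exposes a half-written register image, and it only escalates to -EFAULT when that clearing itself also fails. A small user-space sketch of the same pattern, with hypothetical demo_* names in place of the copy_*regs_to_user() helpers:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct demo_state_buf {
	unsigned char bytes[512];	/* stand-in for the sigframe xstate area */
};

/* Hypothetical writer that fails partway, like copy_xregs_to_user(). */
static int demo_save(struct demo_state_buf *dst)
{
	memset(dst->bytes, 0xAA, 16);	/* pretend only part of the copy landed */
	return -1;			/* ...and then the access faulted */
}

/* Stand-in for __clear_user(); in the kernel this can itself fault. */
static int demo_clear(struct demo_state_buf *dst)
{
	memset(dst, 0, sizeof(*dst));
	return 0;
}

static int demo_save_or_clear(struct demo_state_buf *dst)
{
	int err = demo_save(dst);

	/* Same shape as the kernel check: escalate to -EFAULT only if the
	 * clearing also fails; otherwise report the original error. */
	if (err && demo_clear(dst))
		err = -EFAULT;
	return err;
}

int main(void)
{
	struct demo_state_buf buf;
	int err = demo_save_or_clear(&buf);

	printf("err=%d first byte=%#x\n", err, (unsigned)buf.bytes[0]);	/* err=-1 first byte=0 */
	return 0;
}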
Example No. 5
0
static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int err;

	if (use_xsave())
		err = xsave_user(buf);
	else if (use_fxsr())
		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		err = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	return err;
}
Example No. 6
0
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_fxregs(buf);
		} else {
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_xregs(buf, xbv);
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else
		return copy_user_to_fregs(buf);
}
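
Two conditions force the legacy path here: XRSTOR requires a 64-byte-aligned save area, so an unaligned user pointer (or an fx_only frame) falls back to FXRSTOR after initializing every other enabled feature from init_fpstate. A trivial sketch of just the path-selection test, using made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as "(unsigned long)buf % 64 || fx_only" in the code above. */
static bool needs_fx_fallback(const void *buf, bool fx_only)
{
	return ((uintptr_t)buf % 64) != 0 || fx_only;
}

int main(void)
{
	/* Made-up frame addresses; only their low bits matter here. */
	printf("%d\n", needs_fx_fallback((void *)0x1000, false));	/* 0: 64-byte aligned */
	printf("%d\n", needs_fx_fallback((void *)0x1008, false));	/* 1: unaligned */
	printf("%d\n", needs_fx_fallback((void *)0x1000, true));	/* 1: fx_only forces it */
	return 0;
}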
Example No. 7
0
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
			xrstor_state(init_xstate_buf, init_bv);
			return fxrstor_user(buf);
		} else {
			u64 init_bv = pcntxt_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(init_xstate_buf, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}
Example No. 8
0
static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 *)(buf + fpu_user_xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the
	 * bit vector while saving the state to the user context. This
	 * lets us capture (during sigreturn) any changes to the FP/SSE
	 * bits made by legacy applications, which don't touch xfeatures
	 * in the xsave header.
	 *
	 * xsave-aware apps can change the xfeatures in the xsave header
	 * as well as any contents in the memory layout. xrstor as part
	 * of sigreturn will capture all the changes.
	 */
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}
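
The read-modify-write of x->header.xfeatures exists for the reason the comment gives: legacy applications may edit the FP/SSE area of the frame without touching the xsave header, so the FP/SSE bits are forced on to ensure those edits are restored at sigreturn. The fixup itself is a single OR; here is a sketch with the mask spelled out (bit 0 = x87, bit 1 = SSE, so XFEATURE_MASK_FPSSE is 0x3):

#include <stdint.h>
#include <stdio.h>

/* Bit 0 = x87 FP, bit 1 = SSE in the XSAVE feature bitmap. */
#define SKETCH_XFEATURE_MASK_FPSSE 0x3u

/* The same fixup the __get_user()/__put_user() pair performs on the header. */
static uint32_t force_fpsse(uint32_t saved_xfeatures)
{
	return saved_xfeatures | SKETCH_XFEATURE_MASK_FPSSE;
}

int main(void)
{
	/* Hypothetical saved value with only bit 2 (AVX) set. */
	printf("%#x\n", (unsigned)force_fpsse(0x4));	/* prints 0x7 */
	return 0;
}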
Example No. 9
0
int save_i387_xstate(void __user *buf)
{
	struct task_struct *tsk = current;
	int err = 0;

	if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size))
		return -EACCES;

	BUG_ON(sig_xstate_size < xstate_size);

	if ((unsigned long)buf % 64)
		printk("save_i387_xstate: bad fpstate %p\n", buf);

	if (!used_math())
		return 0;

	if (user_has_fpu()) {
		if (use_xsave())
			err = xsave_user(buf);
		else
			err = fxsave_user(buf);

		if (err)
			return err;
		user_fpu_end();
	} else {
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
				   xstate_size))
			return -1;
	}

	clear_used_math(); /* trigger finit */

	if (use_xsave()) {
		struct _fpstate __user *fx = buf;
		struct _xstate __user *x = buf;
		u64 xstate_bv;

		err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved,
				     sizeof(struct _fpx_sw_bytes));

		err |= __put_user(FP_XSTATE_MAGIC2,
				  (__u32 __user *) (buf + sig_xstate_size
						    - FP_XSTATE_MAGIC2_SIZE));

		/*
		 * Read the xstate_bv which we copied (directly from the cpu or
		 * from the state in task struct) to the user buffers and
		 * set the FP/SSE bits.
		 */
		err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv);

		/*
		 * For legacy compatibility, we always set the FP/SSE bits in
		 * the bit vector while saving the state to the user context.
		 * This lets us capture (during sigreturn) any changes to the
		 * FP/SSE bits made by legacy applications, which don't touch
		 * xstate_bv in the xsave header.
		 *
		 * xsave-aware apps can change the xstate_bv in the xsave
		 * header as well as any contents in the memory layout.
		 * xrstor as part of sigreturn will capture all the changes.
		 */
		xstate_bv |= XSTATE_FPSSE;

		err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv);

		if (err)
			return err;
	}

	return 1;
}
Example No. 10
0
static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
			fpu_user_xstate_size;
}
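
This one-liner encodes the frame layout used throughout these examples: with XSAVE in use, the user-visible state is followed by the 32-bit FP_XSTATE_MAGIC2 marker, so the signal frame must be sized for both. A sketch with the constants spelled out, assuming the magic occupies one 32-bit word (FP_XSTATE_MAGIC2_SIZE):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of xstate_sigframe_size() with the magic size written out. */
static size_t sketch_sigframe_size(size_t user_xstate_size, bool have_xsave)
{
	/* FP_XSTATE_MAGIC2 is a 32-bit marker placed right after the xsave
	 * area (see save_xstate_epilog() above); assumed to be 4 bytes. */
	const size_t magic2_size = sizeof(uint32_t);

	return have_xsave ? user_xstate_size + magic2_size : user_xstate_size;
}

int main(void)
{
	/* Hypothetical user xstate size of 832 bytes. */
	printf("%zu %zu\n", sketch_sigframe_size(832, true),
	       sketch_sigframe_size(832, false));	/* 836 832 */
	return 0;
}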
Example No. 11
0
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int state_size = fpu_kernel_xstate_size;
	u64 xfeatures = 0;
	int fx_only = 0;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	fpu__activate_curr(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct fxregs_state);
			fx_only = 1;
			trace_x86_fpu_xstate_check_failed(fpu);
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears fpu->fpstate_active. This
		 * ensures that a context switch during the copy of the new
		 * state cannot save or restore the intermediate state and
		 * thereby corrupt the newly restored state. We will be ready
		 * to save/restore state only after fpu->fpstate_active is set
		 * again.
		 */
		fpu__drop(fpu);

		if (using_compacted_format()) {
			err = copyin_to_xsaves(NULL, buf_fx,
					       &fpu->state.xsave);
		} else {
			err = __copy_from_user(&fpu->state.xsave,
					       buf_fx, state_size);
		}

		if (err || __copy_from_user(&env, buf, sizeof(env))) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
		}

		fpu->fpstate_active = 1;
		if (use_eager_fpu()) {
			preempt_disable();
			fpu__restore(fpu);
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
			fpu__clear(fpu);
			return -1;
		}
	}

	return 0;
}
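
When check_for_xstate() fails, the function deliberately degrades to an FP/SSE-only restore instead of trusting the frame; when it succeeds, both the state size and the feature bits are taken from the software-reserved area of the frame. As a rough idea of what such validation needs to establish, here is a hedged sketch; the struct and both checks are illustrative assumptions, not the kernel's check_for_xstate():

#include <stdbool.h>
#include <stdint.h>

/*
 * Only the two fields the caller above consumes are mirrored here
 * (xstate_size, xfeatures); names, the size bound and the feature test
 * are assumptions, not the kernel's layout.
 */
struct sw_bytes_sketch {
	uint32_t xstate_size;	/* size of the extended area saved in the frame */
	uint64_t xfeatures;	/* feature bits the frame claims to contain */
};

static bool xstate_info_usable(const struct sw_bytes_sketch *sw,
			       uint32_t kernel_xstate_size,
			       uint64_t enabled_features)
{
	/* A frame cannot contain more state than the kernel ever writes. */
	if (sw->xstate_size > kernel_xstate_size)
		return false;
	/* Nor claim feature bits the kernel does not manage. */
	if (sw->xfeatures & ~enabled_features)
		return false;
	return true;
}

int main(void)
{
	struct sw_bytes_sketch sw = { .xstate_size = 832, .xfeatures = 0x7 };

	return xstate_info_usable(&sw, 832, 0x7) ? 0 : 1;	/* usable: exit 0 */
}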
Example No. 12
0
int save_i387_xstate(void __user *buf)
{
	struct task_struct *tsk = current;
	int err = 0;

	if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size))
		return -EACCES;

	BUG_ON(sig_xstate_size < xstate_size);

	if ((unsigned long)buf % 64)
		printk("save_i387_xstate: bad fpstate %p\n", buf);

	if (!used_math())
		return 0;

	if (user_has_fpu()) {
		if (use_xsave())
			err = xsave_user(buf);
		else
			err = fxsave_user(buf);

		if (err)
			return err;
		user_fpu_end();
	} else {
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
				   xstate_size))
			return -1;
	}

	clear_used_math(); /* trigger finit */

	if (use_xsave()) {
		struct _fpstate __user *fx = buf;
		struct _xstate __user *x = buf;
		u64 xstate_bv;

		err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved,
				     sizeof(struct _fpx_sw_bytes));

		err |= __put_user(FP_XSTATE_MAGIC2,
				  (__u32 __user *) (buf + sig_xstate_size
						    - FP_XSTATE_MAGIC2_SIZE));

		/*
		 * Read the xstate_bv which we copied (directly from the cpu or
		 * from the state in task struct) to the user buffers and
		 * set the FP/SSE bits.
		 */
		err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv);

		/*
		 * For legacy compatibility, we always set the FP/SSE bits in
		 * the bit vector while saving the state to the user context.
		 * This lets us capture (during sigreturn) any changes to the
		 * FP/SSE bits made by legacy applications, which don't touch
		 * xstate_bv in the xsave header.
		 *
		 * xsave-aware apps can change the xstate_bv in the xsave
		 * header as well as any contents in the memory layout.
		 * xrstor as part of sigreturn will capture all the changes.
		 */
		xstate_bv |= XSTATE_FPSSE;

		err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv);

		if (err)
			return err;
	}

	return 1;
}
Example No. 13
0
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		drop_init_fpu(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	if (!used_math() && init_fpu(tsk))
		return -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears used_math(). This ensures
		 * that a context switch during the copy of the new state
		 * cannot save or restore the intermediate state and thereby
		 * corrupt the newly restored state. We will be ready to
		 * save/restore state only after set_used_math() has been
		 * called again.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
			set_used_math();
		}

		if (use_eager_fpu())
			math_state_restore();

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			drop_init_fpu(tsk);
			return -1;
		}
	}

	return 0;
}