Example No. 1
int init_fpu(struct task_struct *tsk)
{
    if (tsk_used_math(tsk)) {
        if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
            unlazy_fpu(tsk, task_pt_regs(tsk));
        return 0;
    }

    /*
     * Memory allocation at the first usage of the FPU and other state.
     */
    if (!tsk->thread.xstate) {
        tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                              GFP_KERNEL);
        if (!tsk->thread.xstate)
            return -ENOMEM;
    }

    if (boot_cpu_data.flags & CPU_HAS_FPU) {
        struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
        memset(fp, 0, xstate_size);
        fp->fpscr = FPSCR_INIT;
    } else {
        struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
        memset(fp, 0, xstate_size);
        fp->fpscr = FPSCR_INIT;
    }

    set_stopped_child_used_math(tsk);
    return 0;
}
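For context, init_fpu() is normally reached from the FPU-disabled trap the first time a task executes an FPU instruction. A minimal caller sketch, assuming SH-style helpers; the handler name and the exact recovery on allocation failure are illustrative, not taken from this source:

/* Hypothetical trap-path caller of init_fpu(). */
asmlinkage void do_fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	/* First use: allocate and zero the xstate area. */
	if (init_fpu(tsk)) {
		force_sig(SIGKILL, tsk);	/* -ENOMEM: nothing to run with */
		return;
	}

	grab_fpu(regs);		/* hand the FPU back to this task */
	restore_fpu(tsk);	/* load the freshly initialized state */
}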
Example No. 2
static inline int save_sigcontext_fpu(struct sigcontext *sc)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int val;

	if (!tsk->used_math) {
		val = 0;
		__copy_to_user(&sc->sc_ownedfp, &val, sizeof(int));
		return 0;
	}

	val = 1;
	__copy_to_user(&sc->sc_ownedfp, &val, sizeof(int));

	/* This will cause a "finit" to be triggered by the next
	   attempted FPU operation by the 'current' process.
	   */
	tsk->used_math = 0;

	save_and_cli(flags);
	unlazy_fpu(tsk);
	restore_flags(flags);

	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
			      sizeof(long)*(16*2+2));
}
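The matching restore path is not among these examples; a hedged sketch of its usual shape, with the field names taken from the save side above:

/* Sketch of the inverse operation: copy the register image back out of
 * the signal frame and mark the task's FPU state valid again. */
static inline int restore_sigcontext_fpu(struct sigcontext *sc)
{
	struct task_struct *tsk = current;
	int owned = 0;

	__copy_from_user(&owned, &sc->sc_ownedfp, sizeof(int));
	if (!owned)
		return 0;

	tsk->used_math = 1;	/* the copied image is live state again */
	return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
				sizeof(long)*(16*2+2));
}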
Example No. 3
/*H:040
 * This is the i386-specific code to setup and run the Guest.  Interrupts
 * are disabled: we own the CPU.
 */
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
	/*
	 * Remember the awfully-named TS bit?  If the Guest has asked to set it
	 * we set it now, so we can trap and pass that trap to the Guest if it
	 * uses the FPU.
	 */
	if (cpu->ts)
		unlazy_fpu(current);

	/*
	 * SYSENTER is an optimized way of doing system calls.  We can't allow
	 * it because it always jumps to privilege level 0.  A normal Guest
	 * won't try it because we don't advertise it in CPUID, but a malicious
	 * Guest (or malicious Guest userspace program) could, so we tell the
	 * CPU to disable it before running the Guest.
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

	/*
	 * Now we actually run the Guest.  It will return when something
	 * interesting happens, and we can examine its registers to see what it
	 * was doing.
	 */
	run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));

	/*
	 * Note that the "regs" structure contains two extra entries which are
	 * not really registers: a trap number which says what interrupt or
	 * trap made the switcher code come back, and an error code which some
	 * traps set.
	 */

	/* Restore SYSENTER if it's supposed to be on. */
	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);

	/*
	 * If the Guest page faulted, then the cr2 register will tell us the
	 * bad virtual address.  We have to grab this now, because once we
	 * re-enable interrupts an interrupt could fault and thus overwrite
	 * cr2, or we could even move off to a different CPU.
	 */
	if (cpu->regs->trapnum == 14)
		cpu->arch.last_pagefault = read_cr2();
	/*
	 * Similarly, if we took a trap because the Guest used the FPU,
	 * we have to restore the FPU it expects to see.
	 * math_state_restore() may sleep and we may even move off to
	 * a different CPU. So all the critical stuff should be done
	 * before this.
	 */
	else if (cpu->regs->trapnum == 7)
		math_state_restore();
}
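The two trap numbers tested at the bottom are ordinary x86 exception vectors, not lguest-specific values:

/* Standard x86 vectors (cf. the kernel's X86_TRAP_* constants). */
#define X86_TRAP_NM	 7	/* Device Not Available: FPU used while TS is set */
#define X86_TRAP_PF	14	/* Page Fault: faulting address is latched in CR2 */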
Example No. 4
/*
 * fill in the FPU structure for a core dump
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = is_using_fpu(tsk);
	if (fpvalid) {
		unlazy_fpu(tsk);
		memcpy(fpreg, &tsk->thread.fpu_state, sizeof(*fpreg));
	}

	return fpvalid;
}
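The boolean return doubles as the "is there an FPU note to emit" flag for the core dump. A condensed consumer sketch modeled on fs/binfmt_elf.c; fill_note() and struct memelfnote are that file's helpers, simplified here:

/* Illustrative consumer: dump_fpu() gates the NT_PRFPREG note. */
elf_fpregset_t fpu;
struct memelfnote note;

if (dump_fpu(regs, &fpu))
	fill_note(&note, "CORE", NT_PRFPREG, sizeof(fpu), &fpu);
/* A zero return means no FPU note is written at all. */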
Example No. 5
static inline int 
elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
{
	struct pt_regs *regs = task_pt_regs(t);
	if (!tsk_used_math(t))
		return 0;
	if (t == current)
		unlazy_fpu(t); 
	memcpy(xfpu, &t->thread.i387.fxsave, sizeof(elf_fpxregset_t));
	xfpu->fcs = regs->cs; 
	xfpu->fos = t->thread.ds; /* right? */ 
	return 1;
}
Example No. 6
void save_rest_processor_state(void)
{
    if ( !is_idle_vcpu(current) )
        unlazy_fpu(current);

#if defined(CONFIG_X86_64)
    rdmsrl(MSR_CSTAR, saved_cstar);
    rdmsrl(MSR_LSTAR, saved_lstar);
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    {
        rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
        rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
    }
#endif
}
Example No. 7
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	unlazy_fpu(src);

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}
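fpu_allocated(), fpu_alloc() and fpu_copy() are small helpers; a hedged reconstruction modeled on the arch/x86 code of the same era (details assumed):

static inline int fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	return 0;
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}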
Example No. 8
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
Example No. 9
/*
 * retrieve the contents of MN10300 userspace FPU registers
 */
static int fpuregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct fpu_state_struct *fpregs = &target->thread.fpu_state;
	int ret;

	unlazy_fpu(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs, 0, sizeof(*fpregs));
	if (ret < 0)
		return ret;

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					sizeof(*fpregs), -1);
}
Example No. 10
/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put a fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (!config_enabled(CONFIG_L4) && user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		if (config_enabled(CONFIG_L4) && user_has_fpu())
			unlazy_fpu(tsk);

		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	drop_init_fpu(tsk);	/* trigger finit */

	return 0;
}
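The buf/buf_fx split in the header comment is easier to see laid out; an illustration derived from that comment, not code from the source:

/*
 * 64-bit frame, or 32-bit frame with plain fsave state:
 *
 *     buf == buf_fx --> [ 64-byte aligned f|fx|xsave image ]
 *
 * 32-bit frame carrying fxstate:
 *
 *     buf    --> [ legacy fsave header (struct _fpstate_ia32) ]
 *     buf_fx --> [ 64-byte aligned fxsave/xsave image ]
 *
 * save_fsave_header() fills in the legacy header, and
 * save_xstate_epilog() patches the SW-reserved bytes of the image.
 */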
Example No. 11
static inline int save_sigcontext_fpu(struct sigcontext *sc)
{
	struct task_struct *tsk = current;

	if (!tsk->used_math) {
		__put_user(0, &sc->sc_ownedfp);
		return 0;
	}

	__put_user(1, &sc->sc_ownedfp);

	/* This will cause a "finit" to be triggered by the next
	   attempted FPU operation by the 'current' process.
	   */
	tsk->used_math = 0;

	unlazy_fpu(tsk);
	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
			      sizeof(long)*(16*2+2));
}
Example No. 12
int dump_fpu_ia32(struct pt_regs *regs, elf_fpregset_t *fp)
{
	struct _fpstate_ia32 *fpu = (void*)fp; 
	struct task_struct *tsk = current;
	mm_segment_t oldfs = get_fs();
	int ret;

	if (!tsk->used_math) 
		return 0;
	if (!(tsk->thread.flags & THREAD_IA32))
		BUG(); 
	unlazy_fpu(tsk);
	set_fs(KERNEL_DS); 
	ret = save_i387_ia32(current, fpu, regs, 1);
	/* Correct for i386 bug. It puts the fop into the upper 16bits of 
	   the tag word (like FXSAVE), not into the fcs*/ 
	fpu->cssel |= fpu->tag & 0xffff0000; 
	set_fs(oldfs); 
	return ret; 
}
Example No. 13
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
	if (cpu->ts)
		unlazy_fpu(current);

	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

	run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));

	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);

	if (cpu->regs->trapnum == 14)
		cpu->arch.last_pagefault = read_cr2();
	else if (cpu->regs->trapnum == 7)
		math_state_restore();
}
Example No. 14
static inline int 
elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
			    elf_fpregset_t *fpu)
{
	struct _fpstate_ia32 *fpstate = (void*)fpu; 
	mm_segment_t oldfs = get_fs();

	if (!tsk_used_math(tsk))
		return 0;
	if (!regs)
		regs = task_pt_regs(tsk);
	if (tsk == current)
		unlazy_fpu(tsk);
	set_fs(KERNEL_DS); 
	save_i387_ia32(tsk, fpstate, regs, 1);
	/* Correct for i386 bug. It puts the fop into the upper 16bits of 
	   the tag word (like FXSAVE), not into the fcs*/ 
	fpstate->cssel |= fpstate->tag & 0xffff0000; 
	set_fs(oldfs); 
	return 1; 
}
Example No. 15
static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
				      struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (!(boot_cpu_data.flags & CPU_HAS_FPU))
		return 0;

	if (!used_math())
		return __put_user(0, &sc->sc_ownedfp);

	if (__put_user(1, &sc->sc_ownedfp))
		return -EFAULT;

	/* This will cause a "finit" to be triggered by the next
	   attempted FPU operation by the 'current' process.
	   */
	clear_used_math();

	unlazy_fpu(tsk, regs);
	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
			      sizeof(long)*(16*2+2));
}
Example No. 16
static inline int setup_sigcontext_fpu(struct pt_regs *regs,
				       struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	int ret = 0;

	__put_user_error(used_math(), &sc->used_math_flag, ret);

	if (!used_math())
		return ret;

	preempt_disable();
#if IS_ENABLED(CONFIG_LAZY_FPU)
	if (last_task_used_math == tsk)
		save_fpu(last_task_used_math);
#else
	unlazy_fpu(tsk);
#endif
	ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu,
			     sizeof(struct fpu_struct));
	preempt_enable();
	return ret;
}
Example No. 17
/*
 * handle an FPU operational exception
 * - there's a possibility that if the FPU is asynchronous, the signal might
 *   be meant for a process other than the current one
 */
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
	struct task_struct *tsk = current;
	siginfo_t info;
	u32 fpcr;

	if (!user_mode(regs))
		die_if_no_fixup("An FPU Operation exception happened in"
				" kernel space\n",
				regs, code);

	if (!is_using_fpu(tsk))
		die_if_no_fixup("An FPU Operation exception happened,"
				" but the FPU is not in use",
				regs, code);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void *) tsk->thread.uregs->pc;
	info.si_code = FPE_FLTINV;

	unlazy_fpu(tsk);

	fpcr = tsk->thread.fpu_state.fpcr;

	if (fpcr & FPCR_EC_Z)
		info.si_code = FPE_FLTDIV;
	else if	(fpcr & FPCR_EC_O)
		info.si_code = FPE_FLTOVF;
	else if	(fpcr & FPCR_EC_U)
		info.si_code = FPE_FLTUND;
	else if	(fpcr & FPCR_EC_I)
		info.si_code = FPE_FLTRES;

	force_sig_info(SIGFPE, &info, tsk);
}
Example No. 18
void prepare_to_export(struct task_struct *task)
{
	if (!task->exit_state)
		unlazy_fpu(task);
}
Example No. 19
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
Example No. 20
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	unlazy_fpu(src);
	*dst = *src;
	return 0;
}
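All of the arch_dup_task_struct() variants above are invoked from the same point in the generic fork path. A heavily condensed, hedged sketch of that call site (allocator names follow older kernels; most of the real function is elided):

/* Illustrative call-site sketch, loosely after kernel/fork.c. */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk = alloc_task_struct();

	if (!tsk)
		return NULL;

	/* Flush orig's lazy FPU state, then copy the whole task. */
	if (arch_dup_task_struct(tsk, orig)) {
		free_task_struct(tsk);
		return NULL;
	}

	return tsk;	/* stack setup etc. omitted */
}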