Example #1
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u64 now;

	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

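	/*
	 * Save the current task's FP, Altivec and VSX register state
	 * before entering the guest.
	 */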
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	preempt_disable();

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1) {
		int cpu = smp_processor_id();
		int thr = cpu_thread_in_core(cpu);

		if (thr)
			goto out;
		while (++thr < threads_per_core)
			if (cpu_online(cpu + thr))
				goto out;
	}

	kvm_guest_enter();

	__kvmppc_vcore_entry(NULL, vcpu);

	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	/* cancel pending dec exception if dec is positive */
	if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
		kvmppc_core_dequeue_dec(vcpu);

	return kvmppc_handle_exit(run, vcpu, current);

 out:
	preempt_enable();
	return -EBUSY;
}
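The guard above only enters the guest when the vcpu is running on thread 0 of its core and every sibling hardware thread is offline; otherwise it bails out with -EBUSY. Below is a minimal user-space sketch of that check, assuming a fixed THREADS_PER_CORE and with cpu_thread_in_core() and cpu_online() replaced by hypothetical stand-ins rather than the kernel implementations.

#include <stdio.h>
#include <stdbool.h>

#define THREADS_PER_CORE 4

/* Hypothetical stand-in: thread index of a CPU within its core. */
static int cpu_thread_in_core(int cpu)
{
	return cpu % THREADS_PER_CORE;
}

/* Hypothetical stand-in: pretend only primary threads are online. */
static bool cpu_online(int cpu)
{
	return cpu_thread_in_core(cpu) == 0;
}

/* Mirrors the guard before __kvmppc_vcore_entry(): true only when 'cpu'
 * is thread 0 of its core and all sibling threads are offline.
 */
static bool core_ready_for_guest(int cpu)
{
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return false;
	while (++thr < THREADS_PER_CORE)
		if (cpu_online(cpu + thr))
			return false;
	return true;
}

int main(void)
{
	printf("cpu 0: %d\n", core_ready_for_guest(0)); /* 1: primary thread, siblings offline */
	printf("cpu 1: %d\n", core_ready_for_guest(1)); /* 0: secondary thread */
	return 0;
}
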
Example #2
static long setup_sigcontext(struct sigcontext __user *sc,
		struct task_struct *tsk, int signr, sigset_t *set,
		unsigned long handler, int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ set up v_regs even if the
	 * process has never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	unsigned long vrsave;
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		flush_altivec_to_thread(tsk);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
				      33 * sizeof(vector128));
		/* Set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave; it's 0 if we don't have or don't
	 * use altivec.
	 */
	vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		vrsave = mfspr(SPRN_VRSAVE);
		tsk->thread.vrsave = vrsave;
	}

	err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(tsk);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, tsk);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(tsk);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, tsk);
		/* Set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |=  __put_user(set->sig[0], &sc->oldmask);

	return err;
}
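setup_sigcontext() reports success or failure as a single long: every __put_user()/__copy_to_user() result is OR-ed into err, so any faulting store leaves the final return value nonzero without tracking which individual field failed. Below is a minimal sketch of that accumulation pattern, with a hypothetical put_field() standing in for the uaccess helpers.

#include <stdio.h>

/* Hypothetical stand-in for __put_user()/__copy_to_user():
 * returns 0 on success, a negative value on a simulated fault.
 */
static long put_field(int simulate_fault)
{
	return simulate_fault ? -1 : 0;
}

static long setup_frame_sketch(int fault_on_second)
{
	long err = 0;

	err |= put_field(0);               /* e.g. the v_regs pointer */
	err |= put_field(fault_on_second); /* e.g. the vector registers */
	err |= put_field(0);               /* e.g. the saved MSR */

	/* A single fault leaves err nonzero; the caller only needs a
	 * pass/fail answer, not which store failed.
	 */
	return err;
}

int main(void)
{
	printf("all ok: %ld\n", setup_frame_sketch(0));    /* 0 */
	printf("one fault: %ld\n", setup_frame_sketch(1)); /* nonzero */
	return 0;
}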