Example #1
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();

	kvm_prepare_guest_switch(vcpu);
	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}
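
The barrier() above is the kernel's compiler barrier: it emits no machine instruction and is not a CPU fence, but it forbids the compiler from moving statements across it. Below is a minimal standalone sketch of the same idiom, assuming GCC/Clang; the barrier() definition matches the kernel's, while the stub names and the exits counter are illustrative stand-ins rather than the real KVM code:

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned long exits;              /* stand-in for vcpu->stat.exits */

static void irq_enable_stub(void) { }    /* stand-in for local_irq_enable() */
static void guest_exit_stub(void) { }    /* stand-in for kvm_guest_exit() */

static void exit_path(void)
{
	irq_enable_stub();

	/*
	 * One instruction runs with interrupts enabled before guest time
	 * accounting stops; barrier() keeps the compiler from sinking the
	 * increment below guest_exit_stub() or hoisting the call above it.
	 */
	exits++;
	barrier();

	guest_exit_stub();
}

int main(void)
{
	exit_path();
	printf("exits = %lu\n", exits);
	return 0;
}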
Example #2
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}
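
The mmio_needed block at the top completes a load that a previous KVM_RUN left pending: the kernel exited to userspace with KVM_EXIT_MMIO, userspace emulated the device access, and on re-entry the fetched bytes are copied into the guest register before the vcpu resumes. A hedged sketch of the userspace half of that round trip follows; it assumes vcpu_fd and the mmap'ed struct kvm_run were set up elsewhere, and the zero-fill stands in for a real device model:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;                /* e.g. interrupted by a signal */

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (!run->mmio.is_write)
				/*
				 * Guest load: supply the data here; the next
				 * KVM_RUN completes it in-kernel via the
				 * vcpu->mmio_needed path above.
				 */
				memset(run->mmio.data, 0, run->mmio.len);
			break;                    /* re-enter the guest */
		default:
			return 0;                 /* let the caller decide */
		}
	}
}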
Example #3
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();

	kvmppc_core_prepare_to_enter(vcpu);

	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();
	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}
Example #4
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u64 now;

	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	preempt_disable();

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1) {
		int cpu = smp_processor_id();
		int thr = cpu_thread_in_core(cpu);

		if (thr)
			goto out;
		while (++thr < threads_per_core)
			if (cpu_online(cpu + thr))
				goto out;
	}

	kvm_guest_enter();

	__kvmppc_vcore_entry(NULL, vcpu);

	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	/* cancel pending dec exception if dec is positive */
	if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
		kvmppc_core_dequeue_dec(vcpu);

	return kvmppc_handle_exit(run, vcpu, current);

 out:
	preempt_enable();
	return -EBUSY;
}
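
The threads_per_core block admits only thread 0 of a core whose sibling threads are all offline, because this early book3s_hv code needs the whole core for the guest. A hedged standalone sketch of that gate: the modulo stands in for cpu_thread_in_core() and the bitmask for cpu_online(); names and values are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define THREADS_PER_CORE 4

static unsigned long online_mask = 0x1;          /* bit n set => cpu n online */

static bool cpu_online_stub(int cpu)
{
	return online_mask & (1UL << cpu);
}

static bool can_enter(int cpu)
{
	int thr = cpu % THREADS_PER_CORE;        /* stand-in for cpu_thread_in_core() */

	if (thr)                                 /* not thread 0 of this core */
		return false;
	while (++thr < THREADS_PER_CORE)         /* any sibling still online? */
		if (cpu_online_stub(cpu + thr))
			return false;
	return true;
}

int main(void)
{
	printf("cpu 0, siblings offline: %d\n", can_enter(0));  /* 1 */
	online_mask |= 1UL << 1;                                /* bring cpu 1 up */
	printf("cpu 0, sibling 1 online: %d\n", can_enter(0));  /* 0 */
	return 0;
}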
Example #5
File: powerpc.c  Project: 03199618/linux
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with that return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
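
The smp_mb() between the vcpu->mode store and the vcpu->requests load pairs with a matching barrier on the requester's side, which publishes a request bit and then reads vcpu->mode to decide whether to IPI the vcpu. A hedged userspace analog of that handshake using C11 atomics; all names here are illustrative, where the kernel uses vcpu->mode, vcpu->requests, smp_mb(), and a real IPI:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mode;        /* 0 = OUTSIDE_GUEST_MODE, 1 = IN_GUEST_MODE */
static atomic_int requests;    /* stand-in for the vcpu->requests bitmap */

/* vcpu side: mirrors "vcpu->mode = IN_GUEST_MODE; smp_mb(); if (vcpu->requests)" */
static bool vcpu_try_enter(void)
{
	atomic_store_explicit(&mode, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);     /* the smp_mb() */
	if (atomic_load_explicit(&requests, memory_order_relaxed)) {
		atomic_store_explicit(&mode, 0, memory_order_relaxed);
		return false;    /* go service the request, then retry */
	}
	return true;             /* safe to enter the guest */
}

/* requester side: publish the request first, then look at the mode */
static bool make_request_needs_kick(void)
{
	atomic_store_explicit(&requests, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	/* true => the vcpu may already be in the guest and must be IPI'd */
	return atomic_load_explicit(&mode, memory_order_relaxed) == 1;
}

int main(void)
{
	/* Request made before entry: no kick is needed, and the vcpu sees it. */
	bool kick = make_request_needs_kick();
	printf("kick needed: %d, enter allowed: %d\n", kick, vcpu_try_enter());
	return 0;
}

With a full fence on both sides this is the classic Dekker-style store/load handshake: at least one party is guaranteed to observe the other's store, so a request can never be simultaneously missed by the vcpu and left without a kick.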