/*
 * __vcpu_run - outer guest-entry loop for a vcpu.
 *
 * Repeatedly enters the guest (via vti_vcpu_run()) until an exit
 * condition requires returning to userspace:
 *   - a pending signal  -> -EINTR,  kvm_run->exit_reason = KVM_EXIT_INTR
 *   - a failed entry    -> r < 0,   kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY
 *   - kvm_handle_exit() returning <= 0 (its value is passed through)
 * A positive return from kvm_handle_exit() means "re-enter the guest";
 * if a reschedule is needed first, we fall through to kvm_resched()
 * before looping.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	/*
	 * Guest entry must be atomic with respect to preemption and
	 * interrupts: disable both before the signal check so a signal
	 * arriving after the check cannot sneak in before entry.
	 */
	preempt_disable();
	kvm_prepare_guest_switch(vcpu);
	local_irq_disable();

	/* Bail out to userspace rather than entering with a signal pending. */
	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		/*
		 * Hardware entry failed.
		 * NOTE(review): this path leaves vcpu->guest_mode set and
		 * skips kvm_guest_exit() — looks asymmetric with the
		 * success path; confirm whether that is intentional.
		 */
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 *
	 * NOTE(review): no stat.exits increment is visible in this
	 * version of the function; verify the comment still matches
	 * the code after whatever refactor removed it.
	 */
	barrier();
	kvm_guest_exit();
	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	/* Positive r: re-enter the guest, unless we should reschedule first. */
	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		/* Yield the CPU if needed, then resume the guest. */
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}
/*
 * kvmppc_vcpu_run - run a guest vcpu until it exits to the host.
 *
 * Returns the disposition from kvmppc_handle_exit(), or:
 *   -EINTR  if a signal was already pending before entry
 *   -EBUSY  if we are not on thread 0 of the core, or any secondary
 *           thread of this core is still online (the core must be
 *           exclusively ours before entering the guest)
 */
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u64 now;

	/* Don't enter the guest with a signal pending; report it instead. */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/*
	 * Flush the task's FP/Altivec/VSX register state out to the
	 * thread_struct so guest entry can't clobber live user state.
	 */
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);

	preempt_disable();

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1) {
		int cpu = smp_processor_id();
		int thr = cpu_thread_in_core(cpu);

		/* Not thread 0 of the core: refuse entry. */
		if (thr)
			goto out;
		/* Any sibling thread still online also blocks entry. */
		while (++thr < threads_per_core)
			if (cpu_online(cpu + thr))
				goto out;
	}

	kvm_guest_enter();

	__kvmppc_vcore_entry(NULL, vcpu);

	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	/*
	 * cancel pending dec exception if dec is positive
	 * (i.e. the timebase has not yet reached the decrementer expiry
	 * recorded in arch.dec_expires — presumably the decrementer
	 * interrupt; confirm against kvmppc_core_pending_dec()).
	 */
	if (now < vcpu->arch.dec_expires &&
	    kvmppc_core_pending_dec(vcpu))
		kvmppc_core_dequeue_dec(vcpu);

	return kvmppc_handle_exit(run, vcpu, current);

out:
	/* Core not exclusively ours; undo preempt_disable() and bail. */
	preempt_enable();
	return -EBUSY;
}