/*
 * Put a vcpu to sleep until something makes it runnable again.
 *
 * If the guest decrementer has already expired (timebase now >= the
 * expiry point) and no DEC exception is queued yet, queue one.  If any
 * exception is pending at that point the vcpu must not block at all.
 * Otherwise, arm a host hrtimer to fire when the guest decrementer
 * would expire, so the block is bounded, then sleep via
 * kvm_vcpu_block().  On wakeup the timer is cancelled (it may already
 * have fired; hrtimer_try_to_cancel copes with both cases).
 *
 * dec_expires == ~(u64)0 is the "no expiry" sentinel: no timer is
 * armed or cancelled in that case.
 */
void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
{
	u64 now;
	unsigned long dec_nsec;

	now = get_tb();
	/* DEC already expired and not yet reported: queue the exception. */
	if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
		kvmppc_core_queue_dec(vcpu);
	/* Anything pending (including the DEC just queued) means don't sleep. */
	if (vcpu->arch.pending_exceptions)
		return;
	if (vcpu->arch.dec_expires != ~(u64)0) {
		/*
		 * Convert the remaining timebase ticks to nanoseconds and
		 * arm a one-shot relative timer to bound the sleep.
		 * (The early returns above guarantee dec_expires > now here.)
		 */
		dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
			tb_ticks_per_sec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
	}

	kvmppc_vcpu_blocked(vcpu);
	kvm_vcpu_block(vcpu);
	vcpu->stat.halt_wakeup++;

	if (vcpu->arch.dec_expires != ~(u64)0)
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	kvmppc_vcpu_unblocked(vcpu);
}
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) { u64 now; if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; return -EINTR; } flush_fp_to_thread(current); flush_altivec_to_thread(current); flush_vsx_to_thread(current); preempt_disable(); /* * Make sure we are running on thread 0, and that * secondary threads are offline. * XXX we should also block attempts to bring any * secondary threads online. */ if (threads_per_core > 1) { int cpu = smp_processor_id(); int thr = cpu_thread_in_core(cpu); if (thr) goto out; while (++thr < threads_per_core) if (cpu_online(cpu + thr)) goto out; } kvm_guest_enter(); __kvmppc_vcore_entry(NULL, vcpu); kvm_guest_exit(); preempt_enable(); kvm_resched(vcpu); now = get_tb(); /* cancel pending dec exception if dec is positive */ if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) kvmppc_core_dequeue_dec(vcpu); return kvmppc_handle_exit(run, vcpu, current); out: preempt_enable(); return -EBUSY; }
/*
 * Report whether this vcpu has a timer event waiting: the guest
 * decrementer is the only timer source tracked here.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int dec_pending = kvmppc_core_pending_dec(vcpu);

	return dec_pending;
}