/** * kvm_timer_sync_hwstate - sync timer state from cpu * @vcpu: The vcpu pointer * * Check if the virtual timer was armed and either schedule a corresponding * soft timer or inject directly if already expired. */ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; cycle_t cval, now; u64 ns; if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) || !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE)) return; cval = timer->cntv_cval; now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; BUG_ON(timer_is_armed(timer)); if (cval <= now) { /* * Timer has already expired while we were not * looking. Inject the interrupt and carry on. */ kvm_timer_inject_irq(vcpu); return; } ns = cyclecounter_cyc2ns(timecounter->cc, cval - now); timer_arm(timer, ns); }
/* * Schedule the background timer before calling kvm_vcpu_block, so that this * thread is removed from its waitqueue and made runnable when there's a timer * interrupt to handle. */ void kvm_timer_schedule(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; u64 ns; cycle_t cval, now; BUG_ON(timer_is_armed(timer)); /* * No need to schedule a background timer if the guest timer has * already expired, because kvm_vcpu_block will return before putting * the thread to sleep. */ if (kvm_timer_should_fire(vcpu)) return; /* * If the timer is not capable of raising interrupts (disabled or * masked), then there's no more work for us to do. */ if (!kvm_timer_irq_can_fire(vcpu)) return; /* The timer has not yet expired, schedule a background timer */ cval = timer->cntv_cval; now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask, &timecounter->frac); timer_arm(timer, ns); }
/*
 * kvm_timer_init - enable the per-VM virtual timer state
 * @kvm: the VM being initialized
 *
 * Records the current physical counter value as the VM's virtual timer
 * offset and marks the timer enabled — but only when both the timecounter
 * and the work queue were set up successfully earlier; otherwise the timer
 * is silently left disabled.
 *
 * Always returns 0.
 *
 * NOTE(review): this file also contains a second, conflicting definition
 * of kvm_timer_init() (void return, unconditional assignment). Both cannot
 * be linked into one build — confirm which version is current and remove
 * the other.
 */
int kvm_timer_init(struct kvm *kvm)
{
	if (timecounter && wqueue) {
		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
		kvm->arch.timer.enabled = 1;
	}

	return 0;
}
/*
 * kvm_timer_should_fire - has the guest's virtual timer expired?
 * @vcpu: the vcpu to check
 *
 * Returns true when the timer is capable of raising an interrupt and its
 * compare value has been reached by the offset-adjusted counter.
 */
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t now;

	/* Disabled or masked timers never fire. */
	if (!kvm_timer_irq_can_fire(vcpu))
		return false;

	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
	return timer->cntv_cval <= now;
}
/*
 * kvm_arm_timer_get_reg - read back a virtual timer register
 * @vcpu:  the vcpu whose timer state is queried
 * @regid: KVM_REG_ARM_TIMER_{CTL,CNT,CVAL} selector
 *
 * Returns the requested register value, or (u64)-1 for an unknown @regid.
 */
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return timer->cntv_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		/* Guest-visible counter: physical count minus CNTVOFF. */
		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return timer->cntv_cval;
	default:
		return (u64)-1;
	}
}
/*
 * kvm_arm_timer_set_reg - write a virtual timer register from userspace
 * @vcpu:  the vcpu whose timer state is updated
 * @regid: KVM_REG_ARM_TIMER_{CTL,CNT,CVAL} selector
 * @value: value to write
 *
 * Returns 0 on success, -1 for an unknown @regid. On success the timer's
 * pending state is re-evaluated against the new register contents.
 */
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	if (regid == KVM_REG_ARM_TIMER_CTL) {
		timer->cntv_ctl = value;
	} else if (regid == KVM_REG_ARM_TIMER_CNT) {
		/* Pick CNTVOFF so the guest observes @value right now. */
		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
	} else if (regid == KVM_REG_ARM_TIMER_CVAL) {
		timer->cntv_cval = value;
	} else {
		return -1;
	}

	kvm_timer_update_state(vcpu);
	return 0;
}
/*
 * kvm_timer_init - record the VM-wide virtual timer offset
 * @kvm: the VM being initialized
 *
 * Snapshots the physical counter into the VM's CNTVOFF so the guest's
 * virtual counter effectively starts at zero.
 *
 * NOTE(review): an earlier definition of kvm_timer_init() with a
 * different signature (int return, conditional on timecounter/wqueue)
 * also appears in this file; the two redefine the same symbol and cannot
 * coexist in one build. Confirm which version is current and drop the
 * other.
 */
void kvm_timer_init(struct kvm *kvm)
{
	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}