/* Save the Generic Timer state of a VCPU being descheduled.
 *
 * Snapshots the physical/virtual timer control and compare registers
 * into @context, masks the hardware timers so they cannot fire for the
 * wrong VCPU, and arms a software timer event for each guest timer that
 * was enabled and unmasked, so the guest still gets its interrupt on
 * time while it is not running.
 *
 * @vcpu_ptr: opaque VCPU pointer (unused here)
 * @context:  struct generic_timer_context for this VCPU; ignored if NULL
 */
void generic_timer_vcpu_context_save(void *vcpu_ptr, void *context)
{
	u64 ev_nsecs;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

#ifdef HAVE_GENERIC_TIMER_REGS_SAVE
	/* Platform provides a combined save routine (presumably it also
	 * masks the timers like the fallback below — confirm). */
	generic_timer_regs_save(cntx);
#else
	cntx->cntpctl = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL);
	cntx->cntvctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	cntx->cntpcval = generic_timer_reg_read64(GENERIC_TIMER_REG_PHYS_CVAL);
	cntx->cntvcval = generic_timer_reg_read64(GENERIC_TIMER_REG_VIRT_CVAL);
	cntx->cntkctl = generic_timer_reg_read(GENERIC_TIMER_REG_KCTL);
	/* Mask both hardware timers now that their state is captured, so
	 * they do not interrupt on behalf of a descheduled VCPU. */
	generic_timer_reg_write(GENERIC_TIMER_REG_PHYS_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
#endif

	/* Physical timer was enabled and unmasked: emulate it in software. */
	if ((cntx->cntpctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntpctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		/* Counter ticks until the compare value is reached. */
		ev_nsecs = cntx->cntpcval - generic_timer_pcounter_read();
		/* check if timer is expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			/* Convert counter ticks to nanoseconds. */
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->phys_ev, ev_nsecs);
	}

	/* Virtual timer was enabled and unmasked: emulate it in software. */
	if ((cntx->cntvctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntvctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		/* Virtual counter = physical counter - cntvoff, so ticks
		 * remaining = cntvcval + cntvoff - physical counter. */
		ev_nsecs = cntx->cntvcval + cntx->cntvoff -
			   generic_timer_pcounter_read();
		/* check if timer is expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			/* Convert counter ticks to nanoseconds. */
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->virt_ev, ev_nsecs);
	}
}
/* Initialize a Generic Timer context to its reset state.
 *
 * Both physical and virtual timers start disabled with their interrupts
 * masked, compare values and kernel-control register cleared. The virtual
 * offset is anchored at the current physical counter (so the guest's
 * virtual counter presumably reads near zero at start — standard
 * CNTVOFF semantics, confirm against the emulation code).
 *
 * @cntx: context to initialize; ignored if NULL
 */
void generic_timer_vcpu_context_init(struct generic_timer_context *cntx)
{
	/* NULL guard for consistency with the save/restore callbacks. */
	if (!cntx) {
		return;
	}

	cntx->cntpctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntvctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntpcval = 0;
	cntx->cntvcval = 0;
	cntx->cntkctl = 0;
	cntx->cntvoff = generic_timer_pcounter_read();
}
/* Restore the Generic Timer state of a VCPU being scheduled back in.
 *
 * Before touching the hardware, any timer that expired while the VCPU
 * was descheduled (enabled, unmasked, and compare value already passed)
 * has its interrupt injected directly and its saved control masked, so
 * the hardware is not programmed with an already-elapsed deadline.
 *
 * @vcpu_ptr: opaque pointer to the struct vmm_vcpu being restored
 * @context:  struct generic_timer_context for this VCPU; ignored if NULL
 */
void generic_timer_vcpu_context_post_restore(void *vcpu_ptr, void *context)
{
	u64 pcnt;
	struct vmm_vcpu *vcpu = vcpu_ptr;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

	/* Single counter snapshot used for both expiry checks. */
	pcnt = generic_timer_pcounter_read();

	/* Physical timer expired while the VCPU was away: inject now. */
	if ((cntx->cntpctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntpctl & GENERIC_TIMER_CTRL_IT_MASK) &&
	    (cntx->cntpcval <= pcnt)) {
		/* Mask in the saved copy so the restored hardware timer
		 * does not fire again for the same deadline. */
		cntx->cntpctl |= GENERIC_TIMER_CTRL_IT_MASK;
		generic_phys_irq_inject(vcpu, cntx);
	}

	/* Virtual timer: compare in physical-counter terms, i.e. the
	 * deadline is cntvoff + cntvcval. */
	if ((cntx->cntvctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntvctl & GENERIC_TIMER_CTRL_IT_MASK) &&
	    ((cntx->cntvoff + cntx->cntvcval) <= pcnt)) {
		cntx->cntvctl |= GENERIC_TIMER_CTRL_IT_MASK;
		generic_virt_irq_inject(vcpu, cntx);
	}

#ifdef HAVE_GENERIC_TIMER_REGS_RESTORE
	/* Platform provides a combined restore routine. */
	generic_timer_regs_restore(cntx);
#else
	/* Offset and compare values are written before the control
	 * registers, so the timers are re-enabled only once their
	 * deadlines are fully programmed. */
	generic_timer_reg_write64(GENERIC_TIMER_REG_VIRT_OFF, cntx->cntvoff);
	generic_timer_reg_write(GENERIC_TIMER_REG_KCTL, cntx->cntkctl);
	generic_timer_reg_write64(GENERIC_TIMER_REG_PHYS_CVAL, cntx->cntpcval);
	generic_timer_reg_write64(GENERIC_TIMER_REG_VIRT_CVAL, cntx->cntvcval);
	generic_timer_reg_write(GENERIC_TIMER_REG_PHYS_CTRL, cntx->cntpctl);
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL, cntx->cntvctl);
#endif
}
/* Clocksource read callback: report the current physical counter value.
 *
 * @cs: registered clocksource (carries no per-instance state here)
 *
 * Returns the raw physical counter reading.
 */
static u64 generic_counter_read(struct vmm_clocksource *cs)
{
	u64 cycles;

	cycles = generic_timer_pcounter_read();

	return cycles;
}