/*
 * Save generic timer hardware state into the given VCPU context and
 * mask both timer interrupts so they cannot fire while the VCPU is
 * scheduled out.
 *
 * NOTE(review): unlike the two-argument variant, this version does not
 * NULL-check @cntx — callers are presumably trusted; confirm.
 */
void generic_timer_vcpu_context_save(struct generic_timer_context *cntx)
{
	/* Snapshot control, compare-value and kernel-control registers */
	cntx->cntpctl = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL);
	cntx->cntvctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	cntx->cntpcval = generic_timer_reg_read64(GENERIC_TIMER_REG_PHYS_CVAL);
	cntx->cntvcval = generic_timer_reg_read64(GENERIC_TIMER_REG_VIRT_CVAL);
	cntx->cntkctl = generic_timer_reg_read(GENERIC_TIMER_REG_KCTL);
	/* Mask physical and virtual timer interrupts (IMASK set, rest clear) */
	generic_timer_reg_write(GENERIC_TIMER_REG_PHYS_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
}
/*
 * Save generic timer context when a VCPU is scheduled out.
 *
 * Saves physical/virtual timer control and compare registers plus the
 * kernel control register, masks both timer interrupts, and then — for
 * each guest timer that was enabled and unmasked — starts a host timer
 * event that fires when the guest timer would have expired.
 *
 * @vcpu_ptr: opaque VCPU pointer (unused in this function)
 * @context: pointer to struct generic_timer_context; ignored if NULL
 */
void generic_timer_vcpu_context_save(void *vcpu_ptr, void *context)
{
	u64 ev_nsecs;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

#ifdef HAVE_GENERIC_TIMER_REGS_SAVE
	/* Arch provides an optimized save-and-mask routine */
	generic_timer_regs_save(cntx);
#else
	cntx->cntpctl = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL);
	cntx->cntvctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	cntx->cntpcval = generic_timer_reg_read64(GENERIC_TIMER_REG_PHYS_CVAL);
	cntx->cntvcval = generic_timer_reg_read64(GENERIC_TIMER_REG_VIRT_CVAL);
	cntx->cntkctl = generic_timer_reg_read(GENERIC_TIMER_REG_KCTL);
	/* Mask both timers so they cannot fire while the VCPU is out */
	generic_timer_reg_write(GENERIC_TIMER_REG_PHYS_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
#endif

	/* Physical timer: enabled and not masked => arm a host event */
	if ((cntx->cntpctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntpctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		/* Counter ticks until the physical compare value */
		ev_nsecs = cntx->cntpcval - generic_timer_pcounter_read();
		/* check if timer is expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->phys_ev, ev_nsecs);
	}

	/* Virtual timer: same, but compare value is offset by cntvoff */
	if ((cntx->cntvctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntvctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		ev_nsecs = cntx->cntvcval + cntx->cntvoff -
			   generic_timer_pcounter_read();
		/* check if timer is expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->virt_ev, ev_nsecs);
	}
}
/* Disable the hypervisor timer by clearing its enable bit. */
static void generic_timer_stop(void)
{
	unsigned long hctrl;

	hctrl = generic_timer_reg_read(GENERIC_TIMER_REG_HYP_CTRL);
	generic_timer_reg_write(GENERIC_TIMER_REG_HYP_CTRL,
				hctrl & ~GENERIC_TIMER_CTRL_ENABLE);
}
/*
 * Program the hypervisor timer to expire after @evt counter ticks.
 * Enables the timer and unmasks its interrupt. Always returns 0.
 */
static int generic_timer_set_next_event(unsigned long evt,
					struct vmm_clockchip *unused)
{
	unsigned long val;

	val = generic_timer_reg_read(GENERIC_TIMER_REG_HYP_CTRL);
	val = (val | GENERIC_TIMER_CTRL_ENABLE) & ~GENERIC_TIMER_CTRL_IT_MASK;

	generic_timer_reg_write(GENERIC_TIMER_REG_HYP_TVAL, evt);
	generic_timer_reg_write(GENERIC_TIMER_REG_HYP_CTRL, val);

	return 0;
}
u64 generic_timer_wakeup_timeout(void) { u32 vtval = 0, ptval = 0; u64 nsecs = 0; if (generic_timer_hz == 0) { return 0; } if (generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL) & GENERIC_TIMER_CTRL_ENABLE) { ptval = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_TVAL); } if (generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL) & GENERIC_TIMER_CTRL_ENABLE) { vtval = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_TVAL); } if ((ptval > 0) && (vtval > 0)) { nsecs = (ptval > vtval) ? vtval : ptval; } else { nsecs = (ptval > vtval) ? ptval : vtval; } if (nsecs) { if (generic_timer_hz == 100000000) { nsecs = nsecs * 10; } else { nsecs = udiv64((nsecs * 1000000000), (u64)generic_timer_hz); } } return nsecs; }
/*
 * Register the ARM generic counter as a clocksource.
 *
 * Determines the counter frequency from the device tree (falling back
 * to the preconfigured CNTFRQ value), then allocates, configures and
 * registers a vmm_clocksource for it.
 *
 * Returns VMM_OK on success, VMM_ENODEV if no matching DT node exists,
 * VMM_EFAIL on unknown frequency or allocation failure, or the error
 * from vmm_clocksource_register().
 */
int __init generic_timer_clocksource_init(void)
{
	int rc;
	struct vmm_clocksource *cs;
	struct vmm_devtree_node *node;

	node = vmm_devtree_find_matching(NULL, generic_timer_match);
	if (!node) {
		return VMM_ENODEV;
	}

	if (generic_timer_hz == 0) {
		rc = vmm_devtree_clock_frequency(node, &generic_timer_hz);
		if (rc) {
			/* Use preconfigured counter frequency
			 * in absence of dts node
			 */
			generic_timer_hz =
				generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		} else if (generic_timer_freq_writeable()) {
			/* Program the counter frequency
			 * as per the dts node
			 */
			generic_timer_reg_write(GENERIC_TIMER_REG_FREQ,
						generic_timer_hz);
		}
	}
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	cs = vmm_zalloc(sizeof(struct vmm_clocksource));
	if (!cs) {
		return VMM_EFAIL;
	}

	cs->name = "gen-timer";
	cs->rating = 400;
	cs->read = &generic_counter_read;
	cs->mask = VMM_CLOCKSOURCE_MASK(56);
	vmm_clocks_calc_mult_shift(&cs->mult, &cs->shift,
				   generic_timer_hz, VMM_NSEC_PER_SEC, 10);
	cs->priv = NULL;

	rc = vmm_clocksource_register(cs);
	if (rc) {
		/* BUG FIX: do not leak cs when registration fails */
		vmm_free(cs);
	}
	return rc;
}
/*
 * Hypervisor timer interrupt handler.
 *
 * If the interrupt status bit is set, acknowledge by masking and
 * disabling the timer, then invoke the clockchip event handler.
 * Returns VMM_IRQ_NONE for interrupts not raised by this timer.
 */
static vmm_irq_return_t generic_hyp_timer_handler(int irq, void *dev)
{
	unsigned long hctl;
	struct vmm_clockchip *chip = dev;

	hctl = generic_timer_reg_read(GENERIC_TIMER_REG_HYP_CTRL);
	if (!(hctl & GENERIC_TIMER_CTRL_IT_STAT)) {
		return VMM_IRQ_NONE;
	}

	/* Mask and stop the timer until the next event is programmed */
	hctl |= GENERIC_TIMER_CTRL_IT_MASK;
	hctl &= ~GENERIC_TIMER_CTRL_ENABLE;
	generic_timer_reg_write(GENERIC_TIMER_REG_HYP_CTRL, hctl);

	chip->event_handler(chip);

	return VMM_IRQ_HANDLED;
}
static vmm_irq_return_t generic_virt_timer_handler(int irq, void *dev) { int rc; u32 ctl, virq; struct vmm_vcpu *vcpu; ctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL); if (!(ctl & GENERIC_TIMER_CTRL_IT_STAT)) { /* We got interrupt without status bit set. * Looks like we are running on buggy hardware. */ vmm_printf("%s: suprious interrupt\n", __func__); return VMM_IRQ_NONE; } ctl |= GENERIC_TIMER_CTRL_IT_MASK; generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL, ctl); vcpu = vmm_scheduler_current_vcpu(); if (!vcpu->is_normal) { /* We accidently got an interrupt meant for normal VCPU * that was previously running on this host CPU. */ vmm_printf("%s: In orphan context (current VCPU=%s)\n", __func__, vcpu->name); return VMM_IRQ_NONE; } virq = arm_gentimer_context(vcpu)->virt_timer_irq; if (virq == 0) { return VMM_IRQ_NONE; } rc = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 0); if (rc) { vmm_printf("%s: Emulate VCPU=%s irq=%d level=0 failed\n", __func__, vcpu->name, virq); } rc = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 1); if (rc) { vmm_printf("%s: Emulate VCPU=%s irq=%d level=1 failed\n", __func__, vcpu->name, virq); } return VMM_IRQ_HANDLED; }
/*
 * Program the BCM2836 local timer prescaler so the local timer runs at
 * the rate implied by the generic timer counter frequency (CNTFRQ).
 *
 * Maps the "brcm,bcm2836-l1-intc" node's registers, computes the
 * prescaler as 2^31 * cntfreq / 19.2 MHz (with exact constants for the
 * common 19.2 MHz and 1 MHz cases), and writes it.
 *
 * Returns VMM_OK on success, VMM_ENODEV if the intc node is missing,
 * VMM_EINVALID for a zero prescaler, or a regmap error code.
 */
static int __init bcm2836_early_init(struct vmm_devtree_node *node)
{
	int rc = VMM_OK;
	void *base;
	u32 prescaler, cntfreq;
	virtual_addr_t base_va;
	struct vmm_devtree_node *np;

	np = vmm_devtree_find_compatible(NULL, NULL, "brcm,bcm2836-l1-intc");
	if (!np) {
		return VMM_ENODEV;
	}

	rc = vmm_devtree_regmap(np, &base_va, 0);
	if (rc) {
		goto done;
	}
	base = (void *)base_va;

	cntfreq = generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
	switch (cntfreq) {
	case 19200000:
		/* 19.2 MHz: prescaler of exactly 1.0 (2^31) */
		prescaler = 0x80000000;
		break;	/* BUG FIX: missing break caused fallthrough */
	case 1000000:
		/* 1 MHz: exact precomputed ratio */
		prescaler = 0x06AAAAAB;
		break;	/* BUG FIX: missing break caused fallthrough */
	default:
		prescaler = (u32)udiv64((u64)0x80000000 * (u64)cntfreq,
					(u64)19200000);
		break;
	}
	if (!prescaler) {
		rc = VMM_EINVALID;
		goto done_unmap;
	}

	vmm_writel(prescaler, base + LOCAL_TIMER_PRESCALER);

done_unmap:
	/* BUG FIX: unmap the node we mapped (np), not the caller's node */
	vmm_devtree_regunmap(np, base_va, 0);
done:
	vmm_devtree_dref_node(np);
	return rc;
}
static vmm_irq_return_t generic_virt_timer_handler(int irq, void *dev) { u32 ctl; struct vmm_vcpu *vcpu; struct generic_timer_context *cntx; ctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL); if (!(ctl & GENERIC_TIMER_CTRL_IT_STAT)) { /* We got interrupt without status bit set. * Looks like we are running on buggy hardware. */ DPRINTF("%s: suprious interrupt\n", __func__); return VMM_IRQ_NONE; } ctl |= GENERIC_TIMER_CTRL_IT_MASK; generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL, ctl); vcpu = vmm_scheduler_current_vcpu(); if (!vcpu->is_normal) { /* We accidently got an interrupt meant for normal VCPU * that was previously running on this host CPU. */ DPRINTF("%s: In orphan context (current VCPU=%s)\n", __func__, vcpu->name); return VMM_IRQ_NONE; } cntx = arm_gentimer_context(vcpu); if (!cntx) { /* We accidently got an interrupt meant another normal VCPU */ DPRINTF("%s: Invalid normal context (current VCPU=%s)\n", __func__, vcpu->name); return VMM_IRQ_NONE; } generic_virt_irq_inject(vcpu, cntx); return VMM_IRQ_HANDLED; }
/*
 * Determine the generic timer frequency once, preferring the device
 * tree "clock-frequency" value and falling back to the preconfigured
 * CNTFRQ register. No-op if the frequency is already known.
 */
static void generic_timer_get_freq(struct vmm_devtree_node *node)
{
	int err;

	if (generic_timer_hz != 0) {
		return;
	}

	err = vmm_devtree_clock_frequency(node, &generic_timer_hz);
	if (err) {
		/* No usable dts value: use the preconfigured
		 * counter frequency instead. */
		generic_timer_hz =
			generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		return;
	}

	if (generic_timer_freq_writeable()) {
		/* Program the counter frequency given by the dts node */
		generic_timer_reg_write(GENERIC_TIMER_REG_FREQ,
					generic_timer_hz);
	}
}
/*
 * Per-CPU initialization of the hypervisor timer clockchip.
 *
 * Determines the timer frequency, maps the hypervisor/physical/virtual
 * timer irqs from the device tree, registers a one-shot clockchip for
 * the hypervisor timer plus irq handlers for whichever timers the DT
 * declares, and finally grants guest kernel access to the physical
 * counter/timer when more than one irq is present.
 *
 * On failure, previously acquired resources are released in reverse
 * order via the goto-cleanup labels. Returns VMM_OK or an error code.
 */
static int __cpuinit generic_timer_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 irq[4], num_irqs, val;
	struct vmm_clockchip *cc;

	/* Get and Check generic timer frequency */
	generic_timer_get_freq(node);
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	/* Get hypervisor timer irq number */
	irq[GENERIC_HYPERVISOR_TIMER] =
		vmm_devtree_irq_parse_map(node, GENERIC_HYPERVISOR_TIMER);
	if (!irq[GENERIC_HYPERVISOR_TIMER]) {
		return VMM_ENODEV;
	}

	/* Get physical timer irq number */
	irq[GENERIC_PHYSICAL_TIMER] =
		vmm_devtree_irq_parse_map(node, GENERIC_PHYSICAL_TIMER);
	if (!irq[GENERIC_PHYSICAL_TIMER]) {
		return VMM_ENODEV;
	}

	/* Get virtual timer irq number */
	irq[GENERIC_VIRTUAL_TIMER] =
		vmm_devtree_irq_parse_map(node, GENERIC_VIRTUAL_TIMER);
	if (!irq[GENERIC_VIRTUAL_TIMER]) {
		return VMM_ENODEV;
	}

	/* Number of generic timer irqs */
	num_irqs = vmm_devtree_irq_count(node);
	if (!num_irqs) {
		return VMM_EFAIL;
	}

	/* Ensure hypervisor timer is stopped */
	generic_timer_stop();

	/* Create generic hypervisor timer clockchip */
	cc = vmm_zalloc(sizeof(struct vmm_clockchip));
	if (!cc) {
		return VMM_EFAIL;
	}
	cc->name = "gen-hyp-timer";
	cc->hirq = irq[GENERIC_HYPERVISOR_TIMER];
	cc->rating = 400;
	cc->cpumask = vmm_cpumask_of(vmm_smp_processor_id());
	cc->features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->mult, &cc->shift,
				   VMM_NSEC_PER_SEC, generic_timer_hz, 10);
	cc->min_delta_ns = vmm_clockchip_delta2ns(0xF, cc);
	cc->max_delta_ns = vmm_clockchip_delta2ns(0x7FFFFFFF, cc);
	cc->set_mode = &generic_timer_set_mode;
	cc->set_next_event = &generic_timer_set_next_event;
	cc->priv = NULL;

	/* Register hypervisor timer clockchip */
	rc = vmm_clockchip_register(cc);
	if (rc) {
		goto fail_free_cc;
	}

	/* Register irq handler for hypervisor timer */
	rc = vmm_host_irq_register(irq[GENERIC_HYPERVISOR_TIMER],
				   "gen-hyp-timer",
				   &generic_hyp_timer_handler, cc);
	if (rc) {
		goto fail_unreg_cc;
	}

	if (num_irqs > 1) {
		/* Register irq handler for physical timer */
		rc = vmm_host_irq_register(irq[GENERIC_PHYSICAL_TIMER],
					   "gen-phys-timer",
					   &generic_phys_timer_handler,
					   NULL);
		if (rc) {
			goto fail_unreg_htimer;
		}
	}

	if (num_irqs > 2) {
		/* Register irq handler for virtual timer */
		rc = vmm_host_irq_register(irq[GENERIC_VIRTUAL_TIMER],
					   "gen-virt-timer",
					   &generic_virt_timer_handler,
					   NULL);
		if (rc) {
			goto fail_unreg_ptimer;
		}
	}

	if (num_irqs > 1) {
		/* Allow guest kernel access to physical counter & timer */
		val = generic_timer_reg_read(GENERIC_TIMER_REG_HCTL);
		val |= GENERIC_TIMER_HCTL_KERN_PCNT_EN;
		val |= GENERIC_TIMER_HCTL_KERN_PTMR_EN;
		generic_timer_reg_write(GENERIC_TIMER_REG_HCTL, val);
	}

	return VMM_OK;

fail_unreg_ptimer:
	if (num_irqs > 1) {
		vmm_host_irq_unregister(irq[GENERIC_PHYSICAL_TIMER],
					&generic_phys_timer_handler);
	}
fail_unreg_htimer:
	vmm_host_irq_unregister(irq[GENERIC_HYPERVISOR_TIMER],
				&generic_hyp_timer_handler);
fail_unreg_cc:
	vmm_clockchip_unregister(cc);
fail_free_cc:
	vmm_free(cc);
	return rc;
}
/*
 * Per-CPU initialization of the hypervisor timer clockchip (legacy
 * vmm_devtree_irq_get variant).
 *
 * Determines the timer frequency, reads the three timer irqs from the
 * device tree, registers a one-shot clockchip for the hypervisor timer
 * and (on the boot CPU only) the irq handlers marked per-CPU, grants
 * guest kernel access to the physical counter/timer when more than one
 * irq exists, and enables the PPIs.
 *
 * Returns VMM_OK or an error code; on failure all previously acquired
 * resources are released via the goto-cleanup labels.
 */
int __cpuinit generic_timer_clockchip_init(void)
{
	int rc;
	u32 irq[3], num_irqs, val;
	struct vmm_clockchip *cc;
	struct vmm_devtree_node *node;

	/* Find generic timer device tree node */
	node = vmm_devtree_find_matching(NULL, generic_timer_match);
	if (!node) {
		return VMM_ENODEV;
	}

	/* Determine generic timer frequency */
	if (generic_timer_hz == 0) {
		rc = vmm_devtree_clock_frequency(node, &generic_timer_hz);
		if (rc) {
			/* Use preconfigured counter frequency
			 * in absence of dts node
			 */
			generic_timer_hz =
				generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		} else if (generic_timer_freq_writeable()) {
			/* Program the counter frequency as per the dts node */
			generic_timer_reg_write(GENERIC_TIMER_REG_FREQ,
						generic_timer_hz);
		}
	}
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	/* Get hypervisor timer irq number */
	rc = vmm_devtree_irq_get(node, &irq[GENERIC_HYPERVISOR_TIMER],
				 GENERIC_HYPERVISOR_TIMER);
	if (rc) {
		return rc;
	}

	/* Get physical timer irq number */
	rc = vmm_devtree_irq_get(node, &irq[GENERIC_PHYSICAL_TIMER],
				 GENERIC_PHYSICAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Get virtual timer irq number */
	rc = vmm_devtree_irq_get(node, &irq[GENERIC_VIRTUAL_TIMER],
				 GENERIC_VIRTUAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Number of generic timer irqs */
	num_irqs = vmm_devtree_irq_count(node);
	if (!num_irqs) {
		return VMM_EFAIL;
	}

	/* Ensure hypervisor timer is stopped */
	generic_timer_stop();

	/* Create generic hypervisor timer clockchip */
	cc = vmm_zalloc(sizeof(struct vmm_clockchip));
	if (!cc) {
		return VMM_EFAIL;
	}
	cc->name = "gen-hyp-timer";
	cc->hirq = irq[GENERIC_HYPERVISOR_TIMER];
	cc->rating = 400;
	cc->cpumask = vmm_cpumask_of(vmm_smp_processor_id());
	cc->features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->mult, &cc->shift,
				   VMM_NSEC_PER_SEC, generic_timer_hz, 10);
	cc->min_delta_ns = vmm_clockchip_delta2ns(0xF, cc);
	cc->max_delta_ns = vmm_clockchip_delta2ns(0x7FFFFFFF, cc);
	cc->set_mode = &generic_timer_set_mode;
	cc->set_next_event = &generic_timer_set_next_event;
	cc->priv = NULL;

	/* Register hypervisor timer clockchip */
	rc = vmm_clockchip_register(cc);
	if (rc) {
		goto fail_free_cc;
	}

	/* Host irq handlers are registered only on the boot CPU */
	if (!vmm_smp_processor_id()) {
		/* Register irq handler for hypervisor timer */
		rc = vmm_host_irq_register(irq[GENERIC_HYPERVISOR_TIMER],
					   "gen-hyp-timer",
					   &generic_hyp_timer_handler, cc);
		if (rc) {
			goto fail_unreg_cc;
		}

		/* Mark hypervisor timer irq as per-CPU */
		if ((rc = vmm_host_irq_mark_per_cpu(cc->hirq))) {
			goto fail_unreg_htimer;
		}

		if (num_irqs > 1) {
			/* Register irq handler for physical timer */
			rc = vmm_host_irq_register(irq[GENERIC_PHYSICAL_TIMER],
						   "gen-phys-timer",
						   &generic_phys_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_htimer;
			}

			/* Mark physical timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_PHYSICAL_TIMER]);
			if (rc) {
				goto fail_unreg_ptimer;
			}
		}

		if (num_irqs > 2) {
			/* Register irq handler for virtual timer */
			rc = vmm_host_irq_register(irq[GENERIC_VIRTUAL_TIMER],
						   "gen-virt-timer",
						   &generic_virt_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_ptimer;
			}

			/* Mark virtual timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_VIRTUAL_TIMER]);
			if (rc) {
				goto fail_unreg_vtimer;
			}
		}
	}

	if (num_irqs > 1) {
		/* Allow guest kernel access to physical counter & timer */
		val = generic_timer_reg_read(GENERIC_TIMER_REG_HCTL);
		val |= GENERIC_TIMER_HCTL_KERN_PCNT_EN;
		val |= GENERIC_TIMER_HCTL_KERN_PTMR_EN;
		generic_timer_reg_write(GENERIC_TIMER_REG_HCTL, val);
	}

	/* Enable the per-CPU timer PPIs */
	for (val = 0; val < num_irqs; val++) {
		gic_enable_ppi(irq[val]);
	}

	return VMM_OK;

fail_unreg_vtimer:
	if (!vmm_smp_processor_id() && num_irqs > 2) {
		/* BUG FIX: unregister the virtual timer irq, not the
		 * hypervisor timer irq */
		vmm_host_irq_unregister(irq[GENERIC_VIRTUAL_TIMER],
					&generic_virt_timer_handler);
	}
fail_unreg_ptimer:
	if (!vmm_smp_processor_id() && num_irqs > 1) {
		vmm_host_irq_unregister(irq[GENERIC_PHYSICAL_TIMER],
					&generic_phys_timer_handler);
	}
fail_unreg_htimer:
	if (!vmm_smp_processor_id()) {
		vmm_host_irq_unregister(irq[GENERIC_HYPERVISOR_TIMER],
					&generic_hyp_timer_handler);
	}
fail_unreg_cc:
	/* BUG FIX: was vmm_clockchip_register(cc) — must unregister here */
	vmm_clockchip_unregister(cc);
fail_free_cc:
	vmm_free(cc);
	return rc;
}