static void phys_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_PENDING;
    t->ctl &= ~CNTx_CTL_MASK;
    vgic_vcpu_inject_irq(t->v, 30, 1);
}
static void virt_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_MASK;
    vgic_vcpu_inject_irq(t->v, t->irq);
    perfc_incr(vtimer_virt_inject);
}
static void phys_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_PENDING;
    if ( !(t->ctl & CNTx_CTL_MASK) )
        vgic_vcpu_inject_irq(t->v, t->irq);
}
static void phys_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_PENDING;
    if ( !(t->ctl & CNTx_CTL_MASK) )
    {
        perfc_incr(vtimer_phys_inject);
        vgic_vcpu_inject_irq(t->v, t->irq);
    }
    else
        perfc_incr(vtimer_phys_masked);
}
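For context, the PENDING bit that phys_timer_expired() latches is consumed when the guest next traps on a write to CNTP_CTL_EL0. The sketch below is a simplified reconstruction of that emulation path, not the exact upstream handler: the field names (phys_timer, cval, timer), the helper name vtimer_cntp_ctl_write(), and the direct use of cval as a Xen system time are assumptions; the in-tree code converts guest ticks to nanoseconds and applies the domain's counter offset. set_timer()/stop_timer() are Xen's standard software-timer API.

/* Hedged sketch: emulated guest write to CNTP_CTL_EL0 (simplified). */
static void vtimer_cntp_ctl_write(struct vcpu *v, uint32_t val)
{
    struct vtimer *t = &v->arch.phys_timer;   /* assumed field name */
    uint32_t ctl = val & ~CNTx_CTL_PENDING;   /* ISTATUS is read-only for the guest */

    /* Keep a latched pending level if the timer stays enabled. */
    if ( ctl & CNTx_CTL_ENABLE )
        ctl |= t->ctl & CNTx_CTL_PENDING;
    t->ctl = ctl;

    if ( t->ctl & CNTx_CTL_ENABLE )
        /* Re-arm the Xen timer so phys_timer_expired() fires at cval. */
        set_timer(&t->timer, t->cval);        /* tick->ns conversion elided */
    else
        stop_timer(&t->timer);
}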
static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    /*
     * Edge-triggered interrupts can be used for the virtual timer. Even
     * if the timer output signal is masked in the context switch, the
     * GIC will keep track of any interrupts raised while IRQs are
     * disabled. As soon as IRQs are re-enabled, the virtual interrupt
     * will be injected to Xen.
     *
     * If an IDLE vCPU was scheduled next then we should ignore the
     * interrupt.
     */
    if ( unlikely(is_idle_vcpu(current)) )
        return;

    current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
    WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
    vgic_vcpu_inject_irq(current, current->arch.virt_timer.irq);
}
static void vtimer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    current->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
    WRITE_SYSREG32(current->arch.virt_timer.ctl | CNTx_CTL_MASK, CNTV_CTL_EL0);
    vgic_vcpu_inject_irq(current, current->arch.virt_timer.irq, 1);
}
static void virt_timer_expired(void *data)
{
    struct vtimer *t = data;
    t->ctl |= CNTx_CTL_MASK;
    vgic_vcpu_inject_irq(t->v, 27, 1);
}
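The masking done in vtimer_interrupt() and virt_timer_expired() above only makes sense together with the context-switch save path, where Xen snapshots the hardware virtual timer and hands the deadline to a software timer. Below is a hedged reconstruction of that path; the upstream virt_timer_save() additionally converts cval from guest ticks to Xen system time using the domain's counter offset, which is elided here.

/* Hedged sketch of the virtual-timer context-switch save path (simplified). */
void virt_timer_save(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    /* Snapshot and disable the hardware virtual timer for this vCPU. */
    v->arch.virt_timer.ctl = READ_SYSREG32(CNTV_CTL_EL0);
    WRITE_SYSREG32(v->arch.virt_timer.ctl & ~CNTx_CTL_ENABLE, CNTV_CTL_EL0);
    v->arch.virt_timer.cval = READ_SYSREG64(CNTV_CVAL_EL0);

    /*
     * If the guest left the timer enabled and unmasked, arm a Xen
     * software timer so virt_timer_expired() can queue the PPI while
     * the vCPU is off the physical CPU.
     */
    if ( (v->arch.virt_timer.ctl & CNTx_CTL_ENABLE) &&
         !(v->arch.virt_timer.ctl & CNTx_CTL_MASK) )
        set_timer(&v->arch.virt_timer.timer,
                  v->arch.virt_timer.cval /* tick->ns conversion elided */);
}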
/* Dispatch an interrupt */
void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irqaction *action = desc->action;

    /* TODO: perfc_incr(irqs); */
    /* TODO: this_cpu(irq_count)++; */

    irq_enter();

    spin_lock(&desc->lock);
    desc->handler->ack(desc);

    if ( action == NULL )
    {
        printk("Unknown %s %#3.3x\n", is_fiq ? "FIQ" : "IRQ", irq);
        goto out;
    }

    if ( desc->status & IRQ_GUEST )
    {
        struct domain *d = action->dev_id;

        desc->handler->end(desc);

        desc->status |= IRQ_INPROGRESS;
        desc->arch.eoi_cpu = smp_processor_id();

        /* XXX: inject irq into all guest vcpus */
        vgic_vcpu_inject_irq(d->vcpu[0], irq, 0);
        goto out_no_end;
    }

    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        spin_unlock_irq(&desc->lock);
        action->handler(irq, action->dev_id, regs);
        spin_lock_irq(&desc->lock);
    }

    desc->status &= ~IRQ_INPROGRESS;

out:
    desc->handler->end(desc);
out_no_end:
    spin_unlock(&desc->lock);
    irq_exit();
}
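do_IRQ() chooses between host handling and guest injection based on IRQ_GUEST in desc->status and the domain stashed in action->dev_id. The sketch below shows how that state might be set up; it only reproduces the desc bookkeeping visible in do_IRQ() and glosses over the GIC distributor programming done by the real routing helper, so treat the function name and the irqaction setup as illustrative assumptions.

/* Hedged sketch: mark a hardware IRQ as routed to a guest domain. */
static int route_irq_to_domain(struct domain *d, unsigned int irq,
                               struct irqaction *action)
{
    struct irq_desc *desc = irq_to_desc(irq);
    unsigned long flags;

    action->dev_id = d;            /* do_IRQ() reads the domain back from here */

    spin_lock_irqsave(&desc->lock, flags);
    desc->action = action;
    desc->status |= IRQ_GUEST;     /* makes do_IRQ() inject instead of handle */
    desc->handler->startup(desc);  /* unmask/enable the line at the GIC */
    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}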