/* Deassert (clear) a virtual interrupt line for the given VCPU.
 *
 * @vcpu: target VCPU
 * @irq_no: interrupt line (CPU_EXTERNAL_IRQ or CPU_EXTERNAL_FIQ)
 * @reason: unused by this variant
 *
 * Returns VMM_OK on success, VMM_EFAIL for lines with no HCR virtual bit.
 *
 * Fixes: removed unreachable `break` after `return VMM_EFAIL` and the
 * stray empty statement (`;`) after the switch block.
 */
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) ||
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* NOTE(review): this read-modify-write of hcr is unlocked here,
	 * while other variants of this API guard it with hcr_lock —
	 * confirm callers serialize access. */
	hcr = arm_priv(vcpu)->hcr;

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		break;
	default:
		/* No HCR virtual line to clear for other IRQ types */
		return VMM_EFAIL;
	}

	arm_priv(vcpu)->hcr = hcr;

	/* Only touch the HW register when this VCPU is running now */
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
/* Program the VCPU's software-maintained HCR state into the HW
 * register so that a pending virtual IRQ/FIQ takes effect.
 *
 * @vcpu: target VCPU
 * @regs: saved registers (unused by this variant)
 * @irq_no: interrupt line being executed
 * @reason: unused by this variant
 *
 * Always returns VMM_OK.
 */
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu, arch_regs_t *regs,
			  u32 irq_no, u32 reason)
{
	bool vgic_handles = arm_vgic_avail(vcpu) &&
			    ((irq_no == CPU_EXTERNAL_IRQ) ||
			     (irq_no == CPU_EXTERNAL_FIQ));

	/* External IRQ & FIQ are the VGIC's job when it is available */
	if (vgic_handles) {
		return VMM_OK;
	}

	/* Reflect software HCR state into HW */
	write_hcr(arm_priv(vcpu)->hcr);

	return VMM_OK;
}
/* Assert a virtual interrupt line for the given VCPU by setting the
 * corresponding HCR virtual-interrupt bit under hcr_lock.
 *
 * @vcpu: target VCPU
 * @irq_no: interrupt line (CPU_EXTERNAL_IRQ or CPU_EXTERNAL_FIQ)
 * @reason: unused by this variant
 *
 * Always returns VMM_OK; lines without an HCR bit are silently ignored
 * (they are injected at execute time instead).
 *
 * Fixes: removed stray empty statement (`;`) after the switch block.
 */
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u64 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) ||
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* NOTE(review): the deassert/execute siblings take this same
	 * hcr_lock with the _lite spinlock variants and update HW via
	 * write_hcr() rather than a raw msr — confirm the mix of lock
	 * variants and register accessors is intentional. */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		/* Nothing to set in HCR for other IRQ types */
		break;
	}

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		/* Only touch the HW register when this VCPU is running now */
		if (vcpu == vmm_scheduler_current_vcpu()) {
			msr(hcr_el2, hcr);
		}
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
/* Deassert (clear) a virtual interrupt line for the given VCPU by
 * clearing the corresponding HCR virtual-interrupt bit under hcr_lock.
 *
 * @vcpu: target VCPU
 * @irq_no: interrupt line (CPU_EXTERNAL_IRQ or CPU_EXTERNAL_FIQ)
 * @reason: unused by this variant
 *
 * Always returns VMM_OK; lines without an HCR bit are silently ignored.
 *
 * Fixes: removed stray empty statement (`;`) after the switch block.
 */
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	/* NOTE(review): the assert sibling keeps hcr in a u64; if the
	 * underlying HCR register is 64-bit (HCR_EL2), a u32 local would
	 * drop the upper bits on write-back — confirm the intended width. */
	u32 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) ||
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		update_hcr = TRUE;
		break;
	default:
		/* Nothing to clear in HCR for other IRQ types */
		break;
	}

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		/* Only touch the HW register when this VCPU is running now */
		if (vmm_scheduler_current_vcpu() == vcpu) {
			write_hcr(hcr);
		}
	}

	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
/* Execute a pending interrupt for the given VCPU: fault-type IRQs
 * (undefined instruction, prefetch abort, data abort) are injected
 * into the guest here, then the software HCR state is programmed
 * into HW so virtual IRQ/FIQ lines take effect.
 *
 * @vcpu: target VCPU
 * @regs: saved guest registers used for fault injection
 * @irq_no: interrupt line being executed
 * @reason: for CPU_DATA_ABORT_IRQ, carries the faulting virtual address
 *
 * Returns the injection helper's result, or VMM_OK for other lines.
 *
 * Fixes: removed stray empty statement (`;`) after the switch block.
 */
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu, arch_regs_t *regs,
			  u32 irq_no, u64 reason)
{
	int rc;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) ||
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* Undefined, Data abort, and Prefetch abort
	 * can only be emulated in normal context.
	 */
	switch (irq_no) {
	case CPU_UNDEF_INST_IRQ:
		rc = cpu_vcpu_inject_undef(vcpu, regs);
		break;
	case CPU_PREFETCH_ABORT_IRQ:
		rc = cpu_vcpu_inject_pabt(vcpu, regs);
		break;
	case CPU_DATA_ABORT_IRQ:
		rc = cpu_vcpu_inject_dabt(vcpu, regs, (virtual_addr_t)reason);
		break;
	default:
		rc = VMM_OK;
		break;
	}

	/* Update HCR in HW */
	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);
	write_hcr(arm_priv(vcpu)->hcr);
	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return rc;
}
/* Assert a virtual interrupt line for the given VCPU by setting the
 * corresponding HCR virtual-interrupt bit.
 *
 * @vcpu: target VCPU
 * @irq_no: interrupt line (CPU_DATA_ABORT_IRQ, CPU_EXTERNAL_IRQ,
 *          or CPU_EXTERNAL_FIQ)
 * @reason: unused by this variant
 *
 * Returns VMM_OK on success, VMM_EFAIL for lines with no HCR virtual bit.
 *
 * Fixes: removed unreachable `break` after `return VMM_EFAIL` and the
 * stray empty statement (`;`) after the switch block.
 */
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) ||
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* NOTE(review): this read-modify-write of hcr is unlocked here,
	 * while other variants of this API guard it with hcr_lock —
	 * confirm callers serialize access. */
	hcr = arm_priv(vcpu)->hcr;

	switch (irq_no) {
	case CPU_DATA_ABORT_IRQ:
		hcr |= HCR_VA_MASK;
		/* VA bit is auto-cleared */
		break;
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		break;
	default:
		/* No HCR virtual line for other IRQ types */
		return VMM_EFAIL;
	}

	arm_priv(vcpu)->hcr = hcr;

	/* Only touch the HW register when this VCPU is running now */
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}