int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		break;
	default:
		return VMM_EFAIL;
	}

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
/**
 * @brief Enables or disables the stage-2 MMU.
 *
 * Configures the Hyp Configuration Register (HCR) to enable or disable
 * the virtualization MMU.
 *
 * @param enable Enable or disable the MMU.
 *               - 1 : Enable the MMU.
 *               - 0 : Disable the MMU.
 * @return void
 */
static void guest_memory_stage2_enable(int enable)
{
	uint32_t hcr;

	/* HCR.VM (bit[0]) controls stage-2 translation */
	hcr = read_hcr();
	if (enable)
		hcr |= 0x1;
	else
		hcr &= ~0x1;
	write_hcr(hcr);
}
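A minimal usage sketch, assuming a hypothetical caller in the same file; guest_stage2_tables_setup() and guest_enter() are placeholders, not part of the original source. Stage-2 translation is switched on only after the translation tables are ready and before entering the guest:

/* Hypothetical caller: everything except guest_memory_stage2_enable()
 * is an assumed placeholder. */
extern void guest_stage2_tables_setup(void);	/* assumed: programs VTTBR */
extern void guest_enter(void);			/* assumed: ERETs into the guest */

static void guest_start(void)
{
	guest_stage2_tables_setup();
	guest_memory_stage2_enable(1);	/* HCR.VM = 1: stage-2 on */
	guest_enter();
}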
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs,
			  u32 irq_no, u32 reason)
{
	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	write_hcr(arm_priv(vcpu)->hcr);

	return VMM_OK;
}
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u32 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		break;
	}

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vmm_scheduler_current_vcpu() == vcpu) {
			write_hcr(hcr);
		}
	}

	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs,
			  u32 irq_no, u64 reason)
{
	int rc;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* Undefined, Data abort, and Prefetch abort
	 * can only be emulated in normal context.
	 */
	switch (irq_no) {
	case CPU_UNDEF_INST_IRQ:
		rc = cpu_vcpu_inject_undef(vcpu, regs);
		break;
	case CPU_PREFETCH_ABORT_IRQ:
		rc = cpu_vcpu_inject_pabt(vcpu, regs);
		break;
	case CPU_DATA_ABORT_IRQ:
		rc = cpu_vcpu_inject_dabt(vcpu, regs, (virtual_addr_t)reason);
		break;
	default:
		rc = VMM_OK;
		break;
	}

	/* Update HCR in HW */
	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);
	write_hcr(arm_priv(vcpu)->hcr);
	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return rc;
}
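For context, a hedged sketch of how a stage-2 data-abort path might feed the faulting address into this hook; the handler name and plumbing below are assumptions, and only arch_vcpu_irq_execute() comes from the code above. The faulting VA travels in the reason argument and is unpacked by cpu_vcpu_inject_dabt():

/* Hypothetical fault path; only arch_vcpu_irq_execute() is real here. */
static int handle_guest_dabt(struct vmm_vcpu *vcpu, arch_regs_t *regs,
			     virtual_addr_t fault_va)
{
	/* 'reason' carries the faulting VA for CPU_DATA_ABORT_IRQ */
	return arch_vcpu_irq_execute(vcpu, regs,
				     CPU_DATA_ABORT_IRQ, (u64)fault_va);
}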
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch (irq_no) {
	case CPU_DATA_ABORT_IRQ:
		hcr |= HCR_VA_MASK;
		/* VA bit is auto-cleared */
		break;
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		break;
	default:
		return VMM_EFAIL;
	}

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
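Taken together, the assert/deassert pair drives a level-triggered virtual-IRQ lifecycle: a device model asserts HCR.VI, arch_vcpu_irq_execute() writes the saved HCR to hardware when the VCPU is scheduled in, and the model deasserts once the guest has serviced the source. A minimal sketch, assuming a hypothetical emulated UART as the caller:

/* Hypothetical device model; only the arch_vcpu_irq_*() calls are real. */
static void emulated_uart_update_irq(struct vmm_vcpu *vcpu, bool level)
{
	if (level) {
		/* Sets HCR.VI; written to HW at once if vcpu is current */
		arch_vcpu_irq_assert(vcpu, CPU_EXTERNAL_IRQ, 0);
	} else {
		/* Clears HCR.VI once the guest has acknowledged the source */
		arch_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ, 0);
	}
}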
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
					     CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			   (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation are disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case where HDCR
			 * is not completely reset by the hardware - set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the EL2 physical timer and
			 * therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
					     CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			   (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/* Set the NS bit to access HCR, HCPTR, CNTHCTL,
			 * VPIDR, VMPIDR */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation are disabled.
			 */
			write64_vttbr(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
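A hedged sketch of where this runs in the boot flow: an EL3 runtime caller prepares the non-secure world's system registers and then selects the context for the coming exception return. cm_set_next_eret_context() is an ATF-style helper named here as an assumption; only cm_prepare_el3_exit() comes from the code above.

/* Assumed EL3 glue; only cm_prepare_el3_exit() is from the code above. */
extern void cm_set_next_eret_context(uint32_t security_state); /* assumed helper */

static void prepare_normal_world_entry(void)
{
	/* Initialize HSCTLR, or safely park PL2, depending on SCR.HCE */
	cm_prepare_el3_exit(NON_SECURE);
	/* Select the non-secure context for the coming exception return */
	cm_set_next_eret_context(NON_SECURE);
}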