Example #1
int cpu_vcpu_vfp_trap(struct vmm_vcpu *vcpu,
		      arch_regs_t *regs,
		      u32 il, u32 iss,
		      bool is_asimd)
{
	struct arm_priv *p = arm_priv(vcpu);
	struct arm_priv_vfp *vfp = &p->vfp;

	/* Inject undefined exception if:
	 * 1. VCPU does not have VFPv3 feature
	 */
	if (!arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		/* Inject undefined exception */
		cpu_vcpu_inject_undef(vcpu, regs);
		return VMM_OK;
	}

	/* If VFP/ASIMD traps were enabled then:
	 * 1. Disable VFP/ASIMD traps
	 * 2. Restore VFP/ASIMD regs
	 */
	p->hcptr &= ~(HCPTR_TASE_MASK);
	p->hcptr &= ~(HCPTR_TCP11_MASK|HCPTR_TCP10_MASK);
	write_hcptr(p->hcptr);
	cpu_vcpu_vfp_regs_restore(vfp);

	return VMM_OK;
}
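
The handler above is the restore half of a lazy VFP/ASIMD switch: the first trapped access clears the HCPTR trap bits and reloads the guest registers. The complementary switch-out path has to do the opposite, and a minimal sketch of it is shown below. The function name and the cpu_vcpu_vfp_regs_save() helper are assumptions that mirror the symbols used in the example, not the actual Xvisor API.

/* Hypothetical switch-out path: save the outgoing VCPU's VFP/ASIMD
 * state and re-arm the HCPTR traps so that the next guest access
 * faults back into cpu_vcpu_vfp_trap().
 */
static void cpu_vcpu_vfp_switch_out(struct vmm_vcpu *vcpu)
{
	struct arm_priv *p = arm_priv(vcpu);
	struct arm_priv_vfp *vfp = &p->vfp;

	/* VCPUs without VFPv3 never own any VFP/ASIMD state */
	if (!arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		return;
	}

	/* Save guest registers (assumed counterpart of the
	 * cpu_vcpu_vfp_regs_restore() call used above) */
	cpu_vcpu_vfp_regs_save(vfp);

	/* Re-enable VFP/ASIMD traps for the next run of this VCPU */
	p->hcptr |= (HCPTR_TASE_MASK |
		     HCPTR_TCP11_MASK | HCPTR_TCP10_MASK);
	write_hcptr(p->hcptr);
}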
Example #2
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested in Hyp mode, HSCTLR is initialized.
 * If execution is requested in non-secure PL1 and the CPU supports
 * Hyp mode, then Hyp mode is disabled by configuring all necessary
 * Hyp mode registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
						 CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case where HDCR
			 * is not completely reset by the hardware - set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the EL2 physical timer and
			 * therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
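
The HDCR write above relies on PMCR.N and HDCR.HPMN being same-width fields with HPMN at offset zero, so the extracted counter count can be written to HDCR unchanged while every other bit, and therefore every debug trap, stays zero. The standalone sketch below walks through that field arithmetic; the bit positions (PMCR.N at bits [15:11], HPMN at bits [4:0]) follow the ARMv7-A definitions, and the macro values mirror the ones assumed by the example.

#include <assert.h>
#include <stdint.h>

#define PMCR_N_SHIFT	11U
#define PMCR_N_MASK	0x1fU
#define PMCR_N_BITS	(PMCR_N_MASK << PMCR_N_SHIFT)

int main(void)
{
	/* Sample PMCR value advertising N = 6 event counters */
	uint32_t pmcr = 6U << PMCR_N_SHIFT;

	/* Same expression as the write_hdcr() argument above:
	 * isolate PMCR.N and shift it down to bits [4:0], which is
	 * exactly where HDCR.HPMN lives. */
	uint32_t hdcr = (pmcr & PMCR_N_BITS) >> PMCR_N_SHIFT;

	assert(hdcr == 6U);

	return 0;
}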