/*
 * Restore our VFP state from saved state.  If we own the VFP
 * unit, we leave it enabled, with valid register contents;
 * otherwise we just update the values in thread storage.
 *
 * @ti:      thread whose VFP state is being restored
 * @storage: saved register image (union vfp_state) to restore from
 */
void vfp_task_restore(struct thread_info *ti, void *storage)
{
	u32 fpexc;
	union vfp_state *vfp = storage;

	/* Pin this task to the current CPU while we touch FPEXC and the
	 * per-CPU last_VFP_context pointer. */
	preempt_disable();
	fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
	/*
	 * On SMP, reload the hardware registers only when the unit is
	 * currently enabled AND this CPU still holds our context.
	 * NOTE(review): unlike the UP path below, a disabled unit that
	 * still holds our context falls through to the memcpy path —
	 * presumably the owner will re-enable lazily; confirm against
	 * the fault/trap handler.
	 */
	if ((fpexc & FPEXC_EN) != 0 && last_VFP_context[ti->cpu] == &ti->vfpstate) {
		vfp_restore_state(vfp);
		preempt_enable();
		return;
	}
#else
	/* UP: if this CPU still holds our context, make sure the unit is
	 * enabled before reloading the registers. */
	if (last_VFP_context[ti->cpu] == &ti->vfpstate) {
		if ((fpexc & FPEXC_EN) == 0)
			fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_restore_state(vfp);
		preempt_enable();
		return;
	}
#endif
	preempt_enable();
	/* We no longer own the hardware context: just refresh the copy in
	 * thread storage so it is picked up on the next lazy restore. */
	memcpy(&ti->vfpstate, vfp, sizeof *vfp);
}
static void ctxt_switch_to(struct vcpu *n) { p2m_restore_state(n); WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2); WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2); /* VGIC */ gic_restore_state(n); /* VFP */ vfp_restore_state(n); /* XXX MPU */ /* Fault Status */ #if defined(CONFIG_ARM_32) WRITE_CP32(n->arch.dfar, DFAR); WRITE_CP32(n->arch.ifar, IFAR); WRITE_CP32(n->arch.dfsr, DFSR); #elif defined(CONFIG_ARM_64) WRITE_SYSREG64(n->arch.far, FAR_EL1); WRITE_SYSREG64(n->arch.esr, ESR_EL1); #endif if ( is_32bit_domain(n->domain) ) WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2); WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1); WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1); /* MMU */ WRITE_SYSREG(n->arch.vbar, VBAR_EL1); WRITE_SYSREG(n->arch.ttbcr, TCR_EL1); WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1); WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1); if ( is_32bit_domain(n->domain) ) WRITE_SYSREG(n->arch.dacr, DACR32_EL2); WRITE_SYSREG64(n->arch.par, PAR_EL1); #if defined(CONFIG_ARM_32) WRITE_CP32(n->arch.mair0, MAIR0); WRITE_CP32(n->arch.mair1, MAIR1); WRITE_CP32(n->arch.amair0, AMAIR0); WRITE_CP32(n->arch.amair1, AMAIR1); #elif defined(CONFIG_ARM_64) WRITE_SYSREG64(n->arch.mair, MAIR_EL1); WRITE_SYSREG64(n->arch.amair, AMAIR_EL1); #endif isb(); /* Control Registers */ WRITE_SYSREG(n->arch.cpacr, CPACR_EL1); WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1); WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0); WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0); WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1); if ( is_32bit_domain(n->domain) && cpu_has_thumbee ) { WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1); WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1); } #ifdef CONFIG_ARM_32 WRITE_CP32(n->arch.joscr, JOSCR); WRITE_CP32(n->arch.jmcr, JMCR); #endif isb(); /* CP 15 */ WRITE_SYSREG(n->arch.csselr, CSSELR_EL1); isb(); /* This is could trigger an hardware interrupt from the virtual * timer. The interrupt needs to be injected into the guest. */ WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1); virt_timer_restore(n); }
/*
 * Restore the architectural state of vCPU @n onto the current physical
 * CPU as the second half of a context switch: P2M, VGIC, VFP, fault
 * status, MMU, control and CP15 registers, and finally the virtual
 * timer.  The sysreg write ordering below matters — see the erratum
 * notes inline.
 */
static void ctxt_switch_to(struct vcpu *n)
{
    /* When the idle VCPU is running, Xen will always stay in hypervisor
     * mode. Therefore we don't need to restore the context of an idle VCPU.
     */
    if ( is_idle_vcpu(n) )
        return;

    p2m_restore_state(n);

    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
    WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2);

    /* VGIC */
    gic_restore_state(n);

    /* VFP */
    vfp_restore_state(n);

    /* XXX MPU */

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.dfar, DFAR);
    WRITE_CP32(n->arch.ifar, IFAR);
    WRITE_CP32(n->arch.dfsr, DFSR);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.far, FAR_EL1);
    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
#endif

    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);

    /* MMU */
    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);

    /*
     * Erratum #852523: DACR32_EL2 must be restored before one of the
     * following sysregs: SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1 or
     * CONTEXTIDR_EL1.
     */
    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
    WRITE_SYSREG64(n->arch.par, PAR_EL1);
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.mair0, MAIR0);
    WRITE_CP32(n->arch.mair1, MAIR1);
    WRITE_CP32(n->arch.amair0, AMAIR0);
    WRITE_CP32(n->arch.amair1, AMAIR1);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
    WRITE_SYSREG64(n->arch.amair, AMAIR_EL1);
#endif
    isb();

    /* Control Registers */
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);

    /*
     * This write to sysreg CONTEXTIDR_EL1 ensures we don't hit erratum
     * #852523, i.e. that DACR32_EL2 is correctly synchronized.
     */
    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);

    if ( is_32bit_domain(n->domain) && cpu_has_thumbee )
    {
        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    WRITE_CP32(n->arch.joscr, JOSCR);
    WRITE_CP32(n->arch.jmcr, JMCR);
#endif
    isb();

    /* CP 15 */
    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);

    isb();

    /* This could trigger a hardware interrupt from the virtual
     * timer. The interrupt needs to be injected into the guest. */
    WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1);
    virt_timer_restore(n);
}