/*
 * Configure hypervisor (EL2/Hyp) trap state for the current CPU.
 *
 * Installs the hypervisor exception vector table and programs HCR_EL2 so
 * that the relevant guest operations trap to the hypervisor:
 *   HCR_PTW       - protected table walk
 *   HCR_BSU_OUTER - upgrade barriers to outer-shareable
 *   HCR_AMO/IMO   - route external aborts / IRQs to EL2
 *   HCR_VM        - enable stage-2 (second stage) translation
 *   HCR_TWI       - trap guest WFI instructions
 *   HCR_TSC       - trap guest SMC instructions
 *   HCR_TAC       - trap ACTLR accesses
 * (See HCR_EL2 in the ARM ARM for the full bit semantics.)
 *
 * The trailing isb() ensures the new register values are in effect
 * before any subsequent instruction depends on them.
 */
void __cpuinit init_traps(void)
{
    /* Setup Hyp vector base */
    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);

    /* Setup hypervisor traps */
    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI|HCR_TSC|
                 HCR_TAC, HCR_EL2);
    isb();
}
/* Injects an Abort exception into the current vcpu, PC is the exact
 * address of the faulting instruction (without pipeline
 * adjustments). See TakePrefetchAbortException and
 * TakeDataAbortException pseudocode in ARM ARM.
 */
static void inject_abt32_exception(struct cpu_user_regs *regs,
                                   int prefetch,
                                   register_t addr)
{
    uint32_t spsr = regs->cpsr;
    int is_thumb = (regs->cpsr & PSR_THUMB);
    /* Saved PC points to the instruction past the faulting instruction.
     * The offset differs between ARM and Thumb state, per the abort-entry
     * pseudocode referenced above. */
    uint32_t return_offset = is_thumb ? 4 : 0;
    register_t fsr;

    /* This injection path is only valid for an AArch32 guest. */
    BUG_ON( !is_pv32_domain(current->domain) );

    cpsr_switch_mode(regs, PSR_MODE_ABT);

    /* Update banked registers */
    regs->spsr_abt = spsr;
    regs->lr_abt = regs->pc32 + return_offset;

    /* Vector to the guest's prefetch- or data-abort handler. */
    regs->pc32 = exception_handler(prefetch ? VECTOR32_PABT : VECTOR32_DABT);

    /* Inject a debug fault, best we can do right now.
     * Fault-status encoding depends on whether the guest uses the LPAE
     * (long-descriptor) format, indicated by TTBCR.EAE. */
    if ( READ_SYSREG(TCR_EL1) & TTBCR_EAE )
        fsr = FSR_LPAE | FSRL_STATUS_DEBUG;
    else
        fsr = FSRS_FS_DEBUG;

    if ( prefetch )
    {
        /* Set IFAR and IFSR */
#ifdef CONFIG_ARM_32
        WRITE_SYSREG(addr, IFAR);
        WRITE_SYSREG(fsr, IFSR);
#else
        /* FAR_EL1[63:32] is AArch32 register IFAR: preserve the low half
         * (DFAR) and install the faulting address in the high half. */
        register_t far = READ_SYSREG(FAR_EL1) & 0xffffffffUL;
        far |= addr << 32;
        WRITE_SYSREG(far, FAR_EL1);
        WRITE_SYSREG(fsr, IFSR32_EL2);
#endif
    }
    else
    {
#ifdef CONFIG_ARM_32
        /* Set DFAR and DFSR */
        WRITE_SYSREG(addr, DFAR);
        WRITE_SYSREG(fsr, DFSR);
#else
        /* FAR_EL1[31:0] is AArch32 register DFAR: preserve the high half
         * (IFAR) and install the faulting address in the low half. */
        register_t far = READ_SYSREG(FAR_EL1) & ~0xffffffffUL;
        far |= addr;
        WRITE_SYSREG(far, FAR_EL1);
        /* ESR_EL1 is AArch32 register DFSR */
        WRITE_SYSREG(fsr, ESR_EL1);
#endif
    }
}
/*
 * Load vcpu n's stage-2 translation (p2m) state onto this physical CPU.
 *
 * Stage-2 translation (HCR_VM) is disabled while VTTBR is being
 * switched, and re-enabled only after the new table base is in place;
 * the isb() calls enforce that ordering.  HCR_RW is set according to
 * the register width of the target domain (clear for 32-bit guests).
 */
void p2m_restore_state(struct vcpu *n)
{
    register_t hcr;

    hcr = READ_SYSREG(HCR_EL2);
    /* Disable stage-2 translation while the VTTBR is switched. */
    WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2);
    isb();

    p2m_load_VTTBR(n->domain);
    isb();

    if ( is_32bit_domain(n->domain) )
        hcr &= ~HCR_RW;
    else
        hcr |= HCR_RW;

    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
    isb();

    /* Re-enable stage-2 translation with the updated HCR_RW setting. */
    WRITE_SYSREG(hcr, HCR_EL2);
    isb();
}
/* Boot the current CPU.
 *
 * Entered by each secondary CPU after early assembly bring-up.  Performs
 * per-CPU initialisation (CPU identification, Hyp vectors, MMU, FPU, GIC,
 * interrupts), marks the CPU online, enables interrupts and finally drops
 * into the idle loop.  The ordering here matters: notifiers run before
 * the CPU is published in cpu_online_map, and the wmb()s order those
 * updates with respect to observers on other CPUs.
 */
void __cpuinit start_secondary(unsigned long boot_phys_offset,
                               unsigned long fdt_paddr,
                               unsigned long cpuid)
{
    struct cpuinfo_arm *c = cpu_data + cpuid;

    memset(get_cpu_info(), 0, sizeof (struct cpu_info));

    /* TODO: handle boards where CPUIDs are not contiguous */
    set_processor_id(cpuid);

    /* Record this CPU's identification, starting from the boot CPU's copy. */
    *c = boot_cpu_data;
    identify_cpu(c);

    /* Setup Hyp vector base */
    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);

    mmu_init_secondary_cpu();
    enable_vfp();

    gic_init_secondary_cpu();

    init_secondary_IRQ();

    gic_route_ppis();

    init_maintenance_interrupt();
    init_timer_interrupt();

    set_current(idle_vcpu[cpuid]);

    setup_cpu_sibling_map(cpuid);

    /* Run local notifiers */
    notify_cpu_starting(cpuid);
    wmb();

    /* Now report this CPU is up */
    cpumask_set_cpu(cpuid, &cpu_online_map);
    wmb();

    local_irq_enable();

    dprintk(XENLOG_DEBUG, "CPU %u booted.\n", smp_processor_id());

    startup_cpu_idle_loop();
}
static void ctxt_switch_to(struct vcpu *n) { p2m_restore_state(n); WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2); WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2); /* VGIC */ gic_restore_state(n); /* VFP */ vfp_restore_state(n); /* XXX MPU */ /* Fault Status */ #if defined(CONFIG_ARM_32) WRITE_CP32(n->arch.dfar, DFAR); WRITE_CP32(n->arch.ifar, IFAR); WRITE_CP32(n->arch.dfsr, DFSR); #elif defined(CONFIG_ARM_64) WRITE_SYSREG64(n->arch.far, FAR_EL1); WRITE_SYSREG64(n->arch.esr, ESR_EL1); #endif if ( is_32bit_domain(n->domain) ) WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2); WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1); WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1); /* MMU */ WRITE_SYSREG(n->arch.vbar, VBAR_EL1); WRITE_SYSREG(n->arch.ttbcr, TCR_EL1); WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1); WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1); if ( is_32bit_domain(n->domain) ) WRITE_SYSREG(n->arch.dacr, DACR32_EL2); WRITE_SYSREG64(n->arch.par, PAR_EL1); #if defined(CONFIG_ARM_32) WRITE_CP32(n->arch.mair0, MAIR0); WRITE_CP32(n->arch.mair1, MAIR1); WRITE_CP32(n->arch.amair0, AMAIR0); WRITE_CP32(n->arch.amair1, AMAIR1); #elif defined(CONFIG_ARM_64) WRITE_SYSREG64(n->arch.mair, MAIR_EL1); WRITE_SYSREG64(n->arch.amair, AMAIR_EL1); #endif isb(); /* Control Registers */ WRITE_SYSREG(n->arch.cpacr, CPACR_EL1); WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1); WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0); WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0); WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1); if ( is_32bit_domain(n->domain) && cpu_has_thumbee ) { WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1); WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1); } #ifdef CONFIG_ARM_32 WRITE_CP32(n->arch.joscr, JOSCR); WRITE_CP32(n->arch.jmcr, JMCR); #endif isb(); /* CP 15 */ WRITE_SYSREG(n->arch.csselr, CSSELR_EL1); isb(); /* This is could trigger an hardware interrupt from the virtual * timer. The interrupt needs to be injected into the guest. */ WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1); virt_timer_restore(n); }
/* Inject an undefined exception into a 64 bit guest.
 *
 * Emulates a synchronous exception entry to the guest's EL1 with
 * EC = Unknown, per the AArch64 exception-entry sequence: save PSTATE
 * to SPSR_EL1, the return address to ELR_EL1, mask exceptions, and
 * vector to the current-EL-with-SPx synchronous handler. */
static void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len)
{
    union hsr esr = {
        .iss = 0,
        .len = instr_len,
        .ec = HSR_EC_UNKNOWN,
    };

    /* Only valid for a 64-bit guest. */
    BUG_ON( is_pv32_domain(current->domain) );

    regs->spsr_el1 = regs->cpsr;
    regs->elr_el1 = regs->pc;

    regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
        PSR_IRQ_MASK | PSR_DBG_MASK;
    regs->pc = READ_SYSREG(VBAR_EL1) + VECTOR64_CURRENT_SPx_SYNC;

    WRITE_SYSREG32(esr.bits, ESR_EL1);
}

/* Inject an abort exception into a 64 bit guest.
 *
 * @prefetch selects instruction (non-zero) vs data abort; @addr is the
 * faulting address, installed in FAR_EL1.  The exception class in
 * ESR_EL1 distinguishes whether the abort came from a lower EL (EL0)
 * or the current EL (the guest's EL1). */
static void inject_abt64_exception(struct cpu_user_regs *regs,
                                   int prefetch,
                                   register_t addr,
                                   int instr_len)
{
    union hsr esr = {
        .iss = 0,
        .len = instr_len,
    };

    /*
     * Trap may have been taken from EL0, which might be in AArch32
     * mode (PSR_MODE_BIT set), or in AArch64 mode (PSR_MODE_EL0t).
     *
     * Since we know the kernel must be 64-bit any trap from a 32-bit
     * mode must have been from EL0.
     */
    if ( psr_mode_is_32bit(regs->cpsr) || psr_mode(regs->cpsr,PSR_MODE_EL0t) )
        esr.ec = prefetch
            ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL;
    else
        esr.ec = prefetch
            ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL;

    /* Only valid for a 64-bit guest. */
    BUG_ON( is_pv32_domain(current->domain) );

    regs->spsr_el1 = regs->cpsr;
    regs->elr_el1 = regs->pc;

    regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
        PSR_IRQ_MASK | PSR_DBG_MASK;
    regs->pc = READ_SYSREG(VBAR_EL1) + VECTOR64_CURRENT_SPx_SYNC;

    WRITE_SYSREG(addr, FAR_EL1);
    WRITE_SYSREG32(esr.bits, ESR_EL1);
}

/* Convenience wrapper: inject a data abort into a 64-bit guest. */
static void inject_dabt64_exception(struct cpu_user_regs *regs,
                                    register_t addr,
                                    int instr_len)
{
    inject_abt64_exception(regs, 0, addr, instr_len);
}

/* Convenience wrapper: inject an instruction (prefetch) abort into a
 * 64-bit guest. */
static void inject_iabt64_exception(struct cpu_user_regs *regs,
                                    register_t addr,
                                    int instr_len)
{
    inject_abt64_exception(regs, 1, addr, instr_len);
}
/*
 * Restore vcpu n's architectural state onto the current physical CPU.
 *
 * Restores, in order: stage-2 (p2m) state, virtual CPU id registers,
 * VGIC and VFP state, fault-status registers, MMU registers, control
 * registers and finally the virtual timer.  The isb() calls separate
 * groups of system-register writes whose relative ordering matters.
 */
static void ctxt_switch_to(struct vcpu *n)
{
    /* When the idle VCPU is running, Xen will always stay in hypervisor
     * mode. Therefore we don't need to restore the context of an idle VCPU. */
    if ( is_idle_vcpu(n) )
        return;

    p2m_restore_state(n);

    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
    WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2);

    /* VGIC */
    gic_restore_state(n);

    /* VFP */
    vfp_restore_state(n);

    /* XXX MPU */

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.dfar, DFAR);
    WRITE_CP32(n->arch.ifar, IFAR);
    WRITE_CP32(n->arch.dfsr, DFSR);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.far, FAR_EL1);
    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
#endif

    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);

    /* MMU */
    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);

    /*
     * Erratum #852523: DACR32_EL2 must be restored before one of the
     * following sysregs: SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1 or
     * CONTEXTIDR_EL1.
     */
    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
    WRITE_SYSREG64(n->arch.par, PAR_EL1);
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.mair0, MAIR0);
    WRITE_CP32(n->arch.mair1, MAIR1);
    WRITE_CP32(n->arch.amair0, AMAIR0);
    WRITE_CP32(n->arch.amair1, AMAIR1);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
    WRITE_SYSREG64(n->arch.amair, AMAIR_EL1);
#endif
    isb();

    /* Control Registers */
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);

    /*
     * This write to sysreg CONTEXTIDR_EL1 ensures we don't hit erratum
     * #852523, i.e. DACR32_EL2 not being correctly synchronized.
     */
    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);

    if ( is_32bit_domain(n->domain) && cpu_has_thumbee )
    {
        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    WRITE_CP32(n->arch.joscr, JOSCR);
    WRITE_CP32(n->arch.jmcr, JMCR);
#endif
    isb();

    /* CP 15 */
    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);

    isb();

    /* This could trigger a hardware interrupt from the virtual
     * timer. The interrupt needs to be injected into the guest. */
    WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1);
    virt_timer_restore(n);
}