/* Injects an Abort exception into the current vcpu, PC is the exact * address of the faulting instruction (without pipeline * adjustments). See TakePrefetchAbortException and * TakeDataAbortException pseudocode in ARM ARM. */
static void inject_abt32_exception(struct cpu_user_regs *regs,
                                   int prefetch,
                                   register_t addr)
{
    uint32_t spsr = regs->cpsr;
    int is_thumb = (regs->cpsr & PSR_THUMB);
    /* Saved PC points to the instruction past the faulting instruction. */
    uint32_t return_offset = is_thumb ? 4 : 0;
    register_t fsr;

    /* A 32-bit abort can only be delivered to an AArch32 guest. */
    BUG_ON( !is_pv32_domain(current->domain) );

    /* Enter Abort mode first so the ABT banked registers are the live ones. */
    cpsr_switch_mode(regs, PSR_MODE_ABT);

    /* Update banked registers */
    regs->spsr_abt = spsr;
    regs->lr_abt = regs->pc32 + return_offset;

    /* Vector to the guest's prefetch- or data-abort handler. */
    regs->pc32 = exception_handler(prefetch ? VECTOR32_PABT : VECTOR32_DABT);

    /* Inject a debug fault, best we can do right now */
    /* Fault-status encoding differs between the LPAE (TTBCR.EAE set)
     * and short-descriptor formats, so pick the matching one. */
    if ( READ_SYSREG(TCR_EL1) & TTBCR_EAE )
        fsr = FSR_LPAE | FSRL_STATUS_DEBUG;
    else
        fsr = FSRS_FS_DEBUG;

    if ( prefetch )
    {
        /* Set IFAR and IFSR */
#ifdef CONFIG_ARM_32
        WRITE_SYSREG(addr, IFAR);
        WRITE_SYSREG(fsr, IFSR);
#else
        /* FAR_EL1[63:32] is AArch32 register IFAR */
        register_t far = READ_SYSREG(FAR_EL1) & 0xffffffffUL;
        far |= addr << 32;
        WRITE_SYSREG(far, FAR_EL1);
        WRITE_SYSREG(fsr, IFSR32_EL2);
#endif
    }
    else
    {
#ifdef CONFIG_ARM_32
        /* Set DFAR and DFSR */
        WRITE_SYSREG(addr, DFAR);
        WRITE_SYSREG(fsr, DFSR);
#else
        /* FAR_EL1[31:0] is AArch32 register DFAR */
        register_t far = READ_SYSREG(FAR_EL1) & ~0xffffffffUL;
        far |= addr;
        WRITE_SYSREG(far, FAR_EL1);
        /* ESR_EL1 is AArch32 register DFSR */
        WRITE_SYSREG(fsr, ESR_EL1);
#endif
    }
}
/*
 * Resolve the guest entry address for the given AArch32 vector offset.
 * With SCTLR.V set the vectors live at the fixed "hivecs" base
 * 0xffff0000; otherwise they are relative to VBAR (security
 * extensions are always present, so VBAR is always implemented).
 */
static vaddr_t exception_handler(vaddr_t offset)
{
    vaddr_t base = 0xffff0000;

    if ( !(READ_SYSREG32(SCTLR_EL1) & SCTLR_V) )
        base = READ_SYSREG(VBAR_EL1); /* always have security exceptions */

    return base + offset;
}
/*
 * Reset the CPU maps to contain only the boot processor (cpu 0) and
 * record its hardware identifier from MPIDR.
 */
void __init smp_clear_cpu_maps (void)
{
    /* Start from empty maps, then mark cpu 0 as both possible and online. */
    cpumask_clear(&cpu_possible_map);
    cpumask_clear(&cpu_online_map);
    cpumask_set_cpu(0, &cpu_possible_map);
    cpumask_set_cpu(0, &cpu_online_map);

    /* Logical cpu 0 is whichever physical CPU we are booting on. */
    cpu_logical_map(0) = READ_SYSREG(MPIDR_EL1) & MPIDR_HWID_MASK;
}
/*
 * Load vcpu @n's stage-2 translation context.  Stage-2 translation is
 * disabled around the VTTBR switch, with isb() barriers forcing each
 * sysreg write to take effect before the next step.
 */
void p2m_restore_state(struct vcpu *n)
{
    register_t hcr;

    hcr = READ_SYSREG(HCR_EL2);
    /* Clear HCR.VM while the VTTBR is being changed. */
    WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2);
    isb();

    p2m_load_VTTBR(n->domain);
    isb();

    /* Select the EL1 register width to match the incoming domain. */
    if ( is_32bit_domain(n->domain) )
        hcr &= ~HCR_RW;
    else
        hcr |= HCR_RW;

    /* Restore the guest's saved system control register. */
    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
    isb();

    /* Write back the original HCR value (with RW adjusted above),
     * which re-enables the VM bit if it was previously set. */
    WRITE_SYSREG(hcr, HCR_EL2);
    isb();
}
/*
 * Save the outgoing vcpu @p's architectural state on context switch:
 * stage-2/MMU context, CP15 control registers, arch timer, fault
 * status, VFP and VGIC state.
 */
static void ctxt_switch_from(struct vcpu *p)
{
    /* Saves SCTLR_EL1 (stage-1 control) via the p2m layer. */
    p2m_save_state(p);

    /* CP 15 */
    p->arch.csselr = READ_SYSREG(CSSELR_EL1);

    /* Control Registers */
    p->arch.cpacr = READ_SYSREG(CPACR_EL1);

    p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1);

    /* Software thread-ID registers (EL0 and EL1). */
    p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0);
    p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0);
    p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1);

    /* Arch timer */
    p->arch.cntkctl = READ_SYSREG32(CNTKCTL_EL1);
    virt_timer_save(p);

    /* ThumbEE state exists only for 32-bit guests on ThumbEE-capable HW. */
    if ( is_32bit_domain(p->domain) && cpu_has_thumbee )
    {
        p->arch.teecr = READ_SYSREG32(TEECR32_EL1);
        p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    /* Jazelle registers are only reachable through CP14 on AArch32. */
    p->arch.joscr = READ_CP32(JOSCR);
    p->arch.jmcr = READ_CP32(JMCR);
#endif

    isb();

    /* MMU */
    p->arch.vbar = READ_SYSREG(VBAR_EL1);
    p->arch.ttbcr = READ_SYSREG(TCR_EL1);
    p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1);
    p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
    if ( is_32bit_domain(p->domain) )
        p->arch.dacr = READ_SYSREG(DACR32_EL2);
    p->arch.par = READ_SYSREG64(PAR_EL1);
#if defined(CONFIG_ARM_32)
    /* 32-bit hosts access the memory-attribute registers as CP15 pairs. */
    p->arch.mair0 = READ_CP32(MAIR0);
    p->arch.mair1 = READ_CP32(MAIR1);
    p->arch.amair0 = READ_CP32(AMAIR0);
    p->arch.amair1 = READ_CP32(AMAIR1);
#else
    p->arch.mair = READ_SYSREG64(MAIR_EL1);
    p->arch.amair = READ_SYSREG64(AMAIR_EL1);
#endif

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    p->arch.dfar = READ_CP32(DFAR);
    p->arch.ifar = READ_CP32(IFAR);
    p->arch.dfsr = READ_CP32(DFSR);
#elif defined(CONFIG_ARM_64)
    p->arch.far = READ_SYSREG64(FAR_EL1);
    p->arch.esr = READ_SYSREG64(ESR_EL1);
#endif

    /* IFSR32_EL2 only holds guest state for AArch32 domains. */
    if ( is_32bit_domain(p->domain) )
        p->arch.ifsr = READ_SYSREG(IFSR32_EL2);
    p->arch.afsr0 = READ_SYSREG(AFSR0_EL1);
    p->arch.afsr1 = READ_SYSREG(AFSR1_EL1);

    /* XXX MPU */

    /* VFP */
    vfp_save_state(p);

    /* VGIC */
    gic_save_state(p);

    isb();
    context_saved(p);
}
/* Inject an undefined exception into a 64 bit guest */
static void inject_undef64_exception(struct cpu_user_regs *regs, int instr_len)
{
    /* Syndrome reported to the guest: EC=Unknown, no further ISS. */
    union hsr esr = {
        .iss = 0,
        .len = instr_len,
        .ec = HSR_EC_UNKNOWN,
    };

    /* 64-bit injection is only valid for an AArch64 guest kernel. */
    BUG_ON( is_pv32_domain(current->domain) );

    /* Mimic hardware exception entry: save PSTATE/PC, mask interrupts,
     * and vector to the current-EL SPx synchronous handler. */
    regs->spsr_el1 = regs->cpsr;
    regs->elr_el1 = regs->pc;

    regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
        PSR_IRQ_MASK | PSR_DBG_MASK;
    regs->pc = READ_SYSREG(VBAR_EL1) + VECTOR64_CURRENT_SPx_SYNC;

    WRITE_SYSREG32(esr.bits, ESR_EL1);
}

/* Inject an abort exception into a 64 bit guest */
static void inject_abt64_exception(struct cpu_user_regs *regs,
                                   int prefetch,
                                   register_t addr,
                                   int instr_len)
{
    union hsr esr = {
        .iss = 0,
        .len = instr_len,
    };

    /*
     * Trap may have been taken from EL0, which might be in AArch32
     * mode (PSR_MODE_BIT set), or in AArch64 mode (PSR_MODE_EL0t).
     *
     * Since we know the kernel must be 64-bit any trap from a 32-bit
     * mode must have been from EL0.
     */
    if ( psr_mode_is_32bit(regs->cpsr) || psr_mode(regs->cpsr,PSR_MODE_EL0t) )
        esr.ec = prefetch
            ? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL;
    else
        esr.ec = prefetch
            ? HSR_EC_INSTR_ABORT_CURR_EL : HSR_EC_DATA_ABORT_CURR_EL;

    BUG_ON( is_pv32_domain(current->domain) );

    /* Mimic hardware exception entry: save PSTATE/PC, mask interrupts,
     * and vector to the current-EL SPx synchronous handler. */
    regs->spsr_el1 = regs->cpsr;
    regs->elr_el1 = regs->pc;

    regs->cpsr = PSR_MODE_EL1h | PSR_ABT_MASK | PSR_FIQ_MASK | \
        PSR_IRQ_MASK | PSR_DBG_MASK;
    regs->pc = READ_SYSREG(VBAR_EL1) + VECTOR64_CURRENT_SPx_SYNC;

    /* Report the faulting address and syndrome to the guest. */
    WRITE_SYSREG(addr, FAR_EL1);
    WRITE_SYSREG32(esr.bits, ESR_EL1);
}

/* Inject a data abort at virtual address @addr into a 64 bit guest. */
static void inject_dabt64_exception(struct cpu_user_regs *regs,
                                    register_t addr,
                                    int instr_len)
{
    inject_abt64_exception(regs, 0, addr, instr_len);
}

/* Inject a prefetch (instruction) abort at @addr into a 64 bit guest. */
static void inject_iabt64_exception(struct cpu_user_regs *regs,
                                    register_t addr,
                                    int instr_len)
{
    inject_abt64_exception(regs, 1, addr, instr_len);
}
void p2m_save_state(struct vcpu *p) { p->arch.sctlr = READ_SYSREG(SCTLR_EL1); }