/*
 * Enable the EL2 (HYP) stage-1 MMU using the page tables at @ttbr, then
 * jump to the virtual-address alias of the caller via cpu_switch_phys2virt.
 *
 * @cpu_data:  per-CPU data of the calling CPU (unused in the visible body;
 *             presumably part of a fixed calling convention — TODO confirm)
 * @phys2virt: physical-to-virtual offset handed to cpu_switch_phys2virt
 * @ttbr:      physical base of the EL2 stage-1 page tables
 *
 * NOTE(review): no return type is visible on this definition in this view —
 * presumably declared void (possibly with attributes) earlier in the file.
 */
setup_mmu_el2(struct per_cpu *cpu_data, phys2virt_t phys2virt, u64 ttbr)
{
	/*
	 * Translation control: T0SZ region size, write-back write-allocate
	 * inner and outer cacheability for table walks, inner-shareable
	 * walks, plus the architecturally required HTCR reserved-1 bits.
	 */
	u32 tcr = T0SZ
		| (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
		| (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
		| (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
		| HTCR_RES1;
	u32 sctlr_el1, sctlr_el2;

	/* Ensure that MMU is disabled; bail out if it is already on. */
	arm_read_sysreg(SCTLR_EL2, sctlr_el2);
	if (sctlr_el2 & SCTLR_M_BIT)
		return;

	/*
	 * This setup code is always preceded by a complete cache flush, so
	 * there is already a few memory barriers between the page table writes
	 * and here.
	 */
	isb();
	arm_write_sysreg(HMAIR0, DEFAULT_HMAIR0);
	arm_write_sysreg(HMAIR1, DEFAULT_HMAIR1);
	arm_write_sysreg(TTBR0_EL2, ttbr);
	arm_write_sysreg(TCR_EL2, tcr);

	/*
	 * Flush HYP TLB. It should only be necessary if a previous hypervisor
	 * was running.
	 */
	arm_write_sysreg(TLBIALLH, 1);
	dsb(nsh);

	/*
	 * We need coherency with the kernel in order to use the setup
	 * spinlocks: only enable the caches if they are enabled at EL1.
	 */
	arm_read_sysreg(SCTLR_EL1, sctlr_el1);
	sctlr_el1 &= (SCTLR_I_BIT | SCTLR_C_BIT);

	/* Enable stage-1 translation (plus EL1's cache enables, see above). */
	arm_read_sysreg(SCTLR_EL2, sctlr_el2);
	sctlr_el2 |= SCTLR_M_BIT | sctlr_el1;
	arm_write_sysreg(SCTLR_EL2, sctlr_el2);
	isb();

	/*
	 * Inlined epilogue that returns to switch_exception_level.
	 * Must not touch anything else than the stack
	 */
	cpu_switch_phys2virt(phys2virt);

	/* Not reached (cannot be a while(1), it confuses the compiler) */
	asm volatile("b .\n");
}
/*
 * Return the physical ID of the calling CPU: the affinity fields of
 * MPIDR_EL1 with the non-identifier bits masked off.
 */
unsigned long phys_processor_id(void)
{
	unsigned long cpu_id;

	arm_read_sysreg(MPIDR_EL1, cpu_id);

	return cpu_id & MPIDR_CPUID_MASK;
}
/*
 * Sanity-check the EL2 stage-1 mapping: run @virt_addr through the
 * hardware HYP-mode read translation (ATS1HR) and verify that it resolves
 * fault-free to @phys_addr. On any fault or mismatch, print both values
 * and hang forever.
 */
static void check_mmu_map(unsigned long virt_addr, unsigned long phys_addr)
{
	unsigned long translated;
	u64 par;

	/* Translate virt_addr; the result is reported through PAR. */
	arm_write_sysreg(ATS1HR, virt_addr);
	isb();
	arm_read_sysreg(PAR_EL1, par);

	translated = (unsigned long)(par & PAR_PA_MASK);
	if (!(par & PAR_F_BIT) && translated == phys_addr)
		return;

	printk("VA->PA check failed, expected %x, got %x\n",
	       phys_addr, translated);
	while (1);
}
/*
 * Tear down the EL2 MMU on hypervisor shutdown: disable stage-1
 * translation and the EL2 caches, zero the translation registers, install
 * the vectors at @vectors, and return to the context held in @regs.
 *
 * @regs:    register state handed to vmreturn (presumably the previous
 *           world's context — confirm against the caller)
 * @vectors: address to program into HVBAR before leaving
 *
 * NOTE(review): no return type is visible on this definition in this view —
 * presumably void, with vmreturn() not returning.
 */
shutdown_el2(struct registers *regs, unsigned long vectors)
{
	u32 sctlr_el2;

	/* Disable stage-1 translation, caches must be cleaned. */
	arm_read_sysreg(SCTLR_EL2, sctlr_el2);
	sctlr_el2 &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
	arm_write_sysreg(SCTLR_EL2, sctlr_el2);
	isb();

	/* Clean the MMU registers */
	arm_write_sysreg(HMAIR0, 0);
	arm_write_sysreg(HMAIR1, 0);
	arm_write_sysreg(TTBR0_EL2, 0);
	arm_write_sysreg(TCR_EL2, 0);
	isb();

	/* Reset the vectors as late as possible */
	arm_write_sysreg(HVBAR, vectors);

	vmreturn(regs);
}