/*
 * Install the CPU exception vectors at the architecturally expected
 * address (high or low vector base, per config), mapping a backing
 * page for them first if one is not already present.
 *
 * Returns VMM_OK on success, or a negative error code from the
 * RAM-allocation / MMU-mapping helpers on failure.
 */
int __init arch_cpu_irq_setup(void)
{
	static const struct cpu_page zero_filled_cpu_page = { 0 };
	int rc;
	extern u32 _start_vect[];	/* vector image provided by the linker/startup code */
	u32 *vectors, *vectors_data;
	u32 vec;
	struct cpu_page vec_page;

#if defined(CONFIG_ARM32_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *) CPU_IRQ_HIGHVEC_BASE;
#else
#if defined(CONFIG_ARMV7A_SECUREX)
	/* Low vectors with Security Extensions: program VBAR explicitly. */
	write_vbar(CPU_IRQ_LOWVEC_BASE);
#endif
	vectors = (u32 *) CPU_IRQ_LOWVEC_BASE;
#endif
	/* The CPU_IRQ_NR data words (literal pool) follow the CPU_IRQ_NR
	 * vector instructions in the vector image.
	 */
	vectors_data = vectors + CPU_IRQ_NR;

	/* If vectors are at correct location then do nothing */
	if ((u32) _start_vect == (u32) vectors) {
		return VMM_OK;
	}

	/* If vectors are not mapped in virtual memory then map them. */
	vec_page = zero_filled_cpu_page;
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		/* No existing mapping: allocate one small page of RAM and
		 * map it read-write (supervisor) at the vector base.
		 */
		rc = vmm_host_ram_alloc(&vec_page.pa,
					TTBL_L2TBL_SMALL_PAGE_SIZE,
					TRUE);
		if (rc) {
			return rc;
		}
		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;
		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
/*
 * One-time protection/paging initialization: install the exception
 * vector table and switch to a freshly built bootstrap pagetable.
 *
 * Fix: use a proper `(void)` prototype — in C, an empty parameter
 * list `()` declares a function with unspecified parameters rather
 * than one taking no arguments.
 */
void prot_init(void)
{
	/* Point the vector base register at the exception vector table. */
	write_vbar((reg_t)&exc_vector_table);

	/* Set up a new post-relocate bootstrap pagetable so that
	 * we can map in VM, and we no longer rely on pre-relocated
	 * data. The call order below is significant: clear, build
	 * mappings, then load the new pagetable.
	 */
	pg_clear();
	pg_identity(&kinfo); /* Still need 1:1 for device memory . */
	pg_mapkernel();
	pg_load();

	prot_init_done = 1;
}