/**
 * Install the CPU exception vector table at its architectural location.
 *
 * Selects the vector base (high vectors via SCTLR.V when
 * CONFIG_ARM32_HIGHVEC, otherwise the low-vector base, optionally
 * programming VBAR for ARMv7-A security extensions), ensures a reserved
 * page is mapped at that address, and copies the vector instructions and
 * their literal data words from _start_vect.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code on failure.
 */
int __init arch_cpu_irq_setup(void)
{
	static const struct cpu_page zero_filled_cpu_page = { 0 };
	int rc;
	extern u32 _start_vect[];
	u32 *vectors, *vectors_data;
	u32 vec;
	struct cpu_page vec_page;

#if defined(CONFIG_ARM32_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *)CPU_IRQ_HIGHVEC_BASE;
#else
#if defined(CONFIG_ARMV7A_SECUREX)
	write_vbar(CPU_IRQ_LOWVEC_BASE);
#endif
	vectors = (u32 *)CPU_IRQ_LOWVEC_BASE;
#endif
	/* Data words (branch-target literals) live right after the
	 * CPU_IRQ_NR vector instructions. */
	vectors_data = vectors + CPU_IRQ_NR;

	/* If vectors are already linked at the correct location
	 * then nothing needs to be copied or mapped. */
	if ((u32)_start_vect == (u32)vectors) {
		return VMM_OK;
	}

	/* If the vector address is not backed by a reserved page yet,
	 * allocate host RAM and map a small page there. */
	vec_page = zero_filled_cpu_page;
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		rc = vmm_host_ram_alloc(&vec_page.pa,
					TTBL_L2TBL_SMALL_PAGE_SIZE, TRUE);
		if (rc) {
			return rc;
		}
		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;
		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			/* FIX: release the RAM page allocated above;
			 * the original leaked it on this error path. */
			vmm_host_ram_free(vec_page.pa,
					  TTBL_L2TBL_SMALL_PAGE_SIZE);
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
int arch_guest_init(struct vmm_guest *guest) { int rc; u32 ovect_flags; virtual_addr_t ovect_va; struct cpu_page pg; if (!guest->reset_count) { guest->arch_priv = vmm_malloc(sizeof(arm_guest_priv_t)); if (!guest->arch_priv) { return VMM_EFAIL; } ovect_flags = 0x0; ovect_flags |= VMM_MEMORY_READABLE; ovect_flags |= VMM_MEMORY_WRITEABLE; ovect_flags |= VMM_MEMORY_CACHEABLE; ovect_flags |= VMM_MEMORY_EXECUTABLE; ovect_va = vmm_host_alloc_pages(1, ovect_flags); if (!ovect_va) { return VMM_EFAIL; } if ((rc = cpu_mmu_get_reserved_page(ovect_va, &pg))) { return rc; } if ((rc = cpu_mmu_unmap_reserved_page(&pg))) { return rc; } #if defined(CONFIG_ARMV5) pg.ap = TTBL_AP_SRW_UR; #else if (pg.ap == TTBL_AP_SR_U) { pg.ap = TTBL_AP_SR_UR; } else { pg.ap = TTBL_AP_SRW_UR; } #endif if ((rc = cpu_mmu_map_reserved_page(&pg))) { return rc; } arm_guest_priv(guest)->ovect = (u32 *)ovect_va; } return VMM_OK; }