void do_data_abort(arch_regs_t *uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	struct vmm_vcpu *vcpu;
	struct cpu_l1tbl *l1;
	struct cpu_page pg;

	dfsr = read_dfsr();
	dfar = read_dfar();

	/* Extract fault status, write-not-read, and domain fields from DFSR */
	fs = (dfsr & DFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (dfsr & DFSR_FS4_MASK) >> (DFSR_FS4_SHIFT - 4);
#endif
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		/* Abort taken from non-user (hypervisor) mode: only
		 * translation faults on reserved pages can be fixed up here.
		 */
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: pc = 0x%08x, dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, uregs->pc, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			/* If we were in normal context then just handle
			 * trans fault for current normal VCPU and exit
			 * else there is nothing we can do so panic.
			 */
			if (vmm_scheduler_normal_context()) {
				vcpu = vmm_scheduler_current_vcpu();
				cpu_vcpu_cp15_trans_fault(vcpu, uregs, dfar,
							  fs, dom, wnr, 1, FALSE);
				return;
			}
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	/* Abort taken from user mode: forward the fault to the
	 * CP15 emulation of the current VCPU.
	 */
	vcpu = vmm_scheduler_current_vcpu();

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch (fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, dfar,
					       fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, dfar,
						fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, dfar,
						fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, dfar,
					      fs, dom, wnr, 1);
		if ((dfar & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) !=
		    arm_priv(vcpu)->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	}

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n",
			   __func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
void do_data_abort(vmm_user_regs_t *uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	vmm_vcpu_t *vcpu;
	cpu_l1tbl_t *l1;
	cpu_page_t pg;

	dfsr = read_dfsr();
	dfar = read_dfar();

	/* Assemble the 5-bit fault status from DFSR.FS[4] and DFSR.FS[3:0] */
	fs = (dfsr & DFSR_FS4_MASK) >> DFSR_FS4_SHIFT;
	fs = (fs << 4) | (dfsr & DFSR_FS_MASK);
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		/* Abort taken from non-user (hypervisor) mode: only
		 * translation faults on reserved pages are expected.
		 */
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	switch (fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, dfar,
					       fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, dfar,
						fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, dfar,
						fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, dfar,
					      fs, dom, wnr, 1);
		if ((dfar & ~(sizeof(vcpu->sregs->cp15.ovect) - 1)) !=
		    vcpu->sregs->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	}

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n",
			   __func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
void do_prefetch_abort(arch_regs_t *uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	struct vmm_vcpu *vcpu;

	ifsr = read_ifsr();
	ifar = read_ifar();

	/* Extract the fault status field from IFSR */
	fs = (ifsr & IFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (ifsr & IFSR_FS4_MASK) >> (IFSR_FS4_SHIFT - 4);
#endif

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		struct cpu_l1tbl *l1;
		struct cpu_page pg;

		/* Abort taken from non-user (hypervisor) mode: only
		 * translation faults on reserved pages are expected.
		 */
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: pc = 0x%08x, ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, uregs->pc, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	/* If the faulting pc lies in the page mapped at cp15.ovect_base,
	 * redirect execution to the guest's ovect copy and resume.
	 */
	if ((uregs->pc & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) ==
	    arm_priv(vcpu)->cp15.ovect_base) {
		uregs->pc = (virtual_addr_t)arm_guest_priv(vcpu->guest)->ovect +
			    (uregs->pc & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1));
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch (fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, ifar,
					       fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, ifar,
						fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, ifar,
						fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, ifar,
					      fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break;
	}

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n",
			   __func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
void do_prefetch_abort(vmm_user_regs_t *uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	vmm_vcpu_t *vcpu;
	cpu_l1tbl_t *l1;
	cpu_page_t pg;

	ifsr = read_ifsr();
	ifar = read_ifar();

	/* Assemble the 5-bit fault status from IFSR.FS[4] and IFSR.FS[3:0] */
	fs = (ifsr & IFSR_FS4_MASK) >> IFSR_FS4_SHIFT;
	fs = (fs << 4) | (ifsr & IFSR_FS_MASK);

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		/* Abort taken from non-user (hypervisor) mode: only
		 * translation faults on reserved pages are expected.
		 */
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	switch (fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, ifar,
					       fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, ifar,
						fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, ifar,
						fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, ifar,
					      fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break;
	}

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n",
			   __func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}