int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw,
                              int mmu_idx)
{
    int prot;

    /* The LM32 has no MMU: map every address 1:1.  With
       LM32_FLAG_IGNORE_MSB set, bit 31 is dropped, so the upper half of
       the address space mirrors the lower half. */
    address &= TARGET_PAGE_MASK;
    prot = PAGE_BITS;
    if (env->flags & LM32_FLAG_IGNORE_MSB) {
        tlb_set_page(env, address, address & 0x7fffffff, prot, mmu_idx,
                     TARGET_PAGE_SIZE);
    } else {
        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
    }

    return 0;
}
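For context, a minimal sketch of how pre-CPUState targets typically hooked a handler like this into the softmmu slow path. The wrapper below is an assumed illustration of the common tlb_fill pattern of that era, not the verbatim lm32 wrapper; note that the error path is dead for this target, since the handler above always returns 0.

/* Sketch only: the common pre-CPUState tlb_fill wrapper pattern
 * (assumed, not taken verbatim from the lm32 tree). */
void tlb_fill(CPULM32State *env, target_ulong addr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    int ret = cpu_lm32_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        if (retaddr) {
            /* The fault came from translated code: resynchronize the
               guest CPU state before raising the exception. */
            cpu_restore_state(env, retaddr);
        }
        cpu_loop_exit(env);
    }
}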
void tlb_fill(CPUXtensaState *env1, target_ulong vaddr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    /* Old-style helper: swap the faulting CPU's state into the global
       'env', which helpers of this era accessed implicitly. */
    CPUXtensaState *saved_env = env;

    env = env1;
    {
        uint32_t paddr;
        uint32_t page_size;
        unsigned access;
        int ret = xtensa_get_physical_addr(env, true, vaddr, is_write, mmu_idx,
                                           &paddr, &page_size, &access);

        qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n",
                 __func__, vaddr, is_write, mmu_idx, paddr, ret);

        if (ret == 0) {
            tlb_set_page(env,
                         vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         access, mmu_idx, page_size);
        } else {
            do_restore_state(retaddr);
            HELPER(exception_cause_vaddr)(env->pc, ret, vaddr);
        }
    }
    env = saved_env;
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
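One subtlety above: translation uses the 31-bit-masked vaddr, while the TLB entry is installed for the unmasked orig_vaddr, so the guest's original address hits the TLB directly on the next access. A self-contained illustration with hypothetical values (4 KiB pages, as on s390x):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t orig_vaddr = 0x80001234ULL & ~0xfffULL; /* 0x80001000 */
    uint64_t vaddr = orig_vaddr & 0x7fffffff;        /* bit 31 dropped */

    assert(vaddr == 0x00001000ULL); /* translated as a 31-bit address */
    /* ...while the TLB entry stays keyed on orig_vaddr = 0x80001000. */
    return 0;
}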
void tlb_fill(CPUState *cs, target_ulong vaddr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, is_write, mmu_idx,
                                       &paddr, &page_size, &access);

    qemu_log("%s(%08x, %d, %d) -> %08x, ret = %d\n",
             __func__, vaddr, is_write, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}
int cpu_mb_handle_mmu_fault(CPUMBState *env, target_ulong address, int rw,
                            int mmu_idx)
{
    unsigned int hit;
    unsigned int mmu_available;
    int r = 1;
    int prot;

    /* The MMU is available if PVR0 advertises it, unless a full PVR is
       present and PVR11 says otherwise. */
    mmu_available = 0;
    if (env->pvr.regs[0] & PVR0_USE_MMU) {
        mmu_available = 1;
        if ((env->pvr.regs[0] & PVR0_PVR_FULL_MASK)
            && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
            mmu_available = 0;
        }
    }

    /* Translate if the MMU is available and enabled. */
    if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
        target_ulong vaddr, paddr;
        struct microblaze_mmu_lookup lu;

        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
        if (hit) {
            vaddr = address & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;

            DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
                          mmu_idx, vaddr, paddr, lu.prot));
            tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            r = 0;
        } else {
            env->sregs[SR_EAR] = address;
            DMMU(qemu_log("mmu=%d miss v=%x\n", mmu_idx, address));

            /* rw: 0 = load, 1 = store, 2 = instruction fetch.  Pick the
               fetch or data variant of the cause code; bit 10 of ESR
               flags a store. */
            switch (lu.err) {
            case ERR_PROT:
                env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            case ERR_MISS:
                env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            default:
                abort();
                break;
            }

            if (env->exception_index == EXCP_MMU) {
                cpu_abort(env, "recursive faults\n");
            }

            /* TLB miss. */
            env->exception_index = EXCP_MMU;
        }
    } else {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        r = 0;
    }
    return r;
}
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc =
        PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
    int page_size, prot, fault_cause = 0;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    assert(!msr_hv); /* For now there is no Radix PowerNV Support */
    assert(cpu->vhyp);
    assert(ppc64_use_proc_tbl(cpu));

    /* Real Mode Access */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        ppc_radix64_raise_segi(cpu, rwx, eaddr);
        return 1;
    }

    /* Get Process Table */
    patbe = vhc->get_patbe(cpu->vhyp);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}
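To make the process-table bounds check concrete, a hedged worked example: the PRTS value is hypothetical, and prtb_entry is assumed to be the architected two-doubleword (16-byte) layout.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* With PRTS = 4 the table spans 1ULL << (4 + 12) = 64 KiB, i.e.
       4096 16-byte entries, so PID 4096 is the first out of range. */
    uint64_t prts   = 4;                   /* assumed PRTS field value */
    uint64_t size   = 1ULL << (prts + 12); /* 65536-byte process table */
    uint64_t offset = 4096 * 16;           /* PID 4096 * entry size */

    assert(offset >= size); /* would raise DSISR_NOPTE above */
    return 0;
}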
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                            int mmu_idx)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    unsigned int hit;
    unsigned int mmu_available;
    int r = 1;
    int prot;

    /* The MMU is available if the CPU is configured with one, unless a
       full PVR is present and PVR11 says otherwise. */
    mmu_available = 0;
    if (cpu->cfg.use_mmu) {
        mmu_available = 1;
        if ((cpu->cfg.pvr == C_PVR_FULL)
            && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
            mmu_available = 0;
        }
    }

    /* Translate if the MMU is available and enabled. */
    if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
        target_ulong vaddr, paddr;
        struct microblaze_mmu_lookup lu;

        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
        if (hit) {
            vaddr = address & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;

            qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                          mmu_idx, vaddr, paddr, lu.prot);
            tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            r = 0;
        } else {
            env->sregs[SR_EAR] = address;
            qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                          mmu_idx, address);

            /* rw: 0 = load, 1 = store, 2 = instruction fetch.  Pick the
               fetch or data variant of the cause code; bit 10 of ESR
               flags a store. */
            switch (lu.err) {
            case ERR_PROT:
                env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            case ERR_MISS:
                env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
                env->sregs[SR_ESR] |= (rw == 1) << 10;
                break;
            default:
                abort();
                break;
            }

            if (cs->exception_index == EXCP_MMU) {
                cpu_abort(cs, "recursive faults\n");
            }

            /* TLB miss. */
            cs->exception_index = EXCP_MMU;
        }
    } else {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        r = 0;
    }
    return r;
}
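For contrast with the pre-CPUState wrapper sketched earlier, the CPUState-era slow path typically forwarded faults as below; again an assumed illustration of the common tlb_fill pattern, not the verbatim microblaze wrapper.

/* Sketch only: the common CPUState-era tlb_fill wrapper pattern
 * (assumed, not taken verbatim from the microblaze tree). */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret = mb_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        /* Raise the EXCP_MMU queued above, unwinding guest state to
           the faulting instruction first. */
        cpu_loop_exit_restore(cs, retaddr);
    }
}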