/*
 * Probe-time initialization of one Mali MMU instance: sanity-check the
 * register file, hard-reset the unit, install its interrupt handler and
 * enable paging on the device-wide empty address space.
 *
 * Returns 0 on success or a negative errno.
 */
int lima_mmu_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	/* The broadcast PP-MMU pseudo-unit has no register file of its own. */
	if (ip->id == lima_ip_ppmmu_bcast)
		return 0;

	/*
	 * Register write test: DTE_ADDR holds a page-aligned page-directory
	 * address, so the low 12 bits of 0xCAFEBABE are dropped and working
	 * hardware reads back 0xCAFEB000.
	 */
	mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
	if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
		dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
		return -EIO;
	}

	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
	/* Reset completion is detected by DTE_ADDR reading back as zero. */
	err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				    LIMA_MMU_DTE_ADDR, v, v == 0);
	if (err)
		return err;

	/* IRQF_SHARED: the line may be shared with other IP blocks. */
	err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
		return err;
	}

	/* Unmask the faults we handle, point at the empty VM, enable paging. */
	mmu_write(LIMA_MMU_INT_MASK,
		  LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
	mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
	return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				     LIMA_MMU_STATUS, v,
				     v & LIMA_MMU_STATUS_PAGING_ENABLED);
}
/*
 * Interrupt handler for one MMU instance: report page faults and read
 * bus errors, then hand recovery off to the owning scheduler pipe.
 */
static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_INT_STATUS);
	struct lima_sched_pipe *pipe;

	/* for shared irq case */
	if (!status)
		return IRQ_NONE;

	if (status & LIMA_MMU_INT_PAGE_FAULT) {
		u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);

		dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
			fault, LIMA_MMU_STATUS_BUS_ID(status),
			status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
			lima_ip_name(ip));
	}

	if (status & LIMA_MMU_INT_READ_BUS_ERROR)
		dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));

	/* mask all interrupts before resume */
	mmu_write(LIMA_MMU_INT_MASK, 0);
	mmu_write(LIMA_MMU_INT_CLEAR, status);

	/* GP has its own MMU; all PP MMUs map onto the single PP pipe. */
	pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
	lima_sched_pipe_mmu_error(pipe);

	return IRQ_HANDLED;
}
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm) { struct lima_device *dev = ip->dev; u32 v; lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL, LIMA_MMU_STATUS, v, v & LIMA_MMU_STATUS_STALL_ACTIVE); if (vm) mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma); /* flush the TLB */ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE); lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL, LIMA_MMU_STATUS, v, !(v & LIMA_MMU_STATUS_STALL_ACTIVE)); }
/*
 * Recover an MMU that is sitting in page-fault state: hard-reset it and
 * re-enable paging on the empty VM, mirroring the init-time sequence.
 * No-op when no fault is active.
 */
void lima_mmu_page_fault_resume(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_STATUS);
	u32 v;

	if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
		dev_info(dev->dev, "mmu resume\n");

		/* Mask interrupts while the unit is being reset. */
		mmu_write(LIMA_MMU_INT_MASK, 0);
		/* Low 12 bits are dropped by the register (page alignment). */
		mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
		/* Reset completion: DTE_ADDR reads back as zero. */
		lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				      LIMA_MMU_DTE_ADDR, v, v == 0);
		mmu_write(LIMA_MMU_INT_MASK,
			  LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
		mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
		lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				      LIMA_MMU_STATUS, v,
				      v & LIMA_MMU_STATUS_PAGING_ENABLED);
	}
}
void test_mmu () { s_ckone k; int32_t mem[2]; k.mem_size = sizeof(mem)/sizeof(int32_t); k.mem = mem; { clear (&k); k.mmu_base = 1; k.mmu_limit = 1; k.mem[1] = 1337; k.mar = 0; mmu_read (&k); TEST (int32_t, "%u", 1337, k.mbr); TEST (int32_t, "0x%x", 0, k.sr & SR_M); } { clear (&k); k.mmu_base = 1; k.mmu_limit = 1; k.mar = 2; mmu_read (&k); TEST (int32_t, "0x%x", SR_M, k.sr & SR_M); } { clear (&k); k.mmu_base = 1; k.mmu_limit = 1; TEST (int32_t, "%u", 0, k.mem[1]); k.mar = 0; k.mbr = 42; mmu_write (&k); TEST (int32_t, "%u", 42, k.mem[1]); } }
/*
 * TCG helper: forward a guest write of value 'v' to MMU special
 * register 'rn' into the softmmu model for this CPU state.
 */
void helper_mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
{
    mmu_write(env, rn, v);
}
/*
 * TCG helper: forward a guest write of value 'v' to MMU special
 * register 'rn' into the softmmu model.
 *
 * NOTE(review): 'env' is not a parameter here, so this variant relies on
 * 'env' being in scope at file level — presumably the legacy QEMU global
 * CPU-state register variable (pre-AREG0-cleanup style); confirm against
 * the surrounding file before reusing this pattern.
 */
void helper_mmu_write(uint32_t rn, uint32_t v)
{
    mmu_write(env, rn, v);
}
/*
 * ColdFire software TLB-miss handler: walk the page tables for the
 * faulting address and, if a valid mapping exists, load a TLB entry
 * through the MMUTR/MMUDR/MMUOR registers.
 *
 * Returns 0 when a TLB entry was loaded, -1 when no valid mapping
 * exists (caller then treats it as a real page fault).  Runs with local
 * interrupts disabled for the duration of the walk and register writes.
 */
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	/*
	 * Data miss: the hardware latched the faulting address in MMUAR.
	 * Instruction miss: reconstruct it from the PC plus the extension
	 * words already fetched for the current instruction.
	 */
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	/* Kernel-mapped addresses are resolved through init_mm. */
	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	/*
	 * NOTE(review): the pte_offset_map() result is never released with
	 * pte_unmap() on any path below — harmless only if highmem pte
	 * mapping is a no-op on this platform; confirm.
	 */
	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	/*
	 * Write-protect clean user pages so the first store retraps here
	 * and the dirty bit gets set via the 'write' path above.
	 */
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	/* Tag register: page frame, ASID and valid bit. */
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	/* Data register: physical page, protection bits, 8KB size. */
	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	/* Commit the entry into the data or instruction TLB. */
	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}