/*
 * omap2_iommu_fault_isr - decode and acknowledge pending MMU fault IRQs
 * @obj: iommu instance
 * @ra:  out: faulting device address (0 when no fault is pending)
 *
 * Returns a bitmask of OMAP_IOMMU_ERR_* flags, or 0 if no IRQ was pending.
 * Pending status bits are written back to MMU_IRQSTATUS to acknowledge them.
 */
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
	u32 status;
	u32 errors = 0;

	status = iommu_read_reg(obj, MMU_IRQSTATUS) & MMU_IRQ_MASK;
	if (!status) {
		*ra = 0;
		return 0;
	}

	/* latch the device address that triggered the fault */
	*ra = iommu_read_reg(obj, MMU_FAULT_AD);

	if (status & MMU_IRQ_TLBMISS)
		errors |= OMAP_IOMMU_ERR_TLB_MISS;
	if (status & MMU_IRQ_TRANSLATIONFAULT)
		errors |= OMAP_IOMMU_ERR_TRANS_FAULT;
	if (status & MMU_IRQ_EMUMISS)
		errors |= OMAP_IOMMU_ERR_EMU_MISS;
	if (status & MMU_IRQ_TABLEWALKFAULT)
		errors |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
	if (status & MMU_IRQ_MULTIHITFAULT)
		errors |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;

	/* write-to-clear the handled IRQ bits */
	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return errors;
}
/*
 * omap2_iommu_fault_isr - report and acknowledge pending MMU fault IRQs
 * @obj: iommu instance
 * @ra:  out: faulting device address (0 when no fault is pending)
 *
 * Returns the masked MMU_IRQSTATUS bits (0 if no IRQ was pending) and
 * logs a human-readable description of each fault cause.  The status
 * bits are written back to MMU_IRQSTATUS to acknowledge them.
 */
static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
{
	int i;
	u32 stat, da;
	const char *err_msg[] = {
		"tlb miss",
		"translation fault",
		"emulation miss",
		"table walk fault",
		"multi hit fault",
	};

	stat = iommu_read_reg(obj, MMU_IRQSTATUS);
	stat &= MMU_IRQ_MASK;
	if (!stat) {
		/*
		 * Fix: don't leave the caller's fault address
		 * uninitialized on the no-fault path.
		 */
		*ra = 0;
		return 0;
	}

	da = iommu_read_reg(obj, MMU_FAULT_AD);
	*ra = da;

	dev_err(obj->dev, "%s:\tda:%08x ", __func__, da);

	/* err_msg[] entries line up with bit positions 0..4 of stat */
	for (i = 0; i < ARRAY_SIZE(err_msg); i++) {
		if (stat & (1 << i))
			printk("%s ", err_msg[i]);
	}
	printk("\n");

	/* write-to-clear the handled IRQ bits */
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return stat;
}
static void omap2_iommu_disable(struct omap_iommu *obj) { struct omap_hwmod *oh; u32 l; oh = omap_hwmod_lookup(obj->name); if (!oh) return; /* * IPU and DSP iommus are not directly connected to the processor * instead they are behind a shared MMU. Therefore in the case of * a mmu fault and the mmu fault was not handled, even if the processor * is under reset, the shared MMU will try to translation the address * again causing that the status flag cannot be clear and therefore * as soon as the clkdm wants to go to idle the clkdm will be stuck * in transition state. The only way to reset the shared MMU is doing * a hardreset of the L2 iommu which shared the reset line with the * shared MMU. That way we can clean the status bit and turn off * the iommu without any issue. */ if (!strcmp(obj->name, "ipu") || !strcmp(obj->name, "dsp")) { omap_hwmod_assert_hardreset(oh, oh->rst_lines->name); omap_hwmod_deassert_hardreset(oh, oh->rst_lines->name); goto out; } l = iommu_read_reg(obj, MMU_IRQSTATUS); iommu_write_reg(obj, l, MMU_IRQSTATUS); l = iommu_read_reg(obj, MMU_CNTL); l &= ~MMU_CNTL_MASK; iommu_write_reg(obj, l, MMU_CNTL); out: clkdm_allow_idle(oh->clkdm); dev_dbg(obj->dev, "%s is shutting down\n", obj->name); }
static int omap2_iommu_enable(struct iommu *obj) { u32 l, pa; unsigned long timeout; int ret = 0; if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; ret = omap_device_enable(obj->pdev); if (ret) return ret; iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG); timeout = jiffies + msecs_to_jiffies(20); do { l = iommu_read_reg(obj, MMU_SYSSTATUS); if (l & MMU_SYS_RESETDONE) break; } while (!time_after(jiffies, timeout)); if (!(l & MMU_SYS_RESETDONE)) { dev_err(obj->dev, "can't take mmu out of reset\n"); return -ENODEV; } l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); l = iommu_read_reg(obj, MMU_SYSCONFIG); l &= ~MMU_SYS_IDLE_MASK; l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE); iommu_write_reg(obj, l, MMU_SYSCONFIG); iommu_write_reg(obj, pa, MMU_TTB); omap2_iommu_set_twl(obj, true); if (cpu_is_omap44xx()) iommu_write_reg(obj, 0x1, MMU_GP_REG); return 0; }
static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); iommu_write_reg(obj, pa, MMU_TTB); dra7_cfg_dspsys_mmu(obj, true); if (obj->has_bus_err_back) iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); __iommu_set_twl(obj, true); return 0; }
static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; struct iommu_platform_data *pdata = obj->dev->platform_data; if (!obj->secure_mode) { if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; } else { pa = (u32)obj->secure_ttb; if (!pa || !IS_ALIGNED(pa, SZ_16K)) return -EINVAL; } l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); iommu_write_reg(obj, pa, MMU_TTB); __iommu_set_twl(obj, true); if (pdata->has_bus_err_back) iommu_write_reg(obj, MMU_BUS_ERR_BACK_EN, MMU_GP_REG); return 0; }
static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; /* * HACK: without this, we blow imprecise external abort on uEVM * followed by L3 bus exception spew */ if (cpu_is_omap54xx()) { pr_info("omap2_iommu_enable: doing Benelli reset HACK\n"); __raw_writel(3, OMAP2_L4_IO_ADDRESS(0x4AE06910)); /* We need some ugly wait here as reread or mb() are not * sufficient... */ mdelay(500); } if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); iommu_write_reg(obj, pa, MMU_TTB); __iommu_set_twl(obj, true); return 0; }
static int omap2_iommu_enable(struct iommu *obj) { u32 l, pa; unsigned long timeout; if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG); timeout = jiffies + msecs_to_jiffies(20); do { l = iommu_read_reg(obj, MMU_SYSSTATUS); if (l & MMU_SYS_RESETDONE) break; } while (time_after(jiffies, timeout)); if (!(l & MMU_SYS_RESETDONE)) { dev_err(obj->dev, "can't take mmu out of reset\n"); return -ENODEV; } l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); l = iommu_read_reg(obj, MMU_SYSCONFIG); l &= ~MMU_SYS_IDLE_MASK; l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE); iommu_write_reg(obj, l, MMU_SYSCONFIG); iommu_write_reg(obj, MMU_IRQ_MASK, MMU_IRQENABLE); iommu_write_reg(obj, pa, MMU_TTB); l = iommu_read_reg(obj, MMU_CNTL); l &= ~MMU_CNTL_MASK; l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); iommu_write_reg(obj, l, MMU_CNTL); return 0; }
/*
 * omap2_iommu_disable - disable the MMU control bits
 * @obj: iommu instance
 */
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 cntl;

	cntl = iommu_read_reg(obj, MMU_CNTL);
	iommu_write_reg(obj, cntl & ~MMU_CNTL_MASK, MMU_CNTL);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
/*
 * iommu_report_fault - fetch and acknowledge the pending fault status
 * @obj: iommu instance
 * @da:  out: faulting device address (0 when no fault is pending)
 *
 * Returns the masked MMU_IRQSTATUS bits (0 if no IRQ was pending).
 * Pending status bits are written back to acknowledge them.
 */
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 stat;

	stat = iommu_read_reg(obj, MMU_IRQSTATUS) & MMU_IRQ_MASK;
	if (!stat) {
		*da = 0;
		return 0;
	}

	/* latch the device address that triggered the fault */
	*da = iommu_read_reg(obj, MMU_FAULT_AD);

	/* write-to-clear the handled IRQ bits */
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return stat;
}
/*
 * iotlb_lock_get - read the TLB lock register into @l
 * @obj: iommu instance
 * @l:   out: decoded base/victim entry indices
 */
void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 lock_reg = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(lock_reg);
	l->vict = MMU_LOCK_VICT(lock_reg);
}
/*
 * iotlb_lock_get - read the TLB lock register into @l
 * @obj: iommu instance
 * @l:   out: decoded base/victim entry indices
 */
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 lock_reg = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(lock_reg);
	l->vict = MMU_LOCK_VICT(lock_reg);

	BUG_ON(l->base != 0); /* Currently no preservation is used */
}
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * Reads MMU_REG_SIZE bytes of register space, one u32 at a time,
 * into the context buffer @obj->ctx so it can be restored after
 * the domain loses power.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *ctx = obj->ctx;
	int idx;

	for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
		ctx[idx] = iommu_read_reg(obj, idx * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx, ctx[idx]);
	}
}
/*
 * omap2_iommu_save_ctx - snapshot the register file for off-mode support
 * @obj: iommu instance
 *
 * Copies MMU_REG_SIZE bytes of register space into @obj->ctx, then
 * sanity-checks that the first saved word is the expected IP revision.
 */
static void omap2_iommu_save_ctx(struct iommu *obj)
{
	u32 *ctx = obj->ctx;
	int idx;

	for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
		ctx[idx] = iommu_read_reg(obj, idx * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx, ctx[idx]);
	}

	/* word 0 is MMU_REVISION; anything else means a bad snapshot */
	BUG_ON(ctx[0] != IOMMU_ARCH_VERSION);
}
/*
 * omap2_iommu_disable - disable the MMU and power down the omap_device
 * @obj: iommu instance
 *
 * Clears the MMU control bits, forces the module into idle and shuts
 * the omap_device down, logging any shutdown failure.
 */
static void omap2_iommu_disable(struct iommu *obj)
{
	int ret;
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);

	/*
	 * Fix: `ret` was initialized to 0 and never assigned, so the
	 * error path always printed "err 0x0"; capture the actual
	 * omap_device_shutdown() return value instead.
	 */
	ret = omap_device_shutdown(obj->pdev);
	if (ret)
		dev_err(obj->dev, "%s err 0x%x\n", __func__, ret);
}
/*
 * omap2_iommu_fault_isr - decode and acknowledge pending MMU fault IRQs
 * @obj: iommu instance
 * @ra:  out: faulting device address (0 when no fault is pending or the
 *       hwmod lookup fails)
 *
 * Returns a bitmask of OMAP_IOMMU_ERR_* flags, or 0 if no IRQ was
 * pending.  On a fault the clock domain is also denied idle so the
 * fault can be handled before the domain transitions.
 */
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
	struct omap_hwmod *oh;
	u32 stat, da;
	u32 errs = 0;

	oh = omap_hwmod_lookup(obj->name);
	if (!oh) {
		/*
		 * Fix: initialize the caller's fault address on this early
		 * return too, matching the !stat path below.
		 */
		*ra = 0;
		return 0;
	}

	stat = iommu_read_reg(obj, MMU_IRQSTATUS);
	stat &= MMU_IRQ_MASK;
	if (!stat) {
		*ra = 0;
		return 0;
	}

	/* latch the device address that triggered the fault */
	da = iommu_read_reg(obj, MMU_FAULT_AD);
	*ra = da;

	if (stat & MMU_IRQ_TLBMISS)
		errs |= OMAP_IOMMU_ERR_TLB_MISS;
	if (stat & MMU_IRQ_TRANSLATIONFAULT)
		errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
	if (stat & MMU_IRQ_EMUMISS)
		errs |= OMAP_IOMMU_ERR_EMU_MISS;
	if (stat & MMU_IRQ_TABLEWALKFAULT)
		errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
	if (stat & MMU_IRQ_MULTIHITFAULT)
		errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;

	/* write-to-clear the handled IRQ bits */
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	/* keep the clkdm awake until the fault is dealt with */
	clkdm_deny_idle(oh->clkdm);

	return errs;
}
/*
 * __iommu_set_twl - enable the MMU with or without table-walk logic
 * @obj: iommu instance
 * @on:  true to enable hardware table walks, false for TLB-miss-only mode
 *
 * Selects the matching IRQ mask (TWL faults vs. TLB-miss only) and then
 * programs MMU_CNTL accordingly.
 */
static void __iommu_set_twl(struct iommu *obj, bool on)
{
	u32 cntl = iommu_read_reg(obj, MMU_CNTL);

	/* with TWL off, only TLB misses are interesting as IRQs */
	iommu_write_reg(obj, on ? MMU_IRQ_TWL_MASK : MMU_IRQ_TLB_MISS_MASK,
			MMU_IRQENABLE);

	cntl &= ~MMU_CNTL_MASK;
	cntl |= MMU_CNTL_MMU_EN;
	if (on)
		cntl |= MMU_CNTL_TWL_EN;

	iommu_write_reg(obj, cntl, MMU_CNTL);
}
static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); iommu_write_reg(obj, pa, MMU_TTB); __iommu_set_twl(obj, true); return 0; }
/*
 * omap2_tlb_read_cr - read the current TLB entry's CAM/RAM pair into @cr
 * @obj: iommu instance
 * @cr:  out: CAM and RAM register contents
 */
static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
}
/* omap2_get_version - return the raw MMU_REVISION register value */
static u32 omap2_get_version(struct iommu *obj)
{
	u32 rev = iommu_read_reg(obj, MMU_REVISION);

	return rev;
}