/*
 * iopgtable_clear_entry_all - wipe every L1 entry, release any attached
 * L2 tables, and flush the whole IOTLB.
 */
static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int idx;

	spin_lock(&obj->page_table_lock);

	for (idx = 0; idx < PTRS_PER_IOPGD; idx++) {
		u32 va = idx << IOPGD_SHIFT;
		u32 *pgd = iopgd_offset(obj, va);

		if (!*pgd)
			continue;

		/* a table-type entry owns an L2 table; free it first */
		if (*pgd & IOPGD_TABLE)
			iopte_free(iopte_offset(pgd, 0));

		*pgd = 0;
		flush_iopgd_range(pgd, pgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
static void dump_ioptable(struct seq_file *s) { int i, j; u32 da; u32 *iopgd, *iopte; struct omap_iommu *obj = s->private; spin_lock(&obj->page_table_lock); iopgd = iopgd_offset(obj, 0); for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { if (!*iopgd) continue; if (!(*iopgd & IOPGD_TABLE)) { da = i << IOPGD_SHIFT; seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd); continue; } iopte = iopte_offset(iopgd, 0); for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { if (!*iopte) continue; da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte); } } spin_unlock(&obj->page_table_lock); }
/*
 * iopgtable_clear_entry_core - clear the page-table entry mapping @da.
 *
 * Handles all four entry sizes: small page, large page (16 L2 slots),
 * section, supersection (16 L1 slots).  For the 16-slot cases @da is
 * rewound to the first slot before clearing.  If clearing leaves the
 * L2 table empty, the table itself is freed and its L1 entry cleared.
 *
 * Returns the number of bytes unmapped, or 0 if nothing was mapped.
 * Caller must hold obj->page_table_lock.
 */
static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/*
		 * Pointer arithmetic is already element-scaled: the last
		 * entry is iopte + nent - 1, not
		 * iopte + (nent - 1) * sizeof(*iopte) (which over-flushed
		 * past the table).  Matches the iopte + 15 usage in
		 * iopte_alloc_large().
		 */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	/* same fix as above: range is in entries, not bytes */
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
/*
 * iopgd_alloc_section - install a 1MB section mapping @da -> @pa.
 * Always succeeds (returns 0); kept int for symmetry with the other
 * iopgd/iopte allocators.
 */
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *entry = iopgd_offset(obj, da);

	*entry = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(entry, entry);

	return 0;
}
/*
 * iopgd_alloc_super - install a 16MB supersection mapping @da -> @pa.
 * A supersection is replicated across 16 consecutive L1 slots.
 */
static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *entry = iopgd_offset(obj, da);
	u32 val = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	int i;

	for (i = 0; i < 16; i++)
		entry[i] = val;

	flush_iopgd_range(entry, entry + 15);

	return 0;
}
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *pgd = iopgd_offset(obj, da);
	u32 *pte = NULL;

	/* an L2 entry exists only behind a valid table-type L1 entry */
	if (*pgd && (*pgd & IOPGD_TABLE))
		pte = iopte_offset(pgd, da);

	*ppgd = pgd;
	*ppte = pte;
}
/*
 * iopte_alloc_large - install a 64KB large-page mapping @da -> @pa.
 * A large page is replicated across 16 consecutive L2 slots.
 * Returns 0 on success or the iopte_alloc() error code.
 */
static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *pgd = iopgd_offset(obj, da);
	u32 *pte = iopte_alloc(obj, pgd, da);
	u32 val;
	int i;

	if (IS_ERR(pte))
		return PTR_ERR(pte);

	val = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	for (i = 0; i < 16; i++)
		pte[i] = val;

	flush_iopte_range(pte, pte + 15);

	return 0;
}
/*
 * iopte_alloc_page - install a 4KB small-page mapping @da -> @pa.
 * Returns 0 on success or the iopte_alloc() error code.
 */
static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *pgd = iopgd_offset(obj, da);
	u32 *pte = iopte_alloc(obj, pgd, da);

	if (IS_ERR(pte))
		return PTR_ERR(pte);

	*pte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(pte, pte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, pte, *pte);

	return 0;
}
/*
 * dump_ioptable - walk the page table and emit every valid entry via
 * dump_ioptable_entry_one(); L1 entries are tagged 1, L2 entries 2.
 * Stops at the first dump error.
 *
 * NOTE(review): @p is never advanced, so the return value is always 0
 * and @len is unused — presumably dump_ioptable_entry_one() should be
 * appending into @buf; verify against its definition.
 */
static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
{
	int i;
	u32 *iopgd;
	char *p = buf;

	spin_lock(&obj->page_table_lock);

	iopgd = iopgd_offset(obj, 0);
	for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
		int j, err;
		u32 *iopte;
		u32 da;

		if (!*iopgd)
			continue;

		if (!(*iopgd & IOPGD_TABLE)) {
			da = i << IOPGD_SHIFT;

			err = dump_ioptable_entry_one(1, da, *iopgd);
			if (err)
				goto out;
			continue;
		}

		iopte = iopte_offset(iopgd, 0);

		for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
			if (!*iopte)
				continue;

			da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);

			/*
			 * Dump the L2 entry itself; previously this passed
			 * *iopgd, printing the parent L1 entry for every
			 * L2 slot (cf. the seq_file dump, which prints
			 * *iopte at level 2).
			 */
			err = dump_ioptable_entry_one(2, da, *iopte);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock(&obj->page_table_lock);

	return p - buf;
}
/*
 * Device IOMMU generic operations
 */
/*
 * iommu_fault_handler - top-half IRQ handler for IOMMU faults.
 *
 * Gives registered notifiers (via iommu_notify_event()) first chance to
 * resolve the fault (e.g. by loading a TLB entry or PTE); if none does,
 * reads the fault status/address from the hardware and logs the faulting
 * page-table entries.  Returns IRQ_HANDLED when the fault was resolved
 * or already clear, IRQ_NONE otherwise.
 *
 * NOTE(review): clk_enable/clk_disable must bracket iommu_report_fault()
 * exactly — the register read requires the interface clock; on OMAP4 the
 * clock is apparently managed elsewhere, hence the cpu_is_omap44xx()
 * guards — confirm against the clock framework usage for this IP.
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	/* spurious interrupt: device not in use */
	if (!obj->refcount)
		return IRQ_NONE;

	eventfd_notification(obj);

	/* Dynamic loading TLB or PTE */
	err = iommu_notify_event(obj, IOMMU_FAULT, data);

	if (err == NOTIFY_OK)
		return IRQ_HANDLED;

	if (!cpu_is_omap44xx())
		clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	if (!cpu_is_omap44xx())
		clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	/* no L2 table behind this address: log the L1 entry only */
	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n",
			__func__, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
/*
 * Device IOMMU generic operations
 */
/*
 * iommu_fault_handler - top-half IRQ handler for IOMMU faults.
 *
 * Gives the client's registered isr callback first chance to resolve the
 * fault (e.g. by loading a TLB entry or PTE); if it fails or none is
 * registered, reads the fault status/address from the hardware and logs
 * the faulting page-table entries.  Returns IRQ_HANDLED when the fault
 * was resolved or already clear, IRQ_NONE otherwise.
 *
 * NOTE(review): clk_enable/clk_disable must bracket iommu_report_fault()
 * exactly — the register read presumably requires the interface clock;
 * confirm against the clock framework usage for this IP.
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	/* spurious interrupt: device not in use */
	if (!obj->refcount)
		return IRQ_NONE;

	/* Dynamic loading TLB or PTE */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	/* no L2 table behind this address: log the L1 entry only */
	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n",
			__func__, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}