/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	struct iotlb_lock l;
	int i;

	clk_enable(obj->clk);

	/* Walk every TLB slot looking for an entry whose range covers @da. */
	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs cr;
		u32 start;
		size_t bytes;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &cr);
		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			/* %zx: 'bytes' is a size_t; %x was a format mismatch */
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			/*
			 * Stop at the first match.  Without this break the
			 * loop always ran to completion, making the
			 * "no page" message below fire even after a
			 * successful flush.
			 */
			break;
		}
	}
	clk_disable(obj->clk);

	/* Loop only completes without break when no covering entry exists. */
	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/** * load_iotlb_entry - Set an iommu tlb entry * @obj: target iommu * @e: an iommu tlb entry info **/ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) { int i; int err = 0; struct iotlb_lock l; struct cr_regs *cr; if (!obj || !obj->nr_tlb_entries || !e) return -EINVAL; clk_enable(obj->clk); for (i = 0; i < obj->nr_tlb_entries; i++) { struct cr_regs tmp; iotlb_lock_get(obj, &l); l.vict = i; iotlb_lock_set(obj, &l); iotlb_read_cr(obj, &tmp); if (!iotlb_cr_valid(&tmp)) break; } if (i == obj->nr_tlb_entries) { dev_dbg(obj->dev, "%s: full: no entry\n", __func__); err = -EBUSY; goto out; } cr = iotlb_alloc_cr(obj, e); if (IS_ERR(cr)) { clk_disable(obj->clk); return PTR_ERR(cr); } iotlb_load_cr(obj, cr); kfree(cr); /* increment victim for next tlb load */ if (++l.vict == obj->nr_tlb_entries) l.vict = 0; iotlb_lock_set(obj, &l); out: clk_disable(obj->clk); return err; }
/** * load_iotlb_entry - Set an iommu tlb entry * @obj: target iommu * @e: an iommu tlb entry info **/ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) { int i; int err = 0; struct iotlb_lock l; struct cr_regs *cr; if (!obj || !obj->nr_tlb_entries || !e) return -EINVAL; if (!cpu_is_omap44xx()) clk_enable(obj->clk); iotlb_lock_get(obj, &l); if (l.base == obj->nr_tlb_entries) { dev_warn(obj->dev, "%s: preserve entries full\n", __func__); err = -EBUSY; goto out; } if (!e->prsvd) { for (i = l.base; i < obj->nr_tlb_entries; i++) { struct cr_regs tmp; iotlb_lock_get(obj, &l); l.vict = i; iotlb_lock_set(obj, &l); iotlb_read_cr(obj, &tmp); if (!iotlb_cr_valid(&tmp)) break; } if (i == obj->nr_tlb_entries) { dev_dbg(obj->dev, "%s: full: no entry\n", __func__); err = -EBUSY; goto out; } } else { l.vict = l.base; iotlb_lock_set(obj, &l); } cr = iotlb_alloc_cr(obj, e); if (IS_ERR(cr)) { if (!cpu_is_omap44xx()) clk_disable(obj->clk); return PTR_ERR(cr); } iotlb_load_cr(obj, cr); kfree(cr); /* Increment base number if preservation is set */ if (e->prsvd) l.base++; /* increment victim for next tlb load */ if (++l.vict == obj->nr_tlb_entries) { l.vict = l.base; goto out; } iotlb_lock_set(obj, &l); out: if (!cpu_is_omap44xx()) clk_disable(obj->clk); return err; }