static int omap_dmm_release(struct inode *inode, struct file *filp)
{
	int status = 0;
	struct iodmm_struct *obj;

	if (!filp->private_data) {
		status = -EIO;
		goto err_out;
	}
	obj = filp->private_data;

	flush_signals(current);
	status = mutex_lock_interruptible(&obj->iovmm->dmm_map_lock);
	if (status == 0) {
		/*
		 * Notify the remote processor that these resources are
		 * being cleaned up before actually freeing them, so it
		 * stops using them and does not trigger MMU faults.
		 */
		if (!list_empty(&obj->map_list))
			iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE,
					   NULL);
		mutex_unlock(&obj->iovmm->dmm_map_lock);
	} else {
		pr_err("%s: mutex_lock_interruptible returned 0x%x\n",
		       __func__, status);
	}

	user_remove_resources(obj);
	iommu_put(obj->iovmm->iommu);

	/* Delete all the DMM pools once the reference count drops to zero */
	if (--obj->iovmm->refcount == 0)
		omap_delete_dmm_pools(obj);

	kfree(obj);
	filp->private_data = NULL;

err_out:
	return status;
}
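/*
 * For reference: a ->release handler like the one above is normally wired
 * into the driver's file_operations, so the cleanup runs when userspace
 * closes the device node. A minimal sketch follows; the fops symbol and
 * the .open counterpart are assumptions for illustration, not taken from
 * this file.
 */
#if 0	/* illustrative sketch only, not compiled */
static const struct file_operations dmm_fops = {
	.owner		= THIS_MODULE,
	.open		= omap_dmm_open,	/* assumed open counterpart */
	.release	= omap_dmm_release,
};
#endif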
/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	eventfd_notification(obj);

	/* Dynamic loading TLB or PTE */
	err = iommu_notify_event(obj, IOMMU_FAULT, data);
	if (err == NOTIFY_OK)
		return IRQ_HANDLED;

	if (!cpu_is_omap44xx())
		clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	if (!cpu_is_omap44xx())
		clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n",
			__func__, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
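/*
 * For reference: a handler with this signature is typically registered
 * against the MMU's interrupt line during probe, passing the iommu object
 * back as the cookie so it arrives here as 'data'. A minimal sketch
 * follows; the obj->irq field is an assumption for illustration.
 */
#if 0	/* illustrative sketch only, not compiled */
	err = request_irq(obj->irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(obj->dev), obj);
	if (err < 0)
		dev_err(obj->dev, "failed to request MMU irq\n");
#endif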