/*
 * Release the ISP MMU handle, if one was acquired, and clear the cached
 * pointer so later code cannot reuse a stale handle. The ISP is held
 * awake (isp_get/isp_put) around the release.
 */
void ispmmu_cleanup(void)
{
	isp_get();
	if (isp_iommu)
		iommu_put(isp_iommu);
	isp_put();

	isp_iommu = NULL;
}
/*
 * omap_rproc_iommu_init - acquire and program the remote processor's IOMMU
 * @rproc:    remote processor instance
 * @callback: MMU fault handler stored in the private data (fa = fault
 *            address, flags = fault flags)
 *
 * Allocates the driver-private state, installs the IOMMU fault ISR,
 * configures secure mode, obtains the IOMMU handle and, for non-secure
 * firmware, programs the static memory_maps[] entries. The clock domain
 * (when present) is forced awake for the duration of the programming and
 * allowed to idle again before returning.
 *
 * Returns 0 on success or a negative errno. On failure the IOMMU handle
 * is released, secure mode is cleared, the private data is freed and
 * rproc->priv is reset so no dangling pointer is left behind.
 */
static int omap_rproc_iommu_init(struct rproc *rproc,
		int (*callback)(struct rproc *rproc, u64 fa, u32 flags))
{
	struct device *dev = rproc->dev;
	struct omap_rproc_pdata *pdata = dev->platform_data;
	int ret, i;
	struct iommu *iommu;
	struct omap_rproc_priv *rpp;

	rpp = kzalloc(sizeof(*rpp), GFP_KERNEL);
	if (!rpp)
		return -ENOMEM;

	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);

	iommu_set_isr(pdata->iommu_name, omap_rproc_iommu_isr, rproc);
	iommu_set_secure(pdata->iommu_name, rproc->secure_mode,
			 rproc->secure_ttb);
	iommu = iommu_get(pdata->iommu_name);
	if (IS_ERR(iommu)) {
		ret = PTR_ERR(iommu);
		dev_err(dev, "iommu_get error: %d\n", ret);
		goto err_mmu;
	}

	rpp->iommu = iommu;
	rpp->iommu_cb = callback;
	rproc->priv = rpp;

	if (!rproc->secure_mode) {
		/* program the firmware's static carveout mappings */
		for (i = 0; rproc->memory_maps[i].size; i++) {
			const struct rproc_mem_entry *me =
						&rproc->memory_maps[i];

			ret = omap_rproc_map(dev, iommu, me->da, me->pa,
					     me->size);
			if (ret)
				goto err_map;
		}
	}

	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);
	return 0;

err_map:
	iommu_put(iommu);
err_mmu:
	iommu_set_secure(pdata->iommu_name, false, NULL);
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);
	/*
	 * rproc->priv may already have been published above; clear it so
	 * callers cannot dereference the private data once it is freed.
	 */
	rproc->priv = NULL;
	kfree(rpp);
	return ret;
}
/*
 * H_PUT_TCE hypercall: write a single TCE entry for the logical I/O bus
 * number in GPR4 at the offset in GPR5, taking the 64-bit TCE value from
 * GPR6. The hcall status (H_Success or H_Parameter) is returned to the
 * guest in GPR3.
 */
static void h_put_tce(struct cpu_user_regs *regs)
{
	u32 liobn = regs->gprs[4];
	ulong ioba = regs->gprs[5];
	union tce tce;

	tce.tce_dword = regs->gprs[6];

	regs->gprs[3] = (iommu_put(liobn, ioba, tce) == -1)
				? H_Parameter : H_Success;
}
static int omap_rproc_iommu_exit(struct rproc *rproc) { struct omap_rproc_priv *rpp = rproc->priv; struct omap_rproc_pdata *pdata = rproc->dev->platform_data; if (pdata->clkdm) clkdm_wakeup(pdata->clkdm); if (rpp->iommu) iommu_put(rpp->iommu); kfree(rpp); if (pdata->clkdm) clkdm_allow_idle(pdata->clkdm); return 0; }
static int omap_dmm_release(struct inode *inode, struct file *filp) { int status = 0; struct iodmm_struct *obj; if (!filp->private_data) { status = -EIO; goto err_out; } obj = filp->private_data; flush_signals(current); status = mutex_lock_interruptible(&obj->iovmm->dmm_map_lock); if (status == 0) { /* * Report to remote Processor of the cleanup of these * resources before cleaning in order to avoid MMU fault * type of behavior */ if (!list_empty(&obj->map_list)) { iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE, NULL); } mutex_unlock(&obj->iovmm->dmm_map_lock); } else { pr_err("%s mutex_lock_interruptible returned 0x%x\n", __func__, status); } user_remove_resources(obj); iommu_put(obj->iovmm->iommu); /* Delete all the DMM pools after the reference count goes to zero */ if (--obj->iovmm->refcount == 0) omap_delete_dmm_pools(obj); kfree(obj); filp->private_data = NULL; err_out: return status; }
/*
 * omap_rproc_deactivate - omap_device deactivation hook for the remote
 * processor: shuts down all of its hwmods, stops its timers and (when
 * CONFIG_REMOTE_PROC_AUTOSUSPEND is set) releases the IOMMU and mailbox
 * handles. The clock domain, when present, is held awake for the whole
 * sequence.
 *
 * Returns 0 on success or the omap_hwmod_shutdown() error code.
 *
 * NOTE(review): on a hwmod shutdown error the timers are left running
 * and the autosuspend teardown is skipped entirely — presumably so the
 * caller can retry the deactivation; confirm against the callers.
 */
int omap_rproc_deactivate(struct omap_device *od)
{
	int i, ret = 0;
	struct rproc *rproc = platform_get_drvdata(&od->pdev);
	struct device *dev = rproc->dev;
	struct omap_rproc_pdata *pdata = dev->platform_data;
	struct omap_rproc_timers_info *timers = pdata->timers;
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	struct omap_rproc_priv *rpp = rproc->priv;
#endif

	/* keep the clock domain awake while the hwmods are shut down */
	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);

	for (i = 0; i < od->hwmods_cnt; i++) {
		ret = omap_hwmod_shutdown(od->hwmods[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < pdata->timers_cnt; i++)
		omap_dm_timer_stop(timers[i].odt);

#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	/* drop the handles so the domain can power down between sessions */
	if (rpp->iommu) {
		iommu_put(rpp->iommu);
		rpp->iommu = NULL;
	}

	if (rpp->mbox) {
		omap_mbox_put(rpp->mbox, NULL);
		rpp->mbox = NULL;
	}
#endif
err:
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);
	return ret;
}