/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
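/* In the newer variant below, the per-id MMU programming moves behind a
 * struct msm_gem_address_space (priv->aspace[id]) and a helper,
 * msm_gem_map_vma().  The following is a minimal sketch of what such a
 * helper plausibly does -- allocate a GPU-VA range from the address
 * space's drm_mm and map the sg table through the backing IOMMU.  It is
 * given for orientation only: the aspace/vma field names and the exact
 * drm_mm call are assumptions, not necessarily the upstream body.
 */
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	/* pick a free range of npages; vma->node is an assumed drm_mm_node */
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
			0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;

	/* address spaces without an IOMMU (contiguous memory) skip the map */
	if (aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				npages << PAGE_SHIFT,
				IOMMU_READ | IOMMU_WRITE);

	return ret;
}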
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id],
					&msm_obj->domain[id], msm_obj->sgt,
					obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;
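	/* Presumably this wrapper proceeds by taking struct_mutex
	 * (interruptibly, so blocked userspace can be signalled), calling
	 * msm_gem_get_iova_locked(obj, id, iova), and dropping the lock --
	 * matching the locking rule stated above the locked variant.
	 */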