/*
 * drm_iommu_attach_device - attach device to iommu mapping
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be attached
 *
 * This function should be called by sub drivers to attach the device to the
 * iommu mapping.
 */
static int drm_iommu_attach_device(struct drm_device *drm_dev,
                                   struct device *subdrv_dev)
{
        struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret = 0;

        if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
                DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
                              dev_name(subdrv_dev));
                return -EINVAL;
        }

        ret = configure_dma_max_seg_size(subdrv_dev);
        if (ret)
                return ret;

        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
                /*
                 * Drop any pre-existing ARM DMA-IOMMU mapping before
                 * attaching the device to the Exynos DRM mapping.
                 */
                if (to_dma_iommu_mapping(subdrv_dev))
                        arm_iommu_detach_device(subdrv_dev);

                ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
        } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
                ret = iommu_attach_device(priv->mapping, subdrv_dev);
        }

        if (ret)
                clear_dma_max_seg_size(subdrv_dev);

        return ret;
}
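configure_dma_max_seg_size() and clear_dma_max_seg_size() are referenced above but not listed. Below is a minimal sketch of what such helpers could look like, assuming they only provide dev->dma_parms storage and program a 32-bit maximum segment size; this is inferred from the call sites above, not a verbatim listing.

/*
 * Hypothetical sketch, assuming the helpers only manage dev->dma_parms so
 * that dma_set_max_seg_size() has somewhere to store the limit.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int configure_dma_max_seg_size(struct device *dev)
{
        if (!dev->dma_parms)
                dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
                return -ENOMEM;

        /* Let scatter-gather segments span the whole 32-bit range. */
        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        return 0;
}

static void clear_dma_max_seg_size(struct device *dev)
{
        kfree(dev->dma_parms);
        dev->dma_parms = NULL;
}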
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

                arm_iommu_detach_device(dev);
                arm_iommu_release_mapping(mapping);
        }
#endif

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE, with a preference
                 * if both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
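The page-shift selection above is easiest to see with concrete numbers. Here is a minimal, hypothetical stand-alone sketch of the same bitmap arithmetic, assuming a 4 KiB system page size; the names ex_fls() and pick_iommu_pgshift() are made up for illustration and only mirror the fls()-based logic in the function above.

/* Hypothetical stand-alone illustration of the pgshift selection above. */
#include <stdio.h>

#define EX_PAGE_SHIFT   12UL                    /* assume 4 KiB system pages */
#define EX_PAGE_SIZE    (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK    (~(EX_PAGE_SIZE - 1))

/*
 * 1-based index of the most significant set bit, 0 if no bit is set
 * (mirrors the behaviour of the kernel's fls()).
 */
static unsigned int ex_fls(unsigned long x)
{
        unsigned int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Returns the chosen page shift, or 0 if no usable page size exists. */
static unsigned int pick_iommu_pgshift(unsigned long pgsize_bitmap)
{
        unsigned int shift;

        /* Prefer an exact match with the CPU page size. */
        if (pgsize_bitmap & EX_PAGE_SIZE)
                return EX_PAGE_SHIFT;

        /* Otherwise take the largest supported size below EX_PAGE_SIZE. */
        shift = ex_fls(pgsize_bitmap & ~EX_PAGE_MASK);
        if (shift == 0)
                return 0;       /* no page size <= EX_PAGE_SIZE supported */
        return shift - 1;
}

int main(void)
{
        /* IOMMU advertising 4 KiB + 4 MiB pages: exact match, shift 12. */
        printf("%u\n", pick_iommu_pgshift((1UL << 12) | (1UL << 22)));
        /* IOMMU advertising only 1 KiB pages: falls back to shift 10. */
        printf("%u\n", pick_iommu_pgshift(1UL << 10));
        return 0;
}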
/*
 * drm_iommu_detach_device - detach device address space mapping from device
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be detached
 *
 * This function should be called by sub drivers to detach the device from the
 * iommu mapping.
 */
static void drm_iommu_detach_device(struct drm_device *drm_dev,
                                    struct device *subdrv_dev)
{
        struct exynos_drm_private *priv = drm_dev->dev_private;

        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
                arm_iommu_detach_device(subdrv_dev);
        else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                iommu_detach_device(priv->mapping, subdrv_dev);

        clear_dma_max_seg_size(subdrv_dev);
}
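A hypothetical sketch of how a sub-driver's component bind/unbind callbacks might pair the attach and detach calls above, assuming drm_iommu_attach_device() and drm_iommu_detach_device() are visible to the sub-driver (in practice the Exynos driver reaches them through registration helpers); the sub_ctx type and the sub_drv_bind/sub_drv_unbind names are made up for illustration.

/*
 * Hypothetical usage sketch only; callback signatures follow the component
 * framework (struct component_ops).
 */
#include <linux/component.h>
#include <linux/device.h>
#include <drm/drm_device.h>

struct sub_ctx {
        struct drm_device *drm_dev;
};

static int sub_drv_bind(struct device *dev, struct device *master, void *data)
{
        struct sub_ctx *ctx = dev_get_drvdata(dev);
        struct drm_device *drm_dev = data;

        ctx->drm_dev = drm_dev;

        /* Join the shared IOMMU mapping before doing any DMA. */
        return drm_iommu_attach_device(drm_dev, dev);
}

static void sub_drv_unbind(struct device *dev, struct device *master,
                           void *data)
{
        struct sub_ctx *ctx = dev_get_drvdata(dev);

        /* Leave the shared mapping when the component is torn down. */
        drm_iommu_detach_device(ctx->drm_dev, dev);
}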
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                    struct device *dev)
{
        arm_iommu_detach_device(dev);
}