int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
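For context, here is a minimal sketch of how a caller might use this older iommu_device_group() interface (the variant above that returns a group ID through a pointer) to report which isolation group a device belongs to. The helper name example_report_group is made up for illustration; only iommu_device_group() itself comes from the example above.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical helper: log the IOMMU group of a device, if any. */
static int example_report_group(struct device *dev)
{
	unsigned int groupid;
	int ret;

	ret = iommu_device_group(dev, &groupid);
	if (ret == -ENODEV) {
		dev_info(dev, "no IOMMU group (no IOMMU on this bus)\n");
		return 0;
	}
	if (ret)
		return ret;

	dev_info(dev, "member of IOMMU group %u\n", groupid);
	return 0;
}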
/* Should be called under struct_mutex, although it can be called
 * from atomic context without struct_mutex to take an extra
 * iova ref if you know one is already held.
 *
 * That means that when support for unpinning is eventually added,
 * the refcnt counter will need to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
	case KVM_CAP_IOMMU:
		r = iommu_present(&pci_bus_type);
		break;
#endif
	default:
		r = 0;
	}

	return r;
}
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}
static void nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * than or equal to the system's PAGE_SIZE, with a preference
		 * if both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

	/* Error paths: unwind whatever was set up before the failure. */
detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
int hisi_ion_enable_iommu(struct platform_device *pdev)
{
	int ret;
	struct iommu_domain_capablity data;
	struct device *dev = &pdev->dev;
	struct hisi_iommu_domain *hisi_domain;
	struct device_node *np = pdev->dev.of_node;

	printk(KERN_ERR "in %s start\n", __func__);

	hisi_domain = kzalloc(sizeof(*hisi_domain), GFP_KERNEL);
	if (!hisi_domain) {
		dbg("alloc hisi_domain object fail\n");
		return -ENOMEM;
	}

	if (!iommu_present(dev->bus)) {
		dbg("iommu not found\n");
		kfree(hisi_domain);
		return 0;
	}

	/* create iommu domain */
	hisi_domain->domain = iommu_domain_alloc(dev->bus);
	if (!hisi_domain->domain) {
		ret = -EINVAL;
		goto error;
	}

	iommu_attach_device(hisi_domain->domain, dev);
	get_range_info(np, hisi_domain, &data);

	/*
	 * "align" means that buffers allocated from this pool are aligned
	 * to the IOMMU's alignment requirement.
	 */
	hisi_domain->iova_pool = iova_pool_setup(data.iova_start,
			data.iova_end, data.iova_align);
	if (!hisi_domain->iova_pool) {
		ret = -EINVAL;
		goto error;
	}

	/* this is a global pointer */
	hisi_iommu_domain_p = hisi_domain;

	dbg("in %s end\n", __func__);
	return 0;

error:
	WARN(1, "hisi_iommu_domain_init failed!\n");
	if (hisi_domain->iova_pool)
		iova_pool_destory(hisi_domain->iova_pool);
	if (hisi_domain->domain)
		iommu_domain_free(hisi_domain->domain);
	kfree(hisi_domain);

	return ret;
}
static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security is
	 * handled natively using the IOMMU. It is enabled when the IOMMU is
	 * enabled and the ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
/* Should be called under struct_mutex, although it can be called
 * from atomic context without struct_mutex to take an extra
 * iova ref if you know one is already held.
 *
 * That means that when support for unpinning is eventually added,
 * the refcnt counter will need to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			ret = msm_gem_map_vma(priv->aspace[id],
					&msm_obj->domain[id], msm_obj->sgt,
					obj->size >> PAGE_SHIFT);
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
			NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring),
				      GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_present(&pci_bus_type);
		break;
	default:
		r = 0;
	}

	return r;
}
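The value this handler returns for KVM_CAP_IOMMU is what userspace sees when it queries the capability. A minimal userspace sketch, assuming a host that exposes /dev/kvm and headers that still define KVM_CAP_IOMMU; it is only meant to show the KVM_CHECK_EXTENSION side of the interface.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Ask KVM whether legacy device assignment (KVM_CAP_IOMMU) is available,
 * i.e. whether an IOMMU is present on the PCI bus. */
int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int has_iommu;

	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	has_iommu = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU);
	printf("KVM_CAP_IOMMU: %d\n", has_iommu);

	close(kvm_fd);
	return 0;
}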
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
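Most of the examples above share the same shape: probe for an IOMMU on the relevant bus with iommu_present(), allocate a domain, attach the device, and fall back to a contiguous or physical-address path otherwise. Below is a minimal sketch of that pattern, assuming an older kernel where iommu_present() and iommu_domain_alloc() still take a bus_type (as in the examples above); the example_* names are hypothetical.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

struct example_ctx {
	struct iommu_domain *domain;	/* NULL when no IOMMU is used */
};

/* Hypothetical helper mirroring the pattern above: use the IOMMU when
 * the platform bus has one, otherwise tell the caller to take the
 * physically contiguous fallback path.
 * Returns 1 if the IOMMU path is active, 0 if not, negative on error. */
static int example_setup_iommu(struct platform_device *pdev,
			       struct example_ctx *ctx)
{
	int err;

	if (!iommu_present(&platform_bus_type))
		return 0;	/* no IOMMU: caller uses the fallback path */

	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ctx->domain)
		return -ENOMEM;

	err = iommu_attach_device(ctx->domain, &pdev->dev);
	if (err) {
		iommu_domain_free(ctx->domain);
		ctx->domain = NULL;
		return err;
	}

	return 1;	/* IOMMU path is active */
}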