Example no. 1
0
/*
 * DMA-map a scatter/gather list for @dev through its IOMMU.
 *
 * Thin pass-through to iommu_map_sg(): the IOMMU table comes from
 * dev->archdata.dma_data and the addressable-range mask from
 * device_to_mask(dev).  iommu_map_sg()'s result (the number of
 * mapped entries) is returned unchanged.
 *
 * NOTE(review): assumes archdata.dma_data holds this bus's
 * iommu_table — confirm against the arch/bus setup code.
 */
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
    return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
                        device_to_mask(dev), direction, attrs);
}
Example no. 2
0
/*
 * Map an sg_table into the MSM IOMMU domain at @iova.
 *
 * Returns 0 when iommu_map_sg() mapped exactly @len bytes, -EINVAL
 * otherwise.  A zero result additionally trips a WARN_ON.
 *
 * Runtime PM bracketing (pm_runtime_get_sync/pm_runtime_put_sync on
 * mmu->dev) is intentionally left disabled here.
 */
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t mapped;

	mapped = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
	WARN_ON(!mapped);

	if (mapped == len)
		return 0;

	return -EINVAL;
}
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				phys_addr_t phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;
	/* the extra "padding" should never be written to. map it
	 * read-only. */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);
		size_t map_ret;

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		map_ret = iommu_map_sg(domain, temp_iova, sglist, nrpages,
					prot);
		if (map_ret != size) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
			ret = -EINVAL;
		} else {
			ret = 0;
		}

		vfree(sglist);
	} else {
Example no. 4
0
File: gem.c — Project: JaneDu/ath
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}
Example no. 5
0
/*
 * DMA-map a scatterlist for a VIO device.
 *
 * Delegates to iommu_map_sg() using the iommu_table attached to the
 * vio_dev that contains @dev; iommu_map_sg()'s result (the number of
 * mapped entries) is returned unchanged.
 */
static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
			nelems, direction);
}
Example no. 6
0
/*
 * DMA-map a scatterlist for a PCI device behind an IOMMU.
 *
 * Thin pass-through to iommu_map_sg(): devnode_table(pdev) supplies
 * the device's iommu_table and device_to_mask(pdev) the addressable
 * DMA mask.  The mapped-entry count from iommu_map_sg() is returned
 * unchanged.
 */
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	return iommu_map_sg(pdev, devnode_table(pdev), sglist,
			nelems, device_to_mask(pdev), direction);
}