Code example #1
gceSTATUS
gckIOMMU_Map(
    IN gckIOMMU Iommu,
    IN gctUINT32 DomainAddress,
    IN gctUINT32 Physical,
    IN gctUINT32 Bytes
    )
{
    gceSTATUS status;
    int ret;

    gcmkHEADER_ARG("DomainAddress=%#X, Physical=%#X, Bytes=%d",
                   DomainAddress, Physical, Bytes);

    ret = iommu_map(Iommu->domain, DomainAddress, Physical, Bytes, 0);

    if (ret)
    {
        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:

    gcmkFOOTER();
    return status;

}
Code example #2
File: seiren.c  Project: MikeForeskin/Vindicator-S6
static int esa_prepare_buffer(struct device *dev)
{
	unsigned long iova;
	int n, ret;

	/* Original firmware */
	si.fwmem = devm_kzalloc(dev, FWMEM_SIZE, GFP_KERNEL);
	if (!si.fwmem) {
		esa_err("Failed to alloc fwmem\n");
		goto err;
	}
	si.fwmem_pa = virt_to_phys(si.fwmem);

	/* Firmware backup for SRAM */
	si.fwmem_sram_bak = devm_kzalloc(dev, SRAM_FW_MAX, GFP_KERNEL);
	if (!si.fwmem_sram_bak) {
		esa_err("Failed to alloc fwmem\n");
		goto err;
	}

	/* Firmware for DRAM */
	for (n = 0; n < FWAREA_NUM; n++) {
		si.fwarea[n] = dma_alloc_coherent(dev, FWAREA_SIZE,
					&si.fwarea_pa[n], GFP_KERNEL);
		if (!si.fwarea[n]) {
			esa_err("Failed to alloc fwarea\n");
			goto err0;
		}
	}

	for (n = 0, iova = FWAREA_IOVA;
			n < FWAREA_NUM; n++, iova += FWAREA_SIZE) {
		ret = iommu_map(si.domain, iova,
				si.fwarea_pa[n], FWAREA_SIZE, 0);
		if (ret) {
			esa_err("Failed to map iommu\n");
			goto err1;
		}
	}

	/* Base address for IBUF, OBUF and FW LOG  */
	si.bufmem = si.fwarea[0] + BASEMEM_OFFSET;
	si.bufmem_pa = si.fwarea_pa[0];
	si.fw_log_buf = si.sram + FW_LOG_ADDR;

	return 0;
err1:
	for (n = 0, iova = FWAREA_IOVA;
			n < FWAREA_NUM; n++, iova += FWAREA_SIZE) {
		iommu_unmap(si.domain, iova, FWAREA_SIZE);
	}
err0:
	for (n = 0; n < FWAREA_NUM; n++) {
		if (si.fwarea[n])
			dma_free_coherent(dev, FWAREA_SIZE,
					si.fwarea[n], si.fwarea_pa[n]);
	}
err:
	return -ENOMEM;
}
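
A pattern shared by examples #2 and #9 is to map a region chunk by chunk and tear the partial mapping back down as soon as one iommu_map() call fails. The sketch below is not taken from any of the projects listed here; it assumes the size-based iommu_map()/iommu_unmap() prototypes (recent kernels also take a GFP argument), and the helper name and chunking scheme are purely illustrative.

#include <linux/errno.h>
#include <linux/iommu.h>

/*
 * Illustrative only: map a physically contiguous region into an IOMMU
 * domain in fixed-size chunks, unwinding on failure.  size is assumed
 * to be a multiple of chunk.
 */
static int example_map_region(struct iommu_domain *domain,
			      unsigned long iova, phys_addr_t phys,
			      size_t size, size_t chunk)
{
	size_t mapped = 0;
	int ret;

	while (mapped < size) {
		ret = iommu_map(domain, iova + mapped, phys + mapped,
				chunk, IOMMU_READ | IOMMU_WRITE);
		if (ret) {
			/* roll back everything mapped so far */
			if (mapped)
				iommu_unmap(domain, iova, mapped);
			return ret;
		}
		mapped += chunk;
	}

	return 0;
}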
Code example #3
File: cdma.c  Project: AlexShiLucky/linux
/*
 * Init push buffer resources
 */
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct iova *alloc;
	u32 size;
	int err;

	pb->mapped = NULL;
	pb->phys = 0;
	pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;

	size = pb->size + 4;

	/* initialize buffer pointers */
	pb->fence = pb->size - 8;
	pb->pos = 0;

	if (host1x->domain) {
		unsigned long shift;

		size = iova_align(&host1x->iova, size);

		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
					  GFP_KERNEL);
		if (!pb->mapped)
			return -ENOMEM;

		shift = iova_shift(&host1x->iova);
		alloc = alloc_iova(&host1x->iova, size >> shift,
				   host1x->iova_end >> shift, true);
		if (!alloc) {
			err = -ENOMEM;
			goto iommu_free_mem;
		}

		pb->dma = iova_dma_addr(&host1x->iova, alloc);
		err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
				IOMMU_READ);
		if (err)
			goto iommu_free_iova;
	} else {
Code example #4
File: kgsl_iommu.c  Project: dimax754/msm_2.6.38
static int
kgsl_iommu_map(void *mmu_specific_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags)
{
	int ret = 0;
	unsigned int physaddr;
	unsigned int iommu_virt_addr;
	unsigned int offset = 0;
	int map_order;
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	BUG_ON(NULL == domain);

	map_order = get_order(SZ_4K);

	for (iommu_virt_addr = memdesc->gpuaddr;
		iommu_virt_addr < (memdesc->gpuaddr + memdesc->size);
		iommu_virt_addr += SZ_4K, offset += PAGE_SIZE) {
		physaddr = memdesc->ops->physaddr(memdesc, offset);
		if (!physaddr) {
			KGSL_CORE_ERR("Failed to convert %x address to "
			"physical\n", (unsigned int)memdesc->hostptr + offset);
			kgsl_iommu_unmap(mmu_specific_pt, memdesc);
			return -EFAULT;
		}
		ret = iommu_map(domain, iommu_virt_addr, physaddr,
				map_order, MSM_IOMMU_ATTR_NONCACHED);
		if (ret) {
			KGSL_CORE_ERR("iommu_map(%p, %x, %x, %d, %d) "
			"failed with err: %d\n", domain,
			iommu_virt_addr, physaddr, map_order,
			MSM_IOMMU_ATTR_NONCACHED, ret);
			kgsl_iommu_unmap(mmu_specific_pt, memdesc);
			return ret;
		}
	}

	return ret;
}
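
Note that this example (and examples #8, #11 and #12 below) comes from a tree based on an older kernel or an MSM vendor kernel, in which the fourth iommu_map() argument was a page order rather than a byte count, hence the get_order() calls. Mainline later switched the argument to a size in bytes, which is the form the remaining examples use. Roughly (quoted from memory; recent kernels additionally take a gfp_t parameter):

/*
 * Older, order-based form seen in the MSM-era examples:
 *   int iommu_map(struct iommu_domain *domain, unsigned long iova,
 *                 phys_addr_t paddr, int gfp_order, int prot);
 *
 * Size-based form used by the other examples:
 *   int iommu_map(struct iommu_domain *domain, unsigned long iova,
 *                 phys_addr_t paddr, size_t size, int prot);
 */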
Code example #5
int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int ret;

	if (WARN_ON((phys + size) >= IOVA_START)) {
		dev_err(dev,
			"Unable to create one to one mapping for %#x @ %#x\n",
			size, phys);
		return -EINVAL;
	}

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	if (WARN_ON(phys & ~PAGE_MASK))
		phys = round_down(phys, PAGE_SIZE);


	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0) {
		kfree(region);
		return ret;
	}

	region->start = (dma_addr_t)phys;
	region->size = size;
	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	return 0;
}
Code example #6
File: iommu.c  Project: 020gzh/linux
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;


	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
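
The three while loops in example #6 shrink page_size until the block fits inside the memslot and is naturally aligned at both the guest frame number and the host virtual address. The same idea in isolation, as a hypothetical helper (the name and parameters are illustrative, not part of the KVM code; remaining is assumed to be at least PAGE_SIZE):

#include <linux/mm.h>

/*
 * Illustrative only: pick the largest power-of-two block size that is
 * naturally aligned at both addresses and does not overrun the region.
 */
static unsigned long example_pick_block_size(unsigned long iova,
					     phys_addr_t phys,
					     unsigned long remaining,
					     unsigned long max_block)
{
	unsigned long block = max_block;

	while (block > PAGE_SIZE &&
	       (((iova | phys) & (block - 1)) || block > remaining))
		block >>= 1;

	return block;
}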
Code example #7
File: iopage.c  Project: majek/openonload
int
efhw_iopages_alloc(struct pci_dev *pci_dev, struct efhw_iopages *p,
		   unsigned order, efhw_iommu_domain *vf_domain,
		   unsigned long iova_base)
{
	/* dma_alloc_coherent() is really the right interface to use here.
	 * However, it allocates memory "close" to the device, but we want
	 * memory on the current numa node.  Also we need the memory to be
	 * contiguous in the kernel, but not necessarily in physical
	 * memory.
	 *
	 * vf_domain is the IOMMU protection domain - it imples that pci_dev
	 * is a VF that should not use the normal DMA mapping APIs
	 */
	struct device *dev = &pci_dev->dev;
	int i = 0;

	p->n_pages = 1 << order;
	p->dma_addrs = kmalloc(p->n_pages * sizeof(p->dma_addrs[0]), 0);
	if (p->dma_addrs == NULL)
		goto fail1;
	p->ptr = vmalloc_node(p->n_pages << PAGE_SHIFT, -1);
	if (p->ptr == NULL)
		goto fail2;
	for (i = 0; i < p->n_pages; ++i) {
		struct page *page;
		page = vmalloc_to_page(p->ptr + (i << PAGE_SHIFT));

		if (!vf_domain) {
			p->dma_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
			
			if (dma_mapping_error(dev, p->dma_addrs[i])) {
				EFHW_ERR("%s: ERROR dma_map_page failed",
					 __FUNCTION__);
				goto fail3;
			}
		} else
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
		{
			int rc;

			p->dma_addrs[i] = iova_base;
			rc = iommu_map(vf_domain, p->dma_addrs[i],
				       page_to_phys(page), PAGE_SIZE,
				       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
			if (rc) {
				EFHW_ERR("%s: ERROR iommu_map failed (%d)",
					 __FUNCTION__, rc);
				goto fail3;
			}
			iova_base += PAGE_SIZE;
		}
#else
		EFRM_ASSERT(0);
#endif
	}
	return 0;

fail3:
	while (i-- > 0)
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, iova_base, PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
fail2:
	kfree(p->dma_addrs);
fail1:
	return -ENOMEM;
}
Code example #8
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
				msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					    order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
Code example #9
File: a6xx_gmu.c  Project: grate-driver/linux
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
Code example #10
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
	int count =0;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed as mapped_size (%d) < size (%d)\n", mapped_size, size);
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",
									size);
	return (dma_addr_t)ret;
}
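
Example #10 walks the scatterlist by hand, merging contiguous entries before each iommu_map() call. Later mainline kernels provide iommu_map_sg() for this; a minimal sketch of its use follows, assuming the pre-gfp prototype that returns the number of bytes mapped (0 on failure). The helper name and protection flags are illustrative.

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Illustrative only: map a whole scatterlist at a fixed IOVA. */
static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
			  struct scatterlist *sgl, unsigned int nents)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}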
Code example #11
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, always 1M-alignment.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		align = SZ_1M;
	}
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Change to htc_iommu_map_range for performance improvement.
	/*HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, do 1M mapping by iommu_map().
	                                     Neither htc_iommu_map_range() nor iommu_map_range() supports 1M mapping.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	}
	else
	{
		ret = htc_iommu_map_range(domain, data->iova_addr, buffer->priv_phys, buffer->size, ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		for ( ; i < buffer->size; i += SZ_1M, temp_iova -= SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
Code example #12
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

out2:
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}