Example #1
void efhw_iopages_free(struct pci_dev *pci_dev, struct efhw_iopages *p,
		       efhw_iommu_domain *vf_domain)
{
	struct device *dev = &pci_dev->dev;
	int i;

	for (i = 0; i < p->n_pages; ++i)
		if (!vf_domain)
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, p->dma_addrs[i], PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#else
			EFRM_ASSERT(0);
#endif
		}
#ifdef CONFIG_SUSE_KERNEL
	/* bug 56168 */
	schedule();
#endif
	vfree(p->ptr);
	kfree(p->dma_addrs);
}
Example #2
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	int ret;
	unsigned int range = memdesc->size;
	unsigned int iommu_map_addr;
	int map_order = get_order(SZ_4K);
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	for (iommu_map_addr = gpuaddr; iommu_map_addr < (gpuaddr + range);
		iommu_map_addr += SZ_4K) {
		ret = iommu_unmap(domain, iommu_map_addr, map_order);
		if (ret)
			KGSL_CORE_ERR("iommu_unmap(%p, %x, %d) failed "
			"with err: %d\n", domain, iommu_map_addr,
			map_order, ret);
	}

	return 0;
}
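The loop above tears the range down 4 KB at a time because this kernel's iommu_unmap() appears to take a page order (get_order(SZ_4K)) rather than a byte count. As a sketch only, assuming the later byte-size prototype (size_t iommu_unmap(struct iommu_domain *, unsigned long iova, size_t size)) and a hypothetical wrapper name, the same teardown collapses into a single call:

#include <linux/iommu.h>
#include <linux/printk.h>

/* Hypothetical variant for the byte-size iommu_unmap() API; not from the
 * KGSL driver. */
static int kgsl_iommu_unmap_range(struct iommu_domain *domain,
				  unsigned int gpuaddr, unsigned int range)
{
	/* One call covers the whole page-aligned range; the return value is
	 * the number of bytes actually unmapped. */
	size_t unmapped = iommu_unmap(domain, gpuaddr, range);

	if (unmapped != range)
		pr_err("iommu_unmap(%p, %x, %u) only unmapped %zu bytes\n",
		       domain, gpuaddr, range, unmapped);

	return 0;
}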
Example #3
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
Example #4
static int esa_prepare_buffer(struct device *dev)
{
	unsigned long iova;
	int n, ret;

	/* Original firmware */
	si.fwmem = devm_kzalloc(dev, FWMEM_SIZE, GFP_KERNEL);
	if (!si.fwmem) {
		esa_err("Failed to alloc fwmem\n");
		goto err;
	}
	si.fwmem_pa = virt_to_phys(si.fwmem);

	/* Firmware backup for SRAM */
	si.fwmem_sram_bak = devm_kzalloc(dev, SRAM_FW_MAX, GFP_KERNEL);
	if (!si.fwmem_sram_bak) {
		esa_err("Failed to alloc fwmem\n");
		goto err;
	}

	/* Firmware for DRAM */
	for (n = 0; n < FWAREA_NUM; n++) {
		si.fwarea[n] = dma_alloc_coherent(dev, FWAREA_SIZE,
					&si.fwarea_pa[n], GFP_KERNEL);
		if (!si.fwarea[n]) {
			esa_err("Failed to alloc fwarea\n");
			goto err0;
		}
	}

	for (n = 0, iova = FWAREA_IOVA;
			n < FWAREA_NUM; n++, iova += FWAREA_SIZE) {
		ret = iommu_map(si.domain, iova,
				si.fwarea_pa[n], FWAREA_SIZE, 0);
		if (ret) {
			esa_err("Failed to map iommu\n");
			goto err1;
		}
	}

	/* Base address for IBUF, OBUF and FW LOG  */
	si.bufmem = si.fwarea[0] + BASEMEM_OFFSET;
	si.bufmem_pa = si.fwarea_pa[0];
	si.fw_log_buf = si.sram + FW_LOG_ADDR;

	return 0;
err1:
	for (n = 0, iova = FWAREA_IOVA;
			n < FWAREA_NUM; n++, iova += FWAREA_SIZE) {
		iommu_unmap(si.domain, iova, FWAREA_SIZE);
	}
err0:
	for (n = 0; n < FWAREA_NUM; n++) {
		if (si.fwarea[n])
			dma_free_coherent(dev, FWAREA_SIZE,
					si.fwarea[n], si.fwarea_pa[n]);
	}
err:
	return -ENOMEM;
}
Example #5
void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t unmapped_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	if (WARN_ON(phys & ~PAGE_MASK))
		phys = round_down(phys, PAGE_SIZE);

	spin_lock(&vmm->lock);

	region = find_region(vmm, (dma_addr_t)phys);
	if (WARN_ON(!region)) {
		spin_unlock(&vmm->lock);
		return;
	}

	list_del(&region->node);

	spin_unlock(&vmm->lock);

	unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);

	exynos_sysmmu_tlb_invalidate(dev);

	WARN_ON(unmapped_size != region->size);
	dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
					unmapped_size, region->start);

	kfree(region);
}
Example #6
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_unmap(iommu->domain, iova, len);
	pm_runtime_put_sync(mmu->dev);

	return 0;
}
Example #7
File: gem.c  Project: JaneDu/ath
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}
Example #8
gceSTATUS
gckIOMMU_Unmap(
    IN gckIOMMU Iommu,
    IN gctUINT32 DomainAddress,
    IN gctUINT32 Bytes
    )
{
    gcmkHEADER();

    iommu_unmap(Iommu->domain, DomainAddress, Bytes);

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
Example #9
/*
 * Clean up push buffer resources
 */
static void host1x_pushbuffer_destroy(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);

	if (!pb->mapped)
		return;

	if (host1x->domain) {
		iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
		free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
	}

	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);

	pb->mapped = NULL;
	pb->phys = 0;
}
Example #10
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
Example #11
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					      cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}
Example #12
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
	int count = 0;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed: mapped_size (%zu) < size (%zu)\n",
							mapped_size, size);
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocate VM region for %#x bytes.\n",
									size);
	return (dma_addr_t)ret;
}
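The consolidation step in the middle of iovmm_map() ("if back to back sg entries are contiguous consolidate them") can be read in isolation. A minimal sketch with a hypothetical helper name, not taken from the exynos driver:

#include <linux/scatterlist.h>

/* Return the length of the physically contiguous run that starts at 'sg',
 * advancing '*next' to the first entry after that run (or NULL). */
static size_t sg_contig_len(struct scatterlist *sg, struct scatterlist **next)
{
	size_t len = sg_dma_len(sg);

	while (sg_next(sg) &&
	       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
		sg = sg_next(sg);
		len += sg_dma_len(sg);
	}

	*next = sg_next(sg);
	return len;
}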
Example #13
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

out2:
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
Example #14
int
efhw_iopages_alloc(struct pci_dev *pci_dev, struct efhw_iopages *p,
		   unsigned order, efhw_iommu_domain *vf_domain,
		   unsigned long iova_base)
{
	/* dma_alloc_coherent() is really the right interface to use here.
	 * However, it allocates memory "close" to the device, but we want
	 * memory on the current numa node.  Also we need the memory to be
	 * contiguous in the kernel, but not necessarily in physical
	 * memory.
	 *
	 * vf_domain is the IOMMU protection domain - it implies that pci_dev
	 * is a VF that should not use the normal DMA mapping APIs.
	 */
	struct device *dev = &pci_dev->dev;
	int i = 0;

	p->n_pages = 1 << order;
	p->dma_addrs = kmalloc(p->n_pages * sizeof(p->dma_addrs[0]), 0);
	if (p->dma_addrs == NULL)
		goto fail1;
	p->ptr = vmalloc_node(p->n_pages << PAGE_SHIFT, -1);
	if (p->ptr == NULL)
		goto fail2;
	for (i = 0; i < p->n_pages; ++i) {
		struct page *page;
		page = vmalloc_to_page(p->ptr + (i << PAGE_SHIFT));

		if (!vf_domain) {
			p->dma_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
			if (dma_mapping_error(dev, p->dma_addrs[i])) {
				EFHW_ERR("%s: ERROR dma_map_page failed",
					 __FUNCTION__);
				goto fail3;
			}
		} else
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
		{
			int rc;

			p->dma_addrs[i] = iova_base;
			rc = iommu_map(vf_domain, p->dma_addrs[i],
				       page_to_phys(page), PAGE_SIZE,
				       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
			if (rc) {
				EFHW_ERR("%s: ERROR iommu_map failed (%d)",
					 __FUNCTION__, rc);
				goto fail3;
			}
			iova_base += PAGE_SIZE;
		}
#else
		EFRM_ASSERT(0);
#endif
	}
	return 0;

fail3:
	while (i-- > 0)
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, iova_base, PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
fail2:
	kfree(p->dma_addrs);
fail1:
	return -ENOMEM;
}
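Examples #1 and #14 pair per-page iommu_map()/iommu_unmap() calls on a VF protection domain instead of the usual DMA mapping API. A minimal sketch of that pairing for a single page, assuming the byte-size iommu_map()/iommu_unmap() prototypes used above (the function name is illustrative):

#include <linux/iommu.h>
#include <linux/mm.h>

static int map_one_page(struct iommu_domain *domain, unsigned long iova,
			struct page *page)
{
	int rc;

	/* Map a single page at the caller-chosen IOVA. */
	rc = iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
		       IOMMU_READ | IOMMU_WRITE);
	if (rc)
		return rc;

	/* ... the device performs DMA to/from 'iova' here ... */

	/* Tear the mapping down again. */
	iommu_unmap(domain, iova, PAGE_SIZE);
	return 0;
}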
Example #15
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
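At each step, the loop in iommu_map() above picks the largest hardware-supported page size that fits both the remaining size and the combined alignment of iova | paddr. That selection logic is pulled out here into a hypothetical helper for illustration; for example, iova = 0x1000, paddr = 0x40001000, size = SZ_2M with a 4K|64K|1M|16M bitmap yields SZ_4K, because the addresses are only 4 KB aligned.

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Largest page size from 'pgsize_bitmap' usable for the next mapping step. */
static unsigned long pick_pgsize(unsigned long iova, phys_addr_t paddr,
				 size_t size, unsigned long pgsize_bitmap)
{
	unsigned long addr_merge = iova | paddr;
	unsigned int pgsize_idx = __fls(size);
	unsigned long pgsize;

	if (addr_merge) {
		/* Both iova and paddr must stay aligned to the chosen size. */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* Candidate sizes up to 2^pgsize_idx, restricted to what the
	 * hardware supports; then take the biggest one left. */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
	BUG_ON(!pgsize);

	return 1UL << __fls(pgsize);
}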
Example #16
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
Example #17
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- For buffers from the ION CP MM heap, always use 1M alignment.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		align = SZ_1M;
	}
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Change to htc_iommu_map_range for performance improvement.
	/*HTC_START Jason Huang 20120530 --- For buffers from the ION CP MM heap, do 1M mappings with iommu_map();
	                                     neither htc_iommu_map_range() nor iommu_map_range() supports 1M mapping.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	}
	else
	{
		ret = htc_iommu_map_range(domain, data->iova_addr, buffer->priv_phys, buffer->size, ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		for ( ; i < buffer->size; i += SZ_1M, temp_iova -= SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add int i and unsigned long temp_iova back.
	int i;
	unsigned long temp_iova;
	//HTC_END
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	/*HTC_START Jason Huang 20120614 --- In IOMMU map, some clients may pass a larger virtual address length (even larger than the 1M-aligned buffer),
	                                     such that the actual mapped size is larger than the buffer size. In IOMMU unmap, the extra part should be
	                                     unmapped independently, since it is not 1M-mapped.*/
	unsigned long extra = 0;
	//HTC_END

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	//HTC_START Jason Huang 20120419
	/*HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are 1M-mapped, so unmap them with iommu_unmap();
	                                     iommu_unmap_range() doesn't support 1M unmapping.*/
	if (data->buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		extra = data->mapped_size - data->buffer->size;

		temp_iova = data->iova_addr;
		for (i = data->buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));

		if (extra)
		{
			iommu_unmap_range(domain, temp_iova, extra);
		}
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	}
	/*
	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
	//HTC_END

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}