void *ion_cp_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long flags)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (ION_IS_CACHED(flags))
			ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
		else
			ret_value = ioremap(buffer->priv_phys, buffer->size);

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
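Every example in this listing branches on the ION_IS_CACHED() test. For reference, the check is just a flag mask; below is a minimal sketch, assuming the tree defines the test in terms of an ION_FLAG_CACHED bit (the exact bit definition varies between ION/MSM kernel trees, so treat the name as an assumption):

/*
 * Minimal sketch of the cached-flag test used throughout these examples.
 * ION_FLAG_CACHED is an assumption about the header in use; older MSM
 * trees derived the bit from a cache shift value instead.
 */
#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)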
Example #2
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return -ENOMEM;
	}

	if (!ION_IS_CACHED(flags))
		info->cpu_addr = dma_alloc_writecombine(dev, len,
					&(info->handle), 0);
	else
		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
					&(info->handle), 0);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	info->is_cached = ION_IS_CACHED(flags);

	ion_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len);

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return 0;
free_mem:
	if (!ION_IS_CACHED(flags))
		dma_free_writecombine(dev, len, info->cpu_addr, info->handle);
	else
		dma_free_nonconsistent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return -ENOMEM;
}
Example #3
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
							vma->vm_page_prot);

		ret_value =  remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value)
			ion_cp_release_region(cp_heap);
		else
			++cp_heap->umap_count;
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
Example #4
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long flags)
{
	if (ION_IS_CACHED(flags))
		return buffer->priv_virt;
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return ERR_PTR(-EINVAL);
	}
}
Example #5
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma, unsigned long flags)
{
	if (ION_IS_CACHED(flags))
		return remap_vmalloc_range(vma, buffer->priv_virt,
						vma->vm_pgoff);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}
static int ion_secure_cma_allocate(struct ion_heap *heap,
			    struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	struct ion_secure_cma_buffer_info *buf = NULL;

	if (!secure_allocation) {
		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
			__func__, heap->name, flags);
		return -ENOMEM;
	}

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
			__func__, heap->name);
		return -ENOMEM;
	}


	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);

	if (buf) {
		int ret;

		buf->secure.want_delayed_unsecure = 0;
		atomic_set(&buf->secure.secure_cnt, 0);
		mutex_init(&buf->secure.lock);
		buf->secure.is_secure = 1;
		buf->secure.ignore_check = true;

		/*
		 * make sure the size is set before trying to secure
		 */
		buffer->size = len;
		ret = ion_cp_secure_buffer(buffer, ION_CP_V2, 0, 0);
		if (ret) {
			/*
			 * Don't treat the secure buffer failing here as an
			 * error for backwards compatibility reasons. If
			 * the secure fails, the map will also fail so there
			 * is no security risk.
			 */
			pr_debug("%s: failed to secure buffer\n", __func__);
		}
		return 0;
	} else {
		return -ENOMEM;
	}
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	struct device *dev = buffer->heap->priv;
	struct ion_cma_buffer_info *info = buffer->priv_virt;


	if (ION_IS_CACHED(buffer->flags))
		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
				info->handle, buffer->size);
	else
		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
				info->handle, buffer->size);
}
Example #8
int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma,
				    unsigned long flags)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(flags))
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}
static int ion_cache_operations(struct smem_client *client,
	struct msm_smem *mem, enum smem_cache_ops cache_op)
{
	unsigned long ionflag = 0;
	int rc = 0;
	int msm_cache_ops = 0;
	if (!mem || !client) {
		dprintk(VIDC_ERR, "Invalid params: %p, %p\n",
			mem, client);
		return -EINVAL;
	}
	rc = ion_handle_get_flags(client->clnt,	mem->smem_priv,
		&ionflag);
	if (rc) {
		dprintk(VIDC_ERR,
			"ion_handle_get_flags failed: %d\n", rc);
		goto cache_op_failed;
	}
	if (ION_IS_CACHED(ionflag)) {
		switch (cache_op) {
		case SMEM_CACHE_CLEAN:
			msm_cache_ops = ION_IOC_CLEAN_CACHES;
			break;
		case SMEM_CACHE_INVALIDATE:
			msm_cache_ops = ION_IOC_INV_CACHES;
			break;
		case SMEM_CACHE_CLEAN_INVALIDATE:
			msm_cache_ops = ION_IOC_CLEAN_INV_CACHES;
			break;
		default:
			dprintk(VIDC_ERR, "cache operation not supported\n");
			rc = -EINVAL;
			goto cache_op_failed;
		}
		rc = msm_ion_do_cache_op(client->clnt,
				(struct ion_handle *)mem->smem_priv,
				0, (unsigned long)mem->size,
				msm_cache_ops);
		if (rc) {
			dprintk(VIDC_ERR,
					"cache operation failed %d\n", rc);
			goto cache_op_failed;
		}
	}
cache_op_failed:
	return rc;
}
Example #10
int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
				int flags)
{
	int ret_value;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&buf->lock);
	if (!buf->is_secure) {
		pr_err("%s: buffer %p was not allocated as secure\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: buffer %p was allocated as cached\n",
			__func__, buffer);
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->map_cnt)) {
		pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d",
			__func__, buffer, atomic_read(&buf->map_cnt));
		ret_value = -EINVAL;
		goto out_unlock;
	}

	if (atomic_read(&buf->secure_cnt)) {
		if (buf->version != version || buf->data != data) {
			pr_err("%s: Trying to re-secure buffer with different values",
				__func__);
			pr_err("Last secured version: %d Currrent %d\n",
				buf->version, version);
			pr_err("Last secured data: %p current %p\n",
				buf->data, data);
			ret_value = -EINVAL;
			goto out_unlock;
		}
	}
	ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);

out_unlock:
	mutex_unlock(&buf->lock);
	return ret_value;
}
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	__arch_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;
	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}
Example #12
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	unsigned int offset = buffer->priv_phys - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buffer->priv_phys)
		return NULL;


	ret = ioremap_pages(start, buffer->priv_phys, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}
Example #13
static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
						offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
Example #14
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}
Example #15
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (IS_ERR_OR_NULL(iommu_map)) {
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
		} else {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
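A hypothetical caller-side sketch (not part of the original source) using the ion_map_iommu() signature shown above; the SZ_4K alignment, the zero iova_length, and the ion_unmap_iommu() counterpart are assumptions about the surrounding MSM ION API:

/*
 * Hypothetical usage sketch: map an ION handle into an IOMMU domain and
 * release it again. domain_num/partition_num are platform-specific values
 * supplied by the caller; flags must not request a cached mapping, since
 * ion_map_iommu() rejects ION_IS_CACHED(flags).
 */
static int example_map_handle(struct ion_client *client,
			      struct ion_handle *handle,
			      int domain_num, int partition_num)
{
	unsigned long iova, buffer_size;
	int ret;

	ret = ion_map_iommu(client, handle, domain_num, partition_num,
			    SZ_4K, 0 /* iova_length: default to buffer size */,
			    &iova, &buffer_size,
			    0 /* flags: uncached */, 0 /* iommu_flags */);
	if (ret)
		return ret;

	/* ... hand iova to the hardware block ... */

	ion_unmap_iommu(client, handle, domain_num, partition_num);
	return 0;
}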
Example #16
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

out2:
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, always 1M-alignment.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		align = SZ_1M;
	}
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Change to htc_iommu_map_range for performance improvement.
	/*HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, do 1M mapping by iommu_map().
	                                     Neither htc_iommu_map_range() nor iommu_map_range() supports 1M mapping.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	}
	else
	{
		ret = htc_iommu_map_range(domain, data->iova_addr, buffer->priv_phys, buffer->size, ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
		goto out2;

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		for ( ; i < buffer->size; i += SZ_1M, temp_iova -= SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
						data->mapped_size);
out:
	return ret;
}
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0, i;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct page *page;
	int npages = buffer->size >> PAGE_SHIFT;
	void *vaddr = buffer->priv_virt;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}


	sglist = vmalloc(sizeof(*sglist) * npages);
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
			goto out1;
		}
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			clear delayed map flag so that we don't interfere
			with this feature (we are already delaying).
			*/
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}