int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

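	/* No IOMMU: hand the device the buffer's physical address. */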
	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

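	/* Reserve an IOVA range for the buffer plus any requested padding. */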
	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

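	/* The buffer is contiguous, so one scatterlist entry describes it. */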
	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

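	/* Map the padding beyond the buffer at 4K granularity. */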
	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
						data->mapped_size);
out:
	return ret;
}
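
/*
 * Map a CMA-backed ION buffer into an IOMMU domain using the buffer's
 * sg_table; without an IOMMU the CMA handle is used as the device address.
 */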
int ion_cma_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		data->mapped_size = iova_length;
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align,
						&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
						prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
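
/*
 * Map a vmalloc-backed system-heap buffer into an IOMMU domain by building
 * a scatterlist with one entry per backing page.
 */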
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0, i;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct page *page;
	int npages = buffer->size >> PAGE_SHIFT;
	void *vaddr = buffer->priv_virt;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist) * npages);
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

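	/* Add each page backing the vmalloc area to the scatterlist. */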
	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
			goto out1;
		}
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
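
/*
 * Map a content-protection (CP) heap buffer into an IOMMU domain, either by
 * reusing a heap-wide mapping or by mapping the buffer's own physical range.
 */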
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

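	/*
	 * If the whole heap is (or can be) mapped into this domain, derive
	 * the buffer's address from the heap-wide mapping instead of mapping
	 * it individually.
	 */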
	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear the delayed-unmap flag so that we don't
			 * interfere with this feature (we are already
			 * delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

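	/* No heap-wide mapping: map this buffer via its own scatterlist. */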
	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
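
/*
 * Map an entire CP heap into an IOMMU domain with 64K pages; the video
 * (2x-map) domain gets a virtual range twice the heap size.
 */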
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/*
	 * If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for a prefetch issue in the video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
				msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

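		/* Map the heap into the domain 64K at a time. */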
		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					    order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
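
/*
 * HTC variant: buffers from the ION CP MM heap are mapped with 1M pages via
 * iommu_map(); other CP buffers go through htc_iommu_map_range().
 */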
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, always 1M-alignment.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
		align = SZ_1M;
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Change to htc_iommu_map_range for performance improvement.
	/*HTC_START Jason Huang 20120530 --- For buffers from ION CP MM heap, do 1M mapping by iommu_map().
	                                     Neither htc_iommu_map_range() nor iommu_map_range() supports 1M mapping.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	} else {
		ret = htc_iommu_map_range(domain, data->iova_addr,
					  buffer->priv_phys, buffer->size,
					  ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		for ( ; i < buffer->size; i += SZ_1M, temp_iova -= SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
	} else {
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

Example #7

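/*
 * This ion_cp_heap_map_iommu variant maps the physically contiguous buffer
 * into the IOMMU domain 4K at a time.
 */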
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

out2:
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}