void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
Code example #2
static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}
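This is the standard kernel red-black-tree insertion pattern, normally paired with a lookup that walks the same key comparisons. A minimal sketch of such a lookup, assuming the key and node fields of struct ion_iommu_map used above (the name ion_iommu_lookup is illustrative, not taken from this listing):

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned long key)
{
	struct rb_node *n = buffer->iommu_maps.rb_node;
	struct ion_iommu_map *entry;

	while (n) {
		entry = rb_entry(n, struct ion_iommu_map, node);

		/* Mirror the comparisons made by ion_iommu_add() above. */
		if (key < entry->key)
			n = n->rb_left;
		else if (key > entry->key)
			n = n->rb_right;
		else
			return entry;
	}

	/* No mapping recorded for this key. */
	return NULL;
}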
Code example #3
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
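Compared with the CMA variant at the top of this section (ion_cma_unmap_iommu), the only behavioral difference here is the early return: when cp_heap->iommu_iova[domain_num] is set, the entire heap was mapped as one region, so (per the comment above) per-buffer unmapping is deferred until everything is freed.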
Code example #4
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
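This variant tears the mapping down page by page. With 4 KiB pages, get_order(SZ_4K) is 0, so each iommu_unmap() call removes exactly one page; a 64 KiB mapping, for instance, takes sixteen iterations. The order argument suggests this tree's iommu_unmap() uses the older order-based API rather than a byte length, which is presumably why a loop is used here instead of a single iommu_unmap_range() call.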
Code example #5
static int __ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return -ENOMEM;

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return 0;

out:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				buffer->size);
	kfree(data);
	return ret;
}
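For orientation, a hypothetical caller sketch (example_map_buffer, the domain/partition numbers 0/0, the SZ_4K alignment, and the zero flags are all illustrative, not from the source), showing how __ion_iommu_map() hands the device-visible address back through its iova out-parameter:

static int example_map_buffer(struct ion_buffer *buffer)
{
	unsigned long iova;
	int ret;

	/* Map the whole buffer into domain 0, partition 0, 4K-aligned,
	 * with no special flags (all values hypothetical). */
	ret = __ion_iommu_map(buffer, 0, 0, SZ_4K, buffer->size, 0, &iova);
	if (ret)
		return ret;

	/* On success, iova holds the address the device should use. */
	pr_info("buffer mapped at iova 0x%lx\n", iova);
	return 0;
}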
Code example #6
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add int i and unsigned long temp_iova back.
	int i;
	unsigned long temp_iova;
	//HTC_END
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	/*HTC_START Jason Huang 20120614 --- In IOMMU map, some clients may pass in a larger virtual address length (even larger than the 1M-aligned buffer),
	                                     such that the actual mapped size is larger than the buffer size. In IOMMU unmap, the extra part should be
	                                     un-mapped independently, since it is not 1M mapped.*/
	unsigned long extra = 0;
	//HTC_END

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	//HTC_START Jason Huang 20120419
	/*HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are 1M-mapped; un-map them with iommu_unmap(), since
	                                     iommu_unmap_range() doesn't support 1M un-mapping.*/
	if (data->buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		extra = data->mapped_size - data->buffer->size;

		temp_iova = data->iova_addr;
		for (i = data->buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));

		if (extra)
		{
			iommu_unmap_range(domain, temp_iova, extra);
		}
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	}
	/*
	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
	//HTC_END

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
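The arithmetic behind extra: only the first buffer->size bytes were mapped with 1 MiB blocks, so the loop releases them in SZ_1M steps (get_order(SZ_1M) is 8 with 4 KiB pages), while the remaining extra = mapped_size - buffer->size bytes, which per the comment above are not 1M-mapped, are released with a single iommu_unmap_range() call.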