void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}
Example no. 2
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	int ret;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", iommu_pt->domain, gpuaddr,
			range, ret);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/*
	 * Flushing only required if per process pagetables are used. With
	 * global case, flushing will happen inside iommu_map function
	 */
	if (!ret && msm_soc_version_supports_iommu_v1())
		*tlb_flags = UINT_MAX;
#endif
	return 0;
}
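The masking step above deserves a concrete illustration. A minimal standalone sketch, assuming KGSL_MMU_ALIGN_MASK simply clears the page-offset bits of a 4K page (the real definition lives in the KGSL headers; the mask and addresses here are hypothetical):

#include <stdio.h>

/* Hypothetical stand-in for KGSL_MMU_ALIGN_MASK with 4K pages:
 * clearing the low 12 bits recovers the page-aligned base. */
#define EXAMPLE_ALIGN_MASK (~0xFFFu)

int main(void)
{
	unsigned int perturbed = 0x40001234;	/* page base + offset 0x234 */
	unsigned int base = perturbed & EXAMPLE_ALIGN_MASK;

	printf("%#x -> %#x\n", perturbed, base);	/* prints 0x40001234 -> 0x40001000 */
	return 0;
}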
Example no. 3
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	int ret;
	unsigned int range = memdesc->size;
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", domain, gpuaddr,
			range, ret);

	return 0;
}
Example no. 4
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	int ret;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
			"with err: %d\n", iommu_pt->domain, gpuaddr,
			range, ret);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	if (!ret)
		*tlb_flags = UINT_MAX;
#endif
	return 0;
}
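Note that none of these unmap paths flush the IOMMU TLB directly; the function only raises a hint through the tlb_flags out-parameter (UINT_MAX serving as an "invalidate everything" marker) so the caller can batch the flush. A minimal user-space sketch of that out-parameter pattern, with hypothetical names; the real KGSL caller and flush primitive differ:

#include <limits.h>
#include <stdio.h>

/* Hypothetical unmap: reports "TLB flush needed" via an out-parameter
 * instead of flushing inline, so the caller can batch flushes. */
static int example_unmap(unsigned int *tlb_flags)
{
	/* ... tear down page-table entries here ... */
	*tlb_flags = UINT_MAX;	/* request a flush of all contexts */
	return 0;
}

int main(void)
{
	unsigned int tlb_flags = 0;

	example_unmap(&tlb_flags);
	if (tlb_flags) {
		/* A real driver would invalidate the IOMMU TLB here. */
		printf("TLB flush requested (flags=%#x)\n", tlb_flags);
		tlb_flags = 0;
	}
	return 0;
}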
Example no. 5
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}
Example no. 6
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	gfn_t gfn = base_gfn;
	pfn_t pfn;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	unsigned long i;
	u64 phys;

	/* check if iommu exists and is in use */
	if (!domain)
		return;

	for (i = 0; i < npages; i++) {
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn = phys >> PAGE_SHIFT;
		kvm_release_pfn_clean(pfn);
		gfn++;
	}

	iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
}
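The address arithmetic above hinges on two shifts: gfn_to_gpa() is a left shift of the guest frame number by PAGE_SHIFT (see include/linux/kvm_host.h), and the page frame number is recovered from the physical address returned by iommu_iova_to_phys() with the inverse shift. A minimal sketch, assuming 4K pages and hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assuming 4K pages */

int main(void)
{
	uint64_t gfn = 0x12345;				/* guest frame number */
	uint64_t gpa = gfn << EXAMPLE_PAGE_SHIFT;	/* gfn_to_gpa() equivalent */
	uint64_t phys = 0x87654000;			/* as if from iommu_iova_to_phys() */
	uint64_t pfn = phys >> EXAMPLE_PAGE_SHIFT;	/* physical address -> pfn */

	printf("gpa=%#llx pfn=%#llx\n",
	       (unsigned long long)gpa, (unsigned long long)pfn);
	return 0;
}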
Example no. 7
static int do_iommu_domain_unmap(struct map_result *result)
{
	unsigned long unmapped_size;
	struct hisi_iommu_domain *hisi_domain = hisi_iommu_domain_p;
	struct gen_pool *pool = hisi_domain->iova_pool;

	/* Unmapping a tile mapping mirrors unmapping a range. */
	if (result->is_tile) {
		unmapped_size = iommu_unmap_tile(hisi_domain->domain,
				result->iova_start, result->iova_size);
	} else {
		unmapped_size = iommu_unmap_range(hisi_domain->domain,
				result->iova_start, result->iova_size);
	}

	if (unmapped_size != result->iova_size) {
		dbg("[%s]unmap failed!\n", __func__);
		return -EINVAL;
	}

	/* free iova */
	if (pool)
		hisi_free_iova(pool, result->iova_start, result->iova_size);

	return 0;
}
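hisi_free_iova() above hands the IOVA range back to a gen_pool once the mapping is gone. A minimal sketch of what such helpers could look like, assuming they are thin wrappers over the kernel's genalloc API (linux/genalloc.h); the real hisi implementation may add locking and alignment handling:

#include <linux/genalloc.h>

/* Hypothetical: carve an IOVA range out of the domain's pool.
 * gen_pool_alloc() returns 0 when the pool is exhausted. */
static unsigned long example_alloc_iova(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc(pool, size);
}

/* Hypothetical: return the range to the pool after unmapping. */
static void example_free_iova(struct gen_pool *pool, unsigned long iova,
			      size_t size)
{
	gen_pool_free(pool, iova, size);
}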
Example no. 8
int ion_cma_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		data->mapped_size = iova_length;
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align,
						&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
						prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
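The iova_length/extra split above is the pattern shared by all the ION map functions in this listing: a caller may request more IOVA space than the buffer occupies, and the surplus is mapped separately by msm_iommu_map_extra() at SZ_4K granularity. A minimal sketch of the layout arithmetic, with hypothetical sizes and addresses:

#include <stdio.h>

int main(void)
{
	unsigned long buffer_size = 0x100000;	/* 1M buffer */
	unsigned long iova_length = 0x180000;	/* caller asked for 1.5M of IOVA */
	unsigned long iova_addr = 0x40000000;	/* as if from msm_allocate_iova_address() */

	unsigned long extra = iova_length - buffer_size;
	unsigned long extra_iova_addr = iova_addr + buffer_size;

	/* [iova_addr, iova_addr + buffer_size) backs the buffer;
	 * [extra_iova_addr, extra_iova_addr + extra) is padding. */
	printf("extra=%#lx starting at %#lx\n", extra, extra_iova_addr);
	return 0;
}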
Example no. 9
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
						data->mapped_size);
out:
	return ret;
}
Example no. 10
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0, i;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct page *page;
	int npages = buffer->size >> PAGE_SHIFT;
	void *vaddr = buffer->priv_virt;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist) * npages);
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
			goto out1;
		}
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
Example no. 11
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear the delayed-map flag so that we don't
			 * interfere with this feature (we are already
			 * delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
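The "already mapped" fast path above derives a buffer's IOVA purely by offset arithmetic: once iommu_map_all() has mapped the whole heap into the domain, a buffer sits at the same offset inside the IOVA window as inside the physical heap. A minimal sketch with hypothetical addresses:

#include <stdio.h>

int main(void)
{
	unsigned long heap_base = 0x80000000;	/* cp_heap->base (physical) */
	unsigned long heap_iova = 0x10000000;	/* cp_heap->iommu_iova[domain_num] */
	unsigned long priv_phys = 0x80200000;	/* this buffer's physical address */

	unsigned long offset = priv_phys - heap_base;
	unsigned long iova_addr = heap_iova + offset;

	printf("iova_addr=%#lx\n", iova_addr);	/* prints 0x10200000 */
	return 0;
}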
Example no. 12
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add int i and unsigned long temp_iova back.
	int i;
	unsigned long temp_iova;
	//HTC_END
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	/*HTC_START Jason Huang 20120614 --- In IOMMU map, some clients may pass a virtual address length
	                                     larger than the (even 1M-aligned) buffer, so the actual mapped size
	                                     can exceed the buffer size. In IOMMU unmap, that extra part must be
	                                     unmapped separately, since it is not 1M-mapped.*/
	unsigned long extra = 0;
	//HTC_END

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	//HTC_START Jason Huang 20120419
	/*HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are 1M-mapped; un-map them with
	                                     iommu_unmap(), since iommu_unmap_range() doesn't support 1M un-mapping.*/
	if (data->buffer->heap->id == ION_CP_MM_HEAP_ID) {
		extra = data->mapped_size - data->buffer->size;

		temp_iova = data->iova_addr;
		for (i = data->buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));

		if (extra)
			iommu_unmap_range(domain, temp_iova, extra);
	} else {
		iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	}
	/*
	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
	//HTC_END

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
}
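The 1M loop above leans on the old iommu_unmap() signature, which takes an allocation order rather than a byte count: with 4K pages, get_order(SZ_1M) is 8, i.e. 2^8 pages of 2^12 bytes. A minimal sketch of that order computation, assuming 4K pages and exact power-of-two sizes (the kernel's get_order() also rounds non-powers up):

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assuming 4K pages */

/* Order such that (1 << order) pages cover the given power-of-two size. */
static int example_get_order(unsigned long size)
{
	int order = 0;

	size >>= EXAMPLE_PAGE_SHIFT;
	while (size > 1) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("get_order(1M) = %d\n", example_get_order(1UL << 20));	/* prints 8 */
	return 0;
}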
Example no. 13
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are always 1M-aligned.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
		align = SZ_1M;
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Change to htc_iommu_map_range for performance improvement.
	/*HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap get 1M mappings via iommu_map().
	                                     Neither htc_iommu_map_range() nor iommu_map_range() supports 1M mapping.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	} else {
		ret = htc_iommu_map_range(domain, data->iova_addr,
					  buffer->priv_phys, buffer->size,
					  ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		for ( ; i < buffer->size; i += SZ_1M, temp_iova -= SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
	} else {
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}