static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	temp_iova = data->iova_addr;
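	/*
	 * Note: iommu_unmap() on this kernel vintage takes a page order
	 * (hence get_order(SZ_4K)), not a size in bytes.
	 */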
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
Example #2
int mdss_iommu_init(void)
{
	struct iommu_domain *domain;
	int domain_idx, i;

	domain_idx = msm_register_domain(&mdp_iommu_layout);
	if (IS_ERR_VALUE(domain_idx))
		return -EINVAL;

	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	iommu_set_fault_handler(domain, mdss_iommu_fault_handler);

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
		if (!mdp_iommu_ctx[i].ctx) {
			pr_warn("unable to get iommu ctx(%s)\n",
					mdp_iommu_ctx[i].name);
			return -EINVAL;
		}
	}
	mdss_res->iommu_domain = domain_idx;

	return 0;
}
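The example above registers mdss_iommu_fault_handler without showing its body. Below is a minimal sketch of such a handler, assuming the pre-3.5 two-argument iommu_set_fault_handler() used here and its matching iommu_fault_handler_t callback signature; the body is illustrative, not the actual MDSS implementation.

static int mdss_iommu_fault_handler(struct iommu_domain *domain,
				    struct device *dev, unsigned long iova,
				    int flags)
{
	/* Log the faulting iova; returning 0 reports the fault as handled. */
	pr_err("mdss iommu fault: iova=0x%08lx flags=0x%x\n", iova, flags);
	return 0;
}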
Example #3
int mdss_iommu_dettach(struct mdss_data_type *mdata)
{
	struct iommu_domain *domain;
	struct mdss_iommu_map_type *iomap;
	int i;

	if (!mdata->iommu_attached) {
		pr_debug("mdp iommu already dettached\n");
		return 0;
	}

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		iomap = mdata->iommu_map + i;

		domain = msm_get_iommu_domain(iomap->domain_idx);
		if (!domain) {
			pr_err("unable to get iommu domain(%d)\n",
				iomap->domain_idx);
			continue;
		}
		iommu_detach_device(domain, iomap->ctx);
	}

	mdata->iommu_attached = false;

	return 0;
}
void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/*
	 * If the whole heap was mapped at once, defer unmapping until
	 * everything is freed.
	 */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
Example #6
int mdss_iommu_attach(struct mdss_data_type *mdata)
{
	struct iommu_domain *domain;
	struct mdss_iommu_map_type *iomap;
	int i;

	if (mdata->iommu_attached) {
		pr_debug("mdp iommu already attached\n");
		return 0;
	}

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		iomap = mdata->iommu_map + i;

		domain = msm_get_iommu_domain(iomap->domain_idx);
		if (!domain) {
			WARN(1, "could not attach iommu client %s to ctx %s\n",
				iomap->client_name, iomap->ctx_name);
			continue;
		}
		iommu_attach_device(domain, iomap->ctx);
	}

	mdata->iommu_attached = true;
	complete_all(&mdata->iommu_attach_done);
	return 0;
}
Example #7
int mdss_iommu_attach(void)
{
	struct iommu_domain *domain;
	int i, domain_idx;

	if (mdss_res->iommu_attached) {
		pr_warn("mdp iommu already attached\n");
		return 0;
	}

	domain_idx = mdss_get_iommu_domain();
	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		if (iommu_attach_device(domain, mdp_iommu_ctx[i].ctx)) {
			WARN(1, "could not attach iommu domain %d to ctx %s\n",
				domain_idx, mdp_iommu_ctx[i].name);
			return -EINVAL;
		}
	}
	mdss_res->iommu_attached = true;

	return 0;
}
Example #8
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
	if (cpu_is_apq8064())
		config.max_clk = 266667000;
	else
		config.max_clk = 200000000;

	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
	return &config;
}
Example #9
int mdss_iommu_init(struct mdss_data_type *mdata)
{
	struct msm_iova_layout layout;
	struct iommu_domain *domain;
	struct mdss_iommu_map_type *iomap;
	int i;

	if (mdata->iommu_map) {
		pr_warn("iommu already initialized\n");
		return 0;
	}

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		iomap = &mdss_iommu_map[i];

		layout.client_name = iomap->client_name;
		layout.partitions = iomap->partitions;
		layout.npartitions = iomap->npartitions;
		layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);

		iomap->domain_idx = msm_register_domain(&layout);
		if (IS_ERR_VALUE(iomap->domain_idx))
			return -EINVAL;

		domain = msm_get_iommu_domain(iomap->domain_idx);
		if (!domain) {
			pr_err("unable to get iommu domain(%d)\n",
				iomap->domain_idx);
			return -EINVAL;
		}
		iommu_set_fault_handler(domain, mdss_iommu_fault_handler, NULL);

		iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
		if (!iomap->ctx) {
			pr_warn("unable to get iommu ctx(%s)\n",
				iomap->ctx_name);
			return -EINVAL;
		}
	}

	mdata->iommu_map = mdss_iommu_map;

	return 0;
}
Example #10
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
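	/* total_size was verified 64K-aligned in iommu_map_all(), so a 64K stride covers it exactly. */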

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					      cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}
Example #11
int mdss_iommu_dettach(void)
{
	struct iommu_domain *domain;
	int i, domain_idx;

	if (!mdss_res->iommu_attached) {
		pr_warn("mdp iommu already dettached\n");
		return 0;
	}

	domain_idx = mdss_get_iommu_domain();
	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++)
		iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);
	mdss_res->iommu_attached = false;

	return 0;
}
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
						data->mapped_size);
out:
	return ret;
}
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0, i;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct page *page;
	int npages = buffer->size >> PAGE_SHIFT;
	void *vaddr = buffer->priv_virt;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist) * npages);
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
			goto out1;
		}
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

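	/*
	 * Three cases follow: the whole heap is already mapped in this
	 * domain, the heap is configured to be mapped all at once on
	 * first use, or this buffer gets its own individual mapping below.
	 */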
	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear the delayed-unmap flag so we don't interfere
			 * with that feature (we are already delaying the
			 * unmap).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

out2:
	/* Unwind the pages mapped so far, from the last one back down. */
	for ( ; i < buffer->size; i += SZ_4K) {
		temp_iova -= SZ_4K;
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	}
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
int ion_cma_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		data->mapped_size = iova_length;
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

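	/*
	 * Note: this example uses the later msm_allocate_iova_address()
	 * variant that returns an error code and hands back the iova
	 * through a pointer, unlike the earlier examples above.
	 */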
	ret = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align,
						&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
				buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
						prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add int i and unsigned long temp_iova back.
	int i;
	unsigned long temp_iova;
	//HTC_END
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	/*HTC_START Jason Huang 20120614 --- In IOMMU map, a client may pass a virtual address length larger than the buffer
	                                     (even larger than the 1M-aligned buffer size), so the actual mapped size exceeds
	                                     the buffer size. In IOMMU unmap, that extra part must be unmapped separately,
	                                     since it is not 1M-mapped.*/
	unsigned long extra = 0;
	//HTC_END

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	//HTC_START Jason Huang 20120419
	/*HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are 1M-mapped, so unmap them with iommu_unmap();
	                                     iommu_unmap_range() doesn't support 1M unmapping.*/
	if (data->buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		extra = data->mapped_size - data->buffer->size;

		temp_iova = data->iova_addr;
		for (i = data->buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));

		if (extra)
		{
			iommu_unmap_range(domain, temp_iova, extra);
		}
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	}
	/*
	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
	//HTC_END

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);

	return;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	//HTC_START Jason Huang 20120419
	//HTC_START Jason Huang 20120530 --- Add unsigned long temp_phys and int i back.
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;
	//HTC_END

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	//HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap are always 1M-aligned.
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		align = SZ_1M;
	}
	//HTC_END

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	//HTC_START Jason Huang 20120419 --- Changed to htc_iommu_map_range for a performance improvement.
	/*HTC_START Jason Huang 20120530 --- For buffers from the ION CP MM heap, do 1M mappings via iommu_map().
	                                     Neither htc_iommu_map_range() nor iommu_map_range() supports 1M mappings.*/
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0; i -= SZ_1M, temp_iova += SZ_1M,
							  temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);

			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
					__func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	}
	else
	{
		ret = htc_iommu_map_range(domain, data->iova_addr, buffer->priv_phys, buffer->size, ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}

		temp_iova = data->iova_addr + buffer->size;
	}
	/*
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
						  temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
				__func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}
	*/
	//HTC_END

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

//HTC_START Jason Huang 20120419
//HTC_START Jason Huang 20120530
out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
	{
		/* Unwind the 1M pages mapped so far, from the last one back down. */
		for ( ; i < buffer->size; i += SZ_1M) {
			temp_iova -= SZ_1M;
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
		}
	}
	else
	{
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
	/*
	for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	*/
//HTC_END
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
Example #19
static struct msm_panel_common_pdata *mdss_mdp_populate_pdata(
	struct device *dev)
{
	struct msm_panel_common_pdata *pdata;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		dev_err(dev, "could not allocate memory for pdata\n");
	return pdata;
}

static u32 mdss_mdp_res_init(struct platform_device *pdev)
{
	u32 rc;

	rc = mdss_mdp_irq_clk_setup(pdev);
	if (rc)
		return rc;

	mdss_res->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
	INIT_DELAYED_WORK(&mdss_res->clk_ctrl_worker,
			  mdss_mdp_clk_ctrl_workqueue_handler);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	mdss_res->rev = MDSS_MDP_REG_READ(MDSS_REG_HW_VERSION);
	mdss_res->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);

	mdss_res->smp_mb_cnt = MDSS_MDP_SMP_MMB_BLOCKS;
	mdss_res->smp_mb_size = MDSS_MDP_SMP_MMB_SIZE;
	mdss_res->pipe_type_map = mdss_mdp_pipe_type_map;
	mdss_res->mixer_type_map = mdss_mdp_mixer_type_map;

	pr_info("mdss_revision=%x\n", mdss_res->rev);
	pr_info("mdp_hw_revision=%x\n", mdss_res->mdp_rev);

	mdss_res->res_init = true;
	mdss_res->timeout = HZ/20;
	mdss_res->clk_ena = false;
	mdss_res->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
	mdss_res->suspend = false;
	mdss_res->prim_ptype = NO_PANEL;
	mdss_res->irq_ena = false;

	return 0;
}
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for a prefetch issue in the video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
				msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					    order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
static int venus_register_domain(u32 fw_max_sz)
{
	struct msm_iova_partition venus_fw_partition = {
		.start = 0,
		.size = fw_max_sz,
	};
	struct msm_iova_layout venus_fw_layout = {
		.partitions = &venus_fw_partition,
		.npartitions = 1,
		.client_name = "pil_venus",
		.domain_flags = 0,
	};

	return msm_register_domain(&venus_fw_layout);
}

static int pil_venus_mem_setup(struct platform_device *pdev, size_t size)
{
	int domain;

	venus_data->iommu_fw_ctx = msm_iommu_get_ctx("venus_fw");
	if (!venus_data->iommu_fw_ctx) {
		dprintk(VIDC_ERR, "No iommu fw context found\n");
		return -ENODEV;
	}

	if (!venus_data->venus_domain_num) {
		size = round_up(size, SZ_4K);
		domain = venus_register_domain(size);
		if (domain < 0) {
			dprintk(VIDC_ERR,
				"Venus fw iommu domain register failed\n");
			return -ENODEV;
		}
		venus_data->iommu_fw_domain = msm_get_iommu_domain(domain);
		if (!venus_data->iommu_fw_domain) {
			dprintk(VIDC_ERR, "No iommu fw domain found\n");
			return -ENODEV;
		}
		venus_data->venus_domain_num = domain;
		venus_data->fw_sz = size;
	}
	return 0;
}

static int pil_venus_auth_and_reset(struct platform_device *pdev)
{
	int rc;
	phys_addr_t fw_bias = venus_data->resources->firmware_base;
	void __iomem *reg_base = venus_data->reg_base;
	u32 ver;
	bool iommu_present = is_iommu_present(venus_data->resources);

	if (!fw_bias) {
		dprintk(VIDC_ERR, "FW bias is not valid\n");
		return -EINVAL;
	}
	/* Get Venus version number */
	if (!venus_data->hw_ver_checked) {
		ver = readl_relaxed(reg_base + VIDC_WRAPPER_HW_VERSION);
		venus_data->hw_ver_minor = (ver & 0x0FFF0000) >> 16;
		venus_data->hw_ver_major = (ver & 0xF0000000) >> 28;
		venus_data->hw_ver_checked = 1;
	}