Example #1
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma, int fd,
		enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma->direction = direction;
	dma->dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma->dma_buf)) {
		dev_err(info->dev, "dma_buf_get() failed: %ld\n",
				PTR_ERR(dma->dma_buf));
		goto err_buf_get;
	}

	dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
	if (IS_ERR_OR_NULL(dma->attachment)) {
		dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma->attachment));
		goto err_buf_attach;
	}

	dma->sg_table = dma_buf_map_attachment(dma->attachment,
			direction);
	if (IS_ERR_OR_NULL(dma->sg_table)) {
		dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma->sg_table));
		goto err_buf_map_attachment;
	}

	dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
			dma->dma_buf->size);
	if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
		dev_err(info->dev, "iovmm_map() failed: %d\n", dma->dma_addr);
		goto err_iovmm_map;
	}

	dma->dma_addr = dma_addr;
	return dma->dma_buf->size;

err_iovmm_map:
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			direction);
err_buf_map_attachment:
	dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
	dma_buf_put(dma->dma_buf);
err_buf_get:
	return 0;
}
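
For reference, the release path reverses these steps in the same order as the
error unwind above. A minimal sketch, assuming the iovmm_unmap() helper that
the later examples use; this function is not part of the original excerpt:

static void fimg2d_unmap_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma)
{
	if (!dma->dma_addr)
		return;

	/* Tear down in the reverse order of fimg2d_map_dma_buf() */
	iovmm_unmap(info->dev, dma->dma_addr);
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			dma->direction);
	dma_buf_detach(dma->dma_buf, dma->attachment);
	dma_buf_put(dma->dma_buf);
	memset(dma, 0, sizeof(*dma));
}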
Example #2
int m2m1shot_dma_addr_map(struct device *dev,
			struct m2m1shot_buffer_dma *buf,
			int plane_idx, enum dma_data_direction dir)
{
	struct m2m1shot_buffer_plane_dma *plane = &buf->plane[plane_idx];
	dma_addr_t iova;

	if (plane->dmabuf) {
		iova = ion_iovmm_map(plane->attachment, 0,
					plane->bytes_used, dir, plane_idx);
	} else {
		iova = iovmm_map(dev, plane->sgt->sgl, 0,
					plane->bytes_used, dir, plane_idx);
	}

	if (IS_ERR_VALUE(iova))
		return (int)iova;

	buf->plane[plane_idx].dma_addr = iova;

	return 0;
}
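
On success the I/O virtual address is stored in the plane and 0 is returned,
so a caller typically maps every plane of a buffer and unwinds on failure. A
hypothetical usage sketch; m2m1shot_dma_addr_unmap() is assumed to be the
matching release helper:

static int map_all_planes(struct device *dev,
			struct m2m1shot_buffer_dma *buf, int num_planes,
			enum dma_data_direction dir)
{
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = m2m1shot_dma_addr_map(dev, buf, i, dir);
		if (ret) {
			/* unwind the planes mapped so far */
			while (i-- > 0)
				m2m1shot_dma_addr_unmap(dev, buf, i);
			return ret;
		}
	}

	return 0;
}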
Example #3
static void *vb2_ion_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size, int write)
{
	struct vb2_ion_conf *conf = alloc_ctx;
	struct vb2_ion_buf *buf = NULL;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	size_t len;
	int ret = 0;
	bool malloced = false;
	struct scatterlist *sg;

	/* Create vb2_ion_buf */
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("kzalloc failed\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Import an ION handle from the user virtual address */
	buf->handle = ion_import_uva(conf->client, vaddr);
	if (IS_ERR(buf->handle)) {
		if ((PTR_ERR(buf->handle) == -ENXIO) && conf->use_mmu) {
			int flags = ION_HEAP_EXYNOS_USER_MASK;

			if (write)
				flags |= ION_EXYNOS_WRITE_MASK;

			buf->handle = ion_exynos_get_user_pages(conf->client,
							vaddr, size, flags);
			if (IS_ERR(buf->handle))
				ret = PTR_ERR(buf->handle);
		} else {
			ret = -EINVAL;
		}

		if (ret) {
			pr_err("%s: Failed to retrieving non-ion user buffer @ "
				"0x%lx (size:0x%lx, dev:%s, errno %ld)\n",
				__func__, vaddr, size, dev_name(conf->dev),
					PTR_ERR(buf->handle));
			goto err_import_uva;
		}

		malloced = true;
	}

	/* TODO: Need to check whether already DVA is created or not */

	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x) size(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	if (!malloced) {
		/* Get offset from the start */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, vaddr);
		if (vma == NULL) {
			pr_err("Failed acquiring VMA to get offset 0x%08lx\n",
					vaddr);
			up_read(&mm->mmap_sem);

			if (conf->use_mmu)
				iovmm_unmap(conf->dev, buf->dva);

			ret = -EFAULT;
			goto err_get_vma;
		}
		buf->offset = vaddr - vma->vm_start;
		up_read(&mm->mmap_sem);
	}
	dbg(6, "dva(0x%x), size(0x%x), offset(0x%x)\n",
			(u32)buf->dva, (u32)size, (u32)buf->offset);

	/* Set vb2_ion_buf */
	ret = _vb2_ion_get_vma(vaddr, size, &vma);
	if (ret) {
		pr_err("Failed acquiring VMA 0x%08lx\n", vaddr);

		if (conf->use_mmu)
			iovmm_unmap(conf->dev, buf->dva);

		goto err_get_vma;
	}

	buf->vma = vma;
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	return buf;

err_get_vma:	/* fall through */
err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_import_uva:
	kfree(buf);

	return ERR_PTR(ret);
}
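
The matching put path would release everything acquired above in reverse
order. A minimal sketch; _vb2_ion_put_vma() is assumed to be the counterpart
of _vb2_ion_get_vma(), which is also outside this excerpt:

static void vb2_ion_put_userptr(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;
	struct vb2_ion_conf *conf = buf->conf;

	if (conf->use_mmu)
		iovmm_unmap(conf->dev, buf->dva);

	ion_unmap_dma(conf->client, buf->handle);
	ion_free(conf->client, buf->handle);

	/* assumed helper: drops the VMA reference from _vb2_ion_get_vma() */
	_vb2_ion_put_vma(buf->vma);
	kfree(buf);
}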
Example #4
static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_ion_conf	*conf = alloc_ctx;
	struct vb2_ion_buf	*buf;
	struct scatterlist	*sg;
	size_t	len;
	u32 heap = 0;
	int ret = 0;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("no memory for vb2_ion_conf\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Set vb2_ion_buf */
	buf->conf = conf;
	buf->size = size;
	buf->cacheable = conf->cacheable;

	/* Allocate: physical memory */
	if (conf->contig)
		heap = ION_HEAP_EXYNOS_CONTIG_MASK;
	else
		heap = ION_HEAP_EXYNOS_MASK;

	buf->handle = ion_alloc(conf->client, size, conf->align, heap);
	if (IS_ERR(buf->handle)) {
		pr_err("ion_alloc of size %ld\n", size);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Getting scatterlist */
	buf->sg = ion_map_dma(conf->client, buf->handle);
	if (IS_ERR(buf->sg)) {
		pr_err("ion_map_dma conf->name(%s)\n", conf->name);
		ret = -ENOMEM;
		goto err_map_dma;
	}
	dbg(6, "PA(0x%x), SIZE(%x)\n", buf->sg->dma_address, buf->sg->length);

	sg = buf->sg;
	do {
		buf->nents++;
	} while ((sg = sg_next(sg)));
	dbg(6, "buf->nents(0x%x)\n", buf->nents);

	/* Map DVA */
	if (conf->use_mmu) {
		buf->dva = iovmm_map(conf->dev, buf->sg);
		if (!buf->dva) {
			pr_err("iovmm_map: conf->name(%s)\n", conf->name);
			ret = -ENOMEM;
			goto err_ion_map_dva;
		}
		dbg(6, "DVA(0x%x)\n", buf->dva);
	} else {
		ret = ion_phys(conf->client, buf->handle,
			       (unsigned long *)&buf->dva, &len);
		if (ret) {
			pr_err("ion_phys: conf->name(%s)\n", conf->name);
			goto err_ion_map_dva;
		}
	}

	/* Set struct vb2_vmarea_handler */
	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->ref);

	return buf;

err_ion_map_dva:
	ion_unmap_dma(conf->client, buf->handle);

err_map_dma:
	ion_free(conf->client, buf->handle);

err_alloc:
	kfree(buf);

	return ERR_PTR(ret);
}
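
vb2_ion_put(), installed as the vmarea handler above, is not shown in this
excerpt. A minimal sketch of what it would do once the last reference is
dropped, mirroring the error unwind of vb2_ion_alloc():

static void vb2_ion_put(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;
	struct vb2_ion_conf *conf = buf->conf;

	if (!atomic_dec_and_test(&buf->ref))
		return;

	/* last user gone: undo vb2_ion_alloc() in reverse order */
	if (conf->use_mmu)
		iovmm_unmap(conf->dev, buf->dva);

	ion_unmap_dma(conf->client, buf->handle);
	ion_free(conf->client, buf->handle);
	kfree(buf);
}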