Example #1
File: ion.c, Project: Ntemis/Bexus-N
struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}
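The dmap/kmap bookkeeping above hinges on _ion_map()/_ion_unmap(), which tell the caller whether the heap's real map/unmap callback needs to run. A sketch of those helpers, modeled on the msm-era ion.c these trees descend from (details may differ per tree):

static int _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	/* the heap callback runs only on the buffer's first mapping */
	map = !*buffer_cnt;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static int _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return 0;
	(*buffer_cnt)--;
	/* nonzero return means the buffer's last user is gone */
	return *buffer_cnt == 0;
}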
Example #2
File: ion.c, Project: fulmix/fulmix.Kernel
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
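ion_buffer_get()/ion_buffer_put() above are thin kref wrappers; the extra reference keeps the buffer alive for the lifetime of the exported dma-buf. A plausible sketch (the release callback name is an assumption):

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	/* release callback frees the heap memory and device bookkeeping */
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

Note the error path: when dma_buf_fd() fails, dma_buf_put() alone suffices, since dropping the last dma-buf reference invokes the export's release op, which is expected to put the buffer.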
Example #3
File: ion.c, Project: fulmix/fulmix.Kernel
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		mutex_unlock(&client->lock);
		return ERR_PTR(-EEXIST);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
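ion_handle_kmap_get() centralizes the kernel-mapping refcounts that older variants of this function (see example #7) open-code with _ion_map(). A sketch along the lines of the mainline ion.c of that period:

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	/* first use by this handle: map (or re-reference) the buffer */
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}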
Example #4
File: ion.c, Project: fulmix/fulmix.Kernel
static int ion_share_set_flags(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	bool valid_handle;
	unsigned long ion_flags = ION_SET_CACHE(CACHED);
	if (flags & O_DSYNC)
		ion_flags = ION_SET_CACHE(UNCACHED);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	if (ion_validate_buffer_flags(buffer, ion_flags)) {
		mutex_unlock(&buffer->lock);
		return -EEXIST;
	}
	mutex_unlock(&buffer->lock);
	return 0;
}
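The ION_SET_CACHE()/ION_IS_CACHED() macros used above pack the cacheability choice into the low bit of the buffer flags. A sketch modeled on the msm ion headers of that era (the exact shift value is an assumption):

enum {
	UNCACHED = 0,
	CACHED = 1,
};

#define ION_CACHE_SHIFT		0
#define ION_SET_CACHE(__cache)	((__cache) << ION_CACHE_SHIFT)
#define ION_IS_CACHED(__flags)	((__flags) & (1 << ION_CACHE_SHIFT))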
Example #5
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN("%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
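ion_free() does not tear anything down directly; it only drops the handle's kref, and destruction happens from the release callback once the count reaches zero. A minimal sketch of the put path (callback name as in mainline ion.c):

static int ion_handle_put(struct ion_handle *handle)
{
	/* ion_handle_destroy() unlinks the handle from its client and
	 * drops the handle's buffer reference */
	return kref_put(&handle->ref, ion_handle_destroy);
}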
Example #6
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	/* WARN when the client and handle do not match. Used to be BUG */
	WARN_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
Example #7
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		vaddr = ERR_PTR(-EEXIST);
		goto out;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
							flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
Example #8
File: ion.c, Project: fulmix/fulmix.Kernel
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
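The returned sg_table is typically handed straight to the DMA API. A hypothetical consumer (dev, the device performing the transfer, is an assumption):

#include <linux/dma-mapping.h>

static int my_map_for_device(struct device *dev, struct ion_client *client,
			     struct ion_handle *handle)
{
	struct sg_table *table = ion_sg_table(client, handle);

	if (IS_ERR(table))
		return PTR_ERR(table);
	/* dma_map_sg() returns the number of mapped entries, 0 on error */
	if (!dma_map_sg(dev, table->sgl, table->nents, DMA_TO_DEVICE))
		return -ENOMEM;
	return 0;
}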
Example #9
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
//120817 [email protected] gpu: ion: Fix race condition with ion_import 
//	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN("%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);		
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
Example #10
struct ion_buffer *ion_share(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN("%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}
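The comment above presumes a matching import on the receiving client. A sketch of the pre-dma-buf ion_import() counterpart (helper names follow the ion.c of that period):

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	/* if this client already has a handle for the buffer, reuse it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}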
Example #11
File: ion.c, Project: fulmix/fulmix.Kernel
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
Example #12
static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
						offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
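The heap's cache_op callback performs the actual maintenance. A hypothetical backend (my_heap_cache_op is not from the examples above), assuming ARM's dmac_flush_range()/outer_flush_range() as also used in example #18, and a contiguous buffer whose base physical address lives in buffer->priv_phys:

static int my_heap_cache_op(struct ion_heap *heap, struct ion_buffer *buffer,
			    void *vaddr, unsigned long offset,
			    unsigned long length, unsigned int cmd)
{
	/* a real heap would pick clean/invalidate/flush based on cmd;
	 * flushing is the conservative choice that covers all three */
	dmac_flush_range(vaddr, vaddr + length);
	outer_flush_range(buffer->priv_phys + offset,
			  buffer->priv_phys + offset + length);
	return 0;
}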
Example #13
struct ion_handle *ion_drv_get_kernel_handle(struct ion_client *client,
					     void *handle, int from_kernel)
{
	struct ion_handle *kernel_handle = handle;

	if (!from_kernel) {
		/* from userspace, 'handle' is a handle id, not a pointer */
		kernel_handle = ion_uhandle_get(client, handle);
		if (!kernel_handle) {
			IONMSG("%s: handle invalid, handle_id=%d\n",
			       __FUNCTION__, (int)handle);
			return ERR_PTR(-EINVAL);
		}
	} else {
		if (!ion_handle_validate(client, handle)) {
			IONMSG("%s: handle invalid, handle=%p\n",
			       __FUNCTION__, handle);
			return ERR_PTR(-EINVAL);
		}
	}

	return kernel_handle;
}
Example #14
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
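A hypothetical caller of ion_phys(), e.g. for a device that does not sit behind an IOMMU (my_client/my_handle are placeholders; ion_phys_addr_t is assumed to be unsigned long, as in the ion headers of that era):

static int my_get_phys(struct ion_client *my_client,
		       struct ion_handle *my_handle)
{
	ion_phys_addr_t addr;
	size_t len;
	int ret;

	ret = ion_phys(my_client, my_handle, &addr, &len);
	if (ret)
		return ret;
	pr_info("buffer at %lx, length %zu\n", addr, len);
	return 0;
}

As the example shows, only heaps that implement the phys op (carveout-style heaps) can service this call; others return -ENODEV.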
Example #15
File: ion.c, Project: Snuzzo/funky_msm8960
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	/* HTC_START Jason Huang 20120530 --- Buffers from the ION CP MM heap
	 * are 1MB-aligned; clients may pass an expected mapped virtual
	 * address length that is shorter than the buffer size. */
	/*
	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}
	*/
	if (buffer->size > iova_length)
		iova_length = buffer->size;
	//HTC_END

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (IS_ERR_OR_NULL(iommu_map)) {
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
		} else {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
Example #16
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC_NEW:
	{
		struct ion_allocation_data_new data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags | data.heap_mask);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;

	}
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES_OLD:
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES_OLD:
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES_OLD:
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS_OLD:
	case ION_IOC_GET_FLAGS:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	case ION_IOC_CLIENT_RENAME:
	{
		struct ion_client_name_data data;
		int len;

		if (copy_from_user(&data, (void __user *)arg,
					sizeof(struct ion_client_name_data)))
			return -EFAULT;
		if (data.len < 0)
			return -EFAULT;
		len = (ION_CLIENT_NAME_LENGTH > data.len) ?
			data.len : ION_CLIENT_NAME_LENGTH;
		if (copy_from_user(client->name, (void __user *)data.name, len))
			return -EFAULT;
		client->name[len] = '\0';
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
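For context, a hypothetical userspace sequence against this ioctl interface: allocate a buffer, then export it as a dma-buf fd via ION_IOC_SHARE. Struct and field names are assumed to match this tree's ion.h:

#include <sys/ioctl.h>
#include <linux/ion.h>

int alloc_and_share(int ion_fd)
{
	struct ion_allocation_data alloc = {
		.len	= 4096,
		.align	= 4096,
		.flags	= 0,
	};
	struct ion_fd_data share;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return -1;
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		return -1;
	return share.fd;	/* dma-buf fd; mmap() it or pass it along */
}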
Example #17
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
		!iommu_map) {
		ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
					iova_length, flags, iova);
		if (ret < 0)
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
	} else {
		if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
Example #18
static long pxa_ion_ioctl(struct ion_client *client, unsigned int cmd,
							unsigned long arg)
{
	switch (cmd) {
	case ION_PXA_PHYS:
	{
		struct ion_pxa_region region;
		int ret;

		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct ion_pxa_region)))
			return -EFAULT;

		ret = ion_phys(client, region.handle,
			       &region.addr, &region.len);
		if (ret)
			return ret;
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct ion_pxa_region)))
			return -EFAULT;
		break;
	}
	case ION_PXA_SYNC:
	{
		struct ion_pxa_cache_region region;
		struct ion_buffer *buffer;
		struct vm_area_struct *vma;
		bool valid_handle;
		phys_addr_t pstart, pend;
		unsigned long start, end;

		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct ion_pxa_cache_region)))
			return -EFAULT;
		valid_handle = ion_handle_validate(client, region.handle);
		if (!valid_handle) {
			pr_err("%s:invalid handle 0x%x\n",
				__func__, (unsigned int)region.handle);
			return -EINVAL;
		}
		buffer = ion_handle_buffer(region.handle);
		mutex_lock(&buffer->lock);
		if (region.offset > buffer->size) {
			mutex_unlock(&buffer->lock);
			pr_err("%s: invalid offset exceeds buffer size\n",
				__func__);
			return -EINVAL;
		}
		if (region.offset + region.len > buffer->size)
			region.len = buffer->size - region.offset;
		/*
		 * Limitation: There's only one vma in one process context.
		 * If we want to support multiple vmas in one process context,
		 * we need to extend the interface.
		 */
		vma = pxa_ion_find_vma(buffer);
		if (!vma) {
			mutex_unlock(&buffer->lock);
			pr_err("%s: Buffer doesn't map to virtual address\n",
				__func__);
			return -EINVAL;
		}
		start = vma->vm_start + region.offset;
		end = start + region.len;
		/* don't check direction */
		dmac_flush_range((void *)start, (void *)end);

		pstart = buffer->priv_phys + region.offset;
		pend = pstart + region.len;
		outer_flush_range(pstart, pend);
		mutex_unlock(&buffer->lock);
		break;
	}
	default:
	       pr_err("%s: Unknown ioctl %d\n", __func__, cmd);
	       return -EINVAL;
	}
	return 0;
}
Example #19
File: ion.c, Project: fulmix/fulmix.Kernel
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_flush_data)))
			return -EFAULT;

		start = (unsigned long) data.vaddr;
		end = (unsigned long) data.vaddr + data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %p is out of bounds\n",
				__func__, data.vaddr);
			return -EINVAL;
		}

		if (!data.handle) {
			handle = ion_import_dma_buf(client, data.fd);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not import handle: %d\n",
					__func__, (int)handle);
				return -EINVAL;
			}
		}

		ret = ion_do_cache_op(client,
					data.handle ? data.handle : handle,
					data.vaddr, data.offset, data.length,
					cmd);

		if (!data.handle)
			ion_free(client, handle);

		if (ret < 0)
			return ret;
		break;

	}
	case ION_IOC_GET_FLAGS:
	{
		struct ion_flag_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flag_data)))
			return -EFAULT;

		ret = ion_handle_get_flags(client, data.handle, &data.flags);
		if (ret < 0)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_flag_data)))
			return -EFAULT;
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
Example #20
File: ion.c, Project: fulmix/fulmix.Kernel
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
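A hypothetical driver-side caller of this ion_map_iommu() variant; MY_DOMAIN/MY_PARTITION, my_client and my_handle are placeholders, and flags of 0 satisfy the uncached-only check at the top of the function:

static int my_map_into_smmu(struct ion_client *my_client,
			    struct ion_handle *my_handle)
{
	unsigned long iova, size;
	int ret;

	/* an iova_length of 0 lets the function default to the buffer size */
	ret = ion_map_iommu(my_client, my_handle, MY_DOMAIN, MY_PARTITION,
			    SZ_4K, 0, &iova, &size, 0, 0);
	if (ret)
		return ret;
	pr_info("mapped at iova %lx, size %lx\n", iova, size);
	return 0;
}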