Example #1
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	_ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	kref_put(&iommu_map->ref, ion_iommu_release);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
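For context, the release callback handed to kref_put() above typically looks like the sketch below. This is a hedged reconstruction, not the driver's verbatim source: the rbtree field names (node, iommu_maps) and the unmap_iommu heap op are assumptions inferred from the call sites in these examples.

#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

/* Reconstruction for illustration only; field names are assumptions. */
static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						 ref);
	struct ion_buffer *buffer = map->buffer;

	/* Remove this (domain, partition) mapping from the buffer's tree. */
	rb_erase(&map->node, &buffer->iommu_maps);

	/* Let the heap tear down the actual IOMMU page-table entries. */
	buffer->heap->ops->unmap_iommu(map);

	kfree(map);
}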
Example #2
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		} else {
			/* Propagate the mapping failure instead of
			 * silently returning success with *iova unset. */
			ret = iommu_map ? PTR_ERR(iommu_map) : -EINVAL;
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
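A usage sketch may help here. The caller below is hypothetical (not from the ION source); DOMAIN_NUM and PARTITION_NUM are placeholder values that a real client would take from the platform's IOMMU configuration. It illustrates the contract of the two-flag API: flags describes the buffer (and must not request a cached mapping), while iommu_flags such as ION_IOMMU_UNMAP_DELAYED control the mapping's lifetime.

#include <linux/sizes.h>

#define DOMAIN_NUM	0	/* placeholder: platform-specific */
#define PARTITION_NUM	0	/* placeholder: platform-specific */

static int example_map_buffer(struct ion_client *client,
			      struct ion_handle *handle)
{
	unsigned long iova, size;
	int ret;

	/* Passing 0 for iova_length defaults it to the buffer size. */
	ret = ion_map_iommu(client, handle, DOMAIN_NUM, PARTITION_NUM,
			    SZ_4K, 0, &iova, &size, 0,
			    ION_IOMMU_UNMAP_DELAYED);
	if (ret)
		return ret;

	/* ... program the hardware with [iova, iova + size) ... */

	/*
	 * ION_IOMMU_UNMAP_DELAYED took an extra kref above, so this call
	 * drops our reference but the IOMMU mapping itself survives until
	 * the buffer is finally freed.
	 */
	ion_unmap_iommu(client, handle, DOMAIN_NUM, PARTITION_NUM);
	return 0;
}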
Example #3
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
		!iommu_map) {
		ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
					iova_length, flags, iova);
		if (ret < 0)
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
	} else {
		if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
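This revision predates the kref-only scheme of Example #2: __ion_iommu_map() returns an int here rather than a pointer, and map/unmap pairing is tracked with the _ion_map()/_ion_unmap() counter helpers. Their bodies are not shown above; the sketch below is a reconstruction inferred from the call sites, assuming _ion_map() bumps both counters and returns nonzero only on the buffer's first mapping reference, while _ion_unmap() simply undoes the increments.

/*
 * Reconstruction for illustration, inferred from the call sites in
 * Examples #1, #3, and #4; not verbatim driver source.
 */
static int _ion_map(int *buffer_cnt, int *handle_cnt)
{
	/* Nonzero only for the buffer's first mapping reference, telling
	 * the caller that a fresh IOMMU mapping must be built. */
	int first = (*buffer_cnt == 0);

	(*buffer_cnt)++;
	(*handle_cnt)++;
	return first;
}

static void _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	(*buffer_cnt)--;
	(*handle_cnt)--;
}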
Example #4
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	/*
	 * HTC change (Jason Huang, 2012-05-30): buffers from the ION CP MM
	 * heap are 1MB-aligned, so clients may pass an expected mapped
	 * virtual address length that is shorter than the buffer size.
	 * Instead of rejecting such a request with -EINVAL as the stock
	 * driver does, round iova_length up to the buffer size.
	 */
	if (buffer->size > iova_length)
		iova_length = buffer->size;

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (IS_ERR_OR_NULL(iommu_map)) {
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			/* Propagate the mapping failure instead of
			 * silently returning success with *iova unset. */
			ret = iommu_map ? PTR_ERR(iommu_map) : -EINVAL;
		} else {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
					" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
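The practical effect of the HTC change is easiest to see from the caller's side. The sketch below is hypothetical (it reuses the placeholder DOMAIN_NUM/PARTITION_NUM from the earlier sketch and assumes a 1MB buffer from the ION CP MM heap): under the stock code of Examples #2 and #3 this call would fail with -EINVAL, whereas here it succeeds and silently maps the whole buffer, so callers must consume the size returned through buffer_size rather than the length they requested.

#include <linux/sizes.h>

static int example_short_map(struct ion_client *client,
			     struct ion_handle *handle)
{
	unsigned long iova, mapped_size;
	int ret;

	/* Assume the underlying buffer is 1MB; request only 64KB. */
	ret = ion_map_iommu(client, handle, DOMAIN_NUM, PARTITION_NUM,
			    SZ_1M, SZ_64K, &iova, &mapped_size, 0, 0);
	if (ret)
		return ret;

	/*
	 * mapped_size now reports the full 1MB buffer size, not the 64KB
	 * that was requested: iova_length was rounded up internally.
	 */
	ion_unmap_iommu(client, handle, DOMAIN_NUM, PARTITION_NUM);
	return 0;
}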