struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}
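A minimal, hypothetical caller for ion_map_dma() follows (it is not part of the source file): it maps the handle, walks the returned scatterlist, and drops the mapping again. The ion_unmap_dma() counterpart and the linux/ion.h header path are assumptions based on ION trees of this era; exact names and signatures vary between kernels.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <linux/ion.h>		/* assumed header location for this tree */

/* Hypothetical usage sketch, not from the source file. */
static int example_dma_map(struct ion_client *client,
			   struct ion_handle *handle)
{
	struct scatterlist *sg = ion_map_dma(client, handle);

	if (IS_ERR_OR_NULL(sg))
		return sg ? PTR_ERR(sg) : -EINVAL;

	/* walk the chunks the heap handed back */
	for (; sg; sg = sg_next(sg))
		pr_info("chunk of %u bytes\n", sg->length);

	/* balance dmap_cnt; ion_unmap_dma() is assumed to exist here */
	ion_unmap_dma(client, handle);
	return 0;
}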
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
		     unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		vaddr = ERR_PTR(-EEXIST);
		goto out;
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
						      flags);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
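A corresponding hypothetical caller for ion_map_kernel() (again, not from the source): it obtains a CPU mapping, touches the buffer, and releases the mapping. Passing 0 as the flags argument (uncached) and the ion_unmap_kernel() counterpart are assumptions for this tree.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/ion.h>		/* assumed header location */

/* Hypothetical usage sketch, not from the source file. */
static int example_kernel_map(struct ion_client *client,
			      struct ion_handle *handle, size_t len)
{
	/* flags value 0 (uncached) is an assumption for this tree */
	void *vaddr = ion_map_kernel(client, handle, 0);

	if (IS_ERR_OR_NULL(vaddr))
		return vaddr ? PTR_ERR(vaddr) : -EINVAL;

	memset(vaddr, 0, len);		/* CPU access through the new mapping */

	ion_unmap_kernel(client, handle);	/* balance kmap_cnt */
	return 0;
}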
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
		  int domain_num, int partition_num, unsigned long align,
		  unsigned long iova_length, unsigned long *iova,
		  unsigned long *buffer_size, unsigned long flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %x\n",
			 __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx",
			 __func__, buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx",
			 __func__, iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
	    !iommu_map) {
		ret = __ion_iommu_map(buffer, domain_num, partition_num,
				      align, iova_length, flags, iova);
		if (ret < 0)
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
	} else {
		if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
			       __func__, handle, iommu_map->mapped_size,
			       iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
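A hypothetical caller for this stock variant (not in the source): it requests an IOMMU mapping, letting iova_length default to the buffer size, then tears it down. The domain/partition numbers, SZ_4K alignment (from linux/sizes.h), and the ion_unmap_iommu() counterpart are all placeholders; real values are platform-specific.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/ion.h>		/* assumed header location */

/* Hypothetical usage sketch, not from the source file. */
static int example_iommu_map(struct ion_client *client,
			     struct ion_handle *handle)
{
	unsigned long iova, size;
	int ret;

	/*
	 * Domain/partition 0 and SZ_4K alignment are placeholders.
	 * An iova_length of 0 asks the function to use the buffer size.
	 */
	ret = ion_map_iommu(client, handle, 0, 0, SZ_4K, 0,
			    &iova, &size, 0);
	if (ret)
		return ret;

	pr_info("mapped at iova %lx, length %lx\n", iova, size);

	/* assumed counterpart in this tree */
	ion_unmap_iommu(client, handle, 0, 0);
	return 0;
}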
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
		  int domain_num, int partition_num, unsigned long align,
		  unsigned long iova_length, unsigned long *iova,
		  unsigned long *buffer_size, unsigned long flags,
		  unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	/*
	 * HTC (Jason Huang, 2012-05-30): buffers from the ION CP MM heap are
	 * 1MB-aligned, so clients may request a mapped length shorter than
	 * the buffer size. Instead of rejecting such requests, round the
	 * iova length up to the buffer size. The stock check is preserved
	 * below for reference:
	 *
	 * if (buffer->size > iova_length) {
	 *	pr_debug("%s: iova length %lx is not at least buffer size %x\n",
	 *		 __func__, iova_length, buffer->size);
	 *	ret = -EINVAL;
	 *	goto out;
	 * }
	 */
	if (buffer->size > iova_length)
		iova_length = buffer->size;

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx",
			 __func__, buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx",
			 __func__, iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (IS_ERR_OR_NULL(iommu_map)) {
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			/* propagate the failure instead of returning 0 */
			ret = iommu_map ? PTR_ERR(iommu_map) : -EINVAL;
		} else {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
			       __func__, handle, iommu_map->flags,
			       iommu_flags);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
			       __func__, handle, iommu_map->mapped_size,
			       iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
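The HTC variant adds an iommu_flags argument; when ION_IOMMU_UNMAP_DELAYED is set, the function takes an extra kref on the mapping so the iova appears to survive the last ion_unmap_iommu() call and is torn down only when the buffer itself is destroyed. A hypothetical caller, with the same placeholder domain/partition values as above:

/* Hypothetical usage sketch for the HTC variant, not from the source. */
static int example_iommu_map_delayed(struct ion_client *client,
				     struct ion_handle *handle)
{
	unsigned long iova, size;
	int ret;

	/*
	 * ION_IOMMU_UNMAP_DELAYED requests lazy teardown: the mapping is
	 * released when the buffer is freed rather than on the last
	 * ion_unmap_iommu() call. Domain/partition 0 and SZ_4K alignment
	 * remain placeholders.
	 */
	ret = ion_map_iommu(client, handle, 0, 0, SZ_4K, 0,
			    &iova, &size, 0, ION_IOMMU_UNMAP_DELAYED);
	if (ret)
		return ret;

	pr_info("delayed-unmap mapping at iova %lx, length %lx\n",
		iova, size);
	return 0;
}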