void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/*
	 * If we mapped the whole heap at once, wait to unmap until
	 * everything has been freed.
	 */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		 unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;

	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;

		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i],
						      i,
						      cp_heap->iommu_partition[i],
						      vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	/* Tear the mapping down one 4K page at a time. */
	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		 unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;

	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
			       __func__);
	}

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;

		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i],
						      i,
						      cp_heap->iommu_partition[i],
						      vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
static int __ion_iommu_map(struct ion_buffer *buffer,
			   int domain_num, int partition_num,
			   unsigned long align, unsigned long iova_length,
			   unsigned long flags, unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data, domain_num,
					   partition_num, align, iova_length,
					   flags);
	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return 0;

out:
	/*
	 * The heap's map_iommu() releases its own iova on failure (see the
	 * out1 paths in the map_iommu implementations below), so only the
	 * tracking structure is freed here; freeing the iova again would be
	 * a double free.
	 */
	kfree(data);
	return ret;
}
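/*
 * A minimal caller sketch for __ion_iommu_map(), assuming a lookup helper
 * (ion_iommu_lookup, hypothetical, not part of the code above) that finds an
 * existing mapping for the (domain, partition) pair, so mappings can be
 * reference-counted via data->ref and reused instead of remapped.
 */
static int example_map_or_reuse(struct ion_buffer *buffer, int domain_num,
				int partition_num, unsigned long align,
				unsigned long iova_length, unsigned long flags,
				unsigned long *iova)
{
	struct ion_iommu_map *map;

	/* Hypothetical lookup of an existing mapping for this buffer. */
	map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (map) {
		kref_get(&map->ref);	/* reuse the existing mapping */
		*iova = map->iova_addr;
		return 0;
	}
	return __ion_iommu_map(buffer, domain_num, partition_num, align,
			       iova_length, flags, iova);
}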
int ion_cma_map_iommu(struct ion_buffer *buffer, struct ion_iommu_map *data,
		      unsigned int domain_num, unsigned int partition_num,
		      unsigned long align, unsigned long iova_length,
		      unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct ion_cma_buffer_info *info = buffer->priv_virt;
	struct sg_table *table = info->table;
	int prot = IOMMU_WRITE | IOMMU_READ;

	if (!msm_use_iommu()) {
		data->iova_addr = info->handle;
		data->mapped_size = iova_length;
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);
	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -EINVAL;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
		       __func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
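/*
 * Worked example for the "extra" padding above (illustrative numbers, not
 * taken from the driver): a client that maps a 1 MB (0x100000) buffer with
 * iova_length = 0x180000 gets extra = 0x80000. The first 1 MB of the iova
 * range is backed by the buffer's pages via iommu_map_range(), and the
 * trailing 0x80000 is backed by msm_iommu_map_extra() in 4K pages, so
 * hardware that reads past the end of the buffer (e.g. a prefetcher) still
 * hits valid page-table entries instead of faulting.
 */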
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				     struct ion_iommu_map *data,
				     unsigned int domain_num,
				     unsigned int partition_num,
				     unsigned long align,
				     unsigned long iova_length,
				     unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);
	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	page = virt_to_page(buffer->vaddr);

	/* One scatterlist entry covers the whole contiguous buffer. */
	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
		       __func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
			      struct ion_iommu_map *data,
			      unsigned int domain_num,
			      unsigned int partition_num,
			      unsigned long align,
			      unsigned long iova_length,
			      unsigned long flags)
{
	int ret = 0, i;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct page *page;
	int npages = buffer->size >> PAGE_SHIFT;
	void *vaddr = buffer->priv_virt;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);
	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist) * npages);
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, npages);

	/* Build one scatterlist entry per vmalloc'd page of the buffer. */
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page) {
			ret = -ENOMEM;
			goto out1;
		}
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
		       __func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				 struct ion_iommu_map *data,
				 unsigned int domain_num,
				 unsigned int partition_num,
				 unsigned long align,
				 unsigned long iova_length,
				 unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped; derive the iova from the heap mapping. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;

		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
				buffer->priv_phys - cp_heap->base;

			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear the delayed-unmap flag so that we don't
			 * interfere with that feature (we are already
			 * delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);
	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
		       __func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			 int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/*
	 * If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for the prefetch issue in the video
	 * core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K - 1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K - 1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}

	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
			msm_allocate_iova_address(domain_num, partition,
						  virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
			       __func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		/* Map the whole heap in 64K pages. */
		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					    order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
				       __func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
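/*
 * Worked example for iommu_map_all() (illustrative numbers): a 64 MB heap
 * (total_size = 0x4000000) mapped into the 2x video domain reserves
 * virt_addr_len = 128 MB of iova. The while loop issues
 * 0x4000000 / SZ_64K = 1024 iommu_map() calls backing the first 64 MB with
 * the heap's physical pages; msm_iommu_map_extra() then backs the second
 * 64 MB so that video-core prefetch past the end of the heap still hits
 * valid page-table entries. Both base and total_size must be 64K aligned or
 * the function bails out with -EINVAL before allocating any iova.
 */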
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	/*
	 * HTC (Jason Huang, 2012): buffers from the ION CP MM heap are
	 * mapped with 1M pages, so they must be torn down with iommu_unmap();
	 * iommu_unmap_range() does not support 1M unmappings.
	 */
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	/*
	 * A client may request a virtual address range larger than the
	 * (1M-aligned) buffer, so the mapped size can exceed the buffer
	 * size. That extra tail is not 1M-mapped and must be unmapped
	 * separately.
	 */
	unsigned long extra = 0;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	if (data->buffer->heap->id == ION_CP_MM_HEAP_ID) {
		/* Unmap the 1M-mapped buffer, then the 4K-mapped tail. */
		extra = data->mapped_size - data->buffer->size;
		temp_iova = data->iova_addr;
		for (i = data->buffer->size; i > 0;
		     i -= SZ_1M, temp_iova += SZ_1M)
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));

		if (extra)
			iommu_unmap_range(domain, temp_iova, extra);
	} else {
		iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	}

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				 struct ion_iommu_map *data,
				 unsigned int domain_num,
				 unsigned int partition_num,
				 unsigned long align,
				 unsigned long iova_length,
				 unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i = 0, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	/* HTC: buffers from the ION CP MM heap are always 1M-aligned. */
	if (buffer->heap->id == ION_CP_MM_HEAP_ID)
		align = SZ_1M;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);
	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		/*
		 * HTC: map CP MM heap buffers with 1M pages via iommu_map();
		 * neither htc_iommu_map_range() nor iommu_map_range()
		 * supports 1M mappings.
		 */
		temp_iova = data->iova_addr;
		temp_phys = buffer->priv_phys;
		for (i = buffer->size; i > 0;
		     i -= SZ_1M, temp_iova += SZ_1M, temp_phys += SZ_1M) {
			ret = iommu_map(domain, temp_iova, temp_phys,
					get_order(SZ_1M),
					ION_IS_CACHED(flags) ? 1 : 0);
			if (ret) {
				pr_err("%s: could not map %lx to %lx in domain %p\n",
				       __func__, temp_iova, temp_phys, domain);
				goto out2;
			}
		}
	} else {
		/* HTC: htc_iommu_map_range() for better performance. */
		ret = htc_iommu_map_range(domain, data->iova_addr,
					  buffer->priv_phys, buffer->size,
					  ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			ret = -ENOMEM;
			goto out1;
		}
		temp_iova = data->iova_addr + buffer->size;
	}

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}
	return 0;

out2:
	if (buffer->heap->id == ION_CP_MM_HEAP_ID) {
		/*
		 * Step back before each unmap: temp_iova points at the page
		 * that failed (or one past the mapped range), so only the
		 * pages that were actually mapped are torn down.
		 */
		for ( ; i < buffer->size; i += SZ_1M) {
			temp_iova -= SZ_1M;
			iommu_unmap(domain, temp_iova, get_order(SZ_1M));
		}
	} else {
		iommu_unmap_range(domain, data->iova_addr, buffer->size);
	}
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				 struct ion_iommu_map *data,
				 unsigned int domain_num,
				 unsigned int partition_num,
				 unsigned long align,
				 unsigned long iova_length,
				 unsigned long flags)
{
	unsigned long temp_phys, temp_iova;
	struct iommu_domain *domain;
	int i, ret = 0;
	unsigned long extra;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);
	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);
	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	/* Map the buffer into the iova range one 4K page at a time. */
	temp_iova = data->iova_addr;
	temp_phys = buffer->priv_phys;
	for (i = buffer->size; i > 0;
	     i -= SZ_4K, temp_iova += SZ_4K, temp_phys += SZ_4K) {
		ret = iommu_map(domain, temp_iova, temp_phys,
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);
		if (ret) {
			pr_err("%s: could not map %lx to %lx in domain %p\n",
			       __func__, temp_iova, temp_phys, domain);
			goto out2;
		}
	}

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}
	return 0;

out2:
	/* Unwind only the pages that were actually mapped. */
	for ( ; i < buffer->size; i += SZ_4K) {
		temp_iova -= SZ_4K;
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	}
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}
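/*
 * Unwind arithmetic for the out2 path above (illustrative): with a 16K
 * buffer, suppose the map loop fails on the third page. At that point
 * i == 8K (bytes left, including the failed page) and temp_iova points at
 * the page that failed. The cleanup loop then runs while i < 16K, i.e.
 * (16K - 8K) / SZ_4K = 2 iterations, stepping temp_iova back to the second
 * and then the first page - exactly the two pages that were mapped, leaving
 * the failed page untouched.
 */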