Example #1
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
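	/* Give the range back to the heap's gen_pool allocator. */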
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

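	/* Unmap everything if we previously mapped the whole heap at once. */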
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

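				/* The 2x-mapped domain was mapped at twice
				 * the heap size, so free twice the VA range. */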
				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
Example #2
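Example #2 is the same free path with one addition: when a reusable heap drains to zero allocated bytes and is not currently protected, it asks fmem to transition the backing region to T-state before the IOMMU teardown.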
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
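For context, here is a minimal sketch of the caller side, modeled on the msm ion_cp_heap driver: the heap's free callback recovers the physical address stashed at allocation time and forwards it to ion_cp_free. The ion_cp_heap_free wrapper and the buffer->priv_phys field are assumptions drawn from that driver, not part of the examples above.

static void ion_cp_heap_free(struct ion_heap *heap, struct ion_buffer *buffer)
{
	/*
	 * priv_phys holds the gen_pool address returned by the allocate
	 * path; passing it back releases the range and, once the heap is
	 * empty, the whole-heap IOMMU mappings as well.
	 */
	ion_cp_free(heap, buffer->priv_phys, buffer->size);

	/* Poison the field so a double free hits the early-return check. */
	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
}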