Example no. 1
0
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr = NULL;	/* stays NULL when CONFIG_UML skips the ioremap below */

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

#ifndef CONFIG_UML
	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);
#endif

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32 bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
#ifndef CONFIG_UML
	if (vaddr)
		iounmap(vaddr);
#endif
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (secure_allocation &&
	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate secure memory from heap with "
			"outstanding mappings: User space: %lu, kernel space "
			"(cached): %lu\n", cp_heap->umap_count,
					   cp_heap->kmap_cached_count);
		return ION_CP_ALLOCATE_FAIL;
	}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		mutex_unlock(&cp_heap->lock);
		return ION_CP_ALLOCATE_FAIL;
	}
	return offset;
}
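Both functions in this example sit on top of the kernel's genalloc API. The snippet below is a minimal sketch of that substrate as a throwaway module: it creates a pool with page-sized granularity, seeds it with a made-up physical range, and performs one allocate/free round trip, much as __alloc() and ion_cp_allocate() do around their bookkeeping. gen_pool_alloc_aligned() as used above is a vendor extension, so the sketch sticks to the plain upstream calls; the module name and the 0x80000000 range are illustrative only.

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/sizes.h>

static struct gen_pool *demo_pool;

static int __init genpool_demo_init(void)
{
	unsigned long chunk;

	/* Pool that hands out blocks in page-sized granules, no NUMA preference. */
	demo_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!demo_pool)
		return -ENOMEM;

	/* Hand the pool a hypothetical 1 MiB range to carve up. */
	if (gen_pool_add(demo_pool, 0x80000000UL, SZ_1M, -1)) {
		gen_pool_destroy(demo_pool);
		demo_pool = NULL;
		return -ENOMEM;
	}

	/* One allocate/free cycle; 0 from gen_pool_alloc() means the pool is exhausted. */
	chunk = gen_pool_alloc(demo_pool, SZ_64K);
	if (chunk)
		gen_pool_free(demo_pool, chunk, SZ_64K);

	return 0;
}

static void __exit genpool_demo_exit(void)
{
	if (demo_pool)
		gen_pool_destroy(demo_pool);
}

module_init(genpool_demo_init);
module_exit(genpool_demo_exit);
MODULE_LICENSE("GPL");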
Example no. 3
0
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	unsigned long paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return -EINVAL;

	if (!mpool->gpool)
		return -EAGAIN;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return -EAGAIN;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (void *)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return -ENOMEM;
}
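The size and alignment arithmetic above is terse, so the userspace model below spells it out: PFN_ALIGN() rounds a byte count up to the next page boundary, and ilog2() turns a power-of-two byte alignment into the order that gen_pool_alloc_aligned() expects. The macro and helper here are re-derived stand-ins assuming a 4 KiB page, not the kernel definitions themselves.

#include <stdio.h>

#define PAGE_SIZE 4096UL
/* Userspace stand-in for the kernel's PFN_ALIGN(): round up to a whole page. */
#define PFN_ALIGN(x) (((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* floor(log2(v)) for v > 0, mirroring what ilog2() yields at runtime. */
static int ilog2_demo(unsigned long v)
{
	int order = -1;

	while (v) {
		v >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* A 10000-byte request becomes three pages (12288 bytes)... */
	printf("PFN_ALIGN(10000) = %lu\n", PFN_ALIGN(10000UL));
	/* ...and a 64 KiB alignment is passed to the pool as order 16. */
	printf("ilog2(65536)     = %d\n", ilog2_demo(65536UL));
	return 0;
}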
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
phys_addr_t _allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align, void *caller)
{
	phys_addr_t paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool || !mpool->gpool)
		return 0;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return 0;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	node->vaddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return 0;
}
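The two *_nomap variants in this example signal failure differently: the first returns a negative errno cast to unsigned long, the second returns 0. The hypothetical caller below contrasts the checks each convention requires; demo_alloc() and its mem_type argument are made up for illustration, and the prototypes are assumed to come from the same file or header as the functions above.

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/types.h>

static int demo_alloc(int mem_type)
{
	unsigned long a;
	phys_addr_t b;

	/* errno-style variant: failures come back as -EINVAL, -EAGAIN or -ENOMEM. */
	a = allocate_contiguous_memory_nomap(SZ_64K, mem_type, SZ_4K);
	if (IS_ERR_VALUE(a))
		return (int)a;

	/* phys_addr_t variant: 0 is the only failure indication. */
	b = _allocate_contiguous_memory_nomap(SZ_64K, mem_type, SZ_4K,
					      __builtin_return_address(0));
	if (!b)
		return -ENOMEM;

	return 0;
}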
Example no. 6
0
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
	int count = 0;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed as mapped_size (%zu) < size (%zu)\n",
			mapped_size, size);
		ret = -EINVAL;	/* the last iommu_map() may have succeeded, leaving ret == 0 */
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",
									size);
	return (dma_addr_t)ret;
}
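The least obvious part of iovmm_map() above is the inner while loop that merges physically contiguous scatterlist entries before mapping them. The standalone userspace model below replays that logic on a made-up segment array so the merge condition is easy to step through; struct seg and the addresses are illustrative stand-ins, not kernel types.

#include <stdio.h>
#include <stddef.h>

struct seg {
	unsigned long phys;
	size_t len;
};

int main(void)
{
	/* Hypothetical segment list: the first two entries touch physically. */
	struct seg sg[] = {
		{ 0x10000000UL, 0x1000 },
		{ 0x10001000UL, 0x2000 },
		{ 0x20000000UL, 0x1000 },
	};
	size_t n = sizeof(sg) / sizeof(sg[0]);

	for (size_t i = 0; i < n; i++) {
		unsigned long phys = sg[i].phys;
		size_t len = sg[i].len;

		/* Same test as the kernel loop: the next entry starts exactly
		 * where the current one ends, so fold it into one mapping. */
		while (i + 1 < n && sg[i].phys + sg[i].len == sg[i + 1].phys) {
			len += sg[i + 1].len;
			i++;
		}

		printf("map phys %#lx len %#zx\n", phys, len);
	}
	return 0;
}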
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (secure_allocation &&
	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate secure memory from heap with "
			"outstanding mappings: User space: %lu, kernel space "
			"(cached): %lu\n", cp_heap->umap_count,
					   cp_heap->kmap_cached_count);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
		if (fmem_set_state(FMEM_C_STATE) != 0) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}
	}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);

		if (cp_heap->reusable && !cp_heap->allocated_bytes &&
		    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
			if (fmem_set_state(FMEM_T_STATE) != 0)
				pr_err("%s: unable to transition heap to T-state\n",
					__func__);
		}
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	printk(KERN_WARNING "heap %s allocated %lx (total allocated_bytes %lx)\n",
		heap->name, size, cp_heap->allocated_bytes);

	return offset;
}
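One pattern worth calling out in ion_cp_allocate() is that the byte accounting is reserved under the mutex before the lock is dropped for the pool search, then rolled back under the lock if the allocation fails, so the heap never stays over-accounted. The sketch below isolates that pattern with made-up names (struct demo_heap), drops the alignment argument, and uses plain gen_pool_alloc(); it is a simplified model under those assumptions, not the driver's code.

#include <linux/genalloc.h>
#include <linux/mutex.h>

struct demo_heap {
	struct mutex lock;
	struct gen_pool *pool;
	unsigned long allocated_bytes;
};

static unsigned long demo_reserve_alloc(struct demo_heap *h, unsigned long size)
{
	unsigned long offset;

	/* Reserve the bytes optimistically while holding the lock... */
	mutex_lock(&h->lock);
	h->allocated_bytes += size;
	mutex_unlock(&h->lock);

	/* ...run the potentially slow pool search without it... */
	offset = gen_pool_alloc(h->pool, size);
	if (!offset) {
		/* ...and undo the reservation if the pool had no room. */
		mutex_lock(&h->lock);
		h->allocated_bytes -= size;
		mutex_unlock(&h->lock);
	}

	return offset;
}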