static struct gen_pool *iova_pool_setup(unsigned long start,
		unsigned long end, unsigned long align)
{
	struct gen_pool *pool = NULL;
	int ret = 0;

	pool = gen_pool_create(order_base_2(align), -1);
	if (!pool) {
		printk(KERN_ERR "Create gen pool failed!\n");
		return NULL;
	}
	/*
	 * The iova start should not be 0: a return value of 0 from
	 * gen_pool_alloc() is treated as an allocation failure.
	 */
	if (start == 0)
		WARN(1, "iova start should not be 0!\n");

	ret = gen_pool_add(pool, start, (end - start), -1);
	if (ret) {
		printk(KERN_ERR "Gen pool add failed!\n");
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}
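A pool returned by iova_pool_setup() is then typically drained and refilled with gen_pool_alloc()/gen_pool_free(). A minimal usage sketch follows; iova_alloc()/iova_free() are hypothetical wrapper names, while the genalloc calls are the standard API:

/*
 * Minimal usage sketch for the pool above. iova_alloc()/iova_free() are
 * hypothetical wrappers; gen_pool_alloc() and gen_pool_free() are the
 * standard genalloc entry points.
 */
static unsigned long iova_alloc(struct gen_pool *pool, size_t size)
{
	/* Returns 0 on failure, which is why the pool must not start at 0 */
	return gen_pool_alloc(pool, size);
}

static void iova_free(struct gen_pool *pool, unsigned long iova, size_t size)
{
	/* The size must match what was passed to gen_pool_alloc() */
	gen_pool_free(pool, iova, size);
}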
Example #2
void omap_tiler_heap_destroy(struct ion_heap *heap)
{
	struct omap_ion_heap *omap_ion_heap = (struct omap_ion_heap *)heap;
	if (omap_ion_heap->pool)
		gen_pool_destroy(omap_ion_heap->pool);
	kfree(heap);
}
Example #3
/*
 * Initializes the memory pool used to allocate card memory.
 *
 * Returns nonzero if there was a failure.
 */
int tl880_init_memory(struct tl880_dev *tl880dev) /* {{{ */
{
	int result;

	if(CHECK_NULL(tl880dev) || TL_ASSERT(tl880dev->pool == NULL)) {
		return -EINVAL;
	}

	tl880dev->pool = gen_pool_create(6, -1);
	if(TL_ASSERT(tl880dev->pool != NULL)) {
		return -ENOMEM;
	}

	/* Should I specify a specific NUMA node here ever? */
	/* The pool starts at 0x11000 because earlier points in RAM are used for something already */
	/* XXX:TODO: Changed to 0x100000 temporarily to allow the ALSA driver to use the memory without allocating it */
	/* The size must shrink to match the moved start, or the pool would extend past the end of card memory */
	if(TL_ASSERT((result = gen_pool_add(tl880dev->pool, 0x100000, tl880dev->memlen - 0x100000, -1)) == 0)) {
		printk(KERN_ERR "tl880: Failed to add card %d's memory to its memory pool\n", tl880dev->id);
		gen_pool_destroy(tl880dev->pool);
		tl880dev->pool = NULL;
		return result;
	}

	return 0;
} /* }}} */
Example #4
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}
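For context, a pool initialized like this is normally consumed by an atomic-allocation helper. The sketch below is modelled on the arm64 __alloc_from_pool() pattern; exact names and details vary across kernel versions:

/*
 * Hedged sketch of consuming the atomic pool, modelled on the arm64
 * __alloc_from_pool() pattern; details vary by kernel version.
 */
static void *alloc_from_atomic_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		/* The pool was populated with gen_pool_add_virt(), so the
		 * physical address can be recovered from the virtual one */
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}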
Example #5
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto free_heap;

	cp_heap->base = heap_data->base;
	ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
	if (ret < 0)
		goto destroy_pool;

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = cp_heap->base;
	cp_heap->secure_size = heap_data->size;
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->request_region = extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->release_region = extra_data->release_region;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}
Example #6
void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
}
Example #7
/*!
******************************************************************************

 @Function                SYSMEMKM_Deinitialise

******************************************************************************/
static IMG_VOID Deinitialise(
    SYSMEM_Heap *  heap
)
{
    struct priv_params *priv = (struct priv_params *)heap->priv;

    /* Tear down the pool and release the heap's private data... */
    gen_pool_destroy(priv->pool);
    IMG_FREE(priv);
    heap->priv = IMG_NULL;
}
Example #8
/*
 * Deinitializes the memory pool for card memory.
 */
void tl880_deinit_memory(struct tl880_dev *tl880dev) /* {{{ */
{
	if(CHECK_NULL(tl880dev)) {
		return;
	}

	if(!CHECK_NULL_W(tl880dev->pool)) {
		gen_pool_destroy(tl880dev->pool);
		tl880dev->pool = NULL;
	}
} /* }}} */
Example #9
static int sram_remove(struct vmm_device *dev)
{
	struct sram_dev *sram = vmm_devdrv_get_data(dev);

	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		vmm_printf("%s: removed while SRAM allocated\n", dev->name);

	gen_pool_destroy(sram->pool);

	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return 0;
}
Example #10
static void cfv_destroy_genpool(struct cfv_info *cfv)
{
	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);

	if (!cfv->genpool)
		return;
	gen_pool_free(cfv->genpool, cfv->reserved_mem,
		      cfv->reserved_size);
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
}
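For reference, the setup this function unwinds (cfv_create_genpool() in the caif_virtio driver) roughly takes the shape below. The sketch reuses the field names visible above; the granularity and error handling are illustrative:

/*
 * Hedged sketch of the setup that cfv_destroy_genpool() unwinds: one
 * coherent DMA buffer backs a fine-grained gen_pool, and a reserve
 * region is carved out up front.
 */
static int cfv_create_genpool_sketch(struct cfv_info *cfv)
{
	int err = -ENOMEM;

	cfv->alloc_addr = dma_alloc_coherent(cfv->vdev->dev.parent->parent,
					     cfv->allocsz, &cfv->alloc_dma,
					     GFP_KERNEL);
	if (!cfv->alloc_addr)
		return err;

	/* A min_alloc_order of 7 gives 128-byte allocation granularity */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err_free_dma;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				cfv->alloc_dma, cfv->allocsz, -1);
	if (err)
		goto err_destroy_pool;

	/* Reserve memory for low-memory situations; cfv_destroy_genpool()
	 * above returns this region before destroying the pool */
	err = -ENOMEM;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool, cfv->reserved_size);
	if (!cfv->reserved_mem)
		goto err_destroy_pool;

	return 0;

err_destroy_pool:
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
err_free_dma:
	dma_free_coherent(cfv->vdev->dev.parent->parent, cfv->allocsz,
			  cfv->alloc_addr, cfv->alloc_dma);
	cfv->alloc_addr = NULL;
	return err;
}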
Example #11
static int sram_remove(struct platform_device *pdev)
{
	struct sram_dev *sram = platform_get_drvdata(pdev);

	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		dev_dbg(&pdev->dev, "removed while SRAM allocated\n");

	gen_pool_destroy(sram->pool);

	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return 0;
}
Example #12
int sps_mem_de_init(void)
{
	if (iomem_virt != NULL) {
		gen_pool_destroy(pool);
		pool = NULL;
		iounmap(iomem_virt);
		iomem_virt = NULL;
	}

	if (total_alloc == total_free)
		return 0;

	SPS_ERR("sps:sps_mem_de_init:some memory not freed");
	return SPS_ERROR;
}
Example #13
static struct gen_pool *initialize_gpool(unsigned long start,
	unsigned long size)
{
	struct gen_pool *gpool;

	gpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!gpool)
		return NULL;

	if (gen_pool_add(gpool, start, size, -1)) {
		gen_pool_destroy(gpool);
		return NULL;
	}

	return gpool;
}
Example #14
IMG_RESULT SYSMEMKM_AddCarveoutMemory(
    IMG_UINTPTR     vstart,
    IMG_PHYSADDR    pstart,
    IMG_UINT32      size,
    SYS_eMemPool *  peMemPool
)
{
    IMG_RESULT ui32Result;
    struct priv_params *prv;
    struct gen_pool *pool = gen_pool_create(12, -1);

    /* Bail out early on a NULL pool; gen_pool_add_virt() and the
       error path's gen_pool_destroy() cannot cope with one. */
    IMG_ASSERT(pool != IMG_NULL);
    if(IMG_NULL == pool)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    prv = (struct priv_params *)IMG_MALLOC(sizeof(*prv));
    IMG_ASSERT(prv != IMG_NULL);
    if(IMG_NULL == prv)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_priv_alloc;
    }
    IMG_MEMSET((void *)prv, 0, sizeof(*prv));

    IMG_ASSERT(size != 0);
    IMG_ASSERT((vstart & (HOST_MMU_PAGE_SIZE-1)) == 0);
    if(gen_pool_add_virt(pool, (unsigned long)vstart, (unsigned long)pstart, size, -1) != 0)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_heap_add;    /* frees prv, then destroys the pool */
    }

    prv->pool = pool;
    prv->pstart = pstart;
    prv->size = size;
    prv->vstart = vstart;

    ui32Result = SYSMEMU_AddMemoryHeap(&carveout_ops, IMG_TRUE, (IMG_VOID *)prv, peMemPool);
    IMG_ASSERT(IMG_SUCCESS == ui32Result);
    if(IMG_SUCCESS != ui32Result)
    {
        goto error_heap_add;
    }

    return IMG_SUCCESS;

error_heap_add:
    IMG_FREE(prv);
error_priv_alloc:
    gen_pool_destroy(pool);

    return ui32Result;
}
Example #15
static int sram_probe(struct platform_device *pdev)
{
	void __iomem *virt_base;
	struct sram_dev *sram;
	struct resource *res;
	unsigned long size;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(virt_base))
		return PTR_ERR(virt_base);

	size = resource_size(res);

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
	if (!sram->pool)
		return -ENOMEM;

	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
				res->start, size, -1);
	if (ret < 0) {
		/* The pool is devm-managed, so it must not be destroyed by
		 * hand; devres releases it automatically on probe failure */
		return ret;
	}

	platform_set_drvdata(pdev, sram);

	dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);

	return 0;
}
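Clients of a driver like this do not call sram_probe() directly; in recent kernels they typically look the pool up through the device tree with of_gen_pool_get() (older trees used gen_pool_get() or of_get_named_gen_pool()). A hedged sketch, where "my,sram" is a hypothetical device-tree property:

/*
 * Sketch of a client locating and using an SRAM pool. "my,sram" is a
 * hypothetical property name; of_gen_pool_get(), gen_pool_alloc() and
 * gen_pool_free() are the standard genalloc API.
 */
static void *sram_client_alloc(struct device *dev, size_t size,
			       struct gen_pool **poolp)
{
	struct gen_pool *pool;
	unsigned long vaddr;

	pool = of_gen_pool_get(dev->of_node, "my,sram", 0);
	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	*poolp = pool;	/* needed later for gen_pool_free() */
	return (void *)vaddr;
}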
Example #16
int kgsl_mmu_destroypagetableobject(struct kgsl_pagetable *pagetable)
{
	KGSL_MEM_VDBG("enter (pagetable=%p)\n", pagetable);

	if (pagetable) {
		if (pagetable->base.gpuaddr)
			kgsl_sharedmem_free(&pagetable->base);

		if (pagetable->pool) {
			gen_pool_destroy(pagetable->pool);
			pagetable->pool = NULL;
		}

		kfree(pagetable);
	}
	KGSL_MEM_VDBG("return 0x%08x\n", 0);

	return 0;
}
Example #17
int exynos_init_iovmm(struct device *sysmmu, struct exynos_iovmm *vmm1)
{
	int ret = 0;
	struct sysmmu_drvdata *data = dev_get_drvdata(sysmmu);
	struct exynos_iovmm *vmm = &data->vmm;

	vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vmm->vmm_pool) {
		ret = -ENOMEM;
		goto err_setup_genalloc;
	}

	/* (1GB - 4KB) addr space from 0xC0000000 */
	ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
	if (ret)
		goto err_setup_domain;

	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		pr_err("%s: failed to allocate iommu domain\n", __func__);
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	spin_lock_init(&vmm->lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",
						IOVM_SIZE, IOVA_START);
	return 0;
err_setup_domain:
	gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
	dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}
Example #18
static int sram_probe(struct vmm_device *dev,
		      const struct vmm_devtree_nodeid *nodeid)
{
	void *virt_base = NULL;
	struct sram_dev *sram = NULL;
	physical_addr_t start = 0;
	virtual_size_t size = 0;
	int ret = VMM_OK;

	ret = vmm_devtree_regaddr(dev->of_node, &start, 0);
	if (VMM_OK != ret) {
		vmm_printf("%s: Failed to get device base\n", dev->name);
		return ret;
	}

	ret = vmm_devtree_regsize(dev->of_node, &size, 0);
	if (VMM_OK != ret) {
		vmm_printf("%s: Failed to get device size\n", dev->name);
		goto err_out;
	}

	virt_base = (void *)vmm_host_iomap(start, size);
	if (NULL == virt_base) {
		vmm_printf("%s: Failed to get remap memory\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	sram = vmm_devm_zalloc(dev, sizeof(*sram));
	if (!sram) {
		vmm_printf("%s: Failed to allocate structure\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	sram->clk = devm_clk_get(dev, NULL);
	if (VMM_IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	sram->pool = devm_gen_pool_create(dev, SRAM_GRANULARITY_LOG);
	if (!sram->pool) {
		vmm_printf("%s: Failed to create memory pool\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
				start, size);
	if (ret < 0) {
		vmm_printf("%s: Failed to add memory chunk\n", dev->name);
		goto err_out;
	}

	vmm_devdrv_set_data(dev, sram);

	vmm_printf("%s: SRAM pool: %ld KiB @ 0x%p\n", dev->name, size / 1024,
		   virt_base);

	return 0;

err_out:
	if (sram && sram->pool)
		gen_pool_destroy(sram->pool);

#if 0
	if (sram->clk)
		clk_disable_unprepare(sram->clk);
#endif /* 0 */

	if (sram)
		vmm_free(sram);
	sram = NULL;

	if (virt_base)
		vmm_host_iounmap((virtual_addr_t)virt_base);
	virt_base = NULL;

	return ret;
}
Example #19
static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
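gen_pool_for_each_chunk() invokes the given callback once per chunk added to the pool. The real ghes_estatus_pool_free_chunk_page() is not shown in this listing; a plausible sketch, assuming the pool was grown one page at a time:

/*
 * Sketch of the per-chunk callback used above. Each chunk's start
 * address is assumed to be a single page to give back.
 */
static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}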
Example #20
static void iova_pool_destory(struct gen_pool *pool)
{
	gen_pool_destroy(pool);
}
Example #21
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t flags;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->mmu = mmu;
	pagetable->va_base = mmu->va_base;
	pagetable->va_range = mmu->va_range;
	pagetable->last_superpte = 0;
	pagetable->max_entries = (mmu->va_range >> KGSL_PAGESIZE_SHIFT)
				 + GSL_PT_EXTRA_ENTRIES;

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_gen_pool_create;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_MEM_ERR("gen_pool_create failed for pagetable %p\n",
				pagetable);
		goto err_gen_pool_add;
	}

	/* allocate page table memory */
	flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
		 | KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags,
				      pagetable->max_entries * GSL_PTE_SIZE,
				      &pagetable->base);

	if (status) {
		KGSL_MEM_ERR("cannot alloc page tables\n");
		goto err_kgsl_sharedmem_alloc;
	}

	/* reset page table entries
	 * -- all pte's are marked as not dirty initially
	 */
	kgsl_sharedmem_set(&pagetable->base, 0, 0, pagetable->base.size);
	pagetable->base.gpuaddr = pagetable->base.physaddr;

	KGSL_MEM_VDBG("return %p\n", pagetable);

	return pagetable;

err_kgsl_sharedmem_alloc:
err_gen_pool_add:
	gen_pool_destroy(pagetable->pool);
err_gen_pool_create:
	kfree(pagetable);
	return NULL;
}