static struct gen_pool *iova_pool_setup(unsigned long start,
		unsigned long end, unsigned long align)
{
	struct gen_pool *pool = NULL;
	int ret = 0;

	pool = gen_pool_create(order_base_2(align), -1);
	if (!pool) {
		printk(KERN_ERR "Create gen pool failed!\n");
		return NULL;
	}
	/*
	 * The iova start must not be 0: gen_pool_alloc() returns 0 to
	 * signal failure, so a chunk starting at 0 would be ambiguous.
	 */
	if (start == 0)
		WARN(1, "iova start should not be 0!\n");

	ret = gen_pool_add(pool, start, (end - start), -1);
	if (ret) {
		printk(KERN_ERR "Gen pool add failed!\n");
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}
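A minimal sketch (not from the original driver; the helper names are hypothetical) of how a caller might hand out and return IOVA ranges from the pool created by iova_pool_setup():
/* Hypothetical callers of the pool returned by iova_pool_setup(). */
static unsigned long iova_alloc(struct gen_pool *iova_pool, size_t size)
{
	/* Returns 0 on failure, which is why the pool must not start at 0. */
	return gen_pool_alloc(iova_pool, size);
}

static void iova_free(struct gen_pool *iova_pool, unsigned long iova, size_t size)
{
	/* The size must match what was passed to gen_pool_alloc(). */
	gen_pool_free(iova_pool, iova, size);
}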
Example #2
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	printk("hexagon_dma_alloc_coherent %zu\n", size);
	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				hexagon_coherent_pool_start,
				hexagon_coherent_pool_size, -1);
	}
	printk("hexagon_dma_alloc_coherent2 %X %X\n",
	       (u32)hexagon_coherent_pool_start, (u32)hexagon_coherent_pool_size);

	ret = (void *) gen_pool_alloc(coherent_pool, size);
	printk("hexagon_dma_alloc_coherent3 %X\n", (u32)ret);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
	} else
		*dma_addr = ~0;

	printk("hexagon_dma_alloc_coherent4\n");

	return ret;
}
Example #3
/*
 * Initializes the memory pool used to allocate card memory.
 *
 * Returns nonzero if there was a failure.
 */
int tl880_init_memory(struct tl880_dev *tl880dev) /* {{{ */
{
	int result;

	if(CHECK_NULL(tl880dev) || TL_ASSERT(tl880dev->pool == NULL)) {
		return -EINVAL;
	}

	tl880dev->pool = gen_pool_create(6, -1);
	if(TL_ASSERT(tl880dev->pool != NULL)) {
		return -ENOMEM;
	}

	/* Should I specify a specific NUMA node here ever? */
	/* The pool starts at 0x11000 because earlier points in RAM are used for something already */
	/* XXX:TODO: Changed to 0x100000 temporarily to allow the ALSA driver to use the memory without allocating it */
	if(TL_ASSERT((result = gen_pool_add(tl880dev->pool, 0x100000, tl880dev->memlen - 0x11000, -1)) == 0)) {
		printk(KERN_ERR "tl880: Failed to add card %d's memory to its memory pool\n", tl880dev->id);
		gen_pool_destroy(tl880dev->pool);
		tl880dev->pool = NULL;
		return result;
	}

	return 0;
} /* }}} */
Example #4
void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				(PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
	} else
		*dma_addr = ~0;

	return ret;
}
Example #5
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Our max_low_pfn should have been backed off by 16MB in
	 * mm/init.c to create DMA coherent space.  Use that as the VA
	 * for the pool.
	 */

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				pfn_to_virt(max_low_pfn),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) virt_to_phys(ret);
	} else
		*dma_addr = ~0;

	return ret;
}
Example #6
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}
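The pool above is created empty, so usable memory has to be added to it separately. A hedged sketch of how a chunk might be added later; the expand helper and its use of vmalloc() are assumptions for illustration, not the GHES driver's actual code:
/* Illustrative expand step: back the empty pool with vmalloc()ed memory. */
static int ghes_estatus_pool_expand_sketch(size_t len)
{
	void *addr = vmalloc(PAGE_ALIGN(len));

	if (!addr)
		return -ENOMEM;

	/* The chunk is tracked by its kernel virtual address. */
	return gen_pool_add(ghes_estatus_pool, (unsigned long)addr,
			    PAGE_ALIGN(len), -1);
}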
Example #7
struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
{
	struct omap_ion_heap *heap;

	heap = kzalloc(sizeof(struct omap_ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	if ((data->id == OMAP_ION_HEAP_TILER) ||
	    (data->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
		heap->pool = gen_pool_create(12, -1);
		if (!heap->pool) {
			kfree(heap);
			return ERR_PTR(-ENOMEM);
		}
		heap->base = data->base;
		gen_pool_add(heap->pool, heap->base, data->size, -1);
	}
	heap->heap.ops = &omap_tiler_ops;
	heap->heap.type = OMAP_ION_HEAP_TYPE_TILER;
	heap->heap.name = data->name;
	heap->heap.id = data->id;

	/*
	 * Dynamic page allocation was originally intended only for boards
	 * with 512 MiB of RAM or less, but it is currently forced on for
	 * all memory sizes.
	 */
	use_dynamic_pages = true;

	return &heap->heap;
}
Example #8
/*
 * REVISIT This supports CPU and DMA access to/from SRAM, but it
 * doesn't (yet?) support some other notable uses of SRAM:  as TCM
 * for data and/or instructions; and holding code needed to enter
 * and exit suspend states (while DRAM can't be used).
 */
static int __init sram_init(void)
{
	phys_addr_t phys = davinci_soc_info.sram_dma;
	unsigned len = davinci_soc_info.sram_len;
	int status = 0;
	void __iomem *addr;

	if (len) {
		len = min_t(unsigned, len, SRAM_SIZE);
		sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
		if (!sram_pool)
			status = -ENOMEM;
	}

	if (sram_pool) {
		addr = ioremap(phys, len);
		if (!addr)
			return -ENOMEM;
		status = gen_pool_add_virt(sram_pool, (unsigned long) addr,
					   phys, len, -1);
		if (status < 0)
			iounmap(addr);
	}

	WARN_ON(status < 0);
	return status;
}
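Because the SRAM chunk is registered with gen_pool_add_virt(), both the CPU and DMA addresses can be recovered per allocation. A sketch of an allocator on top of this pool (helper names are illustrative, not necessarily the DaVinci API):
/* Illustrative SRAM allocator: returns the CPU address, fills in the DMA address. */
void *sram_alloc_sketch(size_t len, dma_addr_t *dma)
{
	unsigned long vaddr = gen_pool_alloc(sram_pool, len);

	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(sram_pool, vaddr);

	return (void *)vaddr;
}

void sram_free_sketch(void *addr, size_t len)
{
	gen_pool_free(sram_pool, (unsigned long)addr, len);
}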
Example #9
/**
 * Initialize driver memory module
 *
 */
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
	int res;
	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	iomem_phys = pipemem_phys_base;
	iomem_size = pipemem_size;

	if (iomem_phys == 0) {
		SPS_ERR("sps:Invalid Pipe-Mem address");
		return SPS_ERROR;
	} else {
		iomem_virt = ioremap(iomem_phys, iomem_size);
		if (!iomem_virt) {
			SPS_ERR("sps:Failed to IO map pipe memory.\n");
			return -ENOMEM;
		}
	}

	iomem_offset = 0;
	SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
		iomem_phys, (u32) iomem_virt);

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
	if (res)
		return res;

	return 0;
}
Example #10
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}
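A hedged sketch of how an atomic allocation path might draw from the pool initialized above; the helper is illustrative and simplified compared to the real arm64 code:
/* Illustrative atomic-context allocation from the remapped pool. */
static void *alloc_from_atomic_pool_sketch(size_t size, phys_addr_t *ret_phys)
{
	unsigned long val;

	if (!atomic_pool)
		return NULL;

	val = gen_pool_alloc(atomic_pool, size);
	if (!val)
		return NULL;

	/* gen_pool_add_virt() recorded the physical address, so look it up. */
	if (ret_phys)
		*ret_phys = gen_pool_virt_to_phys(atomic_pool, val);

	return (void *)val;
}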
Example #11
static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	return 0;
}
Example #12
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto free_heap;

	cp_heap->base = heap_data->base;
	ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
	if (ret < 0)
		goto destroy_pool;

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = cp_heap->base;
	cp_heap->secure_size = heap_data->size;
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->request_region = extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->release_region = extra_data->release_region;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}
Example #13
static int __init sram_pool_init(void)
{
	/*
	 * This is a global pool, we don't care about node locality.
	 */
	sram_pool = gen_pool_create(1, -1);
	if (unlikely(!sram_pool))
		return -ENOMEM;

	return 0;
}
Example #14
/*
 * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
 */
static void __init omap_map_sram(void)
{
	int cached = 1;

	if (omap_sram_size == 0)
		return;

#ifdef CONFIG_OMAP4_ERRATA_I688
	omap_sram_start += PAGE_SIZE;
	omap_sram_size -= SZ_16K;
#endif
	if (cpu_is_omap34xx()) {
		/*
		 * SRAM must be marked as non-cached on OMAP3 since the
		 * CORE DPLL M2 divider change code (in SRAM) runs with the
		 * SDRAM controller disabled, and if it is marked cached,
		 * the ARM may attempt to write cache lines back to SDRAM
		 * which will cause the system to hang.
		 */
		cached = 0;
	}

	omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE);
	omap_sram_base = __arm_ioremap_exec(omap_sram_start, omap_sram_size,
						cached);
	if (!omap_sram_base) {
		pr_err("SRAM: Could not map\n");
		return;
	}

	{
		/* The first SRAM_BOOTLOADER_SZ of SRAM are reserved */
		void *base = (void *)omap_sram_base + SRAM_BOOTLOADER_SZ;
		phys_addr_t phys = omap_sram_start + SRAM_BOOTLOADER_SZ;
		size_t len = omap_sram_size - SRAM_BOOTLOADER_SZ;

		omap_gen_pool = gen_pool_create(ilog2(FNCPY_ALIGN), -1);
		if (omap_gen_pool)
			WARN_ON(gen_pool_add_virt(omap_gen_pool,
					(unsigned long)base, phys, len, -1));
		WARN_ON(!omap_gen_pool);
	}

	/*
	 * Looks like we need to preserve some bootloader code at the
	 * beginning of SRAM for jumping to flash for reboot to work...
	 */
	memset((void *)omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
	       omap_sram_size - SRAM_BOOTLOADER_SZ);
}
Example #15
static struct gen_pool *initialize_gpool(unsigned long start,
	unsigned long size)
{
	struct gen_pool *gpool;

	gpool = gen_pool_create(PAGE_SHIFT, -1);

	if (!gpool)
		return NULL;
	if (gen_pool_add(gpool, start, size, -1)) {
		gen_pool_destroy(gpool);
		return NULL;
	}

	return gpool;
}
Example #16
static int __init sram_init(void)
{
	unsigned len = davinci_soc_info.sram_len;
	int status = 0;

	if (len) {
		len = min_t(unsigned, len, SRAM_SIZE);
		sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
		if (!sram_pool)
			status = -ENOMEM;
	}
	if (sram_pool)
		status = gen_pool_add(sram_pool, SRAM_VIRT, len, -1);
	WARN_ON(status < 0);
	return status;
}
Example #17
static int __init iram_init_internal(unsigned long base, unsigned long size)
{
        iram_phys_base = base;

        iram_pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!iram_pool)
                return -ENOMEM;

        gen_pool_add(iram_pool, base, size, -1);
        iram_virt_base = ioremap(iram_phys_base, size);
        if (!iram_virt_base)
                return -EIO;

        pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base);
        return 0;
}
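The pool above tracks physical IRAM addresses while the CPU mapping lives separately in iram_virt_base. A sketch (assuming only those two globals; the helper name is hypothetical) of converting a pool allocation into a CPU-visible pointer:
/* Illustrative IRAM allocator: derive the virtual address from the physical one. */
void __iomem *iram_alloc_sketch(size_t size, unsigned long *dma_addr)
{
	unsigned long paddr = gen_pool_alloc(iram_pool, size);

	if (!paddr)
		return NULL;

	if (dma_addr)
		*dma_addr = paddr;

	/* The pool hands out physical addresses; offset into the ioremap()ed window. */
	return iram_virt_base + (paddr - iram_phys_base);
}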
Example #18
IMG_RESULT SYSMEMKM_AddCarveoutMemory(
    IMG_UINTPTR     vstart,
    IMG_PHYSADDR    pstart,
    IMG_UINT32      size,
    SYS_eMemPool *  peMemPool
)
{
    IMG_RESULT ui32Result;
    struct priv_params *prv;
    struct gen_pool *pool = gen_pool_create(12, -1);

    prv = (struct priv_params *)IMG_MALLOC(sizeof(*prv));
    IMG_ASSERT(prv != IMG_NULL);
    if(IMG_NULL == prv)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_priv_alloc;
    }
    IMG_MEMSET((void *)prv, 0, sizeof(*prv));

    IMG_ASSERT(pool != IMG_NULL);
    if (IMG_NULL == pool)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_heap_add;
    }
    IMG_ASSERT(size != 0);
    IMG_ASSERT((vstart & (HOST_MMU_PAGE_SIZE-1)) == 0);
    gen_pool_add_virt(pool, (unsigned long)vstart, (unsigned long)pstart, size, -1);

    prv->pool = pool;
    prv->pstart = pstart;
    prv->size = size;
    prv->vstart = vstart;

    ui32Result = SYSMEMU_AddMemoryHeap(&carveout_ops, IMG_TRUE, (IMG_VOID *)prv, peMemPool);
    IMG_ASSERT(IMG_SUCCESS == ui32Result);
    if(IMG_SUCCESS != ui32Result)
    {
        goto error_heap_add;
    }

    return IMG_SUCCESS;

error_heap_add:
    IMG_FREE(prv);
error_priv_alloc:
    if (pool != IMG_NULL)
        gen_pool_destroy(pool);

    return ui32Result;
}
Example #19
/**
 * Initialize driver memory module
 *
 */
int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size)
{
	int res;

	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	if ((d_type == 0) || (d_type == 2) || imem) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		} else {
			iomem_virt = ioremap(iomem_phys, iomem_size);
			if (!iomem_virt) {
				SPS_ERR("sps:Failed to IO map pipe memory.\n");
				return -ENOMEM;
			}
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=%pa,iomem_virt=0x%p.",
			&iomem_phys, iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);

	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	if ((d_type == 0) || (d_type == 2) || imem) {
		res = gen_pool_add(pool, (uintptr_t)iomem_virt,
				iomem_size, nid);
		if (res)
			return res;
	}

	return 0;
}
Example #20
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
	int res;

	
	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	if ((d_type == 0) || (d_type == 2)) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		} else {
			iomem_virt = ioremap(iomem_phys, iomem_size);
			if (!iomem_virt) {
				SPS_ERR("sps:Failed to IO map pipe memory.\n");
				return -ENOMEM;
			}
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
			iomem_phys, (u32) iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);

	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	if ((d_type == 0) || (d_type == 2)) {
		res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
		if (res)
			return res;
	}

	return 0;
}
Example #21
int exynos_init_iovmm(struct device *sysmmu, struct exynos_iovmm *vmm1)
{
	int ret = 0;
	struct sysmmu_drvdata *data = dev_get_drvdata(sysmmu);
	struct exynos_iovmm *vmm = &data->vmm;

	vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vmm->vmm_pool) {
		ret = -ENOMEM;
		goto err_setup_genalloc;
	}

	/* (1GB - 4KB) addr space from 0xC0000000 */
	ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
	if (ret)
		goto err_setup_domain;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		printk("exynos_init_iovmm-------line = %d\n", __LINE__);
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	spin_lock_init(&vmm->lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",
						IOVM_SIZE, IOVA_START);
	return 0;
err_setup_domain:
	gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
	dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}
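A hedged sketch of how a mapping helper might combine the vmm_pool with the IOMMU domain set up above; the helper name and protection flags are illustrative, not the Exynos driver's actual code:
/* Illustrative mapping step: carve an IOVA range and map it in the domain. */
static dma_addr_t iovmm_map_sketch(struct exynos_iovmm *vmm,
				   phys_addr_t phys, size_t size)
{
	unsigned long iova = gen_pool_alloc(vmm->vmm_pool, size);

	if (!iova)
		return 0;

	if (iommu_map(vmm->domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE)) {
		gen_pool_free(vmm->vmm_pool, iova, size);
		return 0;
	}

	return (dma_addr_t)iova;
}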
Example #22
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t flags;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->mmu = mmu;
	pagetable->va_base = mmu->va_base;
	pagetable->va_range = mmu->va_range;
	pagetable->last_superpte = 0;
	pagetable->max_entries = (mmu->va_range >> KGSL_PAGESIZE_SHIFT)
				 + GSL_PT_EXTRA_ENTRIES;

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_gen_pool_create;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_MEM_ERR("gen_pool_add failed for pagetable %p\n",
				pagetable);
		goto err_gen_pool_add;
	}

	/* allocate page table memory */
	flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
		 | KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags,
				      pagetable->max_entries * GSL_PTE_SIZE,
				      &pagetable->base);

	if (status) {
		KGSL_MEM_ERR("cannot alloc page tables\n");
		goto err_kgsl_sharedmem_alloc;
	}

	/* reset page table entries
	 * -- all pte's are marked as not dirty initially
	 */
	kgsl_sharedmem_set(&pagetable->base, 0, 0, pagetable->base.size);
	pagetable->base.gpuaddr = pagetable->base.physaddr;

	KGSL_MEM_VDBG("return %p\n", pagetable);

	return pagetable;

err_kgsl_sharedmem_alloc:
err_gen_pool_add:
	gen_pool_destroy(pagetable->pool);
err_gen_pool_create:
	kfree(pagetable);
	return NULL;
}
Example #23
static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;

	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation, so we use genpool. We ask for the space
	 * needed by IP and a full ring. If the DMA allocation fails, we retry
	 * with a smaller allocation size.
	 */
	err = -ENOMEM;
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;

	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
		}

		cfv->alloc_addr = dma_alloc_coherent(
						cfv->vdev->dev.parent->parent,
						cfv->allocsz, &cfv->alloc_dma,
						GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;

		cfv->allocsz = (cfv->allocsz * 3) >> 2;
	}

	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
		   cfv->allocsz);

	/* Allocate on 128-byte boundaries (1 << 7) */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;

	/* Reserve some memory for low memory situations. If we hit the roof
	 * in the memory pool, we stop TX flow and release the reserve.
	 */
	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}
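The error path above relies on cfv_destroy_genpool(), which is not shown; a hedged sketch of what such a teardown might look like, using only the fields touched in this function (an assumption, not the driver's actual implementation):
/* Sketch of a teardown matching the setup above. */
static void cfv_destroy_genpool_sketch(struct cfv_info *cfv)
{
	if (cfv->genpool) {
		/* Return the low-memory reserve before destroying the pool. */
		if (cfv->reserved_mem)
			gen_pool_free(cfv->genpool, cfv->reserved_mem,
				      cfv->reserved_size);
		gen_pool_destroy(cfv->genpool);
		cfv->genpool = NULL;
	}

	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent, cfv->allocsz,
				  cfv->alloc_addr, cfv->alloc_dma);
}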