Example #1
/*
 * REVISIT This supports CPU and DMA access to/from SRAM, but it
 * doesn't (yet?) support some other notable uses of SRAM:  as TCM
 * for data and/or instructions; and holding code needed to enter
 * and exit suspend states (while DRAM can't be used).
 */
static int __init sram_init(void)
{
	phys_addr_t phys = davinci_soc_info.sram_dma;
	unsigned len = davinci_soc_info.sram_len;
	int status = 0;
	void __iomem *addr;

	if (len) {
		len = min_t(unsigned, len, SRAM_SIZE);
		sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
		if (!sram_pool)
			status = -ENOMEM;
	}

	if (sram_pool) {
		addr = ioremap(phys, len);
		if (!addr)
			return -ENOMEM;
		status = gen_pool_add_virt(sram_pool, (unsigned long) addr,
					   phys, len, -1);
		if (status < 0)
			iounmap(addr);
	}

	WARN_ON(status < 0);
	return status;
}
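
For reference, a minimal sketch of how callers might draw from a pool registered this way; the helper names below are hypothetical, not part of the driver above:

/*
 * Hypothetical helpers: gen_pool_alloc() returns 0 when the pool is
 * exhausted, and gen_pool_virt_to_phys() recovers the physical address
 * that gen_pool_add_virt() associated with the chunk.
 */
void *sram_alloc_sketch(size_t len, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!sram_pool || !len)
		return NULL;

	vaddr = gen_pool_alloc(sram_pool, len);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(sram_pool, vaddr);
	return (void *)vaddr;
}

void sram_free_sketch(void *addr, size_t len)
{
	gen_pool_free(sram_pool, (unsigned long)addr, len);
}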
Example #2
File: dma-mapping.c  Project: bristot/linux
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}
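
A hedged sketch of the consumer side: roughly how a GFP_ATOMIC coherent allocation would be served from the pool built above (the helper name is assumed, not taken from the file):

/*
 * Assumed helper: hand out a chunk of the preallocated atomic pool and
 * report its physical address via gen_pool_virt_to_phys().
 */
static void *alloc_from_atomic_pool_sketch(size_t size, phys_addr_t *phys)
{
	unsigned long val;

	if (!atomic_pool)
		return NULL;

	val = gen_pool_alloc(atomic_pool, size);
	if (!val)
		return NULL;

	if (phys)
		*phys = gen_pool_virt_to_phys(atomic_pool, val);
	return (void *)val;
}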
Example #3
/*
 * Note that we cannot use ioremap for SRAM, as clock init needs SRAM early.
 */
static void __init omap_map_sram(void)
{
	int cached = 1;

	if (omap_sram_size == 0)
		return;

#ifdef CONFIG_OMAP4_ERRATA_I688
	omap_sram_start += PAGE_SIZE;
	omap_sram_size -= SZ_16K;
#endif
	if (cpu_is_omap34xx()) {
		/*
		 * SRAM must be marked as non-cached on OMAP3 since the
		 * CORE DPLL M2 divider change code (in SRAM) runs with the
		 * SDRAM controller disabled, and if it is marked cached,
		 * the ARM may attempt to write cache lines back to SDRAM
		 * which will cause the system to hang.
		 */
		cached = 0;
	}

	omap_sram_start = ROUND_DOWN(omap_sram_start, PAGE_SIZE);
	omap_sram_base = __arm_ioremap_exec(omap_sram_start, omap_sram_size,
						cached);
	if (!omap_sram_base) {
		pr_err("SRAM: Could not map\n");
		return;
	}

	{
		/* The first SRAM_BOOTLOADER_SZ of SRAM are reserved */
		void *base = (void *)omap_sram_base + SRAM_BOOTLOADER_SZ;
		phys_addr_t phys = omap_sram_start + SRAM_BOOTLOADER_SZ;
		size_t len = omap_sram_size - SRAM_BOOTLOADER_SZ;

		omap_gen_pool = gen_pool_create(ilog2(FNCPY_ALIGN), -1);
		if (omap_gen_pool)
			WARN_ON(gen_pool_add_virt(omap_gen_pool,
					(unsigned long)base, phys, len, -1));
		WARN_ON(!omap_gen_pool);
	}

	/*
	 * Looks like we need to preserve some bootloader code at the
	 * beginning of SRAM for jumping to flash for reboot to work...
	 */
	memset((void *)omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
	       omap_sram_size - SRAM_BOOTLOADER_SZ);
}
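
A usage sketch for the pool created above (the helper is illustrative): because the pool was created with ilog2(FNCPY_ALIGN) as its minimum allocation order, blocks are handed out in FNCPY_ALIGN granules, which suits copying code into SRAM.

/* Illustrative helper: carve a block out of the OMAP SRAM pool. */
static void *omap_sram_alloc_sketch(size_t len)
{
	if (!omap_gen_pool)
		return NULL;
	return (void *)gen_pool_alloc(omap_gen_pool, len);
}

Example #4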
IMG_RESULT SYSMEMKM_AddCarveoutMemory(
    IMG_UINTPTR     vstart,
    IMG_PHYSADDR    pstart,
    IMG_UINT32      size,
    SYS_eMemPool *  peMemPool
)
{
    IMG_RESULT ui32Result;
    struct priv_params *prv;
    struct gen_pool *pool = gen_pool_create(12, -1);

    /* Check pool creation up front: the cleanup path below calls
     * gen_pool_destroy(), which must not be handed a NULL pool. */
    IMG_ASSERT(pool != IMG_NULL);
    if (IMG_NULL == pool)
    {
        return IMG_ERROR_OUT_OF_MEMORY;
    }

    prv = (struct priv_params *)IMG_MALLOC(sizeof(*prv));
    IMG_ASSERT(prv != IMG_NULL);
    if (IMG_NULL == prv)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_priv_alloc;
    }
    IMG_MEMSET((void *)prv, 0, sizeof(*prv));

    IMG_ASSERT(size != 0);
    IMG_ASSERT((vstart & (HOST_MMU_PAGE_SIZE-1)) == 0);

    /* gen_pool_add_virt() returns non-zero on failure. */
    if (gen_pool_add_virt(pool, (unsigned long)vstart,
                          (unsigned long)pstart, size, -1) != 0)
    {
        ui32Result = IMG_ERROR_OUT_OF_MEMORY;
        goto error_heap_add;
    }

    prv->pool = pool;
    prv->pstart = pstart;
    prv->size = size;
    prv->vstart = vstart;

    ui32Result = SYSMEMU_AddMemoryHeap(&carveout_ops, IMG_TRUE, (IMG_VOID *)prv, peMemPool);
    IMG_ASSERT(IMG_SUCCESS == ui32Result);
    if(IMG_SUCCESS != ui32Result)
    {
        goto error_heap_add;
    }

    return IMG_SUCCESS;

error_heap_add:
    IMG_FREE(prv);
error_priv_alloc:
    gen_pool_destroy(pool);

    return ui32Result;
}
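
For completeness, a sketch (names hypothetical) of how the carveout heap's allocation callback might pull from this pool:

/* Hypothetical allocation path for the carveout heap above. */
static IMG_UINTPTR carveout_alloc_sketch(struct priv_params *prv,
                                         IMG_UINT32 ui32Size)
{
    /* gen_pool_alloc() returns 0 once the carveout is exhausted. */
    return (IMG_UINTPTR)gen_pool_alloc(prv->pool, ui32Size);
}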
Example #5
File: sram.c  Project: ChrisOHu/linux
static int sram_probe(struct platform_device *pdev)
{
	void __iomem *virt_base;
	struct sram_dev *sram;
	struct resource *res;
	unsigned long size;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(virt_base))
		return PTR_ERR(virt_base);

	size = resource_size(res);

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
	if (!sram->pool)
		return -ENOMEM;

	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
				res->start, size, -1);
	if (ret < 0) {
		/*
		 * The pool came from devm_gen_pool_create(), so it is torn
		 * down by the devres machinery; destroying it by hand here
		 * would lead to a double free on driver detach.
		 */
		return ret;
	}

	platform_set_drvdata(pdev, sram);

	dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);

	return 0;
}
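
On the client side, later kernels let a device claim this pool through a phandle; a sketch assuming the of_gen_pool_get() helper and an "sram" phandle property are available in this tree:

/*
 * Assumed client lookup: resolve the pool exported by the sram driver
 * via an "sram" phandle property, then allocate from it.
 */
static void *sram_client_alloc_sketch(struct device_node *np, size_t len)
{
	struct gen_pool *pool = of_gen_pool_get(np, "sram", 0);

	if (!pool)
		return NULL;
	return (void *)gen_pool_alloc(pool, len);
}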
Example #6
static int sram_probe(struct vmm_device *dev,
		      const struct vmm_devtree_nodeid *nodeid)
{
	void *virt_base = NULL;
	struct sram_dev *sram = NULL;
	physical_addr_t start = 0;
	virtual_size_t size = 0;
	int ret = VMM_OK;

	ret = vmm_devtree_regaddr(dev->of_node, &start, 0);
	if (VMM_OK != ret) {
		vmm_printf("%s: Failed to get device base\n", dev->name);
		return ret;
	}

	ret = vmm_devtree_regsize(dev->of_node, &size, 0);
	if (VMM_OK != ret) {
		vmm_printf("%s: Failed to get device size\n", dev->name);
		goto err_out;
	}

	virt_base = (void *)vmm_host_iomap(start, size);
	if (NULL == virt_base) {
		vmm_printf("%s: Failed to get remap memory\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	sram = vmm_devm_zalloc(dev, sizeof(*sram));
	if (!sram) {
		vmm_printf("%s: Failed to allocate structure\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	sram->clk = devm_clk_get(dev, NULL);
	if (VMM_IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	sram->pool = devm_gen_pool_create(dev, SRAM_GRANULARITY_LOG);
	if (!sram->pool) {
		vmm_printf("%s: Failed to create memory pool\n", dev->name);
		ret = VMM_ENOMEM;
		goto err_out;
	}

	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
				start, size);
	if (ret < 0) {
		vmm_printf("%s: Failed to add memory chunk\n", dev->name);
		goto err_out;
	}

	vmm_devdrv_set_data(dev, sram);

	vmm_printf("%s: SRAM pool: %ld KiB @ 0x%p\n", dev->name, size / 1024,
		   virt_base);

	return 0;

err_out:
	if (sram && sram->pool)
		gen_pool_destroy(sram->pool);

#if 0
	if (sram->clk)
		clk_disable_unprepare(sram->clk);
#endif /* 0 */

	if (sram)
		vmm_free(sram);
	sram = NULL;

	if (virt_base)
		vmm_host_iounmap((virtual_addr_t)virt_base);
	virt_base = NULL;

	return ret;
}
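
Assuming Xvisor's genalloc port keeps Linux's gen_pool_alloc()/gen_pool_free() signatures, consumers of this pool would look roughly like:

/* Sketch only: allocate and release SRAM through the pool above. */
static unsigned long sram_alloc_sketch(struct sram_dev *sram, size_t len)
{
	return gen_pool_alloc(sram->pool, len);
}

static void sram_free_sketch(struct sram_dev *sram, unsigned long addr,
			     size_t len)
{
	gen_pool_free(sram->pool, addr, len);
}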
Example #7
/**
 * zynq_ocm_probe - Probe method for the OCM driver
 * @pdev:	Pointer to the platform_device structure
 *
 * This function initializes the driver data structures and the hardware.
 *
 * Return:	0 on success and error value on failure
 */
static int zynq_ocm_probe(struct platform_device *pdev)
{
	int ret;
	struct zynq_ocm_dev *zynq_ocm;
	u32 i, ocm_config, curr;
	struct resource *res;

	ocm_config = zynq_slcr_get_ocm_config();

	zynq_ocm = devm_kzalloc(&pdev->dev, sizeof(*zynq_ocm), GFP_KERNEL);
	if (!zynq_ocm)
		return -ENOMEM;

	zynq_ocm->pool = devm_gen_pool_create(&pdev->dev,
					      ilog2(ZYNQ_OCM_GRANULARITY),
					      NUMA_NO_NODE, NULL);
	if (!zynq_ocm->pool)
		return -ENOMEM;

	curr = 0; /* For storing current struct resource for OCM */
	for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
		u32 base, start, end;

		/* Setup base address for 64kB OCM block */
		if (ocm_config & BIT(i))
			base = ZYNQ_OCM_HIGHADDR;
		else
			base = ZYNQ_OCM_LOWADDR;

		/* Calculate start and end block addresses */
		start = i * ZYNQ_OCM_BLOCK_SIZE + base;
		end = start + (ZYNQ_OCM_BLOCK_SIZE - 1);

		/* Concatenate OCM blocks together to get bigger pool */
		if (i > 0 && start == (zynq_ocm->res[curr - 1].end + 1)) {
			zynq_ocm->res[curr - 1].end = end;
		} else {
#ifdef CONFIG_SMP
			/*
			 * OCM block if placed at 0x0 has special meaning
			 * for SMP because jump trampoline is added there.
			 * Ensure that this address won't be allocated.
			 */
			if (!base) {
				u32 trampoline_code_size =
					&zynq_secondary_trampoline_end -
					&zynq_secondary_trampoline;
				dev_dbg(&pdev->dev,
					"Allocate reset vector table %dB\n",
					trampoline_code_size);
				/* postpone start offset */
				start += trampoline_code_size;
			}
#endif
			/* First resource is always initialized */
			zynq_ocm->res[curr].start = start;
			zynq_ocm->res[curr].end = end;
			zynq_ocm->res[curr].flags = IORESOURCE_MEM;
			curr++; /* Increment curr value */
		}
		dev_dbg(&pdev->dev, "OCM block %d, start %x, end %x\n",
			i, start, end);
	}

	/*
	 * Separate pool allocation from OCM block detection to ensure
	 * the biggest possible pool.
	 */
	for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
		unsigned long size;
		void __iomem *virt_base;

		/* Skip all zero size resources */
		if (zynq_ocm->res[i].end == 0)
			break;
		dev_dbg(&pdev->dev, "OCM resources %d, start %x, end %x\n",
			i, zynq_ocm->res[i].start, zynq_ocm->res[i].end);
		size = resource_size(&zynq_ocm->res[i]);
		virt_base = devm_ioremap_resource(&pdev->dev,
						  &zynq_ocm->res[i]);
		if (IS_ERR(virt_base))
			return PTR_ERR(virt_base);

		ret = gen_pool_add_virt(zynq_ocm->pool,
					(unsigned long)virt_base,
					zynq_ocm->res[i].start, size, -1);
		if (ret < 0) {
			dev_err(&pdev->dev, "Gen pool failed\n");
			return ret;
		}
		dev_info(&pdev->dev, "ZYNQ OCM pool: %ld KiB @ 0x%p\n",
			 size / 1024, virt_base);
	}

	/* Get OCM config space */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	zynq_ocm->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(zynq_ocm->base))
		return PTR_ERR(zynq_ocm->base);

	/* Allocate OCM parity IRQ */
	zynq_ocm->irq = platform_get_irq(pdev, 0);
	if (zynq_ocm->irq < 0) {
		dev_err(&pdev->dev, "irq resource not found\n");
		return zynq_ocm->irq;
	}
	ret = devm_request_irq(&pdev->dev, zynq_ocm->irq, zynq_ocm_irq_handler,
			       0, pdev->name, zynq_ocm);
	if (ret != 0) {
		dev_err(&pdev->dev, "request_irq failed\n");
		return ret;
	}

	/* Enable parity errors */
	writel(ZYNQ_OCM_PARITY_ENABLE, zynq_ocm->base + ZYNQ_OCM_PARITY_CTRL);

	platform_set_drvdata(pdev, zynq_ocm);

	return 0;
}
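
As a small follow-on (illustrative, not from the driver): since all OCM blocks end up concatenated in a single genpool, occupancy can be reported with the stock accounting helpers:

/* Illustrative: report pool occupancy with genalloc's accessors. */
static void zynq_ocm_report_sketch(struct zynq_ocm_dev *zynq_ocm)
{
	pr_debug("OCM pool: %zu of %zu bytes free\n",
		 gen_pool_avail(zynq_ocm->pool),
		 gen_pool_size(zynq_ocm->pool));
}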
Example #8
static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;

	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation, so we use genpool. We ask for the space
	 * needed by IP and a full ring. If the DMA allocation fails, we
	 * retry with a smaller allocation size.
	 */
	err = -ENOMEM;
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;

	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
		}

		cfv->alloc_addr = dma_alloc_coherent(
						cfv->vdev->dev.parent->parent,
						cfv->allocsz, &cfv->alloc_dma,
						GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;

		cfv->allocsz = (cfv->allocsz * 3) >> 2;
	}

	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
		   cfv->allocsz);

	/* Allocate on 128-byte boundaries (1 << 7) */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;

	/* Reserve some memory for low memory situations. If we hit the roof
	 * in the memory pool, we stop TX flow and release the reserve.
	 */
	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}
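
A sketch of the per-buffer path this pool serves (helper names hypothetical): the TX side carves 128-byte aligned buffers out of the shared DMA area and returns them when the ring entry is consumed.

/* Hypothetical TX-buffer helpers backed by the pool created above. */
static void *cfv_buf_alloc_sketch(struct cfv_info *cfv, size_t len)
{
	/* Alignment follows the pool's 128-byte minimum order. */
	return (void *)gen_pool_alloc(cfv->genpool, len);
}

static void cfv_buf_free_sketch(struct cfv_info *cfv, void *buf, size_t len)
{
	gen_pool_free(cfv->genpool, (unsigned long)buf, len);
}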