Example #1
static int omap_tiler_alloc_carveout(struct ion_heap *heap,
				     struct omap_tiler_info *info)
{
	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
	int i;
	int ret;
	ion_phys_addr_t addr;

	addr = gen_pool_alloc(omap_heap->pool, info->n_phys_pages * PAGE_SIZE);
	if (addr) {
		info->lump = true;
		for (i = 0; i < info->n_phys_pages; i++)
			info->phys_addrs[i] = addr + i * PAGE_SIZE;
		return 0;
	}

	for (i = 0; i < info->n_phys_pages; i++) {
		addr = gen_pool_alloc(omap_heap->pool, PAGE_SIZE);

		if (addr == 0) {
			ret = -ENOMEM;
			pr_err("%s: failed to allocate pages to back "
			       "tiler address space\n", __func__);
			goto err;
		}
		info->phys_addrs[i] = addr;
	}
	return 0;

err:
	for (i -= 1; i >= 0; i--)
		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
	return ret;
}
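All of the examples below revolve around the same small genalloc API. For orientation, here is a minimal, self-contained sketch of the full create/add/alloc/free cycle; the region base, size, and function name are placeholders, not taken from any of the drivers shown here.

#include <linux/genalloc.h>
#include <linux/sizes.h>

/* Placeholder region; a real driver would use a reserved carveout. */
#define MY_POOL_BASE	0x40000000UL
#define MY_POOL_SIZE	SZ_64K

static struct gen_pool *my_pool;

static int my_pool_init(void)
{
	unsigned long chunk;
	int ret;

	/* Granularity is 2^order bytes; nid of -1 means any NUMA node. */
	my_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!my_pool)
		return -ENOMEM;

	/* Hand the reserved region to the pool. */
	ret = gen_pool_add(my_pool, MY_POOL_BASE, MY_POOL_SIZE, -1);
	if (ret) {
		gen_pool_destroy(my_pool);
		return ret;
	}

	/*
	 * gen_pool_alloc() returns 0 on failure, which is why pools are
	 * never seeded starting at address 0.
	 */
	chunk = gen_pool_alloc(my_pool, PAGE_SIZE);
	if (!chunk)
		return -ENOMEM;

	gen_pool_free(my_pool, chunk, PAGE_SIZE);
	return 0;
}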
Example #2
File: pm33xx.c Project: krzk/linux
/*
 * Push the minimal suspend-resume code to SRAM
 */
static int am33xx_pm_alloc_sram(void)
{
	struct device_node *np;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
	if (!np) {
		np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
		if (!np) {
			dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
				__func__);
			return -ENODEV;
		}
	}

	sram_pool = of_gen_pool_get(np, "pm-sram", 0);
	if (!sram_pool) {
		dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
			__func__);
		ret = -ENODEV;
		goto mpu_put_node;
	}

	sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
	if (!sram_pool_data) {
		dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
			__func__);
		ret = -ENODEV;
		goto mpu_put_node;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
	if (!ocmcram_location) {
		dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
			__func__);
		ret = -ENOMEM;
		goto mpu_put_node;
	}

	ocmcram_location_data = gen_pool_alloc(sram_pool_data,
					       sizeof(struct emif_regs_amx3));
	if (!ocmcram_location_data) {
		dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
		gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
		ret = -ENOMEM;
	}

mpu_put_node:
	of_node_put(np);
	return ret;
}
Example #3
/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
Example #4
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	void *ret;

	/*
	 * Our max_low_pfn should have been backed off by 16MB in
	 * mm/init.c to create DMA coherent space.  Use that as the VA
	 * for the pool.
	 */

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				pfn_to_virt(max_low_pfn),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) virt_to_phys(ret);
	} else
		*dma_addr = ~0;

	return ret;
}
Example #5
static int allocate_sram(struct snd_pcm_substream *substream,
		struct gen_pool *sram_pool, unsigned size,
		struct snd_pcm_hardware *ppcm)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	struct snd_dma_buffer *iram_dma = NULL;
	dma_addr_t iram_phys = 0;
	void *iram_virt = NULL;

	if (buf->private_data || !size)
		return 0;

	ppcm->period_bytes_max = size;
	iram_virt = (void *)gen_pool_alloc(sram_pool, size);
	if (!iram_virt)
		goto exit1;
	iram_phys = gen_pool_virt_to_phys(sram_pool, (unsigned)iram_virt);
	iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
	if (!iram_dma)
		goto exit2;
	iram_dma->area = iram_virt;
	iram_dma->addr = iram_phys;
	memset(iram_dma->area, 0, size);
	iram_dma->bytes = size;
	buf->private_data = iram_dma;
	return 0;
exit2:
	if (iram_virt)
		gen_pool_free(sram_pool, (unsigned)iram_virt, size);
exit1:
	return -ENOMEM;
}
Example #6
void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				(PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
	} else
		*dma_addr = ~0;

	return ret;
}
Example #7
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	void *ret;

	printk("hexagon_dma_alloc_coherent %d\n", size);
	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				hexagon_coherent_pool_start,
				hexagon_coherent_pool_size, -1);
	}
	printk("hexagon_dma_alloc_coherent2 %X %X\n", (u32)hexagon_coherent_pool_start, (u32)hexagon_coherent_pool_size);

	ret = (void *) gen_pool_alloc(coherent_pool, size);
	printk("hexagon_dma_alloc_coherent3 %X\n", (u32)ret);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
	} else
		*dma_addr = ~0;

	printk("hexagon_dma_alloc_coherent4\n");

	return ret;
}
Example #8
static void *__alloc_from_pool(size_t size, struct page **ret_pages, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;
	int count = size >> PAGE_SHIFT;
	int i;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
		for (i = 0; i < count; i++) {
			ret_pages[i] = phys_to_page(phys);
			phys += 1 << PAGE_SHIFT;
		}
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}
Example #9
static void *ocram_init_mem(size_t size, void **other)
{
	struct device_node *np;
	struct gen_pool *gp;
	void *sram_addr;
	const char *compat = "altr,ocram-edac";

	if (of_machine_is_compatible("altr,socfpga-arria10"))
		compat = "altr,a10-ocram-edac";

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	gp = of_gen_pool_get(np, "iram", 0);
	if (!gp)
		return NULL;
	*other = gp;

	sram_addr = (void *)gen_pool_alloc(gp, size);
	if (!sram_addr)
		return NULL;

	memset(sram_addr, 0, size);
	wmb();	/* Ensure data is written out */

	return sram_addr;
}
Example #10
/*!
 * Allocates uncacheable buffer from IRAM
 */
void __iomem *sdma_iram_malloc(size_t size, unsigned long *buf)
{
	*buf = gen_pool_alloc(sdma_iram_pool, size);
	if (!*buf)	/* test the allocated address, not the output pointer */
		return NULL;

	return sdma_iram_phys_to_virt(*buf);
}
Example #11
/*
 * Allocates a block of card memory.
 *
 * Returns 0 if there was an error, otherwise the address (relative to the
 * start of card memory) on success.
 */
unsigned long tl880_alloc_memory(struct tl880_dev *tl880dev, size_t bytes) /* {{{ */
{
	if(CHECK_NULL(tl880dev) || CHECK_NULL(tl880dev->pool)) {
		return 0;
	}

	return gen_pool_alloc(tl880dev->pool, bytes);
} /* }}} */
Example #12
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr = gen_pool_alloc(pool, size);

	if (vaddr)
		*dma = gen_pool_virt_to_phys(pool, vaddr);
	return (void *)vaddr;
}
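Example #12 is the lib/genalloc.c helper itself rather than a caller. A sketch of typical usage, assuming a device-tree "sram" pool; the function name and DESC_BYTES are hypothetical:

#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#define DESC_BYTES	256	/* hypothetical descriptor block size */

static int my_desc_alloc(struct platform_device *pdev)
{
	struct gen_pool *pool;
	dma_addr_t desc_dma;
	void *desc_va;

	pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
	if (!pool)
		return -ENODEV;

	/* returns the CPU address and fills in the DMA address */
	desc_va = gen_pool_dma_alloc(pool, DESC_BYTES, &desc_dma);
	if (!desc_va)
		return -ENOMEM;

	/* ... hand desc_dma to the device, use desc_va from the CPU ... */

	gen_pool_free(pool, (unsigned long)desc_va, DESC_BYTES);
	return 0;
}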
Example #13
void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
{
	if (!iram_pool)
		return NULL;

	*dma_addr = gen_pool_alloc(iram_pool, size);
	pr_debug("iram alloc - %uB@0x%lX\n", size, *dma_addr);
	if (!*dma_addr)
		return NULL;
	return iram_phys_to_virt(*dma_addr);
}
Example #14
/* This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);

	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;

		/* Get used buffer from used ring to recycle used descriptors */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish memory reserved and open up tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* if no memory reserve, wait for more free slots */
			WARN_ON(cfv->watermark_tx >
			       virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
		}
	}
}
Example #15
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}
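Here the pool allocates buffer-pool IDs rather than memory: one allocation unit per BPID, offset by DPAA_GENALLOC_OFF so that a valid ID is never 0 and gen_pool_alloc()'s 0 return still signals failure. The pool is presumably seeded along these lines (a sketch modelled on the upstream qbman driver, error reporting trimmed):

static int bpid_pool_init(struct device *dev)
{
	/* order 0: 1-byte granularity, i.e. one allocation unit per BPID */
	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
	if (IS_ERR(bm_bpalloc))
		return PTR_ERR(bm_bpalloc);

	/* seed 64 buffer-pool IDs, shifted up by DPAA_GENALLOC_OFF */
	return gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, 64, -1);
}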
Example #16
unsigned long allocate_head(struct ocmem_zone *z, unsigned long size)
{
	unsigned long offset;

	offset = gen_pool_alloc(z->z_pool, size);

	if (!offset)
		return -ENOMEM;

	z->z_head += size;
	z->z_free -= size;
	return offset;
}
Example #17
/**
 * zynq_pm_remap_ocm() - Remap OCM
 * Returns a pointer to the mapped memory or NULL.
 *
 * Remap the OCM.
 */
static void __iomem *zynq_pm_remap_ocm(void)
{
	struct device_node *np;
	const char *comp = "xlnx,zynq-ocmc-1.0";
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, comp);
	if (np) {
		struct device *dev;
		unsigned long pool_addr;
		unsigned long pool_addr_virt;
		struct gen_pool *pool;

		dev = &(of_find_device_by_node(np)->dev);

		of_node_put(np);

		/* Get OCM pool from device tree or platform data */
		pool = dev_get_gen_pool(dev);
		if (!pool) {
			pr_warn("%s: OCM pool is not available\n", __func__);
			return NULL;
		}

		pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
		if (!pool_addr_virt) {
			pr_warn("%s: Can't get OCM poll\n", __func__);
			return NULL;
		}
		pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
		if (!pool_addr) {
			pr_warn("%s: Can't get physical address of OCM pool\n",
				__func__);
			return NULL;
		}
		base = __arm_ioremap(pool_addr, zynq_sys_suspend_sz,
				     MT_MEMORY_RWX);
		if (!base) {
			pr_warn("%s: IOremap OCM pool failed\n", __func__);
			return NULL;
		}
		pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
			 pool_addr_virt, (unsigned long)base);
	} else {
		pr_warn("%s: no compatible node found for '%s'\n", __func__,
				comp);
	}

	return base;
}
Example #18
unsigned long allocate_tail(struct ocmem_zone *z, unsigned long size)
{
	unsigned long offset;
	unsigned long reserve;
	unsigned long head;

	if (z->z_tail < (z->z_head + size))
		return -ENOMEM;

	reserve = z->z_tail - z->z_head - size;
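	/*
	 * With the default first-fit policy, gen_pool_alloc() returns the
	 * lowest free block. Claiming a placeholder that covers everything
	 * except the last 'size' bytes forces the real allocation to land
	 * at the tail; the placeholder is then released.
	 */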
	if (reserve) {
		head = gen_pool_alloc(z->z_pool, reserve);
		offset = gen_pool_alloc(z->z_pool, size);
		gen_pool_free(z->z_pool, head, reserve);
	} else
		offset = gen_pool_alloc(z->z_pool, size);

	if (!offset)
		return -ENOMEM;

	z->z_tail -= size;
	z->z_free -= size;
	return offset;
}
Example #19
struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = sram_get_gpool("asram");
	if (!gpool)
		return NULL;

	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
	if (!tdmac->desc_arr)
		return NULL;

	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
			(unsigned long)tdmac->desc_arr);

	return tdmac->desc_arr;
}
Example #20
static int am33xx_prepare_push_sram_idle(void)
{
	struct device_node *np;
	int ret;

	ret = ti_emif_copy_pm_function_table(pm_sram->emif_sram_table);
	if (ret) {
		pr_err("PM: %s: EMIF function copy failed\n", __func__);
		return -EPROBE_DEFER;
	}

	np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");

	if (!np) {
		np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
		if (!np) {
			pr_warn("PM: %s: Unable to find device node for mpu\n",
				__func__);
			return -ENODEV;
		}
	}

	sram_pool = of_get_named_gen_pool(np, "sram", 0);

	if (!sram_pool) {
		pr_warn("PM: %s: Unable to get sram pool for ocmcram\n",
			__func__);
		return -ENODEV;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
	if (!ocmcram_location) {
		pr_warn("PM: %s: Unable to allocate memory from ocmcram\n",
			__func__);
		return -EINVAL;
	}

	/* Save physical address to calculate resume offset during pm init */
	am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
							ocmcram_location);

	return 0;
}
Example #21
void *sram_alloc(size_t len, dma_addr_t *dma)
{
	unsigned long vaddr;
	dma_addr_t dma_base = davinci_soc_info.sram_dma;

	if (dma)
		*dma = 0;
	if (!sram_pool || (dma && !dma_base))
		return NULL;

	vaddr = gen_pool_alloc(sram_pool, len);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = dma_base + (vaddr - SRAM_VIRT);
	return (void *)vaddr;
}
Example #22
static int ti_emif_push_sram(struct device *dev)
{
	struct device_node *np = dev->of_node;

	sram_pool = of_gen_pool_get(np, "sram", 0);

	if (!sram_pool) {
		dev_err(dev, "Unable to get sram pool for ocmcram\n");
		return -ENODEV;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, ti_emif_sram_sz);
	if (!ocmcram_location) {
		dev_err(dev, "Unable to allocate memory from ocmcram\n");
		return -EINVAL;
	}

	/* Save physical address to calculate resume offset during pm init */
	ti_emif_sram_phys = gen_pool_virt_to_phys(sram_pool,
						  ocmcram_location);
	ti_emif_sram_virt = fncpy((void *)ocmcram_location,
				  &ti_emif_sram,
				  ti_emif_sram_sz);

	/*
	 * These functions are called during suspend path while MMU is
	 * still on so add virtual base to offset for absolute address
	 */
	ti_emif_pm.save_context = sram_suspend_address(ti_emif_pm.save_context);
	ti_emif_pm.enter_sr = sram_suspend_address(ti_emif_pm.enter_sr);
	ti_emif_pm.abort_sr = sram_suspend_address(ti_emif_pm.abort_sr);

	/*
	 * These are called during resume path when MMU is not enabled
	 * so physical address is used instead
	 */
	ti_emif_pm.restore_context =
		sram_resume_address(ti_emif_pm.restore_context);
	ti_emif_pm.exit_sr = sram_resume_address(ti_emif_pm.exit_sr);

	return 0;
}
Example #23
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}
Example #24
u32 sps_mem_alloc_io(u32 bytes)
{
	u32 phys_addr = SPS_ADDR_INVALID;
	u32 virt_addr = 0;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (virt_addr) {
		iomem_offset = virt_addr - (u32) iomem_virt;
		phys_addr = iomem_phys + iomem_offset;
		total_alloc += bytes;
	} else {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	SPS_DBG2("sps:sps_mem_alloc_io.phys=0x%x.virt=0x%x.size=0x%x.",
		phys_addr, virt_addr, bytes);

	return phys_addr;
}
Example #25
/**
 * Allocate I/O (pipe) memory
 *
 */
phys_addr_t sps_mem_alloc_io(u32 bytes)
{
	phys_addr_t phys_addr = SPS_ADDR_INVALID;
	unsigned long virt_addr = 0;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (virt_addr) {
		iomem_offset = virt_addr - (uintptr_t) iomem_virt;
		phys_addr = iomem_phys + iomem_offset;
		total_alloc += bytes;
	} else {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	SPS_DBG2("sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
		&phys_addr, virt_addr, bytes);

	return phys_addr;
}
Example #26
/* Allocate a buffer in dma-memory and copy skb to it */
static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
						       struct sk_buff *skb,
						       struct scatterlist *sg)
{
	struct caif_payload_info *info = (void *)&skb->cb;
	struct buf_info *buf_info = NULL;
	u8 pad_len, hdr_ofs;

	if (!cfv->genpool)
		goto err;

	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
		goto err;
	}

	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
	if (unlikely(!buf_info))
		goto err;

	/* Make the IP header aligned in the buffer */
	hdr_ofs = cfv->tx_hr + info->hdr_len;
	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;

	/* allocate dma memory buffer */
	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
	if (unlikely(!buf_info->vaddr))
		goto err;

	/* copy skbuf contents to send buffer */
	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
	sg_init_one(sg, buf_info->vaddr + pad_len,
		    skb->len + cfv->tx_hr + cfv->rx_hr);

	return buf_info;
err:
	kfree(buf_info);
	return NULL;
}
Example #27
/**
 * Get a range of space from the genpool
 */
static unsigned long hisi_alloc_iova(struct gen_pool *pool,
		unsigned long size, unsigned long align)
{
	unsigned long iova = 0;

	mutex_lock(&iova_pool_mutex);

	iova = gen_pool_alloc(pool, size);
	if (!iova) {
		printk(KERN_ERR "hisi iommu gen_pool_alloc failed!\n");
		mutex_unlock(&iova_pool_mutex);
		return 0;
	}

	if (align > (1 << pool->min_alloc_order)) {
		WARN(1, "hisi iommu domain cant align to 0x%lx\n", align);
	}

	dbg_inf.alloc_iova_count++;

	mutex_unlock(&iova_pool_mutex);
	return iova;
}
Example #28
static int __devinit pruss_probe(struct platform_device *dev)
{
	struct uio_info *p;
	struct uio_pruss_dev *gdev;
	struct resource *regs_prussio;
	int ret = -ENODEV, cnt = 0, len;
	struct uio_pruss_pdata *pdata = dev->dev.platform_data;

	gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
	if (!gdev->info) {
		kfree(gdev);
		return -ENOMEM;
	}
	/* Power on PRU in case its not done as part of boot-loader */
	gdev->pruss_clk = clk_get(&dev->dev, "pruss");
	if (IS_ERR(gdev->pruss_clk)) {
		dev_err(&dev->dev, "Failed to get clock\n");
		kfree(gdev->info);
		kfree(gdev);
		ret = PTR_ERR(gdev->pruss_clk);
		return ret;
	} else {
		clk_enable(gdev->pruss_clk);
	}

	regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!regs_prussio) {
		dev_err(&dev->dev, "No PRUSS I/O resource specified\n");
		goto out_free;
	}

	if (!regs_prussio->start) {
		dev_err(&dev->dev, "Invalid memory resource\n");
		goto out_free;
	}

	gdev->sram_vaddr = (void *)gen_pool_alloc(davinci_gen_pool,
						  sram_pool_sz);
	if (!gdev->sram_vaddr) {
		dev_err(&dev->dev, "Could not allocate SRAM pool\n");
		goto out_free;
	}

	gdev->sram_paddr = gen_pool_virt_to_phys(davinci_gen_pool,
					(unsigned long)gdev->sram_vaddr);

	gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
				&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
	if (!gdev->ddr_vaddr) {
		dev_err(&dev->dev, "Could not allocate external memory\n");
		goto out_free;
	}

	len = resource_size(regs_prussio);
	gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
	if (!gdev->prussio_vaddr) {
		dev_err(&dev->dev, "Can't remap PRUSS I/O  address range\n");
		goto out_free;
	}

	gdev->pintc_base = pdata->pintc_base;
	gdev->hostirq_start = platform_get_irq(dev, 0);

	for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
		p->mem[0].addr = regs_prussio->start;
		p->mem[0].size = resource_size(regs_prussio);
		p->mem[0].memtype = UIO_MEM_PHYS;

		p->mem[1].addr = gdev->sram_paddr;
		p->mem[1].size = sram_pool_sz;
		p->mem[1].memtype = UIO_MEM_PHYS;

		p->mem[2].addr = gdev->ddr_paddr;
		p->mem[2].size = extram_pool_sz;
		p->mem[2].memtype = UIO_MEM_PHYS;

		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
		p->version = DRV_VERSION;

		/* Register PRUSS IRQ lines */
		p->irq = gdev->hostirq_start + cnt;
		p->handler = pruss_handler;
		p->priv = gdev;

		ret = uio_register_device(&dev->dev, p);
		if (ret < 0)
			goto out_free;
	}

	platform_set_drvdata(dev, gdev);
	return 0;

out_free:
	pruss_cleanup(dev, gdev);
	return ret;
}
Example #29
int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev)
{
	struct platform_device *ocram_dev;
	unsigned int iram_paddr;
	struct device_node *node;
	struct gen_pool *iram_pool;

	busfreq_dev = &busfreq_pdev->dev;
	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n");
		return -EINVAL;
	}
	mmdc_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n");
		return -EINVAL;
	}
	ccm_base = of_iomap(node, 0);
	WARN(!ccm_base, "unable to map ccm registers\n");

	node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	l2_base = of_iomap(node, 0);
	WARN(!l2_base, "unable to map PL310 registers\n");

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	anatop_base = of_iomap(node, 0);
	WARN(!anatop_base, "unable to map anatop registers\n");

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		dev_err(busfreq_dev, "%s: failed to find ocram node\n",
			__func__);
		return -EINVAL;
	}

	ocram_dev = of_find_device_by_node(node);
	if (!ocram_dev) {
		dev_err(busfreq_dev, "failed to find ocram device!\n");
		return -EINVAL;
	}

	iram_pool = dev_get_gen_pool(&ocram_dev->dev);
	if (!iram_pool) {
		dev_err(busfreq_dev, "iram pool unavailable!\n");
		return -EINVAL;
	}

	reg_addrs[0] = (unsigned long)anatop_base;
	reg_addrs[1] = (unsigned long)ccm_base;
	reg_addrs[2] = (unsigned long)mmdc_base;
	reg_addrs[3] = (unsigned long)l2_base;

	ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool,
						LPDDR2_FREQ_CHANGE_SIZE);
	if (!ddr_freq_change_iram_base) {
		dev_err(busfreq_dev,
			"Cannot alloc iram for ddr freq change code!\n");
		return -ENOMEM;
	}

	iram_paddr = gen_pool_virt_to_phys(iram_pool,
				(unsigned long)ddr_freq_change_iram_base);
	/*
	 * Need to remap the area here since we want
	 * the memory region to be executable.
	 */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
						LPDDR2_FREQ_CHANGE_SIZE,
						MT_MEMORY_NONCACHED);
	mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base,
		&mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE);

	curr_ddr_rate = ddr_normal_rate;

	return 0;
}
Example #30
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
								size_t size)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int order;
	int ret;
#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	size_t iova_size = 0;
#endif
	for (; sg_dma_len(sg) < offset; sg = sg_next(sg))
		offset -= sg_dma_len(sg);

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	order = __fls(min_t(size_t, size, SZ_1M));

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region) {
		ret = -ENOMEM;
		goto err_map_nomem;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	iova_size = ALIGN(size, SZ_64K);
	start = (dma_addr_t)gen_pool_alloc_aligned(vmm->vmm_pool, iova_size,
									order);
#else
	start = (dma_addr_t)gen_pool_alloc(vmm->vmm_pool, size);
#endif
	if (!start) {
		ret = -ENOMEM;
		goto err_map_noiomem;
	}

	addr = start;
	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg_dma_len(sg);

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg_dma_len(sg) == sg_phys(sg_next(sg))) {
			len += sg_dma_len(sg_next(sg));
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, 0);
		if (ret)
			break;

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));
	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		pr_err("IOVMM: iovmm_map failed as mapped_size (%d) < size (%d)\n", mapped_size, size);
		goto err_map_map;
	}

#ifdef CONFIG_EXYNOS_IOVMM_ALIGN64K
	if (iova_size != size) {
		addr = start + size;
		size = iova_size;

		for (; addr < start + size; addr += PAGE_SIZE) {
			ret = iommu_map(vmm->domain, addr,
				page_to_phys(ZERO_PAGE(0)), PAGE_SIZE, 0);
			if (ret)
				goto err_map_map;

			mapped_size += PAGE_SIZE;
		}
	}
#endif

	region->start = start + start_off;
	region->size = size;

	INIT_LIST_HEAD(&region->node);

	spin_lock(&vmm->lock);

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->lock);

	dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#X bytes.\n",
					region->start, region->size);

	return region->start;

err_map_map:
	iommu_unmap(vmm->domain, start, mapped_size);
	gen_pool_free(vmm->vmm_pool, start, size);
err_map_noiomem:
	kfree(region);
err_map_nomem:
	dev_dbg(dev, "IOVMM: Failed to allocated VM region for %#x bytes.\n",
									size);
	return (dma_addr_t)ret;
}
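gen_pool_alloc_aligned() as used above is a vendor extension; mainline genalloc expresses alignment through the allocation algorithm instead. A sketch of the equivalent, assuming a kernel with gen_pool_alloc_algo():

#include <linux/genalloc.h>

/* align_order is log2 of the required alignment, matching the 'order'
 * value computed in iovmm_map() above.
 */
static unsigned long pool_alloc_aligned(struct gen_pool *pool, size_t size,
					int align_order)
{
	struct genpool_data_align data = { .align = 1 << align_order };

	return gen_pool_alloc_algo(pool, size, gen_pool_first_fit_align,
				   &data);
}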