Example no. 1
static int omap_tiler_alloc_carveout(struct ion_heap *heap,
				     struct omap_tiler_info *info)
{
	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
	int i;
	int ret;
	ion_phys_addr_t addr;

	addr = gen_pool_alloc(omap_heap->pool, info->n_phys_pages * PAGE_SIZE);
	if (addr) {
		info->lump = true;
		for (i = 0; i < info->n_phys_pages; i++)
			info->phys_addrs[i] = addr + i * PAGE_SIZE;
		return 0;
	}

	for (i = 0; i < info->n_phys_pages; i++) {
		addr = gen_pool_alloc(omap_heap->pool, PAGE_SIZE);

		if (addr == 0) {
			ret = -ENOMEM;
			pr_err("%s: failed to allocate pages to back "
			       "tiler address space\n", __func__);
			goto err;
		}
		info->phys_addrs[i] = addr;
	}
	return 0;

err:
	for (i -= 1; i >= 0; i--)
		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
	return ret;
}
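The carveout path above assumes omap_heap->pool has already been created and seeded with the heap's physical range. A minimal sketch of that setup, with hypothetical names (carveout_pool_create, carveout_base, carveout_size), could look like this:

static struct gen_pool *carveout_pool_create(unsigned long carveout_base,
					     size_t carveout_size)
{
	/* PAGE_SHIFT order, so allocations come back in page-sized units */
	struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);

	if (!pool)
		return NULL;

	/* hand the whole carveout to genalloc for later gen_pool_alloc/free */
	if (gen_pool_add(pool, carveout_base, carveout_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}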
Example no. 2
void sdma_iram_free(unsigned long *buf, u32 size)
{
	if (!sdma_iram_pool)
		return;

	gen_pool_free(sdma_iram_pool, (unsigned long)buf, size);
}
Example no. 3
void iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct exynos_vm_region *region;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t unmapped_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	spin_lock(&vmm->lock);

	region = find_region(vmm, iova);
	if (WARN_ON(!region)) {
		spin_unlock(&vmm->lock);
		return;
	}

	list_del(&region->node);

	spin_unlock(&vmm->lock);

	region->start = round_down(region->start, PAGE_SIZE);

	unmapped_size = iommu_unmap(vmm->domain, region->start, region->size);

	exynos_sysmmu_tlb_invalidate(dev);

	gen_pool_free(vmm->vmm_pool, region->start, region->size);

	WARN_ON(unmapped_size != region->size);
	dev_dbg(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
					unmapped_size, region->start);

	kfree(region);
}
Example no. 4
static int allocate_sram(struct snd_pcm_substream *substream,
		struct gen_pool *sram_pool, unsigned size,
		struct snd_pcm_hardware *ppcm)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	struct snd_dma_buffer *iram_dma = NULL;
	dma_addr_t iram_phys = 0;
	void *iram_virt = NULL;

	if (buf->private_data || !size)
		return 0;

	ppcm->period_bytes_max = size;
	iram_virt = gen_pool_dma_alloc(sram_pool, size, &iram_phys);
	if (!iram_virt)
		goto exit1;
	iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
	if (!iram_dma)
		goto exit2;
	iram_dma->area = iram_virt;
	iram_dma->addr = iram_phys;
	memset(iram_dma->area, 0, size);
	iram_dma->bytes = size;
	buf->private_data = iram_dma;
	return 0;
exit2:
	if (iram_virt)
		gen_pool_free(sram_pool, (unsigned long)iram_virt, size);
exit1:
	return -ENOMEM;
}
Example no. 5
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
Example no. 6
void iram_free(unsigned long addr, unsigned int size)
{
	if (!iram_pool)
		return;

	gen_pool_free(iram_pool, addr, size);
}
Example no. 7
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return;

#ifndef CONFIG_UML
	if (unmap)
		/*
		 * We need the double cast because otherwise gcc complains about
		 * cast to pointer of different size. This is technically a down
		 * cast but if unmap is being called, this had better be an
		 * actual 32-bit pointer anyway.
		 */
		iounmap((void *)(unsigned long)node->vaddr);
#endif

	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;

	remove_alloc(node);
	kfree(node);
}
Example no. 8
static void omap_tiler_free_carveout(struct ion_heap *heap,
				     struct omap_tiler_info *info)
{
	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
	int i;

	if (info->lump) {
		gen_pool_free(omap_heap->pool,
				info->phys_addrs[0],
				info->n_phys_pages * PAGE_SIZE);
		return;
	}

	for (i = 0; i < info->n_phys_pages; i++)
		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
}
Example no. 9
/**
 * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
 * @dmab: buffer allocation record to store the allocated data
 */
static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
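For context, the pool stashed in dmab->private_data and the buffer in dmab->area come from the allocation side. A hedged sketch of that counterpart follows (not the actual ALSA helper; of_gen_pool_get() and gen_pool_dma_alloc() are real genalloc APIs, the "iram" phandle name and the function name are assumed):

static int snd_malloc_dev_iram_sketch(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (!dev || !dev->of_node)
		return -ENODEV;

	/* look up the on-chip RAM pool referenced by the device node */
	pool = of_gen_pool_get(dev->of_node, "iram", 0);
	if (!pool)
		return -ENODEV;

	/* remember the pool so snd_free_dev_iram() can return the buffer */
	dmab->private_data = pool;
	dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
	if (!dmab->area)
		return -ENOMEM;

	dmab->bytes = size;
	return 0;
}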
Example no. 10
/*
 * Frees a block of card memory.
 */
void tl880_free_memory(struct tl880_dev *tl880dev, unsigned long addr, size_t bytes) /* {{{ */
{
	if(CHECK_NULL(tl880dev) || CHECK_NULL(tl880dev->pool)) {
		return;
	}

	gen_pool_free(tl880dev->pool, addr, bytes);
} /* }}} */
Example no. 11
static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
{
	if (!buf_info)
		return;
	gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
		      buf_info->size);
	kfree(buf_info);
}
Example no. 12
static void davinci_free_sram(struct snd_pcm_substream *substream,
			      struct snd_dma_buffer *iram_dma)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct gen_pool *sram_pool = prtd->params->sram_pool;

	gen_pool_free(sram_pool, (unsigned long)iram_dma->area, iram_dma->bytes);
}
Example no. 13
static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
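The __in_atomic_pool() helper used above is not shown in this snippet; conceptually it only asks genalloc whether the range lies inside the atomic pool. A sketch of what it might look like, assuming the gen_pool_has_addr() helper (older kernels spell it addr_in_gen_pool()):

static bool __in_atomic_pool(void *start, size_t size)
{
	if (unlikely(!atomic_pool))
		return false;

	/* true only if [start, start + size) is entirely inside the pool */
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}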
Example no. 14
void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
{
	HGSMIBUFFERHEADER *h =
		(HGSMIBUFFERHEADER *)((u8 *)buf - sizeof(*h));
	size_t total_size = h->u32DataSize + sizeof(*h) +
					     sizeof(HGSMIBUFFERTAIL);

	gen_pool_free(guest_pool, (unsigned long)h, total_size);
}
Example no. 15
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

#ifndef CONFIG_UML
	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);
#endif

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32-bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
#ifndef CONFIG_UML
	if (vaddr)
		iounmap(vaddr);
#endif
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
Example no. 16
/**
 * free a range of space back to the genpool
 */
static void hisi_free_iova(struct gen_pool *pool,
		unsigned long iova, size_t size)
{
	mutex_lock(&iova_pool_mutex);
	gen_pool_free(pool, iova, size);

	dbg_inf.free_iova_count++;

	mutex_unlock(&iova_pool_mutex);
}
Example no. 17
static int ti_emif_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	gen_pool_free(sram_pool, ocmcram_location,
		      ti_emif_sram_sz);

	return 0;
}
Example no. 18
static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
Example no. 19
int free_tail(struct ocmem_zone *z, unsigned long offset,
				unsigned long size)
{
	if (offset > z->z_tail) {
		pr_err("ocmem: Detected out of order free "
				"leading to fragmentation\n");
		return -EINVAL;
	}
	gen_pool_free(z->z_pool, offset, size);
	z->z_tail += size;
	z->z_free += size;
	return 0;
}
Example no. 20
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = sram_get_gpool("asram");
	if (tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	tdmac->desc_arr = NULL;

	return;
}
Example no. 21
void sps_mem_free_io(u32 phys_addr, u32 bytes)
{
	u32 virt_addr = 0;

	iomem_offset = phys_addr - iomem_phys;
	virt_addr = (u32) iomem_virt + iomem_offset;

	SPS_DBG2("sps:sps_mem_free_io.phys=0x%x.virt=0x%x.size=0x%x.",
		phys_addr, virt_addr, bytes);

	gen_pool_free(pool, virt_addr, bytes);
	total_free += bytes;
}
Example no. 22
/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
Example no. 23
/**
 * Free I/O memory
 *
 */
void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes)
{
	unsigned long virt_addr = 0;

	iomem_offset = phys_addr - iomem_phys;
	virt_addr = (uintptr_t) iomem_virt + iomem_offset;

	SPS_DBG2("sps:sps_mem_free_io.phys=%pa.virt=0x%lx.size=0x%x.",
		&phys_addr, virt_addr, bytes);

	gen_pool_free(pool, virt_addr, bytes);
	total_free += bytes;
}
Example no. 24
File: pm33xx.c Project: krzk/linux
/*
 * Push the minimal suspend-resume code to SRAM
 */
static int am33xx_pm_alloc_sram(void)
{
	struct device_node *np;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
	if (!np) {
		np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
		if (!np) {
			dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
				__func__);
			return -ENODEV;
		}
	}

	sram_pool = of_gen_pool_get(np, "pm-sram", 0);
	if (!sram_pool) {
		dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
			__func__);
		ret = -ENODEV;
		goto mpu_put_node;
	}

	sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
	if (!sram_pool_data) {
		dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
			__func__);
		ret = -ENODEV;
		goto mpu_put_node;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
	if (!ocmcram_location) {
		dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
			__func__);
		ret = -ENOMEM;
		goto mpu_put_node;
	}

	ocmcram_location_data = gen_pool_alloc(sram_pool_data,
					       sizeof(struct emif_regs_amx3));
	if (!ocmcram_location_data) {
		dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
		gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
		ret = -ENOMEM;
	}

mpu_put_node:
	of_node_put(np);
	return ret;
}
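The matching teardown simply returns both regions to their pools; a sketch mirroring the sizes used above (illustrative, not necessarily the exact upstream helper):

static void am33xx_pm_free_sram_sketch(void)
{
	/* return the suspend-resume code and the EMIF data area to SRAM */
	gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
	gen_pool_free(sram_pool_data, ocmcram_location_data,
		      sizeof(struct emif_regs_amx3));
}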
Example no. 25
static void cfv_destroy_genpool(struct cfv_info *cfv)
{
	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);

	if (!cfv->genpool)
		return;
	gen_pool_free(cfv->genpool,  cfv->reserved_mem,
		      cfv->reserved_size);
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
}
Example no. 26
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
	int mem_type, unsigned long align)
{
	unsigned long paddr;
	unsigned long aligned_size;

	struct alloc *node;
	struct mem_pool *mpool;
	int log_align = ilog2(align);

	mpool = mem_type_to_memory_pool(mem_type);
	if (!mpool)
		return -EINVAL;

	if (!mpool->gpool)
		return -EAGAIN;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return -EAGAIN;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	node->paddr = paddr;

	/* We search the tree using node->vaddr, so set
	 * it to something unique even though we don't
	 * use it for physical allocation nodes.
	 * The virtual and physical address ranges
	 * are disjoint, so there won't be any chance of
	 * a duplicate node->vaddr value.
	 */
	node->vaddr = (void *)paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;
	return paddr;
out_kfree:
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return -ENOMEM;
}
Example no. 27
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return;

	if (unmap)
		iounmap(node->vaddr);

	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;

	remove_alloc(node);
	kfree(node);
}
Example no. 28
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cached(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
Example no. 29
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}
	mutex_unlock(&cp_heap->lock);
}
Example no. 30
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}