struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto free_heap;

	cp_heap->base = heap_data->base;
	ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
	if (ret < 0)
		goto destroy_pool;

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = cp_heap->base;
	cp_heap->secure_size = heap_data->size;
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->request_region = extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->release_region = extra_data->release_region;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}
Example #2
/*
 * uncached_build_memmap,
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}
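The pool populated by uncached_build_memmap() is later drawn from with the ordinary genalloc calls. A minimal, hypothetical sketch of that consumer side follows; the function name and the page-sized request are illustrative and assume the uncached_pools[] array from the example above:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/mm.h>

/* Allocate one page from the per-node uncached pool and hand it back. */
static int example_uncached_roundtrip(int nid)
{
	struct gen_pool *pool = uncached_pools[nid].pool;
	unsigned long uc_addr;

	if (!pool)
		return -ENODEV;

	/* carve one page out of the region added at boot */
	uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
	if (!uc_addr)
		return -ENOMEM;

	/* ... use the uncached mapping at uc_addr ... */

	gen_pool_free(pool, uc_addr, PAGE_SIZE);
	return 0;
}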
Example #3
static int __init sram_init(void)
{
	unsigned len = davinci_soc_info.sram_len;
	int status = 0;

	if (len) {
		len = min_t(unsigned, len, SRAM_SIZE);
		sram_pool = gen_pool_create(ilog2(SRAM_GRANULARITY), -1);
		if (!sram_pool)
			status = -ENOMEM;
	}
	if (sram_pool)
		status = gen_pool_add(sram_pool, SRAM_VIRT, len, -1);
	WARN_ON(status < 0);
	return status;
}
Example #4
static int __init iram_init_internal(unsigned long base, unsigned long size)
{
	iram_phys_base = base;

	iram_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!iram_pool)
		return -ENOMEM;

	gen_pool_add(iram_pool, base, size, -1);
	iram_virt_base = ioremap(iram_phys_base, size);
	if (!iram_virt_base)
		return -EIO;

	pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base);
	return 0;
}
Example #5
static struct gen_pool *initialize_gpool(unsigned long start,
	unsigned long size)
{
	struct gen_pool *gpool;

	gpool = gen_pool_create(PAGE_SHIFT, -1);

	if (!gpool)
		return NULL;
	if (gen_pool_add(gpool, start, size, -1)) {
		gen_pool_destroy(gpool);
		return NULL;
	}

	return gpool;
}
Example #6
/**
 * Initialize driver memory module
 *
 */
int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size)
{
	int res;

	/* 2^8=256. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	if ((d_type == 0) || (d_type == 2) || imem) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		} else {
			iomem_virt = ioremap(iomem_phys, iomem_size);
			if (!iomem_virt) {
				SPS_ERR("sps:Failed to IO map pipe memory.\n");
				return -ENOMEM;
			}
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=%pa,iomem_virt=0x%p.",
			&iomem_phys, iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);

	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	if ((d_type == 0) || (d_type == 2) || imem) {
		res = gen_pool_add(pool, (uintptr_t)iomem_virt,
				iomem_size, nid);
		if (res)
			return res;
	}

	return 0;
}
Example #7
/**
 * Initialize driver memory module
 *
 */
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	int res;
#endif
	/* 2^8=256. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	iomem_phys = pipemem_phys_base;
	iomem_size = pipemem_size;

	if (iomem_phys == 0) {
		SPS_ERR("sps:Invalid Pipe-Mem address");
		return SPS_ERROR;
	} else {
		iomem_virt = ioremap(iomem_phys, iomem_size);
		if (!iomem_virt) {
			SPS_ERR("sps:Failed to IO map pipe memory.\n");
			return -ENOMEM;
		}
	}

	iomem_offset = 0;
	SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
		iomem_phys, (u32) iomem_virt);
#endif

	pool = gen_pool_create(min_alloc_order, nid);

	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
	if (res)
		return res;
#endif

	return 0;
}
Example #8
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
	int res;

	/* 2^8=256. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	if ((d_type == 0) || (d_type == 2)) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		} else {
			iomem_virt = ioremap(iomem_phys, iomem_size);
			if (!iomem_virt) {
				SPS_ERR("sps:Failed to IO map pipe memory.\n");
				return -ENOMEM;
			}
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
			iomem_phys, (u32) iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);

	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	if ((d_type == 0) || (d_type == 2)) {
		res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
		if (res)
			return res;
	}

	return 0;
}
Example #9
static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}
Example #10
int exynos_init_iovmm(struct device *sysmmu, struct exynos_iovmm *vmm1)
{
	int ret = 0;
	struct sysmmu_drvdata *data = dev_get_drvdata(sysmmu);
	struct exynos_iovmm *vmm = &data->vmm;

	vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vmm->vmm_pool) {
		ret = -ENOMEM;
		goto err_setup_genalloc;
	}

	/* (1GB - 4KB) addr space from 0xC0000000 */
	ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
	if (ret)
		goto err_setup_domain;

	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
	printk("exynos_init_iovmm-------line = %d\n",__LINE__);
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	spin_lock_init(&vmm->lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",
						IOVM_SIZE, IOVA_START);
	return 0;
err_setup_domain:
	gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
	dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}
Example #11
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
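The comment blocks above describe a grow-on-demand scheme: allocate from the pool, and convert another granule only when the pool runs dry. A simplified, hypothetical sketch of a consumer that drives uncached_add_chunk() this way (the loop policy and function name are illustrative, not the driver's actual allocator):

#include <linux/genalloc.h>
#include <linux/types.h>

static unsigned long example_uncached_alloc(struct uncached_pool *uc_pool,
					    int nid, size_t size)
{
	unsigned long uc_addr;

	for (;;) {
		/* fast path: the pool already has a free range */
		uc_addr = gen_pool_alloc(uc_pool->pool, size);
		if (uc_addr)
			return uc_addr;

		/* pool exhausted: convert and add one more granule */
		if (uncached_add_chunk(uc_pool, nid) != 0)
			return 0;	/* could not grow the pool */
	}
}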
Example #13
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t flags;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->mmu = mmu;
	pagetable->va_base = mmu->va_base;
	pagetable->va_range = mmu->va_range;
	pagetable->last_superpte = 0;
	pagetable->max_entries = (mmu->va_range >> KGSL_PAGESIZE_SHIFT)
				 + GSL_PT_EXTRA_ENTRIES;

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_gen_pool_create;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_MEM_ERR("gen_pool_create failed for pagetable %p\n",
				pagetable);
		goto err_gen_pool_add;
	}

	/* allocate page table memory */
	flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
		 | KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags,
				      pagetable->max_entries * GSL_PTE_SIZE,
				      &pagetable->base);

	if (status) {
		KGSL_MEM_ERR("cannot alloc page tables\n");
		goto err_kgsl_sharedmem_alloc;
	}

	/* reset page table entries
	 * -- all pte's are marked as not dirty initially
	 */
	kgsl_sharedmem_set(&pagetable->base, 0, 0, pagetable->base.size);
	pagetable->base.gpuaddr = pagetable->base.physaddr;

	KGSL_MEM_VDBG("return %p\n", pagetable);

	return pagetable;

err_kgsl_sharedmem_alloc:
err_gen_pool_add:
	gen_pool_destroy(pagetable->pool);
err_gen_pool_create:
	kfree(pagetable);
	return NULL;
}