Example No. 1
static void __init mx6q_sabrelite_reserve(void)
{
    phys_addr_t phys;
    int i;

    if (imx6q_gpu_pdata.reserved_mem_size) {
        phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
                                   SZ_4K, SZ_1G);
        memblock_free(phys, imx6q_gpu_pdata.reserved_mem_size);
        memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
        imx6q_gpu_pdata.reserved_mem_base = phys;
    }

    if (imx_ion_data.heaps[0].size) {
        phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
        memblock_free(phys, imx_ion_data.heaps[0].size);
        memblock_remove(phys, imx_ion_data.heaps[0].size);
        imx_ion_data.heaps[0].base = phys;
    }

    for (i = 0; i < ARRAY_SIZE(sabrelite_fb_data); i++)
        if (sabrelite_fb_data[i].res_size[0]) {
            /* reserve for background buffer */
            phys = memblock_alloc(sabrelite_fb_data[i].res_size[0],
                                  SZ_4K);
            memblock_free(phys, sabrelite_fb_data[i].res_size[0]);
            memblock_remove(phys, sabrelite_fb_data[i].res_size[0]);
            sabrelite_fb_data[i].res_base[0] = phys;
        }
}
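The idiom above (allocate, then immediately memblock_free() plus memblock_remove()) is the classic boot-time carve-out used by these board files: the allocation only picks a suitable physical range, the free drops the temporary reservation, and the remove takes the range out of the kernel's memory map so a driver can ioremap() or DMA-map it later. A minimal sketch of the idiom, assuming the legacy memblock API in which memblock_alloc() returns a phys_addr_t and panics if nothing can be found; carve_out_region() is a hypothetical name:

static phys_addr_t __init carve_out_region(phys_addr_t size, phys_addr_t align)
{
	/* Pick (and temporarily reserve) a suitable physical range. */
	phys_addr_t base = memblock_alloc(size, align);

	memblock_free(base, size);	/* drop the temporary reservation */
	memblock_remove(base, size);	/* hide the range from the memory map */

	return base;	/* the driver ioremap()s or DMA-maps this later */
}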
Example No. 2
File: setup.c Project: VizXu/linux
/* Reserve a portion of memory for CEU 0 and CEU 1 buffers */
static void __init ecovec_mv_mem_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);
	ceu0_dma_membase = phys;

	phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);
	ceu1_dma_membase = phys;
}
Example No. 3
STATIC int balong_ion_free_mem_to_buddy(void)
{
    int i;
    u32 fb_heap_phy = 0;
    struct ion_heap_info_data mem_data;

    if (0 != hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
        balongfb_loge("fail to get ION_FB_HEAP_ID\n");
        return -EINVAL;
    }

    if (0 == mem_data.heap_size) {
        balongfb_loge("fb reserved size 0\n");
        return -EINVAL;
    }

    fb_heap_phy = mem_data.heap_phy;
    for(i = 0; i < ((mem_data.heap_size)/PAGE_SIZE); i++){
        free_reserved_page(phys_to_page(mem_data.heap_phy));
#ifdef CONFIG_HIGHMEM
        if (PageHighMem(phys_to_page(mem_data.heap_phy)))
            totalhigh_pages += 1;
#endif
        mem_data.heap_phy += PAGE_SIZE;
    }

    memblock_free(fb_heap_phy, mem_data.heap_size);
    return 0;
}
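Example No. 3 shows the reverse operation: a boot-time reservation is handed back to the buddy allocator page by page with free_reserved_page(), the totalhigh_pages counter is bumped by hand for highmem pages (on these older kernels it is a plain global), and a final memblock_free() keeps memblock's bookkeeping in sync. A condensed sketch of the same pattern; release_reserved_range() is a hypothetical helper name:

static int release_reserved_range(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t addr;

	if (!size || !PAGE_ALIGNED(base) || !PAGE_ALIGNED(size))
		return -EINVAL;

	for (addr = base; addr < base + size; addr += PAGE_SIZE) {
		struct page *page = phys_to_page(addr);

		free_reserved_page(page);	/* clears PG_reserved and frees the page */
#ifdef CONFIG_HIGHMEM
		if (PageHighMem(page))
			totalhigh_pages += 1;	/* legacy counter, adjusted directly as above */
#endif
	}

	memblock_free(base, size);	/* keep memblock's view consistent */
	return 0;
}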
Example No. 4
void *memblock_test(void *arg)
{
	long cnt = 0;
	long cntfail = 0;
	int i;
	int runlength = (int)(long)arg;
	struct memblock *p[MAX_RUN];

	if (runlength > MAX_RUN)
		runlength = MAX_RUN;
	while (goflag) {
		for (i = 0; i < runlength; i++)
			p[i] = memblock_alloc();
		for (i = 0; i < runlength; i++) {
			if (p[i] == NULL) {
				cntfail++;
			} else {
				memblock_free(p[i]);
				cnt++;
			}
		}
	}
	__get_thread_var(results) += cnt;
	__get_thread_var(failures) += cntfail;

	return NULL;
}
Example No. 5
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
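The three branches above handle trimming at the start, at the end, and in the middle of an extra-mem entry; only the middle case splits the entry in two. A worked call with made-up PFNs, assuming xen_extra_mem[i] covers pfns [0x1000, 0x2000):

/*
 * Illustrative only: this hits the "mid of region" case.  The entry is
 * shrunk to [0x1000, 0x1100), [0x1200, 0x2000) is re-added through
 * xen_add_extra_mem(), and the physical range backing pfns
 * [0x1100, 0x1200) is then freed in memblock.
 */
xen_del_extra_mem(0x1100, 0x100);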
Example No. 6
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}
Example No. 7
static ssize_t dump_end_proc_read(struct file *file, char __user *userbuf,
				  size_t bytes, loff_t *off)
{
	phys_addr_t addr;
	struct page *page;

	for (addr = g_memdump_addr; addr < (g_memdump_addr + g_memdump_size);
	     addr += PAGE_SIZE) {
		page = pfn_to_page(addr >> PAGE_SHIFT);
		free_reserved_page(page);
#ifdef CONFIG_HIGHMEM
		if (PageHighMem(page))
			totalhigh_pages++;
#endif
	}

	memblock_free(g_memdump_addr, g_memdump_size);

	pr_err("dump_end_proc_read:g_memdump_addr=0x%x, g_memdump_end=0x%x,g_memdump_size=0x%x\n",
		(unsigned int)g_memdump_addr, (unsigned int)g_memdump_end,
		g_memdump_size);
	pr_info("%s:addr%lu\n", __func__, (unsigned long)addr);
	g_memdump_addr = 0;
	g_memdump_end = 0;
	g_memdump_size = 0;
	return 0;
}
Example No. 8
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
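Examples No. 6 and No. 8 share one shape: optimistically grab a PUD- or PMD-sized block, try to install it as a huge mapping, and hand the block straight back to memblock if that fails. A sketch of that shape only; try_huge_mapping() is a made-up stand-in for pud_set_huge()/pmd_set_huge(), and early_alloc() is the memblock-backed helper the KASAN code uses but which is not shown in these excerpts:

static bool __init populate_with_optional_huge_page(unsigned long size, int nid)
{
	void *p = early_alloc(size, nid, false);	/* may return NULL */

	if (p && try_huge_mapping(__pa(p), size))
		return true;			/* huge mapping installed, done */

	if (p)
		memblock_free(__pa(p), size);	/* give the unused block back */

	return false;			/* caller falls back to 4K population */
}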
Example No. 9
static int msm_export_last_regs(void)
{
	struct proc_dir_entry *proc_entry;

	writel_relaxed(mem_dump_data.dump_table_phys,
				MSM_IMEM_BASE + DUMP_TABLE_OFFSET1);
	if (!last_regs_base || !last_regs_size)
		return -EINVAL;

	printk(KERN_INFO "%s: exporting HWWD context as /proc/last_regs\n",
		__func__);

	last_regs_buf = kmalloc(last_regs_size, GFP_KERNEL);
	if (!last_regs_buf) {
		printk(KERN_ERR "%s: failed to allocate last_regs_buf\n",
			__func__);
		return -ENOMEM;
	}

	memcpy(last_regs_buf, last_regs_base, last_regs_size);
	memblock_free(virt_to_phys(last_regs_base), last_regs_size);
	proc_entry = create_proc_entry("last_regs",
					  S_IFREG | S_IRUGO, NULL);
	if (!proc_entry) {
		printk(KERN_ERR "%s: failed to create proc entry\n", __func__);
		kfree(last_regs_buf);
		last_regs_buf = NULL;
		return -ENOMEM;
	}
	proc_entry->proc_fops = &last_regs_fops;
	proc_entry->size = last_regs_size;

	return 0;
}
Example No. 10
static void __init device_tree_init(void)
{
	unsigned long base, size;
	struct device_node *cpu;
	int ret;

	if (!initial_boot_params)
		return;

	base = __pa(initial_boot_params);
	size = be32_to_cpu(initial_boot_params->totalsize);

	/* Before we do anything, lets reserve the dt blob */
	memblock_reserve(base, size);

	unflatten_device_tree();

	/* free the space reserved for the dt blob */
	memblock_free(base, size);

	cpu = of_find_compatible_node(NULL, NULL, "lattice,lm32");
	if (!cpu)
		panic("No compatible CPU found in device tree\n");

	ret = of_property_read_u32(cpu, "clock-frequency", &cpu_frequency);
	if (ret)
		cpu_frequency = (unsigned long)CONFIG_CPU_CLOCK;

	of_node_put(cpu);
}
Example No. 11
void __init bitfix_reserve(void)
{
	int i;
	int ret;

	/*
	 * We'll auto-enable if needed.  However we still allocate memory even
	 * if we detect we're not needed.  That allows us to enable this at
	 * runtime for testing.
	 */
	bitfix_enabled = bitfix_is_needed();

	/* We need pm_check enabled */
	if (bitfix_enabled) {
		pr_info("%s: Detected firmware that needs bitfix\n", __func__);
		s3c_pm_check_set_enable(true);
	}

	for (i = 0; i < UPPER_LOOPS; i++) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		bool was_reserved;

		pr_debug("%s: trying to reserve %08x@%08x\n",
			__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
		was_reserved = memblock_is_region_reserved(xor_superchunk_addr,
			SUPERCHUNK_SIZE);
		if (was_reserved) {
			pr_err("%s: memory already reserved %08x@%08x\n",
				__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
			goto error;
		}

		ret = memblock_reserve(xor_superchunk_addr, SUPERCHUNK_SIZE);
		if (ret) {
			pr_err("%s: memblock_reserve fail (%d) %08x@%08x\n",
				__func__, ret, SUPERCHUNK_SIZE,
				xor_superchunk_addr);
			goto error;
		}
	}

	return;
error:
	/*
	 * If we detected that we needed bitfix code and we couldn't init
	 * then that's a serious problem.  Dump stack so it's pretty obvious.
	 */
	WARN_ON(true);

	for (i--; i >= 0; i--) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		ret = memblock_free(xor_superchunk_addr, SUPERCHUNK_SIZE);
		WARN_ON(ret);
	}
	bitfix_enabled = false;

	__memblock_dump_all();
}
Example No. 12
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	/*
	 * We use __memblock_alloc_base() because memblock_alloc_base()
	 * panic()s on allocation failure.
	 */
	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	base = __memblock_alloc_base(size, align, end);
	if (!base)
		return -ENOMEM;

	/*
	 * Check if the allocated region fits in to start..end window
	 */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);
	return 0;
}
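This __weak hook is normally called from the reserved-memory code in drivers/of/of_reserved_mem.c when a node specifies a size and an allocation window rather than a fixed address; __memblock_alloc_base() only bounds the top of the allocation, which is why the result is checked against start afterwards and freed if it falls below the window. A direct call like the one below is purely to illustrate the parameters; the window, size and alignment are made-up values:

static int __init example_reserved_mem_carveout(void)
{
	phys_addr_t base;

	/* 8 MiB, 1 MiB aligned, inside [0x80000000, 0x90000000), no-map. */
	return early_init_dt_alloc_reserved_memory_arch(SZ_8M, SZ_1M,
							0x80000000, 0x90000000,
							true, &base);
}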
Example No. 13
static void __init pcm037_reserve(void)
{
	/* reserve 4 MiB for mx3-camera */
	mx3_camera_base = memblock_alloc(MX3_CAMERA_BUF_SIZE,
			MX3_CAMERA_BUF_SIZE);
	memblock_free(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
	memblock_remove(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
}
Example No. 14
void __init tegra_release_bootloader_fb(void)
{
	/* Since bootloader fb is reserved in common.c, it is freed here. */
	if (tegra_bootloader_fb_size)
		if (memblock_free(tegra_bootloader_fb_start,
						tegra_bootloader_fb_size))
			pr_err("Failed to free bootloader fb.\n");
}
Example No. 15
File: kernel.c Project: jnow-87/foo
void kfree(void *addr){
	csection_lock(&kmem_mtx);

	if(memblock_free(&kernel_heap, addr) < 0)
		kpanic(0x0, "double free at %p\n", addr);

	csection_unlock(&kmem_mtx);
}
Example No. 16
/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}
Example No. 17
static void __init mx31_3ds_reserve(void)
{
	/* reserve MX31_3DS_CAMERA_BUF_SIZE bytes for mx3-camera */
	mx3_camera_base = memblock_alloc(MX31_3DS_CAMERA_BUF_SIZE,
					 MX31_3DS_CAMERA_BUF_SIZE);
	memblock_free(mx3_camera_base, MX31_3DS_CAMERA_BUF_SIZE);
	memblock_remove(mx3_camera_base, MX31_3DS_CAMERA_BUF_SIZE);
}
Example No. 18
static void __init_memblock adf_memblock_release(struct dma_buf *buf)
{
    struct adf_memblock_pdata *pdata = buf->priv;
    int err = memblock_free(pdata->base, buf->size);

    if (err < 0)
        pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
    kfree(pdata);
}
Example No. 19
/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
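Here memblock_free() is PulseAudio's internal destructor rather than the kernel API: pa_memblock_unref() only reaches it when the last reference goes away. A minimal sketch of the usual ref/unref pairing; consume_block() is a hypothetical caller:

static void consume_block(pa_memblock *b)
{
	pa_memblock_ref(b);	/* keep the block alive while we use it */
	/* ... read the block's data ... */
	pa_memblock_unref(b);	/* the last unref ends up in memblock_free(b) */
}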
Example No. 20
static void __init mx6q_sabresd_reserve(void)
{
	phys_addr_t phys;
	int i, fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(sabresd_fb_data);
	if (fb_array_size > 0 && sabresd_fb_data[0].res_base[0] &&
	    sabresd_fb_data[0].res_size[0]) {
		if (sabresd_fb_data[0].res_base[0] > SZ_2G)
			printk(KERN_INFO"UI Performance downgrade with FB phys address %x!\n",
			    sabresd_fb_data[0].res_base[0]);
		memblock_reserve(sabresd_fb_data[0].res_base[0],
				 sabresd_fb_data[0].res_size[0]);
		memblock_remove(sabresd_fb_data[0].res_base[0],
				sabresd_fb_data[0].res_size[0]);
		sabresd_fb_data[0].late_init = true;
		ipu_data[ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}
	for (i = fb0_reserved; i < fb_array_size; i++)
		if (sabresd_fb_data[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc_base(sabresd_fb_data[i].res_size[0],
						SZ_4K, SZ_2G);
			memblock_remove(phys, sabresd_fb_data[i].res_size[0]);
			sabresd_fb_data[i].res_base[0] = phys;
		}

#ifdef CONFIG_ANDROID_RAM_CONSOLE
	phys = memblock_alloc_base(SZ_1M, SZ_4K, SZ_1G);
	memblock_remove(phys, SZ_1M);
	memblock_free(phys, SZ_1M);
	ram_console_resource.start = phys;
	ram_console_resource.end   = phys + SZ_1M - 1;
#endif

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_2G);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif
}
Example No. 21
File: mmu.c Project: 1314cc/linux
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}
Example No. 22
/**
 * init_alloc_remap - Initialize remap allocator for a NUMA node
 * @nid: NUMA node to initialize the remap allocator for
 *
 * NUMA nodes may end up without any lowmem.  As allocating pgdat and
 * memmap on a different node with lowmem is inefficient, a special
 * remap allocator is implemented which can be used by alloc_remap().
 *
 * For each node, the amount of memory which will be necessary for
 * pgdat and memmap is calculated and two memory areas of the size are
 * allocated - one in the node and the other in lowmem; then, the area
 * in the node is remapped to the lowmem area.
 *
 * As pgdat and memmap must be allocated in lowmem anyway, this
 * doesn't waste lowmem address space; however, the actual lowmem
 * which gets remapped over is wasted.  The amount shouldn't be
 * problematic on machines where this feature will be used.
 *
 * Initialization failure isn't fatal.  alloc_remap() is used
 * opportunistically and the callers will fall back to other memory
 * allocation mechanisms on failure.
 */
void __init init_alloc_remap(int nid, u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = end >> PAGE_SHIFT;
	unsigned long size, pfn;
	u64 node_pa, remap_pa;
	void *remap_va;

	/*
	 * The acpi/srat node info can show hot-add memory zones where
	 * memory could be added but not currently present.
	 */
	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
	       nid, start_pfn, end_pfn);

	/* calculate the necessary space aligned to large page size */
	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size = ALIGN(size, LARGE_PAGE_BYTES);

	/* allocate node memory and the lowmem remap area */
	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
	if (!node_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
			   size, nid);
		return;
	}
	memblock_reserve(node_pa, size);

	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
					  size, LARGE_PAGE_BYTES);
	if (!remap_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
		memblock_free(node_pa, size);
		return;
	}
	memblock_reserve(remap_pa, size);
	remap_va = phys_to_virt(remap_pa);

	/* perform actual remap */
	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
			    (node_pa >> PAGE_SHIFT) + pfn,
			    PAGE_KERNEL_LARGE);

	/* initialize remap allocator parameters */
	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
	node_remap_start_vaddr[nid] = remap_va;
	node_remap_end_vaddr[nid] = remap_va + size;
	node_remap_alloc_vaddr[nid] = remap_va;

	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
}
Example No. 23
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	
}
Example No. 24
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
Example No. 25
/* Reserve a portion of memory for CEU 0 and CEU 1 buffers */
static void __init ms7724se_mv_mem_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;

	phys = memblock_phys_alloc(size, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate CEU0 memory\n");

	memblock_free(phys, size);
	memblock_remove(phys, size);
	ceu0_dma_membase = phys;

	phys = memblock_phys_alloc(size, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate CEU1 memory\n");

	memblock_free(phys, size);
	memblock_remove(phys, size);
	ceu1_dma_membase = phys;
}
Example No. 26
static void __init mx6q_sabrelite_reserve(void)
{
	phys_addr_t phys;

	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, SZ_1G);
		memblock_free(phys, imx6q_gpu_pdata.reserved_mem_size);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
}
Example No. 27
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
Example No. 28
static phys_addr_t _reserve_mem(const char *name, unsigned long size,
				unsigned long align)
{
	unsigned long base;

	size = ALIGN(size, align);
	base = memblock_alloc(size, align);
	memblock_free(base, size);
	memblock_remove(base, size);
	pr_info("msm7x30_surf: reserved memory for %s @ 0x%08lx (%lu bytes)\n",
		name, base, size);
	return base;
}
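Example No. 28 wraps the carve-out idiom of Example No. 1 in a reusable helper. A sketch of how a board file's reserve callback might use it; msm7x30_reserve() and fb_base are hypothetical names:

static phys_addr_t fb_base;

static void __init msm7x30_reserve(void)
{
	/* carve out 4 MiB, 1 MiB aligned, for a framebuffer */
	fb_base = _reserve_mem("framebuffer", SZ_4M, SZ_1M);
}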
Example No. 29
static void __init exynos5_cma_region_reserve(
			struct cma_region *regions_normal,
			struct cma_region *regions_secure)
{
	struct cma_region *reg;
	size_t size_secure = 0, align_secure = 0;
	phys_addr_t paddr = 0;

	for (reg = regions_normal; reg->size != 0; reg++) {
		if ((reg->alignment & (reg->alignment - 1)) || reg->reserved)
			continue;

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && memblock_reserve(reg->start, reg->size) >= 0)
				reg->reserved = 1;
		} else {
			paddr = __memblock_alloc_base(reg->size, reg->alignment,
					MEMBLOCK_ALLOC_ACCESSIBLE);
			if (paddr) {
				reg->start = paddr;
				reg->reserved = 1;
				if (reg->size & (reg->alignment - 1))
					memblock_free(paddr + reg->size,
						ALIGN(reg->size, reg->alignment)
						- reg->size);
			}
		}
	}

	if (regions_secure && regions_secure->size) {
		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		align_secure = reg->alignment;
		BUG_ON(align_secure & (align_secure - 1));

		paddr -= size_secure;
		paddr &= ~(align_secure - 1);

		if (!memblock_reserve(paddr, size_secure)) {
			do {
				reg->start = paddr;
				reg->reserved = 1;
				paddr += reg->size;
			} while (reg-- != regions_secure);
		}
	}
}
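The secure-region placement above is plain arithmetic on the base of the last normal allocation: subtract the total secure size, then round down to the alignment of the last secure region. A worked example with made-up numbers:

/*
 * Illustrative only: if the last normal region was placed at
 * paddr = 0x65000000, the secure regions total 24 MiB (0x01800000) and
 * the last secure region needs 16 MiB alignment, then
 *
 *     paddr -= size_secure;           -> 0x63800000
 *     paddr &= ~(align_secure - 1);   -> 0x63000000
 *
 * and memblock_reserve(0x63000000, 0x01800000) lays the secure regions
 * out contiguously just below the normal ones.
 */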
Example No. 30
File: mmu.c Project: 1314cc/linux
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    block_mappings_allowed(pgtable_alloc)) {
			pud_t old_pud = *pud;
			pud_set_huge(pud, phys, prot);

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}