void __init setup_log_buf(int early)
{
	unsigned long flags;
	unsigned start, dest_idx, offset;
	char *new_log_buf;
	int free;

	if (!new_log_buf_len)
		return;

	if (early) {
		unsigned long mem;

		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
		if (!mem)
			return;
		new_log_buf = __va(mem);
	} else {
		new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

	raw_spin_lock_irqsave(&logbuf_lock, flags);
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
	free = __LOG_BUF_LEN - log_end;

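	/*
	 * Copy everything still held in the static __log_buf ring into the
	 * new buffer, starting from the oldest data the console or syslog
	 * readers have not yet consumed.
	 */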
	offset = start = min(con_start, log_start);
	dest_idx = 0;
	while (start != log_end) {
		unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);

		log_buf[dest_idx] = __log_buf[log_idx_mask];
		start++;
		dest_idx++;
	}
	log_start -= offset;
	con_start -= offset;
	log_end -= offset;
	raw_spin_unlock_irqrestore(&logbuf_lock, flags);

	pr_info("log_buf_len: %d\n", log_buf_len);
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
Example #2
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps  */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursively enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}
Example #3
void __init exc_lvl_early_init(void)
{
	unsigned int i, hw_cpu;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
	for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
		hw_cpu = get_hard_smp_processor_id(i);
#else
		hw_cpu = 0;
#endif

		critirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
		dbgirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		mcheckirq_ctx[hw_cpu] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
	}
}
Example #4
struct pci_controller * __init
alloc_pci_controller(void)
{
	struct pci_controller *hose;

	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
	if (!hose)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*hose));

	*hose_tail = hose;
	hose_tail = &hose->next;

	return hose;
}
Example #5
void * __init prom_early_alloc(unsigned long size)
{
	unsigned long paddr = memblock_alloc(size, SMP_CACHE_BYTES);
	void *ret;

	if (!paddr) {
		prom_printf("prom_early_alloc(%lu) failed\n", size);
		prom_halt();
	}

	ret = __va(paddr);
	memset(ret, 0, size);
	prom_early_allocated += size;

	return ret;
}
Example #6
void __init efi_fake_memmap(void)
{
	int new_nr_map = efi.memmap.nr_map;
	efi_memory_desc_t *md;
	phys_addr_t new_memmap_phy;
	void *new_memmap;
	int i;

	if (!nr_fake_mem)
		return;

	/* count up the number of EFI memory descriptors */
	for (i = 0; i < nr_fake_mem; i++) {
		for_each_efi_memory_desc(md) {
			struct range *r = &fake_mems[i].range;

			new_nr_map += efi_memmap_split_count(md, r);
		}
	}

	/* allocate memory for new EFI memmap */
	new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
					PAGE_SIZE);
	if (!new_memmap_phy)
		return;

	/* create new EFI memmap */
	new_memmap = early_memremap(new_memmap_phy,
				    efi.memmap.desc_size * new_nr_map);
	if (!new_memmap) {
		memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
		return;
	}

	for (i = 0; i < nr_fake_mem; i++)
		efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);

	/* swap into new EFI memmap */
	early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);

	efi_memmap_install(new_memmap_phy, new_nr_map);

	/* print new EFI memmap */
	efi_print_memmap();
}
Example #7
void __init ti81xx_pcie_mem_reserve_sdram_memblock(void)
{

	phys_addr_t paddr;

	if (!ti81xx_def_sdram_pcie_mem_size)
		return;

	paddr = memblock_alloc(ti81xx_def_sdram_pcie_mem_size, SZ_1M);
	if (!paddr) {
		pr_err("%s: failed to reserve %x bytes\n",
			__func__, ti81xx_def_sdram_pcie_mem_size);
		return;
	}
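	/*
	 * memblock_alloc() is only used here to find a suitably aligned
	 * range; the reservation is dropped again and the range is removed
	 * from the kernel memory map so it remains a dedicated PCIe carveout.
	 */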
	memblock_free(paddr, ti81xx_def_sdram_pcie_mem_size);
	memblock_remove(paddr, ti81xx_def_sdram_pcie_mem_size);
	ti81xx_ep_mem_start = paddr;
	ti81xx_ep_mem_size = ti81xx_def_sdram_pcie_mem_size;
}
Example #8
void __init omap_dsp_reserve_sdram_memblock(void)
{
	phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
	phys_addr_t paddr;

	if (!size)
		return;

	paddr = memblock_alloc(size, SZ_1M);
	if (!paddr) {
		pr_err("%s: failed to reserve %x bytes\n",
				__func__, size);
		return;
	}
	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_dsp_phys_mempool_base = paddr;
}
Example #9
static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

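	/*
	 * The handle embeds an mdesc_hdr; size the allocation so that header
	 * is replaced by the full mdesc_size of machine-description data,
	 * then round up to whole pages.
	 */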
	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}
Example #10
u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
			 u64 (*nid_range)(u64 start, u64 end, int *nid))
{
	struct memblock_region *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

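	/*
	 * Try a node-local allocation from each memory region first; if none
	 * succeeds, fall back to an ordinary allocation anywhere in memory.
	 */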
	for (i = 0; i < mem->cnt; i++) {
		u64 ret = memblock_alloc_nid_region(&mem->region[i],
					       nid_range,
					       size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return memblock_alloc(size, align);
}
Example #11
/*
 * Initialise the coherent DMA memory allocator using the given uncached region.
 */
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
	if (!size)
		return;

	printk(KERN_INFO
	       "Coherent memory (DMA) region start=0x%x size=0x%x\n",
	       start, size);

	dma_base = start;
	dma_size = size;

	/* allocate bitmap */
	dma_pages = dma_size >> PAGE_SHIFT;
	if (dma_size & (PAGE_SIZE - 1))
		++dma_pages;

	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
				    sizeof(long));
}
Example #12
void __init s3c64xx_reserve_bootmem(void)
{
    struct s3c_media_device *mdev;
    int i;

    for (i = 0; i < sizeof(s3c_mdevs) / sizeof(s3c_mdevs[0]); i++) {
        mdev = &s3c_mdevs[i];
        if (mdev->memsize > 0) {
#if 0
            mdev->paddr = virt_to_phys(alloc_bootmem_low(mdev->memsize));
#else
            mdev->paddr = memblock_alloc(mdev->memsize, PAGE_SIZE);
#endif
            printk(KERN_INFO
                   "s3c64xx: %lu bytes SDRAM reserved "
                   "for %s at 0x%08x\n",
                   (unsigned long) mdev->memsize,
                   mdev->name, mdev->paddr);
        }
    }
}
Example #13
void __init omap_ipu_reserve_sdram_memblock(void)
{
	/* currently handles only ipu. dsp will be handled later...*/
	u32 size = CONFIG_OMAP_REMOTEPROC_MEMPOOL_SIZE;
	phys_addr_t paddr;

	if (!size)
		return;

	paddr = memblock_alloc(size, SZ_1M);
	if (!paddr) {
		pr_err("%s: failed to reserve %x bytes\n",
				__func__, size);
		return;
	}
	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_ipu_phys_mempool_base = paddr;
	omap_ipu_phys_mempool_size = size;
}
Example #14
static void __init mx6_evk_reserve(void)
{
	/* shared by the GPU and ION reservation blocks below */
	phys_addr_t phys __maybe_unused;

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)

	if (imx6q_gpu_pdata.reserved_mem_size) {
		phys = memblock_alloc_base(imx6q_gpu_pdata.reserved_mem_size,
					   SZ_4K, MEMBLOCK_ALLOC_ACCESSIBLE);
		memblock_remove(phys, imx6q_gpu_pdata.reserved_mem_size);
		imx6q_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (imx_ion_data.heaps[0].size) {
		phys = memblock_alloc(imx_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, imx_ion_data.heaps[0].size);
		imx_ion_data.heaps[0].base = phys;
	}
#endif
}
Example #15
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned,  otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
	unsigned long start_mem = PAGE_ALIGN(memory_start);
	unsigned long end_mem   = memory_end & PAGE_MASK;

	pr_debug("start_mem is %#lx\nvirtual_end is %#lx\n",
		 start_mem, end_mem);

	/*
	 * Initialize the bad page table and bad page to point
	 * to a couple of allocated pages.
	 */
	empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs(USER_DS);

	pr_debug("before free_area_init\n");

	pr_debug("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		 start_mem, end_mem);

	{
		unsigned long zones_size[MAX_NR_ZONES] = {0, };

		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		free_area_init(zones_size);
	}
}
Example #16
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}
}
Example #17
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(phys_addr_t limit)
{
	unsigned long long crash_size = 0, crash_base = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, limit,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base == 0) {
		crash_base = memblock_alloc(crash_size, 1 << 20);
		if (crash_base == 0) {
			pr_warn("crashkernel allocation failed (size:%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. Sanity check */
		if (!memblock_is_region_memory(crash_base, crash_size) ||
			memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("crashkernel= has wrong address or size\n");
			return;
		}

		if (memblock_reserve(crash_base, crash_size)) {
			pr_warn("crashkernel reservation failed - out of memory\n");
			return;
		}
	}

	pr_info("Reserving %lldMB of memory at %lldMB for crashkernel\n",
		crash_size >> 20, crash_base >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
Example #18
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}
Example #19
void __init omap_dsp_reserve_sdram_memblock(void)
{
#if defined(CONFIG_OMAP_REMOTE_PROC_DSP)
	phys_addr_t size = CONFIG_OMAP_REMOTEPROC_MEMPOOL_SIZE_DSP;
#else
	phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
#endif
	phys_addr_t paddr;

	if (!size)
		return;

	paddr = memblock_alloc(size, SZ_1M);
	if (!paddr) {
		pr_err("%s: failed to reserve %x bytes\n",
				__func__, size);
		return;
	}
	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_dsp_phys_mempool_base = paddr;
	omap_dsp_phys_mempool_size = size;
}
Example #20
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}
Example #21
/* Signature assumed: the body uses 'verbose' and swiotlb_init_with_tbl(),
 * matching the arch swiotlb_init() entry point this snippet was taken from. */
void __init swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart = 0;
	unsigned long bytes;
	phys_addr_t start;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	memblock_set_current_limit(0xffffffff);	/* 4GB */
	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (start) {
		vstart = __va(start);
	} else {
		pr_err("iotlb allocation fail\n");
	}
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free(io_tlb_start,
				PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}
Example #22
static void __init wand_reserve(void)
{
	phys_addr_t phys;
	phys_addr_t total_mem = 0;
	int i;
	struct meminfo *mi = &meminfo;

	for (i = 0; i < mi->nr_banks; i++)
		total_mem += mi->bank[i].size;

#if 0
	int fb0_reserved = 0, fb_array_size;

	/*
	 * Reserve primary framebuffer memory if its base address
	 * is set by kernel command line.
	 */
	fb_array_size = ARRAY_SIZE(wand_fb_pdata);
	if (fb_array_size > 0 && wand_fb_pdata[0].res_base[0] &&
	    wand_fb_pdata[0].res_size[0]) {
		if (wand_fb_pdata[0].res_base[0] > SZ_2G)
			printk(KERN_INFO"UI Performance downgrade with FB phys address %x!\n",
			    wand_fb_pdata[0].res_base[0]);
		memblock_reserve(wand_fb_pdata[0].res_base[0],
				 wand_fb_pdata[0].res_size[0]);
		memblock_remove(wand_fb_pdata[0].res_base[0],
				wand_fb_pdata[0].res_size[0]);
		wand_fb_pdata[0].late_init = true;
		wand_ipu_data[wand_ldb_data.ipu_id].bypass_reset = true;
		fb0_reserved = 1;
	}
	for (i = fb0_reserved; i < fb_array_size; i++)
		if (wand_fb_pdata[i].res_size[0]) {
			/* Reserve for other background buffer. */
			phys = memblock_alloc_base(wand_fb_pdata[i].res_size[0],
						SZ_4K, total_mem);
			memblock_remove(phys, wand_fb_pdata[i].res_size[0]);
			wand_fb_pdata[i].res_base[0] = phys;
		}
#endif

#ifdef CONFIG_ANDROID_RAM_CONSOLE
	phys = memblock_alloc_base(SZ_1M, SZ_4K, total_mem);
	printk("EDWARD :  ram console init at phys 0x%x\n",phys);
	memblock_remove(phys, SZ_1M);
	memblock_free(phys, SZ_1M);
	ram_console_resource.start = phys;
	ram_console_resource.end   = phys + SZ_1M - 1;
#endif

#if defined(CONFIG_MXC_GPU_VIV) || defined(CONFIG_MXC_GPU_VIV_MODULE)
	if (wand_gpu_pdata.reserved_mem_size) {
		printk("EDWARD : GPU_Reserved Memory equals to %d\n",wand_gpu_pdata.reserved_mem_size);
		phys = memblock_alloc_base(wand_gpu_pdata.reserved_mem_size,
					   SZ_4K, total_mem);
		printk("EDWARD :  gpumem init at phys 0x%x\n",phys);
		memblock_remove(phys, wand_gpu_pdata.reserved_mem_size);
		wand_gpu_pdata.reserved_mem_base = phys;
	}
#endif

#if defined(CONFIG_ION)
	if (wand_ion_data.heaps[0].size) {
		phys = memblock_alloc(wand_ion_data.heaps[0].size, SZ_4K);
		memblock_remove(phys, wand_ion_data.heaps[0].size);
		wand_ion_data.heaps[0].base = phys;
	}
#endif
}
Example #23
void __init setup_log_buf(int early)
{
	unsigned long flags;
	unsigned start, dest_idx, offset;
	char *new_log_buf;
	int free;

	if (!new_log_buf_len) {
#ifdef CONFIG_KERNEL_PANIC_DUMP
		printk("*********************************************\n");
		printk("**************setup_log_buf**  RETURN !!!!!!!\n");
		printk("*********************************************\n");
		b_log_setup = 1;
#endif
		return;
	}

	if (early) {
		unsigned long mem;

		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
		if (mem == MEMBLOCK_ERROR)
			return;
		new_log_buf = __va(mem);
	} else {
		new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

	spin_lock_irqsave(&logbuf_lock, flags);
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
	free = __LOG_BUF_LEN - log_end;

	offset = start = min(con_start, log_start);
	dest_idx = 0;
	while (start != log_end) {
		unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);
#if 1
//#ifndef CONFIG_KERNEL_PANIC_DUMP
		log_buf[dest_idx] = __log_buf[log_idx_mask];
#else
		log_buf[dest_idx] = panic_dump_buffer(0)[log_idx_mask];
#endif
		start++;
		dest_idx++;
	}
	log_start -= offset;
	con_start -= offset;
	log_end -= offset;
	spin_unlock_irqrestore(&logbuf_lock, flags);

	pr_info("log_buf_len: %d\n", log_buf_len);
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
Example #24
static void __init msm9615_reserve(void)
{
	msm_pm_boot_pdata.p_addr = memblock_alloc(SZ_8, SZ_64K);
}
Example #25
		if (strcmp(panels[i].mode.name, str) == 0) {
			current_panel_idx = i;
			break;
		}
	}
	return 0;
}

early_param("panel", panel_setup);

static inline void preallocate_fb(struct vt8500fb_platform_data *p,
				  unsigned long align) {
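	/*
	 * Frame buffer size: xres * yres * 4 bytes, shifted down for
	 * smaller depths (>>1 for 16 bpp, >>2 for 8 bpp, and so on).
	 */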
	p->video_mem_len = (p->xres_virtual * p->yres_virtual * 4) >>
			(p->bpp > 16 ? 0 : (p->bpp > 8 ? 1 :
					(8 / p->bpp) + 1));
	p->video_mem_phys = (unsigned long)memblock_alloc(p->video_mem_len,
							  align);
	p->video_mem_virt = phys_to_virt(p->video_mem_phys);
}

struct platform_device vt8500_device_uart0 = {
	.name		= "vt8500_serial",
	.id		= 0,
};

struct platform_device vt8500_device_uart1 = {
	.name		= "vt8500_serial",
	.id		= 1,
};

struct platform_device vt8500_device_uart2 = {
	.name		= "vt8500_serial",
	.id		= 2,
};
Example #26
void __init setup_log_buf(int early)
{
	unsigned long flags;
	unsigned start, dest_idx, offset;
	char *new_log_buf;
	int free;

	if (!new_log_buf_len) {
#if defined(CONFIG_SEC_DEBUG)
		//{{ Mark for GetLog
		sec_getlog_supply_kloginfo(__log_buf);
		//}} Mark for GetLog
#endif
		return;
	}
	if (early) {
		unsigned long mem;

		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
		if (mem == MEMBLOCK_ERROR)
			return;
		new_log_buf = __va(mem);
	} else {
		new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
	}

	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %ld bytes not available\n",
			new_log_buf_len);
		return;
	}

	spin_lock_irqsave(&logbuf_lock, flags);
	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;
	free = __LOG_BUF_LEN - log_end;

	offset = start = min(con_start, log_start);
	dest_idx = 0;
	while (start != log_end) {
		unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);

		log_buf[dest_idx] = __log_buf[log_idx_mask];
		start++;
		dest_idx++;
	}
	log_start -= offset;
	con_start -= offset;
	log_end -= offset;
	spin_unlock_irqrestore(&logbuf_lock, flags);

#if defined(CONFIG_SEC_DEBUG)
	//{{ Mark for GetLog
	sec_getlog_supply_kloginfo(__log_buf);
	//}} Mark for GetLog
#endif

	pr_info("log_buf_len: %d\n", log_buf_len);
	pr_info("early log buf free: %d(%d%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
}
Example #27
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;


#ifdef CONFIG_DISCONTIGMEM

	arena = memblock_alloc_node(sizeof(*arena), align, nid);
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
		if (!arena)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*arena));
	}

	/* the PTE array needs mem_size bytes, matching the fallback below */
	arena->ptes = memblock_alloc_node(mem_size, align, nid);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = memblock_alloc(mem_size, align);
		if (!arena->ptes)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, mem_size, align);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
Example #28
u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return memblock_alloc(size, align);
}