Example #1
void __init x86_64_start_reservations(char *real_mode_data)
{
	copy_bootdata(__va(real_mode_data));

	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
	}
#endif

	reserve_ebda_region();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

	start_kernel();
}
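The initrd end above is rounded up with PAGE_ALIGN because the boot protocol only guarantees that the start of the ramdisk is page aligned. A minimal stand-alone sketch of that rounding, assuming 4 KiB pages (the kernel's PAGE_ALIGN macro expands to the same mask arithmetic):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* A ramdisk ending mid-page is reserved through the end of its last page. */
	unsigned long ramdisk_image = 0x8000, ramdisk_size = 0x1234;
	unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	printf("reserve %#lx-%#lx\n", ramdisk_image, ramdisk_end); /* 0x8000-0xa000 */
	return 0;
}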
Example #2
static void __init parse_memmap_one(char *p)
{
	char *oldp;
	unsigned long start_at, mem_size;

	if (!p)
		return;

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return;

	switch (*p) {
	case '@':
		start_at = memparse(p + 1, &p);
		memblock_add(start_at, mem_size);
		break;

	case '$':
		start_at = memparse(p + 1, &p);
		memblock_reserve(start_at, mem_size);
		break;

	case 0:
		memblock_reserve(mem_size, -mem_size);
		break;

	default:
		pr_warn("Unrecognized memmap syntax: %s\n", p);
		break;
	}
}
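For reference, the command-line forms this parser accepts are memmap=size@start (register usable RAM), memmap=size$start (reserve), and a bare memmap=size (reserve everything from that address upward, via the wrapping -mem_size length). The stand-alone sketch below mimics memparse()'s size-suffix handling (k/M/G only) so the syntax can be exercised outside the kernel; parse_size is a hypothetical user-space stand-in, not a kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical user-space stand-in for the kernel's memparse():
 * parse a number with an optional k/M/G suffix and advance *endp. */
static unsigned long long parse_size(const char *s, char **endp)
{
	unsigned long long v = strtoull(s, endp, 0);

	switch (**endp) {
	case 'G': case 'g':
		v <<= 10;
		/* fall through */
	case 'M': case 'm':
		v <<= 10;
		/* fall through */
	case 'K': case 'k':
		v <<= 10;
		(*endp)++;
	}
	return v;
}

int main(void)
{
	char *p;
	unsigned long long mem_size = parse_size("512M@0x200000", &p);

	if (*p == '@') {
		unsigned long long start_at = parse_size(p + 1, &p);
		printf("add %llu bytes at %#llx\n", mem_size, start_at);
	}
	return 0;
}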
Example #3
int __init __sipc_reserve_memblock(void)
{
	uint32_t smem_size = 0;

#ifdef CONFIG_SIPC_TD
	if (memblock_reserve(CPT_START_ADDR, CPT_TOTAL_SIZE))
		return -ENOMEM;
	smem_size += CPT_SMEM_SIZE;
#endif

#ifdef CONFIG_SIPC_WCDMA
	if (memblock_reserve(CPW_START_ADDR, CPW_TOTAL_SIZE))
		return -ENOMEM;
	smem_size += CPW_SMEM_SIZE;
#endif

#ifdef CONFIG_SIPC_WCN
	if (memblock_reserve(WCN_START_ADDR, WCN_TOTAL_SIZE))
		return -ENOMEM;
	smem_size += WCN_SMEM_SIZE;
#endif

	if (memblock_reserve(SIPC_SMEM_ADDR, smem_size))
		return -ENOMEM;

	return 0;
}
Example #4
void __init arm_dt_memblock_reserve(void)
{
	u64 *reserve_map, base, size;

	if (!initial_boot_params)
		/*! 20131005 return if there is no device tree region */
		return;

	/* Reserve the dtb region */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));
	/*! 20131005
	 * be32_to_cpu: converts big-endian to the CPU's endianness.
	 * Marks the device tree region as reserved.
	 */

	/*
	 * Process the reserve map.  This will probably overlap the initrd
	 * and dtb locations which are already reserved, but overlapping
	 * doesn't hurt anything
	 */
	reserve_map = ((void*)initial_boot_params) +
			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	/*! 20131005
	 * off_mem_rsvmap: offset of the memory reserve map.
	 * Adds the reserved regions defined in the device tree to the
	 * reserved regions list.
	 */
	while (1) {
		base = be64_to_cpup(reserve_map++);
		size = be64_to_cpup(reserve_map++);
		if (!size)
			break;
		memblock_reserve(base, size);
	}
}
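The reserve map walked at the end of this function is, per the flattened device tree format, a flat array of big-endian (base, size) u64 pairs terminated by an entry whose size is 0. A sketch of such a map as it would sit in memory (addresses invented for illustration):

/* Illustrative FDT memory reserve map: two entries plus the
 * zero-size terminator, all stored big-endian per the FDT spec. */
static const __be64 example_rsvmap[] = {
	cpu_to_be64(0x40000000), cpu_to_be64(0x00100000),	/* 1 MiB @ 1 GiB */
	cpu_to_be64(0x48000000), cpu_to_be64(0x00004000),	/* 16 KiB */
	cpu_to_be64(0),		 cpu_to_be64(0),		/* terminator */
};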
Example #5
void __init x86_64_start_reservations(char *real_mode_data)
{
	copy_bootdata(__va(real_mode_data)); /* copy into &boot_params */

	/* Reserve the kernel region from _text to __bss_stop in memblock. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));

#ifdef CONFIG_BLK_DEV_INITRD
	/* Reserve INITRD */
	/* Also reserve the INITRD (init ramdisk) region loaded alongside the kernel. */
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		/* Assume only end is not page aligned */
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
	}
#endif

	reserve_ebda_region();		/* Reserve the EBDA memory region. */

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */
	/* Memory outside the regions reserved via int 15h and the
	 * kernel/BIOS/boot loader code areas is not reserved. */

	start_kernel();
}
Example #6
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero reserve page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
Example #7
void __init arm_dt_memblock_reserve(void)
{
	u64 *reserve_map, base, size;

	if (!initial_boot_params)
		return;

	/* Reserve the dtb region */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));

	/*
	 * Process the reserve map.  This will probably overlap the initrd
	 * and dtb locations which are already reserved, but overlapping
	 * doesn't hurt anything
	 */
	reserve_map = ((void*)initial_boot_params) +
			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	while (1) {
		base = be64_to_cpup(reserve_map++);
		size = be64_to_cpup(reserve_map++);
		if (!size)
			break;
		memblock_reserve(base, size);
	}
}
Example #8
File: init.c Project: 01org/prd
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;

	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/*
	 * We do it here so that memory is correctly instantiated even if a
	 * "mem=xxx" cmdline override is given and/or the DT has a memory
	 * node. Each causes an update to @arc_mem_sz, and we finally add
	 * the memory here.
	 */
	memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);

	/*------------- externs in mm need setting up ---------------*/

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem (no HIGHMEM yet for ARC port) */
	max_low_pfn = max_pfn = PFN_DOWN(end_mem);

	max_mapnr = max_low_pfn - min_low_pfn;

	/*------------- reserve kernel image -----------------------*/
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	/*------------- reserve initrd image -----------------------*/
	if (initrd_start)
		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif

	memblock_dump_all();

	/*-------------- node setup --------------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	zones_size[ZONE_NORMAL] = max_mapnr;

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    NULL);		/* NO holes */

	high_memory = (void *)end_mem;
}
Example #9
/**
 * init_alloc_remap - Initialize remap allocator for a NUMA node
 * @nid: NUMA node to initialize remap allocator for
 *
 * NUMA nodes may end up without any lowmem.  As allocating pgdat and
 * memmap on a different node with lowmem is inefficient, a special
 * remap allocator is implemented which can be used by alloc_remap().
 *
 * For each node, the amount of memory needed for pgdat and memmap is
 * calculated and two memory areas of that size are allocated - one in
 * the node and the other in lowmem; then, the area in the node is
 * remapped to the lowmem area.
 *
 * As pgdat and memmap must be allocated in lowmem anyway, this
 * doesn't waste lowmem address space; however, the actual lowmem
 * which gets remapped over is wasted.  The amount shouldn't be
 * problematic on machines where this feature will be used.
 *
 * Initialization failure isn't fatal.  alloc_remap() is used
 * opportunistically and the callers will fall back to other memory
 * allocation mechanisms on failure.
 */
void __init init_alloc_remap(int nid, u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = end >> PAGE_SHIFT;
	unsigned long size, pfn;
	u64 node_pa, remap_pa;
	void *remap_va;

	/*
	 * The acpi/srat node info can show hot-add memory zones where
	 * memory could be added but is not currently present.
	 */
	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
	       nid, start_pfn, end_pfn);

	/* calculate the necessary space aligned to large page size */
	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size = ALIGN(size, LARGE_PAGE_BYTES);

	/* allocate node memory and the lowmem remap area */
	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
	if (!node_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
			   size, nid);
		return;
	}
	memblock_reserve(node_pa, size);

	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
					  size, LARGE_PAGE_BYTES);
	if (!remap_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
		memblock_free(node_pa, size);
		return;
	}
	memblock_reserve(remap_pa, size);
	remap_va = phys_to_virt(remap_pa);

	/* perform actual remap */
	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
			    (node_pa >> PAGE_SHIFT) + pfn,
			    PAGE_KERNEL_LARGE);

	/* initialize remap allocator parameters */
	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
	node_remap_start_vaddr[nid] = remap_va;
	node_remap_end_vaddr[nid] = remap_va + size;
	node_remap_alloc_vaddr[nid] = remap_va;

	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
}
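For context, the alloc_remap() bump allocator that consumes the parameters initialized above looked roughly like this in the arch/x86/mm/numa_32.c of the same era (reconstructed from memory; treat it as a sketch rather than verbatim source):

void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	/* No remap area for this node, or the area is exhausted;
	 * callers fall back to ordinary lowmem allocations. */
	if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}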
Example #10
static int __ddr_training_memblock(void)
{
	memblock_reserve(CONFIG_PHYS_OFFSET, PAGE_SIZE);
	if (dram_cs_num == 2) {
		if (dram_cs0_size == 0) {
			pr_err("dram_cs0_size = 0, error, please check the dram size\n");
			return -ENOMEM;
		}
		memblock_reserve(CONFIG_PHYS_OFFSET + dram_cs0_size, PAGE_SIZE);
	}
	return 0;
}
Example #11
static void __init exynos5_cma_region_reserve(
			struct cma_region *regions_normal,
			struct cma_region *regions_secure)
{
	struct cma_region *reg;
	size_t size_secure = 0, align_secure = 0;
	phys_addr_t paddr = 0;

	for (reg = regions_normal; reg->size != 0; reg++) {
		if ((reg->alignment & (reg->alignment - 1)) || reg->reserved)
			continue;

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && memblock_reserve(reg->start, reg->size) >= 0)
				reg->reserved = 1;
		} else {
			paddr = __memblock_alloc_base(reg->size, reg->alignment,
					MEMBLOCK_ALLOC_ACCESSIBLE);
			if (paddr) {
				reg->start = paddr;
				reg->reserved = 1;
				if (reg->size & (reg->alignment - 1))
					memblock_free(paddr + reg->size,
						ALIGN(reg->size, reg->alignment)
						- reg->size);
			}
		}
	}

	if (regions_secure && regions_secure->size) {
		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		align_secure = reg->alignment;
		BUG_ON(align_secure & (align_secure - 1));

		paddr -= size_secure;
		paddr &= ~(align_secure - 1);

		if (!memblock_reserve(paddr, size_secure)) {
			do {
				reg->start = paddr;
				reg->reserved = 1;
				paddr += reg->size;
			} while (reg-- != regions_secure);
		}
	}
}
Example #12
void __init tegra_ventana_reserve(void)
{
	if (memblock_reserve(0x0, 4096) < 0)
		pr_warn("Cannot reserve first 4K of memory for safety\n");

	tegra_reserve(SZ_256M, SZ_8M, SZ_16M);
}
Example #13
void __init bitfix_reserve(void)
{
	int i;
	int ret;

	/*
	 * We'll auto-enable if needed.  However we still allocate memory even
	 * if we detect we're not needed.  That allows us to enable this at
	 * runtime for testing.
	 */
	bitfix_enabled = bitfix_is_needed();

	/* We need pm_check enabled */
	if (bitfix_enabled) {
		pr_info("%s: Detected firmware that needs bitfix\n", __func__);
		s3c_pm_check_set_enable(true);
	}

	for (i = 0; i < UPPER_LOOPS; i++) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		bool was_reserved;

		pr_debug("%s: trying to reserve %08x@%08x\n",
			__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
		was_reserved = memblock_is_region_reserved(xor_superchunk_addr,
			SUPERCHUNK_SIZE);
		if (was_reserved) {
			pr_err("%s: memory already reserved %08x@%08x\n",
				__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
			goto error;
		}

		ret = memblock_reserve(xor_superchunk_addr, SUPERCHUNK_SIZE);
		if (ret) {
			pr_err("%s: memblock_reserve fail (%d) %08x@%08x\n",
				__func__, ret, SUPERCHUNK_SIZE,
				xor_superchunk_addr);
			goto error;
		}
	}

	return;
error:
	/*
	 * If we detected that we needed bitfix code and we couldn't init
	 * then that's a serious problem.  Dump stack so it's pretty obvious.
	 */
	WARN_ON(true);

	for (i--; i >= 0; i--) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		ret = memblock_free(xor_superchunk_addr, SUPERCHUNK_SIZE);
		WARN_ON(ret);
	}
	bitfix_enabled = false;

	__memblock_dump_all();
}
Example #14
File: init.c Project: 59psi/linux
/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		/* Truncated in the original listing; remainder restored
		 * from arch/x86/mm/init.c of the same era. */
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
		       pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}
Example #15
void __init wii_memory_fixups(void)
{
	struct memblock_region *p = memblock.memory.regions;

	/*
	 * This is part of a workaround to allow the use of two
	 * discontinuous RAM ranges on the Wii, even if this is
	 * currently unsupported on 32-bit PowerPC Linux.
	 *
	 * We coalesce the two memory ranges of the Wii into a
	 * single range, then create a reservation for the "hole"
	 * between both ranges.
	 */

	BUG_ON(memblock.memory.cnt != 2);
	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));

	/* trim unaligned tail */
	memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
			(phys_addr_t)ULLONG_MAX);

	/* determine hole, add & reserve them */
	wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
	wii_hole_size = p[1].base - wii_hole_start;
	memblock_add(wii_hole_start, wii_hole_size);
	memblock_reserve(wii_hole_start, wii_hole_size);

	BUG_ON(memblock.memory.cnt != 1);
	__memblock_dump_all();

	/* allow ioremapping the address space in the hole */
	__allow_ioremap_reserved = 1;
}
Example #16
/**
 * numa_alloc_distance - create a new NUMA distance table
 */
static int __init numa_alloc_distance(void)
{
	size_t size;
	u64 phys;
	int i, j;

	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
				      size, PAGE_SIZE);
	if (WARN_ON(!phys))
		return -ENOMEM;

	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = nr_node_ids;

	/* fill with the default distances */
	for (i = 0; i < numa_distance_cnt; i++)
		for (j = 0; j < numa_distance_cnt; j++)
			numa_distance[i * numa_distance_cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;

	pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);

	return 0;
}
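Since the table is stored flattened, a node_distance()-style lookup indexes it as from * cnt + to. A minimal sketch of such an accessor over the table built above, assuming out-of-range node ids fall back to the defaults (mirroring what x86's __node_distance does):

static int sketch_node_distance(int from, int to)
{
	/* Nodes outside the table get the default local/remote values. */
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;

	return numa_distance[from * numa_distance_cnt + to];
}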
Example #17
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = memblock_find_in_range_node(size, align, goal, limit, nid);
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
Example #18
/*
 * Reserve the memory associated with the Memory Attributes configuration
 * table, if it exists.
 */
int __init efi_memattr_init(void)
{
	efi_memory_attributes_table_t *tbl;

	if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
		return 0;

	tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
	if (!tbl) {
		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
		       efi.mem_attr_table);
		return -ENOMEM;
	}

	if (tbl->version > 1) {
		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
			tbl->version);
		goto unmap;
	}

	tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
	memblock_reserve(efi.mem_attr_table, tbl_size);

unmap:
	early_memunmap(tbl, sizeof(*tbl));
	return 0;
}
Example #19
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
	 * is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}
Example #20
/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map that is about to be put in use.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}
Example #21
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
Example #22
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					       u64 goal, u64 limit)
{
	phys_addr_t addr;
	void *ptr;
	unsigned long flags = choose_memblock_flags();

	if (WARN_ON_ONCE(limit > memblock.current_limit)) {
		limit = memblock.current_limit;
	}

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid, flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	return ptr;
}
Example #23
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size  = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
Example #24
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;
	ulong flags = choose_memblock_flags();

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

again:
	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
					   flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}
	if (!addr)
		return NULL;

	if (memblock_reserve(addr, size))
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}
Example #25
static void msm_reserve_sdlog_memory(void)
{
	/* Reserve application memory for sdlog */
	int ret;
	struct boot_shared_imem_cookie_type *boot_shared_imem_ptr =
		(struct boot_shared_imem_cookie_type *)MSM_IMEM_BASE;

	if (boot_shared_imem_ptr->app_mem_reserved != SDLOG_MEM_RESERVED_COOKIE) {
		pr_info("sdlog is disabled\n");
		return;
	}

	pr_info("sdlog is enabled, reserving 16M buffer at 0x%x\n",
		MSM_SDLOG_PHYS);
	ret = memblock_reserve(MSM_SDLOG_PHYS, MSM_SDLOG_SIZE);
	if (ret) {
		pr_err("sdlog memory reservation failed, disabling sdlog\n");
		boot_shared_imem_ptr->app_mem_reserved = 0;
	}
}
Example #26
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}
Example #27
/*
 * The UEFI specification makes it clear that the operating system is
 * free to do whatever it wants with boot services code after
 * ExitBootServices() has been called. Ignoring this recommendation, a
 * significant number of EFI implementations continue calling into boot
 * services code (SetVirtualAddressMap). In order to work around such
 * buggy implementations, we reserve the boot services region during
 * EFI init and make sure it stays executable. Then, after
 * SetVirtualAddressMap(), it is discarded.
 */
void __init efi_reserve_boot_services(void)
{
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;
		/* Only reserve where possible:
		 * - Not within any already allocated areas
		 * - Not over any memory area (really needed, if above?)
		 * - Not within any part of the kernel
		 * - Not the bios reserved area
		*/
		if ((start + size > __pa_symbol(_text)
				&& start <= __pa_symbol(_end)) ||
			!e820_all_mapped(start, start+size, E820_RAM) ||
			memblock_is_region_reserved(start, size)) {
			/* Could not reserve, skip it */
			md->num_pages = 0;
			memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
				     start, start+size-1);
		} else
			memblock_reserve(start, size);
	}
}
Example #28
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size  = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);

	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
Example #29
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for zero size; that should happen rarely and
	 * would only write a new entry that is regarded as unused due to
	 * its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
Example #30
static void __init lge_add_persist_ram_devices(void)
{
	int ret;
	phys_addr_t base;
	phys_addr_t size;

	size = lge_ramoops_data.mem_size;

	/* find a 1M section from highmem */
	base = memblock_find_in_range(memblock.current_limit,
			MEMBLOCK_ALLOC_ANYWHERE, size, SECTION_SIZE);
	if (!base) {
		/* find a 1M section from lowmem */
		base = memblock_find_in_range(0,
				MEMBLOCK_ALLOC_ACCESSIBLE,
				size, SECTION_SIZE);
		if (!base) {
			pr_err("%s: not enough membank\n", __func__);
			return;
		}
	}

	pr_info("ramoops: reserved 1 MiB at 0x%08x\n", (int)base);

	lge_ramoops_data.mem_address = base;
	ret = memblock_reserve(lge_ramoops_data.mem_address,
			lge_ramoops_data.mem_size);

	if (ret)
		pr_err("%s: failed to initialize persistent ram\n", __func__);
}