Example #1
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
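All of these examples build on the page-frame-number helpers from include/linux/pfn.h: PFN_UP() rounds a byte address up to the next page frame number, PFN_DOWN() rounds down, and PFN_PHYS() converts a pfn back to a physical address. A minimal sketch of their conventional definitions, together with the iterator macro that the function above underpins (based on mm/memblock.c; exact forms vary by kernel version):

#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round up */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round down */
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)

#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))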
Example #2
static void __init mem_prof_init(void)
{
	unsigned long start_pfn, holes, free_pfn;
	const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
	unsigned long ul;
	mem_prof_t *mp;

	/* Node#0 SDRAM */
	mp = &mem_prof[0];
	mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
	mp->pages = PFN_DOWN(memory_end - memory_start);
	mp->holes = 0;
	mp->free_pfn = PFN_UP(__pa(_end));

	/* Node#1 internal SRAM */
	mp = &mem_prof[1];
	start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
	holes = 0;
	if (start_pfn & (zone_alignment - 1)) {
		ul = zone_alignment;
		while (start_pfn >= ul)
			ul += zone_alignment;

		start_pfn = ul - zone_alignment;
		holes = free_pfn - start_pfn;
	}

	mp->start_pfn = start_pfn;
	mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
	mp->holes = holes;
	mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
}
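The while loop above simply rounds start_pfn down to a zone_alignment boundary and charges the skipped pages as holes. Since zone_alignment is 1UL << (MAX_ORDER - 1), a power of two, a hypothetical equivalent without the loop would be:

	/* Sketch only: relies on zone_alignment being a power of two. */
	start_pfn = free_pfn & ~(zone_alignment - 1);
	holes = free_pfn - start_pfn;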
Example #3
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
Example #4
File: setup.c Project: mbgg/linux
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;
	int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (xlated_phys) {
					xen_pvh_adjust_stats(start_pfn,
						end_pfn, &released, &identity,
						nr_pages);
				} else {
					xen_set_identity_and_release_chunk(
						start_pfn, end_pfn, nr_pages,
						&released, &identity);
				}
			}
			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
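For context, a hedged sketch of the call site: in Xen kernels of this vintage, xen_memory_setup() invokes the function above on the E820 map it received from the hypervisor, roughly like this (variable names are assumptions, not verbatim code):

	/* map[] and memmap.nr_entries come from the XENMEM_memory_map
	 * hypercall; max_pfn is the domain's initial page count. */
	extra_pages += xen_set_identity_and_release(map, memmap.nr_entries,
						    max_pfn);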
Example #5
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}
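A minimal usage sketch, modelled on the ARM bootmem path (names are assumptions): lowmem spans [min, max_low) and highmem, if configured, spans [max_low, max_high).

	unsigned long min_pfn, max_low, max_high;

	find_limits(&min_pfn, &max_low, &max_high);
	/* e.g. initialise the lowmem allocator over [min_pfn, max_low) */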
Example #6
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
    int page_start = PFN_DOWN(off);
    int page_end = PFN_UP(off + size);
    int free_end = page_start, unmap_end = page_start;
    struct page **pages;
    unsigned long *populated;
    unsigned int cpu;
    int rs, re, rc;

    /* quick path, check whether all pages are already there */
    rs = page_start;
    pcpu_next_pop(chunk, &rs, &re, page_end);
    if (rs == page_start && re == page_end)
        goto clear;

    /* need to allocate and map pages, this chunk can't be immutable */
    WARN_ON(chunk->immutable);

    pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
    if (!pages)
        return -ENOMEM;

    /* alloc and map */
    pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
        rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
        if (rc)
            goto err_free;
        free_end = re;
    }
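The example above is truncated mid-function. A sketch of how the tail continues, reconstructed from mm/percpu.c of the same era (treat the exact helper names as assumptions):

    /* map and commit */
    pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
        rc = pcpu_map_pages(chunk, pages, populated, rs, re);
        if (rc)
            goto err_unmap;
        unmap_end = re;
    }
    pcpu_post_map_flush(chunk, page_start, page_end);

    /* commit new bitmap */
    bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
    for_each_possible_cpu(cpu)
        memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
    return 0;

err_unmap:
    pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
    pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
        pcpu_unmap_pages(chunk, pages, populated, rs, re);
    pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
    pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
        pcpu_free_pages(chunk, pages, populated, rs, re);
    return rc;
}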
Example #7
void __init setup_physmem(unsigned long start, unsigned long reserve_end,
			  unsigned long len, unsigned long long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;

	physmem_fd = create_mem_file(len + highmem);

	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
			    len - offset, 1, 1, 1);
	if (err < 0) {
		printf("setup_physmem - mapping %ld bytes of memory at 0x%p "
		       "failed - errno = %d\n", len - offset,
		       (void *) uml_reserved, err);
		exit(1);
	}

	/*
	 * Special kludge - This page will be mapped in to userspace processes
	 * from physmem_fd, so it needs to be written out there.
	 */
	os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
	os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);

	bootmap_size = init_bootmem(pfn, pfn + delta);
	free_bootmem(__pa(reserve_end) + bootmap_size,
		     len - bootmap_size - reserve);
}
Example #8
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	unsigned long start_pfn, end_pfn;
	const struct e820entry *entry = xen_e820_map;
	int i;

	end_pfn = 0;
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		start_pfn = PFN_DOWN(entry->addr);
		/* Handle adjacent regions that meet on non-page boundaries. */
		end_pfn = min(end_pfn, start_pfn);

		if (start_pfn >= max_pfn)
			return extra + max_pfn - end_pfn;

		/* Add any holes in map to result. */
		extra += start_pfn - end_pfn;

		end_pfn = PFN_UP(entry->addr + entry->size);
		end_pfn = min(end_pfn, max_pfn);

		if (entry->type != E820_RAM)
			extra += end_pfn - start_pfn;
	}

	return extra;
}
Example #9
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	rs = page_start;
	pcpu_next_pop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		goto clear;

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}
Example #10
/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
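A hypothetical driver loop (not from the kernel) showing the calling convention: each call advances *min_pfn to the start of the next RAM range and returns its length in pages.

	unsigned long pfn = 0, cnt;

	while ((cnt = xen_find_pfn_range(&pfn)) != 0) {
		/* [pfn, pfn + cnt) is RAM per the E820 map */
		pfn += cnt;
	}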
Example #11
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
Example #12
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
	unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));

	return	(pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
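Note the rounding directions: PFN_DOWN() on the begin address and PFN_UP() on the end address round outward, so pages that only partially overlap the nosave section are still excluded from the hibernation image. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and made-up addresses:

	/* __nosave_begin = 0x100c00: PFN_DOWN -> pfn 0x100 (rounds down)
	 * __nosave_end   = 0x103400: PFN_UP   -> pfn 0x104 (rounds up)
	 * pfn_is_nosave() is then true for pfns 0x100..0x103, covering
	 * both partially overlapped boundary pages. */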
Example #13
File: tboot.c Project: amodj/Utopia
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;
    unsigned long p_tboot_shared;
    uint32_t map_base, map_size;
    unsigned long map_addr;

    /* Look for valid page-aligned address for shared page. */
    p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
    if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 ) {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
    printk("  version: %d\n", tboot_shared->version);
    printk("  log_addr: 0x%08x\n", tboot_shared->log_addr);
    printk("  shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
    printk("  tboot_base: 0x%08x\n", tboot_shared->tboot_base);
    printk("  tboot_size: 0x%x\n", tboot_shared->tboot_size);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
    map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
    map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
    if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
        return;

    /* TXT Heap */
    txt_heap_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    txt_heap_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);

    /* SINIT */
    sinit_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    sinit_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);

    destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
                         (unsigned long)__va((map_base + map_size) << PAGE_SHIFT));
}
Example #14
static u64 __init mem_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}
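Here the rounding goes the other way: PFN_UP() on the start and PFN_DOWN() on the end round inward, so only pages lying wholly inside [start, end) are counted. A worked example with 4 KiB pages and made-up addresses:

	/* start = 0x1800: PFN_UP   -> pfn 2 (page 1 is only partly inside)
	 * end   = 0x5000: PFN_DOWN -> pfn 5
	 * absent pages are therefore counted over pfns 2..4 only. */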
Example #15
/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In that case,
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start) {
#ifdef CONFIG_PROBE_INITRD_HEADER
		u32 *initrd_header;

		/*
		 * See if initrd has been added to the kernel image by
		 * arch/mips/boot/addinitrd.c. In that case a header is
		 * prepended to initrd and is made up of 8 bytes. The first
		 * word is a magic number and the second one is the size of
		 * initrd.  The initrd start must be page aligned in any case.
		 */
		initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
		if (initrd_header[0] != 0x494E5244)
			goto disable;
		initrd_start = (unsigned long)(initrd_header + 2);
		initrd_end = initrd_start + initrd_header[1];
#else
		goto disable;
#endif
	}

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware
	 * can't guess whether it needs to pass them as
	 * 64-bit values when the kernel has been built purely
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
Example #16
static void __init bootmem_init(void)
{
	unsigned long start_pfn, bootmap_size;
	unsigned long size = initrd_end - initrd_start;

	start_pfn = PFN_UP(__pa(&_end));

	min_low_pfn = PFN_UP(MEMORY_START);
	max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);

	/* Initialize the boot-time allocator with low memory only. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn, max_low_pfn);
	add_active_range(0, min_low_pfn, max_low_pfn);

	free_bootmem(PFN_PHYS(start_pfn),
		     (max_low_pfn - start_pfn) << PAGE_SHIFT);
	memory_present(0, start_pfn, max_low_pfn);

	/* Reserve space for the bootmem bitmap. */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}

	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	/* Reserve space for the initrd bitmap. */
	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		 initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
Example #17
void __init bootmem_init(void)
{
	struct memblock_region *reg;
	unsigned long bootmap_size;
	unsigned long free_pfn, end_pfn, start_pfn;

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	memblock_init();
	memblock_add(memory_start, memory_end);

	if (((unsigned long)__pa(_end) < memory_start) ||
	    ((unsigned long)__pa(_end) > memory_end))
		printk(KERN_ERR "BUG: your kernel is not located in the DDR SDRAM\n");

	start_pfn = PFN_UP(memory_start);
	free_pfn = PFN_UP(__pa((unsigned long)_end));
	end_pfn = PFN_DOWN(memory_end);

	//memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(free_pfn - start_pfn));
	memblock_reserve(__pa(_stext), _end - _stext);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && initrd_end && initrd_start < initrd_end) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);
	}
#endif

	bootmap_size = init_bootmem(free_pfn, end_pfn);
	memblock_reserve(PFN_PHYS(free_pfn), bootmap_size);

	free_bootmem(PFN_PHYS(free_pfn), PFN_PHYS(end_pfn - free_pfn));

	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	memory_start += PAGE_OFFSET;
	memory_end += PAGE_OFFSET;

	memblock_analyze();
	memblock_dump_all();
}
Example #18
/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks in to node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
	unsigned long free_pfn = PFN_UP(__pa(_end));

	/*
	 * Node 0 sets up its pgdat at the first available pfn,
	 * and bumps it up before setting up the bootmem allocator.
	 */
	NODE_DATA(0) = pfn_to_kaddr(free_pfn);
	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
	free_pfn += PFN_UP(sizeof(struct pglist_data));
	NODE_DATA(0)->bdata = &bootmem_node_data[0];

	/* Set up node 0 */
	setup_bootmem_allocator(free_pfn);

	/* Give the platforms a chance to hook up their nodes */
	plat_mem_setup();
}
Example #19
static unsigned long __init xen_set_identity(const struct e820entry *list,
					     ssize_t map_size)
{
	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
	phys_addr_t start_pci = last;
	const struct e820entry *entry;
	unsigned long identity = 0;
	int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t start = entry->addr;
		phys_addr_t end = start + entry->size;

		if (start < last)
			start = last;

		if (end <= start)
			continue;

		/* Skip over the 1MB region. */
		if (last > end)
			continue;

		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
			if (start > start_pci)
				identity += set_phys_range_identity(
						PFN_UP(start_pci), PFN_DOWN(start));

			/* Without saving 'last' we would gobble up RAM
			 * at the end of the loop, too. */
			last = end;
			start_pci = end;
			continue;
		}
		start_pci = min(start, start_pci);
		last = end;
	}
	if (last > start_pci)
		identity += set_phys_range_identity(
					PFN_UP(start_pci), PFN_DOWN(last));
	return identity;
}
Example #20
/**
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
Example #21
static void __init setup_memory(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));
	setup_bootmem_allocator(start_pfn);
}
Example #22
static void tboot_create_trampoline(void)
{
	u32 map_base, map_size;

	/* Create identity map for tboot shutdown code. */
	map_base = PFN_DOWN(tboot->tboot_base);
	map_size = PFN_UP(tboot->tboot_size);
	if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
		panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
		      map_base, map_size);
}
Example #23
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
    u64 start, end;
    u32 hotpluggable;
    int node, pxm;

    if (srat_disabled())
        goto out_err;
    if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
        goto out_err_bad_srat;
    if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
        goto out_err;
    hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
    if (hotpluggable && !save_add_info())
        goto out_err;

    start = ma->base_address;
    end = start + ma->length;
    pxm = ma->proximity_domain;
    if (acpi_srat_revision <= 1)
        pxm &= 0xff;

    node = setup_node(pxm);
    if (node < 0) {
        printk(KERN_ERR "SRAT: Too many proximity domains.\n");
        goto out_err_bad_srat;
    }

    if (numa_add_memblk(node, start, end) < 0)
        goto out_err_bad_srat;

    node_set(node, numa_nodes_parsed);

    pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
            node, pxm,
            (unsigned long long) start, (unsigned long long) end - 1,
            hotpluggable ? " hotplug" : "",
            ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

    /* Mark hotplug range in memblock. */
    if (hotpluggable && memblock_mark_hotplug(start, ma->length))
        pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
                (unsigned long long)start, (unsigned long long)end - 1);

    max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

    return 0;
out_err_bad_srat:
    bad_srat();
out_err:
    return -1;
}
Example #24
/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
Example #25
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;
#ifdef CONFIG_HIGHMEM
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}
}
Example #26
static void free_init_pages(const char *what, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk("Freeing %s mem: %ldk freed\n", what, (end - start) >> 10);

	for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalram_pages++;
	}
}
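A hypothetical call site (names assumed; this example's free_init_pages() takes physical addresses, since it applies PFN_UP()/PFN_DOWN() directly):

void free_initmem(void)
{
	free_init_pages("unused kernel",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}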
Example #27
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}
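A hedged usage sketch: where the remap-page counting helper has the callback signature shown in the prototype above (unlike the standalone variant in Example #8), the iterator is driven along these lines:

	/* Assumes a callback-style xen_count_remap_pages with the
	 * (start_pfn, end_pfn, nr_pages, last_val) signature. */
	extra_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);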
Example #28
/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
Example #29
static bool __init acpi_memory_banned(unsigned long address,
                                      unsigned long size)
{
    unsigned long mfn, nr_pages, i;

    mfn = PFN_DOWN(address);
    nr_pages = PFN_UP((address & ~PAGE_MASK) + size);
    for ( i = 0 ; i < nr_pages; i++ )
        if ( !page_is_ram_type(mfn + i, RAM_TYPE_RESERVED) &&
             !page_is_ram_type(mfn + i, RAM_TYPE_ACPI) )
            return true;

    return false;
}
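The PFN_UP((address & ~PAGE_MASK) + size) idiom computes how many page frames a possibly misaligned range touches: the offset of address within its page is added to the size before rounding up. A worked example with 4 KiB pages:

    /* address = 0x12345678, size = 0x2000:
     * offset in first page = 0x678, nr_pages = PFN_UP(0x2678) = 3,
     * i.e. the range spans mfns 0x12345..0x12347. */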
Example #30
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn > end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}