Example #1
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}
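For context, a minimal sketch of how a per-node allocator like the one above is typically driven during early boot. The glue function below and its use of numa_nodes_parsed (seen in Example #15) are assumptions for illustration, not the original call site:

/* Hypothetical early-boot glue, not from the source: invoke the Example #1
 * allocator once per parsed NUMA node. Assumes numa_nodes_parsed has been
 * populated by the platform's NUMA detection, as in Example #15. */
static void __init alloc_all_node_data(void)
{
	int nid;

	for_each_node_mask(nid, numa_nodes_parsed)
		alloc_node_data(nid);
}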
Example #2
void cpu_node_probe(void)
{
	int i, highest = 0;
	gda_t *gdap = GDA;

	/*
	 * Initialize the arrays to invalid nodeid (-1)
	 */
	for (i = 0; i < MAX_COMPACT_NODES; i++)
		compact_to_nasid_node[i] = INVALID_NASID;
	for (i = 0; i < MAX_NASIDS; i++)
		nasid_to_compact_node[i] = INVALID_CNODEID;
	for (i = 0; i < MAXCPUS; i++)
		cpuid_to_compact_node[i] = INVALID_CNODEID;

	/*
	 * MCD - this whole "compact node" stuff can probably be dropped,
	 * as we can handle sparse numbering now
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_COMPACT_NODES; i++) {
		nasid_t nasid = gdap->g_nasidtable[i];
		if (nasid == INVALID_NASID)
			break;
		compact_to_nasid_node[i] = nasid;
		nasid_to_compact_node[nasid] = i;
		node_set_online(num_online_nodes());
		highest = do_cpumask(i, nasid, highest);
	}

	printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
}
Example #3
/*
 * Function: smp_dump_qct()
 *
 * Description: gets memory layout from the quad config table.  This
 * function also updates node_online_map with the nodes (quads) present.
 */
static void __init smp_dump_qct(void)
{
	int node;
	struct eachquadmem *eq;
	struct sys_cfg_data *scd =
		(struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);

	nodes_clear(node_online_map);
	for_each_node(node) {
		if (scd->quads_present31_0 & (1 << node)) {
			node_set_online(node);
			eq = &scd->eq[node];
			/* Convert to pages */
			node_start_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start - eq->priv_mem_size);
			node_end_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);

			memory_present(node,
				node_start_pfn[node], node_end_pfn[node]);
			node_remap_size[node] = node_memmap_size_bytes(node,
							node_start_pfn[node],
							node_end_pfn[node]);
		}
	}
}
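MB_TO_PAGES above converts a size in megabytes into a count of page frames. Its definition is not shown in this corpus; a plausible form consistent with the usage (an assumption, not the verified macro) is:

/* Assumed definition, consistent with the usage above: a megabyte is
 * 2^20 bytes, so shifting by (20 - PAGE_SHIFT) yields page frames. */
#define MB_TO_PAGES(mb)	((mb) << (20 - PAGE_SHIFT))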
Example #4
void __init setup_arch(char **cmdline_p)
{
    ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

    boot_cpu_data.cpu_clock = M32R_CPUCLK;
    boot_cpu_data.bus_clock = M32R_BUSCLK;
    boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;

#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
    rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
    rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

    if (!MOUNT_ROOT_RDONLY)
        root_mountflags &= ~MS_RDONLY;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
    conswitchp = &dummy_con;
#endif
#endif

#ifdef CONFIG_DISCONTIGMEM
    nodes_clear(node_online_map);
    node_set_online(0);
    node_set_online(1);
#endif	/* CONFIG_DISCONTIGMEM */

    init_mm.start_code = (unsigned long) _text;
    init_mm.end_code = (unsigned long) _etext;
    init_mm.end_data = (unsigned long) _edata;
    init_mm.brk = (unsigned long) _end;

    code_resource.start = virt_to_phys(_text);
    code_resource.end = virt_to_phys(_etext)-1;
    data_resource.start = virt_to_phys(_etext);
    data_resource.end = virt_to_phys(_edata)-1;

    parse_mem_cmdline(cmdline_p);

    setup_memory();

    paging_init();
}
Example #5
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	bool remapped = false;
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	/* initialize remap allocator before aligning to ZONE_ALIGN */
	init_alloc_remap(nid, start, end);

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
	       nid, start, end - 1);

	/*
	 * Allocate node data.  Try remap allocator first, node-local
	 * memory and then any node.  Never allocate in DMA zone.
	 */
	nd = alloc_remap(nid, nd_size);
	if (nd) {
		nd_pa = __pa(nd);
		remapped = true;
	} else {
		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
		nd = __va(nd_pa);
	}

	/* report and initialize */
	printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (!remapped && tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

	node_set_online(nid);
}
Example #6
// start_pfn : physical small-page frame number of the start address of bank 0	(0x20000)
// end_pfn   : physical small-page frame number of the last address of bank 0	(0x4f800)
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	// pg_data_t : struct pglist_data

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	// boot_pages : 6
	// Returns the number of page frames needed to hold a bitmap covering start_pfn ~ end_pfn.
	
	// boot_pages << PAGE_SHIFT : 0x6000, L1_CACHE_BYTES : 64, __pfn_to_phys(end_pfn) : 0x4F800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));
	// Carves out an area for the bitmap from a non-reserved region;
	// the start address of that area is returned (physical).

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);	// an empty function (no-op) in this configuration
	pgdat = NODE_DATA(0);
	//*pgdat = contig_page_data
	//
	//	.bdata = &bootmem_node_data[0]
	//			 a value that is currently zero
	//
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
	// registers on bdata_list
	// initializes the bitmap to 0xFF

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
	// for (reg = memblock.memory.regions; reg < (memblock.memory.regions + memblock.memory.cnt); reg++)
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);
		// start : 0x20000
		// end   : 0xA0000

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;
		
		// start : 0x20000000, (end - start) << PAGE_SHIFT : 0x2F800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	// clears all bitmap bits from start to end,
	// i.e. marks everything FREE for now
	}
}
Example #7
// ARM10C 20131207
// min: 0x20000, max_low: 0x4f800
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	// start_pfn: 0x20000, end_pfn: 0x4f800, end_pfn - start_pfn: 0x2f800
	// boot_pages: 0x6
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

	// boot_pages << PAGE_SHIFT: 0x6000, L1_CACHE_BYTES: 64
	// __pfn_to_phys(0x4f800); 0x4f800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);

	// pglist_data.bdata is set to the address of bootmem_node_data
	pgdat = NODE_DATA(0);

	// pgdat: ?, __phys_to_pfn(bitmap): ?, start_pfn: 0x20000, end_pfn: 0x4f800
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		// start: 0x20000
		unsigned long start = memblock_region_memory_base_pfn(reg);
		// end: 0xA0000
		unsigned long end = memblock_region_memory_end_pfn(reg);

		// end: 0xA0000, end_pfn: 0x4f800
		if (end >= end_pfn)
			// end: 0x4f800
			end = end_pfn;
		// start: 0x20000, end: 0x4f800
		if (start >= end)
			break;

		// __pfn_to_phys(0x20000): 0x20000000, (end - start) << PAGE_SHIFT: 0x2f800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
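As a sanity check on the boot_pages value in the annotations above (0x6 for 0x4f800 - 0x20000 = 0x2f800 page frames): the bootmem bitmap needs one bit per frame, so 0x2f800 / 8 = 0x5f00 bytes, which rounds up to six 4 KiB pages. A standalone sketch of that sizing, assuming PAGE_SHIFT == 12 (not the kernel's bootmem_bootmap_pages() implementation):

/* Sketch of the bitmap sizing performed by bootmem_bootmap_pages(),
 * assuming 4 KiB pages; for illustration only. */
static unsigned int bitmap_pages(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;	/* one bit per page frame */

	return (bytes + (1UL << 12) - 1) >> 12;	/* round up to whole pages */
}
/* bitmap_pages(0x2f800) == 6, matching the annotation. */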
Example #8
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
    const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
    u64 nd_pa;
    void *nd;
    int tnid;

    /*
     * Don't confuse VM with a node that doesn't have the
     * minimum amount of memory:
     */
    if (end && (end - start) < NODE_MIN_SIZE)
        return;

    start = roundup(start, ZONE_ALIGN);

    printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
           nid, start, end - 1);

    /*
     * Allocate node data.  Try node-local memory and then any node.
     * Never allocate in DMA zone.
     */
    nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
    if (!nd_pa) {
        nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
                                      MEMBLOCK_ALLOC_ACCESSIBLE);
        if (!nd_pa) {
            pr_err("Cannot find %zu bytes in node %d\n",
                   nd_size, nid);
            return;
        }
    }
    nd = __va(nd_pa);

    /* report and initialize */
    printk(KERN_INFO "  NODE_DATA [mem %#010Lx-%#010Lx]\n",
           nd_pa, nd_pa + nd_size - 1);
    tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
    if (tnid != nid)
        printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

    node_data[nid] = nd;
    memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
    NODE_DATA(nid)->node_id = nid;
    NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
    NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

    node_set_online(nid);
}
Example #9
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created,
		 * CPUs on that node can't be hot-added. There is no
		 * rollback path now, so catch it with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
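A hedged usage sketch for the hot-add entry point above; the node id, physical base, and size below are illustrative values, not from the source:

/* Illustrative only: hot-add 1 GiB of memory to node 1 at a made-up
 * physical base address. */
static int example_hotadd(void)
{
	u64 start = 0x100000000ULL;	/* hypothetical physical base */
	u64 size = 1ULL << 30;		/* 1 GiB, hypothetical */

	return add_memory(1, start, size);
}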
Example #10
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
    unsigned long bootmap_pages;
    unsigned long start_pfn, end_pfn;
    unsigned long bootmem_paddr;

    /* Don't allow bogus node assignment */
    BUG_ON(nid > MAX_NUMNODES || nid <= 0);

    start_pfn = start >> PAGE_SHIFT;
    end_pfn = end >> PAGE_SHIFT;

    pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                     PAGE_KERNEL);

    lmb_add(start, end - start);

    __add_active_range(nid, start_pfn, end_pfn);

    /* Node-local pgdat */
    NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
                                         SMP_CACHE_BYTES, end));
    memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

    NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
    NODE_DATA(nid)->node_start_pfn = start_pfn;
    NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

    /* Node-local bootmap */
    bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
                                   PAGE_SIZE, end);
    init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                      start_pfn, end_pfn);

    free_bootmem_with_active_regions(nid, end_pfn);

    /* Reserve the pgdat and bootmap space with the bootmem allocator */
    reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
                         sizeof(struct pglist_data), BOOTMEM_DEFAULT);
    reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
                         bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

    /* It's up */
    node_set_online(nid);

    /* Kick sparsemem */
    sparse_memory_present_with_active_regions(nid);
}
Example #11
static void __init MP_translation_info(struct mpc_trans *m)
{
	printk(KERN_INFO
	    "Translation: record %d, type %d, quad %d, global %d, local %d\n",
	       mpc_record, m->trans_type, m->trans_quad, m->trans_global,
	       m->trans_local);

	if (mpc_record >= MAX_MPC_ENTRY)
		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
	else
		translation_table[mpc_record] = m; /* stash this for later */

	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
		node_set_online(m->trans_quad);
}
Example #12
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmap_start, bootmap_size;
	unsigned long start_pfn, free_pfn, end_pfn;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid == 0);

	/*
	 * The free pfn starts at the beginning of the range, and is
	 * advanced as necessary for pgdat and node map allocations.
	 */
	free_pfn = start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
	free_pfn += PFN_UP(sizeof(struct pglist_data));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
	bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
				    end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
Example #13
static void __init arm_bootmem_init(struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
	}

	/*
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
		if (start >= start_pfn &&
		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);
	}
}
Example #14
/*
 * Function: smp_dump_qct()
 *
 * Description: gets memory layout from the quad config table.  This
 * function also increments numnodes with the number of nodes (quads)
 * present.
 */
static void __init smp_dump_qct(void)
{
	int node;
	struct eachquadmem *eq;
	struct sys_cfg_data *scd =
		(struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);

	numnodes = 0;
	for(node = 0; node < MAX_NUMNODES; node++) {
		if(scd->quads_present31_0 & (1 << node)) {
			node_set_online(node);
			numnodes++;
			eq = &scd->eq[node];
			/* Convert to pages */
			node_start_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start - eq->priv_mem_size);
			node_end_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
		}
	}
}
Example #15
static int __init numa_register_nodes(void)
{
	int nid;
	struct memblock_region *mblk;

	/* Check that valid nid is set to memblks */
	for_each_memblock(memory, mblk)
		if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) {
			pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
				mblk->nid, mblk->base,
				mblk->base + mblk->size - 1);
			return -EINVAL;
		}

	/* Finally register nodes. */
	for_each_node_mask(nid, numa_nodes_parsed) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}

	return 0;
}
Example #16
static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
{
	struct eachquadmem *eq = scd->eq + node;

	node_set_online(node);

	/* Convert to pages */
	node_start_pfn[node] =
		 MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size);

	node_end_pfn[node] =
		 MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);

	e820_register_active_regions(node, node_start_pfn[node],
						node_end_pfn[node]);

	memory_present(node, node_start_pfn[node], node_end_pfn[node]);

	node_remap_size[node] = node_memmap_size_bytes(node,
					node_start_pfn[node],
					node_end_pfn[node]);
}
Example #17
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #18
/*
 * First memory setup routine called from setup_arch()
 * 1. setup swapper's mm @init_mm
 * 2. Count the pages we have and setup bootmem allocator
 * 3. zone setup
 */
void __init setup_arch_memory(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zones_holes[MAX_NR_ZONES];

	init_mm.start_code = (unsigned long)_text;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	/* first page of system - kernel .vector starts here */
	min_low_pfn = ARCH_PFN_OFFSET;

	/* Last usable page of low mem */
	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);

#ifdef CONFIG_FLATMEM
	/* pfn_valid() uses this */
	max_mapnr = max_low_pfn - min_low_pfn;
#endif

	/*------------- bootmem allocator setup -----------------------*/

	/*
	 * seed the bootmem allocator after any DT memory node parsing or
	 * "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
	 *
	 * Only low mem is added, otherwise we have crashes when allocating
	 * mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
	 * avail memory, ending in highmem with a > 32-bit address. However
	 * it then tries to memset it with a truncated 32-bit handle, causing
	 * the crash
	 */

	memblock_add_node(low_mem_start, low_mem_sz, 0);
	memblock_reserve(CONFIG_LINUX_LINK_BASE,
			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();

	/*----------------- node/zones setup --------------------------*/
	memset(zones_size, 0, sizeof(zones_size));
	memset(zones_holes, 0, sizeof(zones_holes));

	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
	zones_holes[ZONE_NORMAL] = 0;

	/*
	 * We can't use the helper free_area_init(zones[]) because it uses
	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
	 * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE
	 */
	free_area_init_node(0,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_low_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

#ifdef CONFIG_HIGHMEM
	/*
	 * Populate a new node with highmem
	 *
	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
	 * than addresses in normal ala low memory (0x8000_0000 based).
	 * Even with PAE, the huge peripheral space hole would waste a lot of
	 * mem with single mem_map[]. This warrants a mem_map per region design.
	 * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM.
	 *
	 * DISCONTIGMEM in turn requires multiple nodes. Node 0 above is
	 * populated with normal memory zone while node 1 only has highmem
	 */
	node_set_online(1);

	min_high_pfn = PFN_DOWN(high_mem_start);
	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);

	zones_size[ZONE_NORMAL] = 0;
	zones_holes[ZONE_NORMAL] = 0;

	zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
	zones_holes[ZONE_HIGHMEM] = 0;

	free_area_init_node(1,			/* node-id */
			    zones_size,		/* num pages per zone */
			    min_high_pfn,	/* first pfn of node */
			    zones_holes);	/* holes */

	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
	kmap_init();
#endif
}
Example #19
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
	unsigned long bootmap_size;

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
					 min_low_pfn, max_low_pfn);

	add_active_range(0, min_low_pfn, max_low_pfn);
	register_bootmem_low_pages();

	node_set_online(0);

	/*
	 * Reserve the kernel text and
	 * Reserve the bootmem bitmap. We do this in two steps (first step
	 * was init_bootmem()), because this catches the (definitely buggy)
	 * case of us accidentally initializing the bootmem allocator with
	 * an invalid RAM area.
	 */
	reserve_bootmem(__MEMORY_START+PAGE_SIZE,
		(PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(__MEMORY_START, PAGE_SIZE);

	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_BLK_DEV_INITRD
	ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
	if (&__rd_start != &__rd_end) {
		LOADER_TYPE = 1;
		INITRD_START = PHYSADDR((unsigned long)&__rd_start) -
					__MEMORY_START;
		INITRD_SIZE = (unsigned long)&__rd_end -
			      (unsigned long)&__rd_start;
	}

	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START + __MEMORY_START,
					INITRD_SIZE);
			initrd_start = INITRD_START + PAGE_OFFSET +
					__MEMORY_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				    INITRD_START + INITRD_SIZE,
				    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
#endif
}
Example #20
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table)
		return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
		    printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
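The SLIT consumed above is a flattened locality_count x locality_count matrix, indexed row-major: entry[i * locality_count + j] is the distance from proximity domain i to domain j. A hypothetical two-domain illustration (distance values follow the LOCAL_DISTANCE/REMOTE_DISTANCE convention seen in Example #25):

/* Hypothetical 2-domain SLIT, row-major: entry[i * n + j] is the
 * distance from domain i to domain j (10 = local, 20 = remote). */
static const unsigned char example_slit[2 * 2] = {
	10, 20,		/* from domain 0 to domains 0 and 1 */
	20, 10,		/* from domain 1 to domains 0 and 1 */
};
/* distance from domain 0 to domain 1: example_slit[0 * 2 + 1] == 20 */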
Example #21
void __init
acpi_numa_arch_fixup (void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/* calculate total number of nodes in system from PXM bitmap */
	numnodes = 0;		/* init total nodes in system */

	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			pxm_to_nid_map[i] = numnodes;
			node_set_online(numnodes);
			nid_to_pxm_map[numnodes++] = i;
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];

	/* assign memory bank numbers for each chunk on each node */
	for (i = 0; i < numnodes; i++) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for (i = 0; i < srat_num_cpus; i++)
		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];

	printk(KERN_INFO "Number of logical nodes in system = %d\n", numnodes);
	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);

	if (!slit_table) return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i=0; i<slit_table->localities; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_nid_map[i];
		for (j=0; j<slit_table->localities; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_nid_map[j];
			node_distance(node_from, node_to) =
				slit_table->entry[i*slit_table->localities + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for (i = 0; i < numnodes; i++) {
		for (j = 0; j < numnodes; j++)
			printk("%03d ", node_distance(i,j));
		printk("\n");
	}
#endif
}
Example #22
int __init get_memcfg_from_srat(void)
{
	int i, j, nid;

	if (srat_disabled())
		goto out_fail;

	if (acpi_numa_init() < 0)
		goto out_fail;

	if (num_memory_chunks == 0) {
		printk(KERN_DEBUG
			 "could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk(KERN_DEBUG "pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk(KERN_CONT "%02x ", pxm_bitmap[i]);
	}
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "Number of logical nodes in system = %d\n",
			 num_online_nodes());
	printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
			 num_memory_chunks);

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk(KERN_DEBUG
			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		if (node_read_chunk(chunk->nid, chunk))
			continue;

		memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn,
					     min(chunk->end_pfn, max_pfn));
	}
	/* for out of order entries in SRAT */
	sort_node_map();

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = min(node_end_pfn[nid], max_pfn);

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
			" table\n");
	return 0;
}
Example #23
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int depth;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (strstr(saved_command_line, "numa=off")) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));

	for (i = 0; i < entries ; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	depth = find_min_common_depth();

	printk(KERN_INFO "NUMA associativity depth for CPU/Memory: %d\n", depth);
	if (depth < 0)
		return depth;

	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu, depth);
			of_node_put(cpu);

			if (numa_domain >= MAX_NUMNODES) {
				/*
				 * POWER4 LPAR uses 0xffff as an invalid node;
				 * don't warn in this case.
				 */
				if (numa_domain != 0xffff)
					printk(KERN_ERR "WARNING: cpu %ld "
					       "maps to invalid NUMA node %d\n",
					       i, numa_domain);
				numa_domain = 0;
			}
		} else {
			printk(KERN_ERR "WARNING: no NUMA information for "
			       "cpu %ld\n", i);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		map_cpu_to_node(i, numa_domain);
	}

	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_cell_ul(memory, &memcell_buf);
		size = read_cell_ul(memory, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory, depth);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		node_set_online(numa_domain);

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		/* 
		 * For backwards compatibility, OF splits the first node
		 * into two regions (the first being 0-4GB). Check for
		 * this simple case and complain if there is a gap in
		 * memory
		 */
		if (node_data[numa_domain].node_spanned_pages) {
			unsigned long shouldstart =
				node_data[numa_domain].node_start_pfn + 
				node_data[numa_domain].node_spanned_pages;
			if (shouldstart != (start / PAGE_SIZE)) {
				printk(KERN_ERR "Hole in node, disabling "
						"region start %lx length %lx\n",
						start, size);
				continue;
			}
			node_data[numa_domain].node_spanned_pages +=
				size / PAGE_SIZE;
		} else {
			node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			node_data[numa_domain].node_spanned_pages =
				size / PAGE_SIZE;
		}

		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		dbg("memory region %lx to %lx maps to domain %d\n",
		    start, start+size, numa_domain);

		ranges--;
		if (ranges)
			goto new_range;
	}

	numnodes = max_domain + 1;

	return 0;
}
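numa_memory_lookup_table above maps each MEMORY_INCREMENT-sized chunk of physical memory to its NUMA domain. A hedged sketch of the corresponding lookup (the helper name is illustrative, not from the source):

/* Illustrative helper: resolve the NUMA domain of a physical address
 * via the chunk-granular table populated above. */
static inline int pa_to_numa_domain(unsigned long pa)
{
	return numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
}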
Example #24
/* Parse the ACPI Static Resource Affinity Table */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
	u8 *start, *end, *p;
	int i, j, nid;

	start = (u8 *)(&(sratp->reserved) + 1);	/* skip header */
	p = start;
	end = (u8 *)sratp + sratp->header.length;

	memset(pxm_bitmap, 0, sizeof(pxm_bitmap));	/* init proximity domain bitmap */
	memset(node_memory_chunk, 0, sizeof(node_memory_chunk));

	num_memory_chunks = 0;
	while (p < end) {
		switch (*p) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY:
			parse_cpu_affinity_structure(p);
			break;
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
			parse_memory_affinity_structure(p);
			break;
		default:
			printk("ACPI 2.0 SRAT: unknown entry skipped: type=0x%02X, len=%d\n", p[0], p[1]);
			break;
		}
		p += p[1];
		if (p[1] == 0) {
			printk("acpi20_parse_srat: Entry length value is zero;"
				" can't parse any further!\n");
			break;
		}
	}

	if (num_memory_chunks == 0) {
		printk("could not finy any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk("pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk("%02X ", pxm_bitmap[i]);
	}
	printk("\n");
	printk("Number of logical nodes in system = %d\n", num_online_nodes());
	printk("Number of memory chunks in system = %d\n", num_memory_chunks);

	for (i = 0; i < MAX_APICID; i++)
		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		node_read_chunk(chunk->nid, chunk);
		add_active_range(chunk->nid, chunk->start_pfn, chunk->end_pfn);
	}
 
	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = node_end_pfn[nid];

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	return 0;
}
Example #25
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table) {
		for (i = 0; i < MAX_NUMNODES; i++)
			for (j = 0; j < MAX_NUMNODES; j++)
				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
							REMOTE_DISTANCE;
		return;
	}

	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
		    printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}