Example #1
static __init int setup_node(int pxm)
{
	unsigned node = pxm2node[pxm];
	if (node == 0xff) {
		/* Unmapped pxm: allocate the lowest unused node id. */
		if (nodes_weight(nodes_found) >= MAX_NUMNODES)
			return -1;
		node = first_unset_node(nodes_found);
		node_set(node, nodes_found);
		pxm2node[pxm] = node;
	}
	return pxm2node[pxm];
}
Example #2
File: numa.c  Project: ANFS/ANFS-kernel
int acpi_map_pxm_to_node(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node < 0) {
		/* First reference to this pxm: claim a free node id. */
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
Example #3
int __cpuinit acpi_map_pxm_to_node(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node < 0) {
		/* Unmapped pxm: allocate a node id and record both directions. */
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NID_INVAL;
		node = first_unset_node(nodes_found_map);
		pxm_to_node_map[pxm] = node;
		node_to_pxm_map[node] = pxm;
		node_set(node, nodes_found_map);
	}

	return node;
}
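
Examples #1 through #3 are variants of the same lazy-allocation pattern: a proximity domain (pxm) gets a logical node id the first time it is seen, taken from the lowest clear bit of a "nodes found" mask, and every later lookup returns the same id. Below is a minimal userspace sketch of that pattern. The names (map_pxm_to_node_sim, nodes_weight_sim, first_unset_node_sim, NO_NODE) and the byte-per-bit mask are illustrative stand-ins for the kernel's nodemask_t helpers, not the real API.

#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES 8
#define NO_NODE      (-1)

static int pxm_to_node_map[256];
static unsigned char nodes_found[MAX_NUMNODES];	/* stand-in for nodemask_t */

/* Number of allocated nodes, analogous to nodes_weight(). */
static int nodes_weight_sim(void)
{
	int i, w = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		w += nodes_found[i];
	return w;
}

/* Lowest free node id, analogous to first_unset_node(). */
static int first_unset_node_sim(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (!nodes_found[i])
			return i;
	return NO_NODE;
}

/* Lazily map a proximity domain to the next free logical node id. */
static int map_pxm_to_node_sim(int pxm)
{
	int node = pxm_to_node_map[pxm];

	if (node < 0) {
		if (nodes_weight_sim() >= MAX_NUMNODES)
			return NO_NODE;
		node = first_unset_node_sim();
		pxm_to_node_map[pxm] = node;
		nodes_found[node] = 1;
	}
	return node;
}

int main(void)
{
	int a, b, c;

	memset(pxm_to_node_map, -1, sizeof(pxm_to_node_map));
	a = map_pxm_to_node_sim(4);	/* first use of pxm 4 -> node 0 */
	b = map_pxm_to_node_sim(7);	/* first use of pxm 7 -> node 1 */
	c = map_pxm_to_node_sim(4);	/* repeat lookup -> node 0 again */
	printf("%d %d %d\n", a, b, c);	/* prints: 0 1 0 */
	return 0;
}

The MAX_NUMNODES guard matters: once the mask is full, new proximity domains are rejected rather than aliased onto existing node ids, which is exactly why the examples above return -1, NUMA_NO_NODE, or NID_INVAL.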
Example #4
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  Returns zero on success or negative on error.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
					 struct numa_meminfo *pi,
					 u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int nid = 0;
	int i, ret;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

#ifdef XEN_HETEROMEM_FAKENUMA
	printk(KERN_ALERT "Trying to emualte nr_nodes %d \n",nr_nodes);
#endif

	/*
	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
	 * the division in ulong number of pages and convert back.
	 */
	size = max_addr - addr - mem_hole_size(addr, max_addr);

#ifdef XEN_HETEROMEM_FAKENUMA
	printk(KERN_ALERT "max_addr %lu, "
			"addr %lu, "
			"mem_hole_size(addr, max_addr) %lu, "
			"size %lu, \n",
			max_addr, addr, mem_hole_size(addr, max_addr), size);
#endif

	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);

#ifdef XEN_HETEROMEM_FAKENUMA
	printk(KERN_ALERT "size %lu, "
			"(unsigned long)(size >> PAGE_SHIFT) %lu, "
			"PFN_PHYS((unsigned long)(size >> PAGE_SHIFT)/nr_nodes)  %lu \n ",
			size, (unsigned long)(size >> PAGE_SHIFT), PFN_PHYS((unsigned long)(size >> PAGE_SHIFT)/nr_nodes));
#endif

	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

#ifdef XEN_HETEROMEM_FAKENUMA
	printk(KERN_ALERT "Trying to emualte big nodes %d, "
			"Pages %lu, "
			"FAKE_NODE_MIN_HASH_MASK %u, "
			"size & ~FAKE_NODE_MIN_HASH_MASK %u, "
			"FAKE_NODE_MIN_SIZE %u ," 
			"nr_nodes %u \n",
			big, size, FAKE_NODE_MIN_HASH_MASK, 
			size & ~FAKE_NODE_MIN_HASH_MASK, FAKE_NODE_MIN_SIZE, 
			nr_nodes);
#endif

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < pi->nr_blks; i++)
		node_set(pi->blk[i].nid, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;
			int phys_blk;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;
			end = start + size;

			if (nid < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - start - mem_hole_size(start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > limit) {
					end = limit;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end - mem_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
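
The size/big arithmetic above is the crux of the interleave: the per-node size is rounded down to a multiple of FAKE_NODE_MIN_SIZE, and the rounding losses, pooled across all nr_nodes fake nodes, fund `big` extra minimum-size chunks that the first `big` nodes receive (the `if (nid < big)` branch). A standalone sketch of just that arithmetic follows; the 1030 MiB total and the node count are assumed example values, not figures from the source.

#include <stdio.h>
#include <stdint.h>

#define FAKE_NODE_MIN_SIZE	(4ULL << 20)	/* 4 MiB granularity */
#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1))

int main(void)
{
	uint64_t total = 1030ULL << 20;		/* assumed usable memory */
	int nr_nodes = 4;
	uint64_t size = total / nr_nodes;	/* 257.5 MiB per node */

	/*
	 * Each node loses (size mod 4 MiB) to rounding; summed over all
	 * nodes, that buys `big` whole extra 4 MiB chunks.
	 */
	int big = (int)(((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
			FAKE_NODE_MIN_SIZE);

	size &= FAKE_NODE_MIN_HASH_MASK;	/* round down to 4 MiB multiple */

	printf("per-node size %llu MiB, big nodes %d\n",
	       (unsigned long long)(size >> 20), big);
	/* prints: per-node size 256 MiB, big nodes 1 */
	return 0;
}

With those numbers the first fake node gets 260 MiB and the remaining three get 256 MiB each, which is as close to an even split as the 4 MiB granularity allows.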
Example #5
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;
	if (acpi_numa <= 0)
		return -1;

	/* First clean up the node list */
	for_each_node_mask(i, nodes_parsed) {
		cutoff_node(i, start, end);
		if (nodes[i].start == nodes[i].end)
			node_clear(i, nodes_parsed);
	}

	memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	/* Finally register nodes */
	for_each_node_mask(i, nodes_parsed)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	for (i = 0; i < NR_CPUS; i++) { 
		if (cpu_to_node[i] == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node[i], nodes_parsed))
			cpu_to_node[i] = NUMA_NO_NODE;
	}
	numa_init_array();
	return 0;
}
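
The first loop's clamp-and-drop step is easy to get wrong at the boundaries, so here is a self-contained sketch of it. struct node_range and this simplified cutoff_node() are illustrative stand-ins for the kernel's bootnode and its cutoff_node(); the address window is an assumed example.

#include <stdio.h>

struct node_range { unsigned long start, end; };

/* Clamp a parsed node to [start, end), collapsing it if disjoint. */
static void cutoff_node(struct node_range *n, unsigned long start,
			unsigned long end)
{
	if (n->start < start)
		n->start = start;
	if (n->end > end)
		n->end = end;
	if (n->start > n->end)
		n->start = n->end;
}

int main(void)
{
	struct node_range nodes[] = {
		{ 0x0000, 0x8000 },	/* partly below the window */
		{ 0x9000, 0x9000 },	/* already empty: dropped */
		{ 0xf000, 0x20000 },	/* partly above the window */
	};
	unsigned long start = 0x1000, end = 0x10000;
	int i;

	for (i = 0; i < 3; i++) {
		cutoff_node(&nodes[i], start, end);
		if (nodes[i].start == nodes[i].end)
			printf("node %d: dropped (empty after cutoff)\n", i);
		else
			printf("node %d: [%#lx, %#lx)\n", i,
			       nodes[i].start, nodes[i].end);
	}
	return 0;
}

As in acpi_scan_nodes() above, a node whose start equals its end after clamping carries no memory and is cleared from the parsed-node mask rather than registered.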