Example #1
void cpu_node_probe(void)
{
	int i, highest = 0;
	gda_t *gdap = GDA;

	/*
	 * Initialize the arrays to invalid nodeid (-1)
	 */
	for (i = 0; i < MAX_COMPACT_NODES; i++)
		compact_to_nasid_node[i] = INVALID_NASID;
	for (i = 0; i < MAX_NASIDS; i++)
		nasid_to_compact_node[i] = INVALID_CNODEID;
	for (i = 0; i < MAXCPUS; i++)
		cpuid_to_compact_node[i] = INVALID_CNODEID;

	/*
	 * MCD - this whole "compact node" stuff can probably be dropped,
	 * as we can handle sparse numbering now
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_COMPACT_NODES; i++) {
		nasid_t nasid = gdap->g_nasidtable[i];
		if (nasid == INVALID_NASID)
			break;
		compact_to_nasid_node[i] = nasid;
		nasid_to_compact_node[nasid] = i;
		node_set_online(num_online_nodes());
		highest = do_cpumask(i, nasid, highest);
	}

	printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
}
Example #2
/**
 * sn_init_pdas - setup node data areas
 *
 * One time setup for Node Data Area.  Called by sn_setup().
 */
static void __init sn_init_pdas(char **cmdline_p)
{
	cnodeid_t cnode;

	memset(pda->cnodeid_to_nasid_table, -1,
	       sizeof(pda->cnodeid_to_nasid_table));
	for_each_online_node(cnode)
		pda->cnodeid_to_nasid_table[cnode] =
		    pxm_to_nasid(nid_to_pxm_map[cnode]);

	numionodes = num_online_nodes();
	scan_for_ionodes();

	/*
	 * Allocate & initialize the nodepda for each node.
	 */
	for_each_online_node(cnode) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
		memset(nodepdaindr[cnode]->phys_cpuid, -1, 
		    sizeof(nodepdaindr[cnode]->phys_cpuid));
	}

	/*
	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
	 */
	for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
	}

	/*
	 * Now copy the array of nodepda pointers to each nodepda.
	 */
	for (cnode = 0; cnode < numionodes; cnode++)
		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
		       sizeof(nodepdaindr));

	/*
	 * Set up IO related platform-dependent nodepda fields.
	 * The following routine actually sets up the hubinfo struct
	 * in nodepda.
	 */
	for_each_online_node(cnode) {
		bte_init_node(nodepdaindr[cnode], cnode);
	}

	/*
	 * Initialize the per node hubdev.  This includes IO Nodes and 
	 * headless/memless nodes.
	 */
	for (cnode = 0; cnode < numionodes; cnode++) {
		hubdev_init_node(nodepdaindr[cnode], cnode);
	}
}
Example #3
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{ 
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(cpu_to_node(i))) {
			printk("cpu with no node %d, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem(size);
		} else { 
			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}
Example #4
int __init prominfo_init(void)
{
	struct proc_dir_entry **entp;
	cnodeid_t cnodeid;
	unsigned long nasid;
	int size;
	char name[NODE_NAME_LEN];

	if (!ia64_platform_is("sn2"))
		return 0;

	size = num_online_nodes() * sizeof(struct proc_dir_entry *);
	proc_entries = kzalloc(size, GFP_KERNEL);
	if (!proc_entries)
		return -ENOMEM;

	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);

	/* Create one directory per online node, each exposing the PROM
	 * "fit" and "version" entries for that node's nasid. */
	entp = proc_entries;
	for_each_online_node(cnodeid) {
		sprintf(name, "node%d", cnodeid);
		*entp = proc_mkdir(name, sgi_prominfo_entry);
		nasid = cnodeid_to_nasid(cnodeid);
		create_proc_read_entry("fit", 0, *entp, read_fit_entry,
				       (void *)nasid);
		create_proc_read_entry("version", 0, *entp,
				       read_version_entry, (void *)nasid);
		entp++;
	}

	return 0;
}
Example #5
static int __init numaq_tsc_disable(void)
{
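	/*
	 * TSCs are not synchronized across quads on NUMAQ, so the TSC is
	 * useless as a time source once more than one node is online.
	 */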
	if (num_online_nodes() > 1) {
		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
		setup_clear_cpu_cap(X86_FEATURE_TSC);
	}
	return 0;
}
Example #6
static int __init numaq_tsc_disable(void)
{
	if (num_online_nodes() > 1) {
		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
		tsc_disable = 1;
	}
	return 0;
}
Example #7
void __cpuinit numaq_tsc_disable(void)
{
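	/* Nothing to do unless the NUMAQ platform was actually detected. */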
	if (!found_numaq)
		return;

	if (num_online_nodes() > 1) {
		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
		setup_clear_cpu_cap(X86_FEATURE_TSC);
	}
}
Example #8
static int __init topology_init(void)
{
	int i;

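	/* Register each online node, every possible cpu and each online
	 * memory block with the driver model so they show up in sysfs. */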
	for (i = 0; i < num_online_nodes(); i++)
		arch_register_node(i);
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_possible(i))
			arch_register_cpu(i);
	for (i = 0; i < num_online_memblks(); i++)
		arch_register_memblk(i);
	return 0;
}
Example #9
File: setup.c  Project: cywzl/spice4xen
void __init build_cnode_tables(void)
{
	int nasid;
	int node;
	lboard_t *brd;

	memset(physical_node_map, -1, sizeof(physical_node_map));
	memset(sn_cnodeid_to_nasid, -1,
			sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));

	/*
	 * First populate the tables with C/M bricks. This ensures that
	 * cnode == node for all C & M bricks.
	 */
	for_each_online_node(node) {
		nasid = pxm_to_nasid(node_to_pxm(node));
		sn_cnodeid_to_nasid[node] = nasid;
		physical_node_map[nasid] = node;
	}

	/*
	 * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
	 * limit on the number of nodes, we can't use the generic node numbers 
	 * for this. Note that num_cnodes is incremented below as TIOs or
	 * headless/memoryless nodes are discovered.
	 */
	num_cnodes = num_online_nodes();

	/* fakeprom does not support klgraph */
	if (IS_RUNNING_ON_FAKE_PROM())
		return;

	/* Find TIOs & headless/memoryless nodes and add them to the tables */
	for_each_online_node(node) {
		kl_config_hdr_t *klgraph_header;
		nasid = cnodeid_to_nasid(node);
		klgraph_header = ia64_sn_get_klconfig_addr(nasid);
		if (klgraph_header == NULL)
			BUG();
		brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
		while (brd) {
			if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
				sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
				physical_node_map[brd->brd_nasid] = num_cnodes++;
			}
			brd = find_lboard_next(brd);
		}
	}
}
Example #10
int __init get_memcfg_from_srat(void)
{
	int i, j, nid;

	if (srat_disabled())
		goto out_fail;

	if (acpi_numa_init() < 0)
		goto out_fail;

	if (num_memory_chunks == 0) {
		printk(KERN_DEBUG
			 "could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk(KERN_DEBUG "pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk(KERN_CONT "%02x ", pxm_bitmap[i]);
	}
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "Number of logical nodes in system = %d\n",
			 num_online_nodes());
	printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
			 num_memory_chunks);

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk(KERN_DEBUG
			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		if (node_read_chunk(chunk->nid, chunk))
			continue;

		memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn,
					     min(chunk->end_pfn, max_pfn));
	}
	/* for out of order entries in SRAT */
	sort_node_map();

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = min(node_end_pfn[nid], max_pfn);

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
			" table\n");
	return 0;
}
Example #11
File: setup.c  Project: cywzl/spice4xen
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __cpuinit sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	cpuid = smp_processor_id();
	if (cpuid == 0 && IS_MEDUSA()) {
		if (ia64_sn_is_fake_prom())
			sn_prom_type = 2;
		else
			sn_prom_type = 1;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	}

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * Don't check status. The SAL call is not supported on all PROMs
	 * but a failure is harmless.
	 */
	(void) ia64_sn_set_cpu_number(cpuid);

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
			break;

	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	sn_nodepda = nodepdaindr[cnode];

	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
		memcpy(sn_cnodeid_to_nasid,
		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses this cpu's
	 * sn_cnodeid_to_nasid table which was just initialized if this
	 * isn't cpu 0.
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
Example #12
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table)
		return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
		    printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
Example #13
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
				&sn_coherency_id, &sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, 
			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
Example #14
/**
 *
 * test_alloc_runtest - Allocate and free a number of pages from a ZONE_NORMAL
 * @params: Parameters read from the proc entry
 * @argc:   Number of parameters actually entered
 * @procentry: Proc buffer to write to
 *
 * If the number of pages requested is 0, pages will be allocated until
 * the pages_high watermark is hit
 * Returns
 * 0  on success
 * -1 on failure
 *
 */
int test_alloc_runtest(int *params, int argc, int procentry)
{
	unsigned long order;		/* Order of pages */
	unsigned long numpages;		/* Number of pages to allocate */
	struct page **pages;		/* Pages that were allocated */
	unsigned long attempts=0;
	unsigned long alloced=0;
	unsigned long nextjiffies = jiffies;
	unsigned long lastjiffies = jiffies;
	unsigned long success=0;
	unsigned long fail=0;
	unsigned long resched_count=0;
	unsigned long aborted=0;
	unsigned long long start_cycles, cycles;
	unsigned long page_dma=0, page_dma32=0, page_normal=0, page_highmem=0, page_easyrclm=0;
	struct zone *zone;
	char finishString[60];
	int timing_pages, pages_required;

	/* Set gfp_flags based on the module parameter */
	if (gfp_highuser) {
#ifdef GFP_RCLMUSER
		vmr_printk("Using highmem with GFP_RCLMUSER\n");
		gfp_flags = GFP_RCLMUSER;
#elif defined(GFP_HIGH_MOVABLE)
		vmr_printk("Using highmem with GFP_HIGH_MOVABLE\n");
		gfp_flags = GFP_HIGH_MOVABLE;
#elif defined(GFP_HIGHUSER_MOVABLE)
		vmr_printk("Using highmem with GFP_HIGHUSER_MOVABLE\n");
		gfp_flags = GFP_HIGHUSER_MOVABLE;
#else
		vmr_printk("Using highmem with GFP_HIGHUSER | __GFP_EASYRCLM\n");
		gfp_flags = GFP_HIGHUSER | __GFP_EASYRCLM;
#endif
	} else {
		vmr_printk("Using lowmem\n");
		gfp_flags |= __GFP_EASYRCLM|__GFP_MOVABLE;
	}
	vmr_printk("__GFP_EASYRCLM is 0x%8X\n", __GFP_EASYRCLM);
	vmr_printk("__GFP_MOVABLE  is 0x%8X\n", __GFP_MOVABLE);
	vmr_printk("gfp_flags       = 0x%8X\n", gfp_flags);
	
	/* Get the parameters */
	order = params[0];
	numpages = params[1];

	/* Make sure a buffer is available */
	if (vmrproc_checkbuffer(testinfo[HIGHALLOC_REPORT])) BUG();
	if (vmrproc_checkbuffer(testinfo[HIGHALLOC_TIMING])) BUG();
	if (vmrproc_checkbuffer(testinfo[HIGHALLOC_BUDDYINFO])) BUG();
	vmrproc_openbuffer(&testinfo[HIGHALLOC_REPORT]);
	vmrproc_openbuffer(&testinfo[HIGHALLOC_TIMING]);
	vmrproc_openbuffer(&testinfo[HIGHALLOC_BUDDYINFO]);

	/* Check parameters.  order and numpages are unsigned, so negative
	 * input wraps to a huge value and is rejected here. */
	if (order >= MAX_ORDER) {
		vmr_printk("Order request of %lu makes no sense\n", order);
		return -1;
	}

	if ((long)numpages < 0) {
		vmr_printk("Number of pages %lu makes no sense\n", numpages);
		return -1;
	}

	/* 
	 * Allocate memory to store pointers to pages.
	 */
	pages = __vmalloc((numpages+1) * sizeof(struct page *),
			GFP_KERNEL|__GFP_HIGHMEM|__GFP_KERNRCLM|__GFP_RECLAIMABLE,
			PAGE_KERNEL);
	if (pages == NULL) {
		printp("Failed to allocate space to store page pointers\n");
		vmrproc_closebuffer(&testinfo[HIGHALLOC_REPORT]);
		vmrproc_closebuffer(&testinfo[HIGHALLOC_TIMING]);
		vmrproc_closebuffer(&testinfo[HIGHALLOC_BUDDYINFO]);
		return 0;
	}

	/* Setup proc buffer for timings */
	timing_pages = testinfo[HIGHALLOC_TIMING].procbuf_size / PAGE_SIZE;
	pages_required = (numpages * 20) / PAGE_SIZE;
	if (pages_required > timing_pages) {
		vmrproc_growbuffer(pages_required - timing_pages, 
					&testinfo[HIGHALLOC_TIMING]);
	}

	/* Setup proc buffer for highorder alloc */
	timing_pages = testinfo[HIGHALLOC_BUDDYINFO].procbuf_size / PAGE_SIZE;
	pages_required = (numpages * ((256 + 30 + 15 * MAX_ORDER) * num_online_nodes())) / PAGE_SIZE;
	if (pages_required > timing_pages) {
		vmrproc_growbuffer(pages_required - timing_pages, 
					&testinfo[HIGHALLOC_BUDDYINFO]);
	}

#if defined(OOM_DISABLE) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19))
	/* Disable OOM Killer */
	vmr_printk("Disabling OOM killer for running process\n");
	oomkilladj = current->oomkilladj;
	current->oomkilladj = OOM_DISABLE;
#endif /* OOM_DISABLE */

	/*
	 * Attempt to allocate the requested number of pages
	 */
	while (attempts++ != numpages) {
		struct page *page;
		if (lastjiffies > jiffies) nextjiffies = jiffies;
		while (jiffies < nextjiffies) check_resched(resched_count);
		nextjiffies = jiffies + ( (HZ * ms_delay)/1000);

		/* Print message if this is taking a long time */
		if (jiffies - lastjiffies > HZ) {
			printk("High order alloc test attempts: %lu (%lu)\n",
					attempts-1, alloced);
		}

		/* Print out a message every so often anyway */
		if (attempts > 1 && (attempts-1) % 10 == 0) {
			printp_entry(HIGHALLOC_TIMING, "\n");
			printk("High order alloc test attempts: %lu (%lu)\n",
					attempts-1, alloced);
		}

		lastjiffies = jiffies;

		start_cycles = read_clockcycles();
		page = alloc_pages(gfp_flags | __GFP_NOWARN, order);
		cycles = read_clockcycles() - start_cycles;

		if (page) {
			printp_entry(HIGHALLOC_TIMING, "%-11llu ", cycles);
			printp_buddyinfo(testinfo, HIGHALLOC_BUDDYINFO, attempts, 1);
			success++;
			pages[alloced++] = page;

			/* Count what zone this is */
			zone = page_zone(page);
			if (zone->name != NULL && !strcmp(zone->name, "EasyRclm")) page_easyrclm++;
			if (zone->name != NULL && !strcmp(zone->name, "Movable")) page_easyrclm++;
			if (zone->name != NULL && !strcmp(zone->name, "HighMem")) page_highmem++;
			if (zone->name != NULL && !strcmp(zone->name, "Normal")) page_normal++;
			if (zone->name != NULL && !strcmp(zone->name, "DMA32")) page_dma32++;
			if (zone->name != NULL && !strcmp(zone->name, "DMA")) page_dma++;

			/* Give up if it takes more than 600 seconds to allocate */
			if (jiffies - lastjiffies > HZ * 600) {
				printk("Took more than 600 seconds to allocate a block, giving up");
				aborted = attempts;
				attempts = numpages;
				break;
			}

		} else {
			printp_entry(HIGHALLOC_TIMING, "-%-10llu ", cycles);
			printp_buddyinfo(testinfo, HIGHALLOC_BUDDYINFO, attempts, 0);
			fail++;

			/* Give up if it takes more than 1200 seconds to fail */
			if (jiffies - lastjiffies > HZ * 1200) {
				printk("Took more than 1200 seconds and still failed to allocate, giving up");
				aborted = attempts;
				attempts = numpages;
				break;
			}
		}
	}

	/* Restore the OOM Killer state saved when it was disabled above */
#if defined(OOM_DISABLE) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19))
	vmr_printk("Re-enabling OOM Killer status\n");
	current->oomkilladj = oomkilladj;
#endif /* OOM_DISABLE */

	vmr_printk("Test completed with %lu allocs, printing results\n", alloced);

	/* Print header */
	printp("Order:                 %lu\n", order);
	printp("Allocation type:       %s\n", gfp_highuser ? "HighMem" : "Normal");
	printp("Attempted allocations: %lu\n", numpages);
	printp("Success allocs:        %lu\n", success);
	printp("Failed allocs:         %lu\n", fail);
	printp("DMA32 zone allocs:       %lu\n", page_dma32);
	printp("DMA zone allocs:       %lu\n", page_dma);
	printp("Normal zone allocs:    %lu\n", page_normal);
	printp("HighMem zone allocs:   %lu\n", page_highmem);
	printp("EasyRclm zone allocs:  %lu\n", page_easyrclm);
	printp("%% Success:            %lu\n", (success * 100) / (unsigned long)numpages);

	/*
	 * Free up the pages
	 */
	vmr_printk("Test complete, freeing %lu pages\n", alloced);
	if (alloced > 0) {
		do {
			alloced--;
			if (pages[alloced] != NULL)
				__free_pages(pages[alloced], order);
		} while (alloced != 0);
		vfree(pages);
	}
	
	if (aborted == 0)
		strcpy(finishString, "Test completed successfully\n");
	else
		sprintf(finishString, "Test aborted after %lu allocations due to delays\n", aborted);
	
	printp("%s", finishString);
	vmr_printk("Test completed, closing buffer\n");

	vmrproc_closebuffer(&testinfo[HIGHALLOC_REPORT]);
	vmrproc_closebuffer(&testinfo[HIGHALLOC_TIMING]);
	vmrproc_closebuffer(&testinfo[HIGHALLOC_BUDDYINFO]);
	vmr_printk("%s", finishString);
	return 0;
}
Example #15
/* Parse the ACPI Static Resource Affinity Table */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
	u8 *start, *end, *p;
	int i, j, nid;

	start = (u8 *)(&(sratp->reserved) + 1);	/* skip header */
	p = start;
	end = (u8 *)sratp + sratp->header.length;

	memset(pxm_bitmap, 0, sizeof(pxm_bitmap));	/* init proximity domain bitmap */
	memset(node_memory_chunk, 0, sizeof(node_memory_chunk));

	num_memory_chunks = 0;
	while (p < end) {
		switch (*p) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY:
			parse_cpu_affinity_structure(p);
			break;
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
			parse_memory_affinity_structure(p);
			break;
		default:
			printk("ACPI 2.0 SRAT: unknown entry skipped: type=0x%02X, len=%d\n", p[0], p[1]);
			break;
		}
		p += p[1];
		if (p[1] == 0) {
			printk("acpi20_parse_srat: Entry length value is zero;"
				" can't parse any further!\n");
			break;
		}
	}

	if (num_memory_chunks == 0) {
		printk("could not finy any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk("pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk("%02X ", pxm_bitmap[i]);
	}
	printk("\n");
	printk("Number of logical nodes in system = %d\n", num_online_nodes());
	printk("Number of memory chunks in system = %d\n", num_memory_chunks);

	for (i = 0; i < MAX_APICID; i++)
		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		node_read_chunk(chunk->nid, chunk);
		add_active_range(chunk->nid, chunk->start_pfn, chunk->end_pfn);
	}
 
	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = node_end_pfn[nid];

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	return 0;
}
Example #16
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table) {
		for (i = 0; i < MAX_NUMNODES; i++)
			for (j = 0; j < MAX_NUMNODES; j++)
				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
							REMOTE_DISTANCE;
		return;
	}

	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
		    printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}