Example #1
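Xen: dom0_max_vcpus() derives dom0's vCPU count from the NUMA nodes requested on the command line, the usable CPUs of those nodes, and the configured and architectural limits.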
unsigned int __init dom0_max_vcpus(void)
{
    unsigned int i, max_vcpus, limit;
    nodeid_t node;

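    /*
     * Translate the requested proximity domains to NUMA nodes and keep
     * only those that are online; fall back to all online nodes.
     */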
    for ( i = 0; i < dom0_nr_pxms; ++i )
        if ( (node = pxm_to_node(dom0_pxms[i])) != NUMA_NO_NODE )
            node_set(node, dom0_nodes);
    nodes_and(dom0_nodes, dom0_nodes, node_online_map);
    if ( nodes_empty(dom0_nodes) )
        dom0_nodes = node_online_map;
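    /* Gather the CPUs of those nodes, restricted to cpupool0's valid CPUs. */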
    for_each_node_mask ( node, dom0_nodes )
        cpumask_or(&dom0_cpus, &dom0_cpus, &node_to_cpumask(node));
    cpumask_and(&dom0_cpus, &dom0_cpus, cpupool0->cpu_valid);
    if ( cpumask_empty(&dom0_cpus) )
        cpumask_copy(&dom0_cpus, cpupool0->cpu_valid);

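    /*
     * Default to one vCPU per usable CPU, then apply the configured
     * minimum/maximum and the PV/PVH architectural limit.
     */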
    max_vcpus = cpumask_weight(&dom0_cpus);
    if ( opt_dom0_max_vcpus_min > max_vcpus )
        max_vcpus = opt_dom0_max_vcpus_min;
    if ( opt_dom0_max_vcpus_max < max_vcpus )
        max_vcpus = opt_dom0_max_vcpus_max;
    limit = dom0_pvh ? HVM_MAX_VCPUS : MAX_VIRT_CPUS;
    if ( max_vcpus > limit )
        max_vcpus = limit;

    return max_vcpus;
}
Example #2
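Linux/ia64 ACPI: acpi_map_iosapic() evaluates a handle's _MAT method, checks that the result is an IOSAPIC MADT entry, and maps the IOSAPIC's GSI base to the NUMA node named by _PXM.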
static acpi_status __devinit
acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_madt_io_sapic *iosapic;
	unsigned int gsi_base;
	int pxm, node;

	/* Only care about objects with a _MAT method that returns an MADT entry */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*iosapic)) {
		kfree(buffer.pointer);
		return AE_OK;
	}

	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;

	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
		kfree(buffer.pointer);
		return AE_OK;
	}

	gsi_base = iosapic->global_irq_base;

	kfree(buffer.pointer);

	/*
	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
	 * us which node to associate this with.
	 */
	pxm = acpi_get_pxm(handle);
	if (pxm < 0)
		return AE_OK;

	node = pxm_to_node(pxm);

	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))
		return AE_OK;

	/* We know a gsi to node mapping! */
	map_iosapic_to_node(gsi_base, node);
	return AE_OK;
}
Example #3
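Linux driver core: node_read_cpumap() implements the per-node sysfs cpumap attribute, printing the CPU mask of a node device.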
static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
{
	struct node *node_dev = to_node(dev);
	cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
	int len;

	/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
	BUILD_BUG_ON(NR_CPUS/4 > PAGE_SIZE/2);

	len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
	len += sprintf(buf + len, "\n");
	return len;
}
Example #4
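Linux driver core: register_node() registers a NUMA node as a sysdev and creates its cpumap and meminfo attribute files.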
/*
 * register_node - Setup a driverfs device for a node.
 * @node - Node device to initialize and register.
 * @num - Node number to use when creating the device.
 * @parent - Parent node device, if any.
 *
 * Initialize and register the node device.
 */
int __init register_node(struct node *node, int num, struct node *parent)
{
	int error;

	node->cpumap = node_to_cpumask(num);
	node->sysdev.id = num;
	node->sysdev.cls = &node_class;
	error = sys_device_register(&node->sysdev);

	if (!error) {
		sysdev_create_file(&node->sysdev, &attr_cpumap);
		sysdev_create_file(&node->sysdev, &attr_meminfo);
	}
	return error;
}
Example #5
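Xen: domain_update_node_affinity() recomputes a domain's node affinity as the set of online nodes whose CPUs intersect the union of its vCPUs' CPU affinities.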
void domain_update_node_affinity(struct domain *d)
{
    cpumask_t cpumask = CPU_MASK_NONE;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    spin_lock(&d->node_affinity_lock);

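    /* Union of the CPU affinity masks of all the domain's vCPUs. */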
    for_each_vcpu ( d, v )
        cpus_or(cpumask, cpumask, v->cpu_affinity);

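    /* A node is in the domain's affinity if any of its CPUs is in the union. */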
    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);
}
Example #6
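A later Xen variant of domain_update_node_affinity(), using dynamically allocated cpumasks and restricting each vCPU's affinity to the CPUs online in the domain's cpupool. The snippet began mid-function; the declarations and allocation failure path at the top are a reconstruction based on the cleanup code visible in the fragment.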
void domain_update_node_affinity(struct domain *d)
{
    cpumask_var_t cpumask;
    cpumask_var_t online_affinity;
    const cpumask_t *online;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    if ( !zalloc_cpumask_var(&cpumask) )
        return;
    if ( !alloc_cpumask_var(&online_affinity) )
    {
        free_cpumask_var(cpumask);
        return;
    }

    online = cpupool_online_cpumask(d->cpupool);

    spin_lock(&d->node_affinity_lock);

    for_each_vcpu ( d, v )
    {
        cpumask_and(online_affinity, v->cpu_affinity, online);
        cpumask_or(cpumask, cpumask, online_affinity);
    }

    for_each_online_node ( node )
        if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);

    free_cpumask_var(online_affinity);
    free_cpumask_var(cpumask);
}
Example #7
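Linux/ia64 (SGI Altix SN2): sn_topology_show() is the seq_file handler that dumps the system topology: partition info, nodes with their NASIDs and distances, per-node CPUs, attached PCI buses, and NUMAlink ports.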
static int sn_topology_show(struct seq_file *s, void *d)
{
	int sz;
	int pt;
	int e = 0;
	int i;
	int j;
	const char *slabname;
	int ordinal;
	cpumask_t cpumask;
	char slice;
	struct cpuinfo_ia64 *c;
	struct sn_hwperf_port_info *ptdata;
	struct sn_hwperf_object_info *p;
	struct sn_hwperf_object_info *obj = d;	/* this object */
	struct sn_hwperf_object_info *objs = s->private; /* all objects */
	int rack, bay, slot, slab;
	u8 shubtype;
	u8 system_size;
	u8 sharing_size;
	u8 partid;
	u8 coher;
	u8 nasid_shift;
	u8 region_size;
	u16 nasid_mask;
	int nasid_msb;
	int pci_bus_ordinal = 0;

	if (obj == objs) {
		seq_printf(s, "# sn_topology version 2\n");
		seq_printf(s, "# objtype ordinal location partition"
			" [attribute value [, ...]]\n");

		if (ia64_sn_get_sn_info(0,
			&shubtype, &nasid_mask, &nasid_shift, &system_size,
			&sharing_size, &partid, &coher, &region_size))
			BUG();
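		/* Find the most significant set bit of the shifted NASID mask. */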
		for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
			if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
				break;
		}
		seq_printf(s, "partition %u %s local "
			"shubtype %s, "
			"nasid_mask 0x%016lx, "
			"nasid_bits %d:%d, "
			"system_size %d, "
			"sharing_size %d, "
			"coherency_domain %d, "
			"region_size %d\n",

			partid, system_utsname.nodename,
			shubtype ? "shub2" : "shub1", 
			(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
			system_size, sharing_size, coher, region_size);
	}

	if (SN_HWPERF_FOREIGN(obj)) {
		/* private in another partition: not interesting */
		return 0;
	}

	for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
		if (obj->name[i] == ' ')
			obj->name[i] = '_';
	}

	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
		obj->sn_hwp_this_part ? "local" : "shared", obj->name);

	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
		seq_putc(s, '\n');
	else {
		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
		for (i=0; i < numionodes; i++) {
			seq_printf(s, i ? ":%d" : ", dist %d",
				node_distance(ordinal, i));
		}
		seq_putc(s, '\n');

		/*
		 * CPUs on this node, if any
		 */
		cpumask = node_to_cpumask(ordinal);
		for_each_online_cpu(i) {
			if (cpu_isset(i, cpumask)) {
				slice = 'a' + cpuid_to_slice(i);
				c = cpu_data(i);
				seq_printf(s, "cpu %d %s%c local"
					" freq %luMHz, arch ia64",
					i, obj->location, slice,
					c->proc_freq / 1000000);
				for_each_online_cpu(j) {
					seq_printf(s, j ? ":%d" : ", dist %d",
						node_distance(
						    cpuid_to_cnodeid(i),
						    cpuid_to_cnodeid(j)));
				}
				seq_putc(s, '\n');
			}
		}

		/*
		 * PCI busses attached to this node, if any
		 */
		if (sn_hwperf_location_to_bpos(obj->location,
			&rack, &bay, &slot, &slab)) {
			/* export pci bus info */
			print_pci_topology(s, obj, &pci_bus_ordinal,
				rack, bay, slot, slab);

		}
	}

	if (obj->ports) {
		/*
		 * numalink ports
		 */
		sz = obj->ports * sizeof(struct sn_hwperf_port_info);
		if ((ptdata = vmalloc(sz)) == NULL)
			return -ENOMEM;
		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
				      SN_HWPERF_ENUM_PORTS, obj->id, sz,
				      (u64) ptdata, 0, 0, NULL);
		if (e != SN_HWPERF_OP_OK) {
			vfree(ptdata);	/* don't leak the port buffer on error */
			return -EINVAL;
		}
		for (ordinal=0, p=objs; p != obj; p++) {
			if (!SN_HWPERF_FOREIGN(p))
				ordinal += p->ports;
		}
		for (pt = 0; pt < obj->ports; pt++) {
			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
				if (ptdata[pt].conn_id == p->id) {
					break;
				}
			}
			seq_printf(s, "numalink %d %s-%d",
			    ordinal+pt, obj->location, ptdata[pt].port);

			if (i >= sn_hwperf_obj_cnt) {
				/* no connection */
				seq_puts(s, " local endpoint disconnected"
					    ", protocol unknown\n");
				continue;
			}

			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
				/* both ends local to this partition */
				seq_puts(s, " local");
			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
				/* both ends of the link in foreign partition */
				seq_puts(s, " foreign");
			else
				/* link straddles a partition */
				seq_puts(s, " shared");

			/*
			 * Unlikely, but strictly should query the LLP config
			 * registers because an NL4R can be configured to run
			 * NL3 protocol, even when not talking to an NL3 router.
			 * Ditto for node-node.
			 */
			seq_printf(s, " endpoint %s-%d, protocol %s\n",
				p->location, ptdata[pt].conn_port,
				(SN_HWPERF_IS_NL3ROUTER(obj) ||
				SN_HWPERF_IS_NL3ROUTER(p)) ?  "LLP3" : "LLP4");
		}
		vfree(ptdata);
	}

	return 0;
}