Example #1
/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}

	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}
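The sibling invariant noted at the top of this example (logical ids x and y adjacent, with x ^ 1 == y) is easy to exercise in isolation. A minimal user-space sketch, where smt_sibling() is a hypothetical helper assuming the 2-thread SMT case:

#include <assert.h>
#include <stdio.h>

/* Hypothetical helper: returns the SMT sibling of a logical cpu id,
 * relying on siblings being allocated as adjacent even/odd pairs. */
static unsigned int smt_sibling(unsigned int cpu)
{
	return cpu ^ 1;
}

int main(void)
{
	/* cpus 6 and 7 form one SMT pair: each is the other's sibling */
	assert(smt_sibling(6) == 7);
	assert(smt_sibling(7) == 6);
	printf("sibling of cpu 6 is cpu %u\n", smt_sibling(6));
	return 0;
}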
Example #2
/*
 * Initializes PIC ITE entries (PRM 9.5.6.26).
 * XLP restricts CPU affinity to 8 groups. Though configurable, they are
 * programmed to the following patterns:
 * 0 => only cpu0 of the node
 * 1 => all local threads in the node; mask = 0xffffffff on the node
 * 2 => cpu0-15 of the node; mask = 0x0000ffff & online_cpu_mask on the node
 * 3 => cpu16-31 of the node; mask = 0xffff0000 & online_cpu_mask on the node
 * 4 => all cpus on all nodes, i.e.
 *      mask = 0xffffffff_ffffffff_ffffffff_ffffffff & physical online cpu map
 * 5 => cpu0-7 of the node; mask = 0x000000ff
 * 6 => cpu4-7 of the node; mask = 0x000000f0
 * 7 => cpu0-3 of the node; mask = 0x0000000f
 * These are programmer-defined groups and can be changed as warranted;
 * the value actually programmed also takes cpu_online_mask into account.
 *
 * One major issue needs addressing when running in multi-node mode: the
 * number of nodes must be determined and programmed correctly. If a bit
 * in an ITE is set without the corresponding physical thread being
 * present, the system hangs once an interrupt is dispatched to that CPU
 * under the global scheme, so this scenario must be avoided; that is
 * why phys_cpu_present_map is used.
 *
 * This function simply initializes the xlp_ites entries with the
 * proposed cpumasks.
 */
static void xlp_ites_init(void)
{
    u64 bm = 0x1;
    u8 node;
    struct cpumask m;

    cpumask_clear(&m);
    for_each_online_node(node) {
        /* Simply set the static pattern in all */

        /* 0 => only cpu0 of node `node` */
        bm = 1;
        u32_to_cpumask(&xlp_ites[node][0], bm);
        cpumask_shift_left(&xlp_ites[node][0], &xlp_ites[node][0],
                           NLM_MAX_CPU_PER_NODE * node);

        /* 1 => all cpus of node `node` */
        bm = 0xffffffff;
        u32_to_cpumask(&xlp_ites[node][1], bm);
        cpumask_shift_left(&xlp_ites[node][1], &xlp_ites[node][1],
                           NLM_MAX_CPU_PER_NODE * node);
        cpumask_or(&m, &m, &xlp_ites[node][1]);

        /* 2 => cpu0-15 of node `node` */
        bm = 0x0000ffff;
        u32_to_cpumask(&xlp_ites[node][2], bm);
        cpumask_shift_left(&xlp_ites[node][2], &xlp_ites[node][2],
                           NLM_MAX_CPU_PER_NODE * node);

        /* 3 => cpu16-31 of node `node` */
        bm = 0xffff0000;
        u32_to_cpumask(&xlp_ites[node][3], bm);
        cpumask_shift_left(&xlp_ites[node][3], &xlp_ites[node][3],
                           NLM_MAX_CPU_PER_NODE * node);

        /* 5 => cpu0-7 of node `node` */
        bm = 0x000000ff;
        u32_to_cpumask(&xlp_ites[node][5], bm);
        cpumask_shift_left(&xlp_ites[node][5], &xlp_ites[node][5],
                           NLM_MAX_CPU_PER_NODE * node);

        /* 6 => cpu4-7 of node `node` */
        bm = 0x000000f0;
        u32_to_cpumask(&xlp_ites[node][6], bm);
        cpumask_shift_left(&xlp_ites[node][6], &xlp_ites[node][6],
                           NLM_MAX_CPU_PER_NODE * node);

        /* 7 => cpu0-3 of node `node` */
        bm = 0x0000000f;
        u32_to_cpumask(&xlp_ites[node][7], bm);
        cpumask_shift_left(&xlp_ites[node][7], &xlp_ites[node][7],
                           NLM_MAX_CPU_PER_NODE * node);
    }

    /* 4 => all online cpus across all nodes */
    for_each_online_node(node)
        cpumask_copy(&xlp_ites[node][4], &m);
//	dump_all_ites();
}
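u32_to_cpumask() above is a helper local to this driver and is not shown; the pattern it builds is just a 32-bit group mask that the following cpumask_shift_left() slides into the owning node's slice of the mask. A standalone sketch of that placement on a plain 64-bit word (two 32-cpu nodes assumed; node_pattern() is illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define NLM_MAX_CPU_PER_NODE 32	/* threads per XLP node */

/* Slide a 32-bit group pattern into node `node`'s bit range,
 * mimicking u32_to_cpumask() followed by cpumask_shift_left(). */
static uint64_t node_pattern(uint32_t bm, unsigned int node)
{
	return (uint64_t)bm << (NLM_MAX_CPU_PER_NODE * node);
}

int main(void)
{
	/* group 2 (cpu0-15) of node 1 occupies bits 32-47 */
	printf("node 1, group 2: 0x%016llx\n",
	       (unsigned long long)node_pattern(0x0000ffff, 1));
	return 0;
}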
Example #3
/*
 * On return, the cpumask is altered to indicate which CPUs changed
 * state: CPUs whose state changed are set in the mask, CPUs whose
 * state is unchanged are cleared.
 */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
					__func__,
					((state == UP) ? "up" : "down"),
					cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/*
				 * This cpu and all later ones in the
				 * mask were left unchanged; shift them
				 * out through the top of the mask and
				 * back so that only the cpus actually
				 * brought up stay set, then return.
				 */
				cpumask_shift_left(cpus, cpus,
						   nr_cpu_ids - cpu);
				cpumask_shift_right(cpus, cpus,
						    nr_cpu_ids - cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
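The trimming done on the UP-failure path is equivalent to masking off everything at and above the failed cpu, leaving only the cpus whose state actually changed. A toy version of that contract on a 64-bit word (trim_above() is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Keep only the bits below `cpu` set: after a failed bring-up at
 * `cpu`, those are the cpus whose state actually changed. */
static uint64_t trim_above(uint64_t mask, unsigned int cpu)
{
	return mask & ((UINT64_C(1) << cpu) - 1);	/* cpu < 64 assumed */
}

int main(void)
{
	uint64_t req = 0xf;	/* asked to online cpus {0,1,2,3} */

	/* cpu 2 failed: only cpus {0,1} changed, so 0x3 remains */
	printf("changed: 0x%llx\n",
	       (unsigned long long)trim_above(req, 2));
	return 0;
}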