Example #1
0
/*
 * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pSeries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
	int err = -ENOSPC, len, nthreads, i;
	u32 *intserv;

	/* One interrupt-server number per hardware thread of this cpu node;
	 * absence of the property means there is nothing to add. */
	intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	/* Seed 'tmp' with the low 'nthreads' bits set: a window of the
	 * required width that we will slide across the free slots below. */
	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpu_set(i, tmp);

	lock_cpu_hotplug();

	/* Sanity: every present cpu must also be a possible cpu. */
	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));

	/* Get a bitmap of unoccupied slots. */
	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
	if (cpus_empty(candidate_map)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       cpus_weight(cpu_possible_map));
		goto out_unlock;
	}

	/* Slide the nthreads-wide window in whole-window steps so that
	 * SMT sibling ids stay adjacent (the x^1 == y invariant above). */
	while (!cpus_empty(tmp))
		if (cpus_subset(tmp, candidate_map))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpus_shift_left(tmp, tmp, nthreads);

	if (cpus_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_map for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	/* Claim the chosen slots and record each thread's hardware id. */
	for_each_cpu_mask(cpu, tmp) {
		BUG_ON(cpu_isset(cpu, cpu_present_map));
		cpu_set(cpu, cpu_present_map);
		set_hard_smp_processor_id(cpu, *intserv++);
	}
	/* NOTE(review): function is truncated in this excerpt — the success
	 * path (err = 0), the out_unlock label with unlock_cpu_hotplug(),
	 * and the final return are not visible here. */
Example #2
0
File: xics.c  Project: 3null/fastsocket
/*
 * Pick the hardware interrupt server for 'virq' given the requested
 * affinity 'cpumask'.  Only "one cpu" or "all cpus" delivery is
 * implemented; with strict_check set, an unsatisfiable one-cpu
 * request fails with -1 instead of falling back to a default server.
 */
static int get_irq_server(unsigned int virq, cpumask_t cpumask,
			  unsigned int strict_check)
{
	cpumask_t online_in_mask = CPU_MASK_NONE;
	int cpu;

	if (!distribute_irqs)
		return default_server;

	/* Affinity narrower than "all possible cpus": try one-cpu delivery. */
	if (!cpus_subset(cpu_possible_map, cpumask)) {
		cpus_and(online_in_mask, cpu_online_map, cpumask);

		cpu = first_cpu(online_in_mask);
		if (cpu < NR_CPUS)
			return get_hard_smp_processor_id(cpu);

		/* No online cpu satisfies the mask. */
		if (strict_check)
			return -1;
	}

	/* Fall back to distributed delivery only when every present
	 * cpu is online; otherwise use the default single server. */
	return cpus_equal(cpu_online_map, cpu_present_map)
		? default_distrib_server : default_server;
}
Example #3
0
/* Called from set_cpus_allowed().
 * PRE: current holds task_lock(owner)
 * PRE: owner->thread.perfctr == perfctr
 */
void __vperfctr_set_cpus_allowed(struct task_struct *owner,
				 struct vperfctr *perfctr,
				 cpumask_t new_mask)
{
	/* The new affinity is unsafe unless it stays within the set of
	 * cpus this perfctr is valid on. */
	int unsafe = !cpus_subset(new_mask, perfctr->cpumask);

	atomic_set(&perfctr->bad_cpus_allowed, unsafe);
	if (unsafe)
		printk(KERN_WARNING "perfctr: process %d (comm %s) issued unsafe"
		       " set_cpus_allowed() on process %d (comm %s)\n",
		       current->pid, current->comm, owner->pid, owner->comm);
}
Example #4
0
/* NOTE: appears to be used on ia64 even when !CONFIG_SMP. */
void smp_send_event_check_mask(const cpumask_t *mask)
{
    int self = smp_processor_id();
    int cpu;

    /* Nothing to do when the mask targets no cpu other than ourselves. */
    if (cpus_subset(*mask, *cpumask_of(self)))
        return;

    /* Kick every other cpu named in the mask with a reschedule IPI. */
    for (cpu = 0; cpu < NR_CPUS; ++cpu) {
        if (cpu == self || !cpu_isset(cpu, *mask))
            continue;
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
    }
}