Example #1
static void
tsunami_update_irq_hw(unsigned long mask)
{
	register tsunami_cchip *cchip = TSUNAMI_cchip;
	unsigned long isa_enable = 1UL << 55;
	register int bcpu = boot_cpuid;

#ifdef CONFIG_SMP
	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
	unsigned long mask0, mask1, mask2, mask3, dummy;

	mask &= ~isa_enable;
	mask0 = mask & cpu_irq_affinity[0];
	mask1 = mask & cpu_irq_affinity[1];
	mask2 = mask & cpu_irq_affinity[2];
	mask3 = mask & cpu_irq_affinity[3];

	if (bcpu == 0) mask0 |= isa_enable;
	else if (bcpu == 1) mask1 |= isa_enable;
	else if (bcpu == 2) mask2 |= isa_enable;
	else mask3 |= isa_enable;

	dim0 = &cchip->dim0.csr;
	dim1 = &cchip->dim1.csr;
	dim2 = &cchip->dim2.csr;
	dim3 = &cchip->dim3.csr;
	if (!cpu_possible(0)) dim0 = &dummy;
	if (!cpu_possible(1)) dim1 = &dummy;
	if (!cpu_possible(2)) dim2 = &dummy;
	if (!cpu_possible(3)) dim3 = &dummy;

	*dim0 = mask0;
	*dim1 = mask1;
	*dim2 = mask2;
	*dim3 = mask3;
	mb();
	*dim0;
	*dim1;
	*dim2;
	*dim3;
#else
	volatile unsigned long *dimB;
	if (bcpu == 0) dimB = &cchip->dim0.csr;
	else if (bcpu == 1) dimB = &cchip->dim1.csr;
	else if (bcpu == 2) dimB = &cchip->dim2.csr;
	else dimB = &cchip->dim3.csr;

	*dimB = mask | isa_enable;
	mb();
	*dimB;
#endif
}
Example #2
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}
}
Example #3
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = dev->priv;
	int i;

	if (!stats) {
		return NULL;
	}

	memset(stats, 0, sizeof(struct net_device_stats));

	for (i=0; i < NR_CPUS; i++) {
		struct net_device_stats *lb_stats;

		if (!cpu_possible(i)) 
			continue;
		lb_stats = &per_cpu(loopback_stats, i);
		stats->rx_bytes   += lb_stats->rx_bytes;
		stats->tx_bytes   += lb_stats->tx_bytes;
		stats->rx_packets += lb_stats->rx_packets;
		stats->tx_packets += lb_stats->tx_packets;
	}
				
	return stats;
}
Example #4
static void vcpu_hotplug(unsigned int cpu)
{
	int err;
	char dir[32], state[32];

	if (!cpu_possible(cpu))
		return;

	sprintf(dir, "cpu/%u", cpu);
	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
	if (err != 1) {
		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
		return;
	}

	if (strcmp(state, "online") == 0) {
		enable_hotplug_cpu(cpu);
	} else if (strcmp(state, "offline") == 0) {
		(void)cpu_down(cpu);
		disable_hotplug_cpu(cpu);
	} else {
		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
		       state, cpu);
	}
}
Example #5
STATIC int
xfs_stats_clear_proc_handler(
	ctl_table	*ctl,
	int		write,
	struct file	*filp,
	void		*buffer,
	size_t		*lenp)
{
	int		c, ret, *valp = ctl->data;
	__uint32_t	vn_active;

	ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);

	if (!ret && write && *valp) {
		printk("XFS Clearing xfsstats\n");
		for (c = 0; c < NR_CPUS; c++) {
			if (!cpu_possible(c)) continue;
			preempt_disable();
			/* save vn_active, it's a universal truth! */
			vn_active = per_cpu(xfsstats, c).vn_active;
			memset(&per_cpu(xfsstats, c), 0,
			       sizeof(struct xfsstats));
			per_cpu(xfsstats, c).vn_active = vn_active;
			preempt_enable();
		}
		xfs_stats_clear = 0;
	}

	return ret;
}
Example #6
static void __init smp_init(void)
{
   unsigned int i;
   unsigned j = 1;

   /* FIXME: This should be done in userspace --RR */
   for (i = 0; i < NR_CPUS; i++) {
      if (num_online_cpus() >= max_cpus)
         break;
      if (cpu_possible(i) && !cpu_online(i)) {
         cpu_up(i);
         j++;
      }
   }

   /* Any cleanup work */
   printk("Brought up %u CPUs\n", j);
   smp_cpus_done(max_cpus);
#if 0
   /* Get other processors into their bootup holding patterns. */

   smp_threads_ready=1;
   smp_commence();
#endif
}
Example #7
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
Example #8
static void vcpu_hotplug(unsigned int cpu)
{
	int err;
	char dir[32], state[32];

	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
		return;

	sprintf(dir, "cpu/%d", cpu);
	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
	if (err != 1) {
		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
		return;
	}

	if (strcmp(state, "online") == 0) {
		cpu_set(cpu, xenbus_allowed_cpumask);
		(void)cpu_up(cpu);
	} else if (strcmp(state, "offline") == 0) {
		cpu_clear(cpu, xenbus_allowed_cpumask);
		(void)cpu_down(cpu);
	} else {
		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
		       state, cpu);
	}
}
Example #9
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;
#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (cpu_online(cpu)) {
		printk(KERN_INFO "[Stability] CPU %d is already online\n", cpu);
		return err;
	}

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
Example #10
static int __init topology_init(void)
{
    int i;

    for (i = 0; i < NR_CPUS; i++)
        if (cpu_possible(i)) arch_register_cpu(i);
    return 0;
}
Example #11
static int __init topology_init(void)
{
	int cpu_id;

	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
		if (cpu_possible(cpu_id))
			register_cpu(&cpu[cpu_id], cpu_id, NULL);

	return 0;
}
Example #12
int __init
smp_get_max_cpus (void)
{
    int i, max_cpus = 0;

    for ( i = 0; i < nr_cpu_ids; i++ )
        if ( cpu_possible(i) )
            max_cpus++;

    return max_cpus;
}
Example #13
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
    unsigned cpu;
    unsigned int i;

    if (skip_ioapic_setup) {
        char *m = (max_cpus == 0) ?
                  "The nosmp parameter is incompatible with Xen; " \
                  "use Xen dom0_max_vcpus=1 parameter" :
                  "The noapic parameter is incompatible with Xen";

        xen_raw_printk(m);
        panic(m);
    }
    xen_init_lock_cpu(0);

    smp_store_cpu_info(0);
    cpu_data(0).x86_max_cores = 1;

    for_each_possible_cpu(i) {
        zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
    }
    set_cpu_sibling_map(0);

    if (xen_smp_intr_init(0))
        BUG();

    if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
        panic("could not allocate xen_cpu_initialized_map\n");

    cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

    /* Restrict the possible_map according to max_cpus. */
    while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
        for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
            continue;
        set_cpu_possible(cpu, false);
    }

    for_each_possible_cpu (cpu) {
        struct task_struct *idle;

        if (cpu == 0)
            continue;

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
            panic("failed fork for CPU %d", cpu);

        set_cpu_present(cpu, true);
    }
}
Example #14
static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
{
	int cpu;

	for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*idx = cpu + 1;
		return &per_cpu(avc_cache_stats, cpu);
	}
	return NULL;
}
Example #15
static int __init topology_init(void)
{
    int i;

    for (i = 0; i < MAX_NUMNODES; i++) {
        if (node_online(i))
            arch_register_node(i);
    }
    for (i = 0; i < NR_CPUS; i++)
        if (cpu_possible(i)) arch_register_cpu(i);
    return 0;
}
Example #16
static int __init topology_init(void)
{
	int i;

	for (i = 0; i < num_online_nodes(); i++)
		arch_register_node(i);
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_possible(i)) arch_register_cpu(i);
	for (i = 0; i < num_online_memblks(); i++)
		arch_register_memblk(i);
	return 0;
}
Example #17
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(ip_conntrack_stat, cpu);
	}

	return NULL;
}
Example #18
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
Example #19
static unsigned long
fold_field(void *mib[], int offt)
{
	unsigned long res = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
	}
	return res;
}
Example #20
static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(snet->stats, cpu);
	}

	return NULL;
}
Example #21
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    if (RT_UNLIKELY(idCpu >= NR_CPUS))
        return false;

# if defined(cpu_possible)
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    return idCpu == RTMpCpuId();
#endif
}
Example #22
static void vcpu_hotplug(unsigned int cpu)
{
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
		return;

	switch (vcpu_online(cpu)) {
	case 1:
		enable_hotplug_cpu(cpu);
		break;
	case 0:
		disable_hotplug_cpu(cpu);
		break;
	default:
		break;
	}
}
Example #23
int irq_select_affinity(unsigned int irq)
{
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
		return 1;

	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
	return 0;
}
Example #24
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(ip_conntrack_stat, cpu);
	}

	return NULL;
}
Example #25
static void kexec_prepare_cpus(void)
{
	int my_cpu, i, notified=-1;

	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	my_cpu = get_cpu();

	/* check the others cpus are now down (via paca hw cpu id == -1) */
	for (i=0; i < NR_CPUS; i++) {
		if (i == my_cpu)
			continue;

		while (paca[i].hw_cpu_id != -1) {
			barrier();
			if (!cpu_possible(i)) {
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" possible, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (!cpu_online(i)) {
				/* Fixme: this can be spinning in
				 * pSeries_secondary_wait with a paca
				 * waiting for it to go online.
				 */
				printk("kexec: cpu %d hw_cpu_id %d is not"
						" online, ignoring\n",
						i, paca[i].hw_cpu_id);
				break;
			}
			if (i != notified) {
				printk( "kexec: waiting for cpu %d (physical"
						" %d) to go down\n",
						i, paca[i].hw_cpu_id);
				notified = i;
			}
		}
	}

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	put_cpu();

	local_irq_disable();
}
Example #26
static void *synproxy_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(snet->stats, cpu);
	}

	return NULL;
}
Example #27
static void vcpu_hotplug(unsigned int cpu)
{
	if (!cpu_possible(cpu))
		return;

	switch (vcpu_online(cpu)) {
	case 1:
		enable_hotplug_cpu(cpu);
		break;
	case 0:
		(void)cpu_down(cpu);
		disable_hotplug_cpu(cpu);
		break;
	default:
		break;
	}
}
Example #28
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
Example #29
int __init ppc_init(void)
{
	int i;

	/* clear the progress line */
	if ( ppc_md.progress ) ppc_md.progress("             ", 0xffff);

	/* register CPU devices */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_possible(i))
			register_cpu(&cpu_devices[i], i, NULL);

	/* call platform init */
	if (ppc_md.init != NULL) {
		ppc_md.init();
	}
	return 0;
}
Example #30
static void crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	msecs = 10000;
	for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}