/*
 * Boot-time sysfs registration: NUMA nodes, hotplug notifier, and one
 * device per possible CPU (plus its physical_id attribute and the
 * online-CPU attribute group).
 */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *cpudev = &per_cpu(cpu_devices, cpu);

		/* Platforms providing cpu_die() support CPU hot-unplug. */
		if (ppc_md.cpu_die)
			cpudev->hotpluggable = 1;

		if (cpu_online(cpu) || cpudev->hotpluggable) {
			register_cpu(cpudev, cpu);
			device_create_file(&cpudev->dev,
					   &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif
	return 0;
}
/*
 * Boot-time sysfs registration for CPUs and NUMA nodes: register node
 * devices, install the hotplug notifier, then register a device (and
 * the physical_id attribute) for each possible CPU, plus the online
 * attribute group for CPUs that are already up.
 */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		/* Offline-but-hotpluggable CPUs still get a device node. */
		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);
			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */
	return 0;
}
/*
 * Boot-time sysfs registration (legacy sysdev era): register nodes,
 * install the hotplug notifier, then register each possible CPU.
 * no_control == 1 marks a CPU that cannot be hotplugged.
 */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		/* Online CPUs, and offline-but-controllable ones. */
		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu);
			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
	return 0;
}
/*
 * Register NUMA node and present-CPU devices with the driver core at
 * boot; on UP/NUMA builds also link the boot CPU under every node.
 */
static int __init topology_init(void)
{
	int i, err;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* Make each online node visible in sysfs first. */
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		struct cpu *cpudev = &per_cpu(cpu_devices, i);

		/* All CPUs are advertised as hotpluggable here. */
		cpudev->hotpluggable = 1;

		err = register_cpu(cpudev, i);
		if (unlikely(err))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, err);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/* UP: associate the single CPU with the remaining nodes too. */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif
	return 0;
}
/*
 * Register NUMA node and present-CPU devices with the driver core at
 * boot; on UP/NUMA builds also link the boot CPU under every node.
 *
 * Fix: use the standard C99 __func__ instead of the deprecated
 * GCC-specific __FUNCTION__ (kernel coding style / checkpatch).
 */
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* Make each online node visible in sysfs first. */
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		ret = register_cpu(&per_cpu(cpu_devices, i), i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node.  Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif
	return 0;
}
/* Make every present CPU visible to the driver core at boot. */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_devices, cpu), cpu);

	return 0;
}
/* Register the per-CPU topology device for each present CPU. */
static int __init topology_init(void)
{
	int i;

	for_each_present_cpu(i)
		register_cpu(&per_cpu(cpu_topology, i), i);

	return 0;
}
/* Register a device for each present CPU from the static array. */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&cpu_devices[cpu], cpu);

	return 0;
}
/* Register a device for every possible CPU at boot. */
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i)
		register_cpu(&cpu[i], i);

	return 0;
}
/*
 * Register a device for every possible CPU at boot.
 *
 * Fix: the original returned from inside the loop, so only the first
 * possible CPU was ever registered (and its status became the
 * initcall's return value).  Register all CPUs, warn on individual
 * failures, and keep the initcall succeeding as the sibling
 * implementations do.
 */
static int __init topology_init(void)
{
	int i, ret;

	for_each_possible_cpu(i) {
		ret = register_cpu(&cpu_devices[i], i);
		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       i, ret);
	}
	return 0;
}
/*
 * Register a device for every possible CPU (old three-argument
 * register_cpu() API; no parent node, hence NULL).
 */
static int __init topology_init(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_possible(i))
			register_cpu(&cpu[i], i, NULL);
	}
	return 0;
}
int arch_register_cpu(int num) { struct node *parent = NULL; #ifdef CONFIG_NUMA int node = cpu_to_node(num); if (node_online(node)) parent = &node_devices[node].node; #endif /* CONFIG_NUMA */ return register_cpu(&cpu_devices[num].cpu, num, parent); }
/*
 * Register every possible CPU; all CPUs except the boot CPU (0) are
 * marked hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_data, cpu);

		/* CPU0 stays online; everything else may be unplugged. */
		c->hotpluggable = (cpu != 0);
		register_cpu(c, cpu);
	}
	return 0;
}
/*
 * Register CPU @num with the driver core (ia64).  With ACPI, a CPU is
 * hotpluggable unless it is the CPEI target and CPEI cannot be moved
 * to another CPU; the CPU is also mapped to its NUMA node first.
 */
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
	/*
	 * If CPEI can be re-targetted or if this is not
	 * CPEI target, then it is hotpluggable
	 */
	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.hotpluggable = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
#endif
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
/*
 * Register CPU @num with the driver core.  The boot CPU gets no
 * hotplug control file.
 */
int arch_register_cpu(int num)
{
	/*
	 * CPU0 cannot be offlined due to several restrictions and
	 * assumptions in the kernel.  Setting no_control means no
	 * control file is created, so nobody can attempt to offline
	 * the BSP.
	 */
	if (num == 0)
		cpu_devices[num].cpu.no_control = 1;

	return register_cpu(&cpu_devices[num].cpu, num);
}
/*
 * Register CPU @num with the driver core (ia64, no_control era).
 * The CPEI target CPU is made non-removable when CPEI cannot be
 * re-targetted elsewhere; the CPU is also mapped to its NUMA node.
 */
int arch_register_cpu(int num)
{
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
	/*
	 * If CPEI cannot be re-targetted, and this is
	 * CPEI target, then dont create the control file
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
#endif
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
/*
 * Register each CPU with the driver core at boot, warning (but not
 * failing the initcall) when an individual registration fails.
 */
static int __init topology_init(void)
{
	int i;
	int err;

	for_each_cpu(i) {
		err = register_cpu(&per_cpu(cpu_devices, i), i, NULL);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", i, err);
	}
	return 0;
}
/*
 * Register each present CPU with the driver core at boot, warning
 * (but not failing the initcall) when a registration fails.
 */
static int __init topology_init(void)
{
	int cpu, err;

	for_each_present_cpu(cpu) {
		err = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, err);
	}
	return 0;
}
/*
 * Register CPU @num with the driver core.  Only non-boot CPUs get a
 * hotplug control file, and only when hotplug has not been disabled
 * by a PCI quirk.
 */
int arch_register_cpu(int num)
{
	/*
	 * CPU0 cannot be offlined due to several restrictions and
	 * assumptions in the kernel, so the BSP never gets a control
	 * file.
	 *
	 * Certain PCI quirks also require hotplug control to be
	 * disabled for all CPUs (enable_cpu_hotplug == 0).
	 */
	if (num != 0 && enable_cpu_hotplug)
		cpu_devices[num].cpu.hotpluggable = 1;

	return register_cpu(&cpu_devices[num].cpu, num);
}
/*
 * Register NUMA node devices and then a device per present CPU,
 * warning (but not failing the initcall) on individual failures.
 */
static int __init topology_init(void)
{
	int cpu, err;

#ifdef CONFIG_NUMA
	for_each_online_node(cpu)
		register_one_node(cpu);
#endif /* CONFIG_NUMA */

	for_each_present_cpu(cpu) {
		err = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, err);
	}
	return 0;
}
/*
 * Late ppc boot setup: clear the boot progress line, register a
 * device for every possible CPU, then run the platform's init hook.
 */
int __init ppc_init(void)
{
	int cpu;

	/* clear the progress line */
	if (ppc_md.progress)
		ppc_md.progress("             ", 0xffff);

	/* register CPU devices */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_possible(cpu))
			register_cpu(&cpu_devices[cpu], cpu, NULL);
	}

	/* call platform init */
	if (ppc_md.init)
		ppc_md.init();

	return 0;
}
/*
 * Register each present CPU with the driver core at boot, warning
 * (but not failing the initcall) when a registration fails.
 */
static int __init topology_init(void)
{
	int cpu, err;

	for_each_present_cpu(cpu) {
		/*
		 * register_cpu takes a per_cpu pointer and
		 * just points it at another per_cpu struct...
		 */
		err = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, err);
	}
	return 0;
}
/*
 * Register CPU @num with the driver core (ia64, sysfs_nodes era),
 * parented under its NUMA node.  The CPEI target CPU gets no hotplug
 * control file when CPEI cannot be moved to another CPU.
 */
int arch_register_cpu(int num)
{
	struct node *parent = NULL;

#ifdef CONFIG_NUMA
	parent = &sysfs_nodes[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ACPI_BOOT
	/*
	 * If CPEI cannot be re-targetted, and this is
	 * CPEI target, then dont create the control file
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#endif

	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}
/*
 * Boot-time sysfs registration (sysdev era, three-argument
 * register_cpu()): register nodes, install the hotplug notifier, then
 * register each CPU under its NUMA node when one is known.
 */
static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
		/* The node to which a cpu belongs can't be known
		 * until the cpu is made present.
		 */
		parent = NULL;
		if (cpu_present(cpu))
			parent = &node_devices[cpu_to_node(cpu)];
#endif
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		/* Online CPUs, and offline-but-controllable ones. */
		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu, parent);
			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
	return 0;
}
/*
 * Boot-time sysfs registration (sparc): nodes, MMU statistics check,
 * hotplug notifier, then a device per possible CPU with the online
 * attribute group added for CPUs already up.
 */
static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	check_mmu_stats();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *cpudev = &per_cpu(cpu_devices, cpu);

		register_cpu(cpudev, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}
/*
 * Late ppc boot setup: clear the boot progress line, register every
 * possible CPU (all marked hotpluggable), then run the platform's
 * init hook.
 */
int __init ppc_init(void)
{
	int i;

	/* clear the progress line */
	if (ppc_md.progress)
		ppc_md.progress("             ", 0xffff);

	/* register CPU devices */
	for_each_possible_cpu(i) {
		struct cpu *cpudev = &per_cpu(cpu_devices, i);

		cpudev->hotpluggable = 1;
		register_cpu(cpudev, i);
	}

	/* call platform init */
	if (ppc_md.init)
		ppc_md.init();

	return 0;
}
/*
 * Register NUMA node devices and then one hotpluggable device per
 * present CPU, warning (but not failing the initcall) on individual
 * failures.
 */
static int __init topology_init(void)
{
	int cpu, err;

#ifdef CONFIG_NUMA
	for_each_online_node(cpu)
		register_one_node(cpu);
#endif

	for_each_present_cpu(cpu) {
		struct cpu *cpudev = &per_cpu(cpu_devices, cpu);

		cpudev->hotpluggable = 1;
		err = register_cpu(cpudev, cpu);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, err);
	}
	return 0;
}
/*
 * Register a sysdev per possible CPU and, when performance counters
 * are configured, expose their control/readout attribute files.
 */
static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpudev = &per_cpu(cpu_devices, i);

		register_cpu(cpudev, i);

#ifdef CONFIG_PERFORMANCE_COUNTERS
		/* Per-CPU performance-counter attribute files. */
		sysdev_create_file(&cpudev->sysdev, &attr_pc0event);
		sysdev_create_file(&cpudev->sysdev, &attr_pc0count);
		sysdev_create_file(&cpudev->sysdev, &attr_pc1event);
		sysdev_create_file(&cpudev->sysdev, &attr_pc1count);
		sysdev_create_file(&cpudev->sysdev, &attr_pccycles);
		sysdev_create_file(&cpudev->sysdev, &attr_pcenable);
#endif
	}

	return 0;
}
/* Hand CPU @num's embedded device to the driver core. */
static int __init arch_register_cpu(int num)
{
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
/* Hand CPU @num's per-CPU embedded device to the driver core. */
static int __init arch_register_cpu(int num)
{
	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
}