/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");

	alloc_bootmem_cpumask_var(&of_spin_mask);

	/*
	 * Mark threads which are still spinning in hold loops
	 *
	 * We know prom_init will not have started them if RTAS supports
	 * query-cpu-stopped-state.
	 */
	if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
		if (cpu_has_feature(CPU_FTR_SMT)) {
			for_each_present_cpu(i) {
				if (cpu_thread_in_core(i) == 0)
					cpumask_set_cpu(i, of_spin_mask);
			}
		} else
			cpumask_copy(of_spin_mask, cpu_present_mask);

		cpumask_clear_cpu(boot_cpuid, of_spin_mask);
	}
static void __init init_irq_default_affinity(void)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
#if defined(CONFIG_MIPS_BRCM)
	cpumask_set_cpu(0, (cpumask_t *)&irq_default_affinity);
#else
	cpumask_setall(irq_default_affinity);
#endif
}
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
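In mainline kernels a parser like this is wired to a boot parameter with __setup(); the sketch below assumes the usual "irqaffinity=" parameter name, so treat that string as an assumption rather than a guarantee for the tree these snippets come from.

/*
 * Assumed registration: ties the parser above to the "irqaffinity="
 * kernel command line option, e.g. booting with irqaffinity=0-3
 * would restrict the default IRQ affinity to CPUs 0-3.
 */
__setup("irqaffinity=", irq_affinity_setup);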
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
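Once the map is allocated, cpumask_of_node() becomes safe to call; the helper below is a hypothetical illustration of that (the function name is invented, not taken from the kernel).

/*
 * Hypothetical caller, for illustration only: after
 * setup_node_to_cpumask_map() has run, per-node masks can be walked
 * with the ordinary cpumask iterators.
 */
static void __init dump_node_cpus(void)
{
	unsigned int node, cpu;

	for_each_node_mask(node, node_possible_map)
		for_each_cpu(cpu, cpumask_of_node(node))
			pr_debug("node %u: cpu %u\n", node, cpu);
}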
/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");

	alloc_bootmem_cpumask_var(&of_spin_mask);

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) {
			if (cpu_thread_in_core(i) == 0)
				cpumask_set_cpu(i, of_spin_mask);
		}
	} else {
		cpumask_copy(of_spin_mask, cpu_present_mask);
	}

	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	int cpu;

	alloc_bootmem_cpumask_var(&nohz_full_mask);
	if (cpulist_parse(str, nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		return 1;
	}

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, nohz_full_mask);
	}
	have_nohz_full_mask = true;

	return 1;
}
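This parser, too, is normally registered through __setup(); the parameter name below is inferred from the mask it fills and should be treated as an assumption.

/*
 * Assumed registration for the parser above. Example: booting with
 * nohz_full=1-7 requests full dynticks on CPUs 1-7, while the boot CPU
 * is cleared from the mask so it can keep doing timekeeping.
 */
__setup("nohz_full=", tick_nohz_full_setup);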
static void __init init_irq_default_affinity(void)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpumask_setall(irq_default_affinity);
}
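All of the call sites above share one shape: allocate the mask early with alloc_bootmem_cpumask_var(), then fill it with the normal cpumask helpers. The sketch below restates that pattern with invented identifiers, purely for illustration.

/*
 * Illustrative only; the mask and function names are hypothetical.
 * alloc_bootmem_cpumask_var() is the early-boot allocator for a
 * cpumask_var_t, after which the usual cpumask operations apply.
 */
static cpumask_var_t example_boot_mask;

static void __init example_boot_mask_init(void)
{
	alloc_bootmem_cpumask_var(&example_boot_mask);

	/* Start from the present CPUs and always include the boot CPU. */
	cpumask_copy(example_boot_mask, cpu_present_mask);
	cpumask_set_cpu(smp_processor_id(), example_boot_mask);
}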