/* Early cpumask setup - runs on TP0 */
static void brcmstb_smp_setup(void)
{
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	set_cpu_possible(0, 1);
	set_cpu_present(0, 1);

	__cpu_number_map[1] = 1;
	__cpu_logical_map[1] = 1;
	set_cpu_possible(1, 1);
	set_cpu_present(1, 1);

#if defined(CONFIG_BMIPS4380)
	/* NBK and weak order flags */
	set_c0_brcm_config_0(0x30000);

	/*
	 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other TP
	 * MIPS interrupt 2 (HW INT 0) is the TP0 L1 controller output
	 * MIPS interrupt 3 (HW INT 1) is the TP1 L1 controller output
	 */
	change_c0_brcm_cmt_intr(0xf8018000, (0x02 << 27) | (0x03 << 15));
#elif defined(CONFIG_BMIPS5000)
	/* enable raceless SW interrupts */
	set_c0_brcm_config(0x03 << 22);

	/* clear any pending SW interrupts */
	write_c0_brcm_action(0x2000 | (0 << 9) | (0 << 8));
	write_c0_brcm_action(0x2000 | (0 << 9) | (1 << 8));
	write_c0_brcm_action(0x2000 | (1 << 9) | (0 << 8));
	write_c0_brcm_action(0x2000 | (1 << 9) | (1 << 8));
#endif
}
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * This is akin to using 'nr_cpus' on the Linux command line.
	 * That is OK because with 'dom0_max_vcpus=X' we can only have
	 * up to X VCPUs, while nr_cpu_ids may be greater than X. This
	 * is normally not a problem, except when CPU hotplug is
	 * involved: there might then be more than X CPUs in the guest,
	 * which will not work, as there is no hypercall to expand the
	 * maximum number of VCPUs of an already-running guest. So cap
	 * it at X.
	 */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
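The capping logic above is easy to exercise outside a Xen guest. The following is a minimal userspace sketch, not kernel code: is_vcpu_up() and FAKE_NR_CPU_IDS are invented stand-ins for HYPERVISOR_vcpu_op(VCPUOP_is_up) and nr_cpu_ids, chosen only to show how subtract shrinks the usable id range.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_NR_CPU_IDS 8

/* Invented stand-in for HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL):
 * pretend the toolstack only brought up the first 4 VCPUs. */
static bool is_vcpu_up(int cpu)
{
	return cpu < 4;
}

int main(void)
{
	unsigned int nr_cpu_ids = FAKE_NR_CPU_IDS;
	unsigned int subtract = 0;
	int i;

	for (i = 0; i < FAKE_NR_CPU_IDS; i++) {
		if (is_vcpu_up(i))
			printf("cpu %d possible\n", i);
		else
			subtract++;	/* cleared from possible/present */
	}

	/* Mirror of the CONFIG_HOTPLUG_CPU branch: cap the id space,
	 * much like booting with nr_cpus=4. */
	if (subtract)
		nr_cpu_ids -= subtract;

	printf("nr_cpu_ids capped to %u\n", nr_cpu_ids);
	return 0;
}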
static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
{
	static int tot_cpus_found = 0;
	lboard_t *brd;
	klcpu_t *acpu;
	int cpus_found = 0;
	cpuid_t cpuid;

	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);

	do {
		acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
		while (acpu) {
			cpuid = acpu->cpu_info.virtid;
			/* cnode is not valid for completely disabled brds */
			if (get_actual_nasid(brd) == brd->brd_nasid)
				cpuid_to_compact_node[cpuid] = cnode;
			if (cpuid > highest)
				highest = cpuid;
			/* Only let it join in if it's marked enabled */
			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
			    (tot_cpus_found != NR_CPUS)) {
				set_cpu_possible(cpuid, true);
				alloc_cpupda(cpuid, tot_cpus_found);
				cpus_found++;
				tot_cpus_found++;
			}
			acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
							 KLSTRUCT_CPU);
		}
		brd = KLCF_NEXT(brd);
		if (!brd)
			break;
		brd = find_lboard(brd, KLTYPE_IP27);
	} while (brd);

	return highest;
}
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) &&
		    cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip. We
	 * will assign CPU numbers for possible cores as well. Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
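To make the two-pass numbering above easier to follow, here is a hypothetical userspace sketch: core_present(), BOOT_CORE and FAKE_NR_CPUS are invented stand-ins for cvmx_coremask_is_core_set(), cvmx_get_core_num() and NR_CPUS. It only demonstrates that present cores receive the lowest logical numbers and hotpluggable cores are appended after them.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_NR_CPUS 8
#define BOOT_CORE 2	/* hypothetical hardware id of the boot core */

/* Invented stand-in for cvmx_coremask_is_core_set(): pretend the
 * bootloader started cores 0, 2 and 5. */
static bool core_present(int id)
{
	return id == 0 || id == 2 || id == 5;
}

int main(void)
{
	int number_map[FAKE_NR_CPUS];	/* hardware id -> logical cpu */
	int logical_map[FAKE_NR_CPUS];	/* logical cpu -> hardware id */
	int cpus = 1, id, cpu;

	/* The boot core is always logical CPU 0. */
	number_map[BOOT_CORE] = 0;
	logical_map[0] = BOOT_CORE;

	/* Pass 1: present cores get the lowest CPU numbers. */
	for (id = 0; id < FAKE_NR_CPUS; id++) {
		if (id != BOOT_CORE && core_present(id)) {
			number_map[id] = cpus;
			logical_map[cpus] = id;
			cpus++;
		}
	}

	/* Pass 2 (the CONFIG_HOTPLUG_CPU case): remaining cores on the
	 * chip become possible-but-not-present, numbered afterwards. */
	for (id = 0; id < FAKE_NR_CPUS; id++) {
		if (!core_present(id)) {
			number_map[id] = cpus;
			logical_map[cpus] = id;
			cpus++;
		}
	}

	for (cpu = 0; cpu < cpus; cpu++)
		printf("logical cpu %d -> core %d (map back: %d)\n",
		       cpu, logical_map[cpu], number_map[logical_map[cpu]]);
	return 0;
}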
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "S5PV310: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "S5PV310: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
// ARM10C 20140215
static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	// scu_base: 0xF8800000
	unsigned int i, ncores;

	// read_cpuid_part_number(): 0x0000C0F0, ARM_CPU_PART_CORTEX_A9: 0xC090
	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU nodes are passed through DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;	// returns here on non-Cortex-A9 parts

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/*
	 * Currently we can't call ioremap here because
	 * SoC detection won't work until after init_early.
	 */
	scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
	BUG_ON(!scu_base);

	ncores = scu_get_core_count(scu_base);

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "OMAP4: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/*
	 * NoteXXX: CPU 1 may not be reset cleanly after power-on.
	 * Need to apply a S/W workaround to manually reset it first.
	 */
	u32 val;

	val = *(volatile u32 *)0xF0009010;
	mt65xx_reg_sync_writel(val | 0x2, 0xF0009010);
	udelay(10);
	mt65xx_reg_sync_writel(val & ~0x2, 0xF0009010);
	udelay(10);

	ncores = scu_get_core_count((void *)SCU_BASE);

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING "SCU core count (%d) > NR_CPUS (%d)\n",
		       ncores, NR_CPUS);
		printk(KERN_WARNING "set nr_cores to NR_CPUS (%d)\n", NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(irq_raise_softirq);
}
static void ct_ca9x4_init_cpu_map(void)
{
	int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);
}
void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (soc_is_exynos4210() || soc_is_exynos4212() ||
	    soc_is_exynos5250())
		ncores = 2;
	else if (soc_is_exynos4412() || soc_is_exynos5410())
		ncores = 4;
	else
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/* Parse GIC cpu interface entries in MADT for SMP init */
void __init acpi_init_cpus(void)
{
	int count, i;

	/*
	 * do a partial walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (!count) {
		pr_err("No GIC CPU interface entries present\n");
		return;
	} else if (count < 0) {
		pr_err("Error parsing GIC CPU interface entry\n");
		return;
	}

	if (!bootcpu_valid) {
		pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	for (i = 0; i < enabled_cpus; i++)
		set_cpu_possible(i, true);

	/* Make boot-up look pretty */
	pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
}
static void j2_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	unsigned i, max = 1;

	np = of_find_compatible_node(NULL, NULL, "jcore,ipi-controller");
	if (!np)
		goto out;

	j2_ipi_irq = irq_of_parse_and_map(np, 0);
	j2_ipi_trigger = of_iomap(np, 0);
	if (!j2_ipi_irq || !j2_ipi_trigger)
		goto out;

	np = of_find_compatible_node(NULL, NULL, "jcore,cpuid-mmio");
	if (!np)
		goto out;

	sh2_cpuid_addr = of_iomap(np, 0);
	if (!sh2_cpuid_addr)
		goto out;

	if (request_irq(j2_ipi_irq, j2_ipi_interrupt_handler, IRQF_PERCPU,
			"ipi", (void *)j2_ipi_interrupt_handler) != 0)
		goto out;

	max = max_cpus;
out:
	/*
	 * Disable any cpus past max_cpus, or all secondaries if we didn't
	 * get the necessary resources to support SMP.
	 */
	for (i = max; i < NR_CPUS; i++) {
		set_cpu_possible(i, false);
		set_cpu_present(i, false);
	}
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
#ifdef NOT_FOR_L4
	void __iomem *scu_base = scu_base_addr();
#endif
	unsigned int i, ncores;

#ifdef NOT_FOR_L4
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
#else
	ncores = l4x_nr_cpus;
#endif

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(l4x_raise_softirq);
}
static void __init ux500_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	static void __iomem *scu_base;
	unsigned int ncores;
	int i;

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (!np) {
		pr_err("No SCU base address\n");
		return;
	}
	scu_base = of_iomap(np, 0);
	of_node_put(np);
	if (!scu_base) {
		pr_err("No SCU remap\n");
		return;
	}

	scu_enable(scu_base);
	ncores = scu_get_core_count(scu_base);
	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
	iounmap(scu_base);
}
void mcpm_smp_init_cpus(void)
{
	unsigned int i, ncores;

	ncores = MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER;
	printk("[%s] ncores=%d\n", __func__, ncores);

	/*
	 * sanity check; nr_cpu_ids is configured from CONFIG_NR_CPUS
	 */
	if (ncores > nr_cpu_ids) {
		printk("SMP: %u cores greater than maximum (%u), clipping\n",
		       ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++) {
		set_cpu_possible(i, true);
	}

#ifdef CONFIG_ARCH_SUN9IW1
	/* FIXME: init sun9i mcpm cpu map */
	sun9i_mcpm_cpu_map_init();
#endif
#ifdef CONFIG_ARCH_SUN8IW6
	/* FIXME: init sun8i mcpm cpu map */
	sun8i_mcpm_cpu_map_init();
#endif

#if defined(CONFIG_ARM_SUNXI_CPUIDLE)
	set_smp_cross_call(sunxi_raise_softirq);
#else
	set_smp_cross_call(gic_raise_softirq);
#endif
}
/*
 * Setup the set of possible CPUs (via set_cpu_possible)
 */
void sunxi_smp_init_cpus(void)
{
	unsigned int i, ncores;

	ncores = get_nr_cores();
	pr_debug("[%s] ncores=%d\n", __func__, ncores);

	/*
	 * sanity check; nr_cpu_ids is configured from CONFIG_NR_CPUS
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++) {
		set_cpu_possible(i, true);
	}

#if defined(CONFIG_ARM_SUNXI_CPUIDLE)
	set_smp_cross_call(sunxi_raise_softirq);
#else
	set_smp_cross_call(gic_raise_softirq);
#endif
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/*
	 * Currently we can't call ioremap here because
	 * SoC detection won't work until after init_early.
	 */
	scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
	BUG_ON(!scu_base);

	ncores = scu_get_core_count(scu_base);

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
static void shx3_smp_setup(void)
{
	unsigned int cpu = 0;
	int i, num;

	init_cpu_possible(cpumask_of(cpu));

	/* Enable light sleep for the boot CPU */
	__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));

	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	/*
	 * Do this stupidly for now.. we don't have an easy way to probe
	 * for the total number of cores.
	 */
	for (i = 1, num = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		__cpu_number_map[i] = ++num;
		__cpu_logical_map[num] = i;
	}

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int i = 0, ncores = 1, cpu_id;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	cpu_id = read_cpuid_id() & CPU_MASK;
	if (cpu_id == CPU_CORTEX_A9) {
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
		BUG_ON(!scu_base);
		ncores = scu_get_core_count(scu_base);
	} else if (cpu_id == CPU_CORTEX_A15) {
		ncores = OMAP5_CORE_COUNT;
	}

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}
static void __init vexpress_dt_smp_init_cpus(void)
{
	int ncores = 0, i;

	switch (vexpress_dt_scu) {
	case GENERIC_SCU:
		ncores = of_scan_flat_dt(vexpress_dt_cpus_num, NULL);
		break;
	case CORTEX_A9_SCU:
		ncores = scu_get_core_count(vexpress_dt_cortex_a9_scu_base);
		break;
	default:
		WARN_ON(1);
		break;
	}

	if (ncores < 2)
		return;

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_mask, on the other hand, can change dynamically.
 * When cpu_hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible, disabled_cpus;

	disabled_cpus = total_cpus - available_cpus;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}

	possible = available_cpus + additional_cpus;

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
	       possible, max((possible - available_cpus), 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}
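The sizing policy described in the comment above can be checked with plain arithmetic. This is a standalone sketch with made-up inputs (total_cpus, available_cpus and nr_cpu_ids here are invented values, not probed from firmware); it reproduces the additional_cpus == -1 default and the nr_cpu_ids clamp.

#include <stdio.h>

int main(void)
{
	int total_cpus = 8, available_cpus = 6;	/* e.g. 2 sockets disabled in BIOS */
	int additional_cpus = -1;		/* no additional_cpus= override */
	int nr_cpu_ids = 16;

	int disabled_cpus = total_cpus - available_cpus;

	/* -1 sentinel: default to reserving one slot per disabled CPU. */
	if (additional_cpus == -1)
		additional_cpus = disabled_cpus > 0 ? disabled_cpus : 0;

	int possible = available_cpus + additional_cpus;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	/* With these inputs: "SMP: Allowing 8 CPUs, 2 hotplug CPUs" */
	printf("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
	       possible,
	       possible - available_cpus > 0 ? possible - available_cpus : 0);
	return 0;
}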
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int ncores = available_cpus();
	unsigned int i;

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	/*
	 * If only one CPU is possible, platform_smp_prepare_cpus() will
	 * never get called. We must therefore initialize the reset handler
	 * here. If there is more than one CPU, we must wait until after
	 * the cpu_present_mask has been updated with all present CPUs in
	 * platform_smp_prepare_cpus() before initializing the reset handler.
	 */
	if (ncores == 1) {
		tegra_cpu_reset_handler_init();
		tegra_all_cpus_booted = true;
	}

	set_smp_cross_call(gic_raise_softirq);
}
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
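The linear CPU numbering produced by the topology walk above can be illustrated in isolation. The sketch below uses an invented per-core VPE count in place of core_vpe_count(); it only shows how CPU ids accumulate as nvpes advances across cores.

#include <stdio.h>

int main(void)
{
	/* Invented topology standing in for core_vpe_count(c):
	 * three cores with 2, 2 and 4 VPEs respectively. */
	int core_vpes[] = { 2, 2, 4 };
	int ncores = 3, nvpes = 0, c, v;

	for (c = 0; c < ncores; c++) {
		/* Each VPE becomes one linear CPU id: nvpes + v. */
		for (v = 0; v < core_vpes[c]; v++)
			printf("cpu %d = core %d, vpe %d\n", nvpes + v, c, v);
		nvpes += core_vpes[c];
	}
	printf("total %d cpus (== vpes)\n", nvpes);
	return 0;
}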
static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system. The msm8x60
 * does not support the ARM SCU, so just set the possible cpu mask to
 * NR_CPUS.
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
static void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* The boot CPU is always available */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}