void mcpm_smp_init_cpus(void) { unsigned int i, ncores; ncores = MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER; printk("[%s] ncores=%d\n", __func__, ncores); /* * sanity check, the cr_cpu_ids is configured form CONFIG_NR_CPUS */ if (ncores > nr_cpu_ids) { printk("SMP: %u cores greater than maximum (%u), clipping\n", ncores, nr_cpu_ids); ncores = nr_cpu_ids; } for (i = 0; i < ncores; i++) { set_cpu_possible(i, true); } #ifdef CONFIG_ARCH_SUN9IW1 /* FIXME: init sun9i mcpm cpu map */ sun9i_mcpm_cpu_map_init(); #endif #ifdef CONFIG_ARCH_SUN8IW6 /* FIXME: init sun9i mcpm cpu map */ sun8i_mcpm_cpu_map_init(); #endif #if defined(CONFIG_ARM_SUNXI_CPUIDLE) set_smp_cross_call(sunxi_raise_softirq); #else set_smp_cross_call(gic_raise_softirq); #endif }
/* * Setup the set of possible CPUs (via set_cpu_possible) */ void sunxi_smp_init_cpus(void) { unsigned int i, ncores; ncores = get_nr_cores(); pr_debug("[%s] ncores=%d\n", __func__, ncores); /* * sanity check, the cr_cpu_ids is configured form CONFIG_NR_CPUS */ if (ncores > nr_cpu_ids) { pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", ncores, nr_cpu_ids); ncores = nr_cpu_ids; } for (i = 0; i < ncores; i++) { set_cpu_possible(i, true); } #if defined(CONFIG_ARM_SUNXI_CPUIDLE) set_smp_cross_call(sunxi_raise_softirq); #else set_smp_cross_call(gic_raise_softirq); #endif }
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
#ifdef NOT_FOR_L4
	void __iomem *scu_base = scu_base_addr();
#endif
	unsigned int i, ncores;

#ifdef NOT_FOR_L4
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
#else
	ncores = l4x_nr_cpus;
#endif

	/*
	 * Clip against nr_cpu_ids rather than NR_CPUS: nr_cpu_ids is the
	 * runtime limit (nr_cpus= boot parameter) and is never larger
	 * than NR_CPUS, so CPUs beyond it must not be marked possible.
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("Realview: no. of cores (%u) greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(l4x_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system. SoC detection
 * is not available yet, so the CPU part number decides how the core
 * count is obtained.
 */
static void __init omap4_smp_init_cpus(void)
{
	unsigned int cpu, part, cores = 1;

	/* Use ARM cpuid check here, as SoC detection will not work so early */
	part = read_cpuid(CPUID_ID) & CPU_MASK;
	switch (part) {
	case CPU_CORTEX_A9:
		/*
		 * Currently we can't call ioremap here because
		 * SoC detection won't work until after init_early.
		 */
		scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
		BUG_ON(!scu_base);
		cores = scu_get_core_count(scu_base);
		break;
	case CPU_CORTEX_A15:
		cores = OMAP5_CORE_COUNT;
		break;
	}

	/* sanity check */
	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu, cores;

	/*
	 * Currently we can't call ioremap here because
	 * SoC detection won't work until after init_early.
	 */
	scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
	BUG_ON(!scu_base);

	cores = scu_get_core_count(scu_base);

	/* sanity check */
	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Determine the core count from the DT (generic SCU) or the Cortex-A9
 * SCU registers, then populate the possible-CPU map. Bails out for
 * uniprocessor configurations.
 */
static void __init vexpress_dt_smp_init_cpus(void)
{
	int cpu, cores = 0;

	if (vexpress_dt_scu == GENERIC_SCU)
		cores = of_scan_flat_dt(vexpress_dt_cpus_num, NULL);
	else if (vexpress_dt_scu == CORTEX_A9_SCU)
		cores = scu_get_core_count(vexpress_dt_cortex_a9_scu_base);
	else
		WARN_ON(1);

	/* Fewer than two cores: nothing to set up. */
	if (cores < 2)
		return;

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/*
	 * Currently we can't call ioremap here because
	 * SoC detection won't work until after init_early.
	 */
	scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
	BUG_ON(!scu_base);
	ncores = scu_get_core_count(scu_base);

	/*
	 * Sanity check against nr_cpu_ids rather than NR_CPUS so the
	 * nr_cpus= boot parameter is honoured (nr_cpu_ids <= NR_CPUS).
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("OMAP4: no. of cores (%u) greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int ncores = available_cpus();
	unsigned int i;

	/* Clip to the runtime limit (CONFIG_NR_CPUS / nr_cpus=). */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	/* If only one CPU is possible, platform_smp_prepare_cpus() will
	   never get called. We must therefore initialize the reset
	   handler here. If there is more than one CPU, we must wait
	   until after the cpu_present_mask has been updated with all
	   present CPUs in platform_smp_prepare_cpus() before
	   initializing the reset handler. */
	if (ncores == 1) {
		tegra_cpu_reset_handler_init();
		tegra_all_cpus_booted = true;
	}

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map from the SCU core count, after first
 * pulsing CPU1's reset as a power-on workaround.
 *
 * NOTE(review): the code assumes 0xF0009010 is a valid, already-mapped
 * register address and that bit 1 controls CPU1 reset - confirm
 * against the mt65xx register manual.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/*
	 * NoteXXX: CPU 1 may not be reset cleanly after power-ON.
	 * Need to apply a S/W workaround to manually reset it first.
	 */
	u32 val;

	/* Pulse the reset bit: read, assert bit 1, wait, deassert, wait. */
	val = *(volatile u32 *)0xF0009010;
	mt65xx_reg_sync_writel(val | 0x2, 0xF0009010);
	udelay(10);
	mt65xx_reg_sync_writel(val & ~0x2, 0xF0009010);
	udelay(10);

	/* Ask the SCU how many cores are fitted. */
	ncores = scu_get_core_count((void *)SCU_BASE);
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING "SCU core count (%d) > NR_CPUS (%d)\n",
		       ncores, NR_CPUS);
		printk(KERN_WARNING "set nr_cores to NR_CPUS (%d)\n",
		       NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(irq_raise_softirq);
}
/*
 * Initialise the CPU possible map. Some Exynos variants have a fixed,
 * known core count; otherwise the SCU is queried (or 1 is assumed
 * when no SCU mapping is available).
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int cpu, cores;

	if (soc_is_exynos4210() || soc_is_exynos4212() ||
	    soc_is_exynos5250())
		cores = 2;
	else if (soc_is_exynos4412() || soc_is_exynos5410())
		cores = 4;
	else
		cores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system. The msm8x60
 * does not support the ARM SCU, so there is no hardware count to
 * query; mark every CPU the kernel can handle as possible.
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	/*
	 * Iterate up to nr_cpu_ids rather than NR_CPUS: nr_cpu_ids is
	 * the runtime limit (nr_cpus= boot parameter) and is never
	 * larger than NR_CPUS, so CPUs beyond it must not be marked
	 * possible.
	 */
	for (i = 0; i < nr_cpu_ids; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int cores = scu_get_core_count(scu_base);

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early from the platform core count.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	/*
	 * Use the set_cpu_possible() accessor instead of poking
	 * cpu_possible_map directly with cpu_set(): direct cpumask
	 * manipulation is deprecated, and this matches the rest of
	 * the platforms in this tree.
	 */
	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int cores = scu_get_core_count(RK30_SCU_BASE);

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	/* CONFIG_FIQ builds use the non-secure variant of the GIC IPI. */
#ifdef CONFIG_FIQ
	set_smp_cross_call(gic_raise_softirq_non_secure);
#else
	set_smp_cross_call(gic_raise_softirq);
#endif
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	int cpu;
	int cores = scu_get_core_count(SCU_PERIPH_BASE);

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init msm8625_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	/* Without an SCU mapping, assume a single core. */
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/*
	 * Sanity check (previously missing here, present on every
	 * sibling platform): never mark a CPU beyond nr_cpu_ids as
	 * possible.
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map from the SCU core count.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	ncores = scu_get_core_count(NULL);
	/* __func__ is standard C99; __FUNCTION__ is a GNU extension.
	 * Also use %u for the unsigned count. */
	pr_debug("[%s] ncores=%u\n", __func__, ncores);

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Mark the tile's cores as possible (clipped to nr_cpu_ids) and
 * install the GIC IPI handler.
 */
static void ct_ca9x4_init_cpu_map(void)
{
	int cpu;
	int cores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = scu_get_core_count(scu_base);

	/*
	 * Clip against nr_cpu_ids rather than NR_CPUS so the nr_cpus=
	 * boot parameter is honoured (nr_cpu_ids <= NR_CPUS).
	 */
	if (ncores > nr_cpu_ids) {
		pr_err("Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
		       ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int cores = get_core_count();

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	/* FIXME: ncores needs to come from DT */
	unsigned int cpu, cores = 4;

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Read the core count from the APCS GCC register block (low nibble of
 * the word at offset 0x30) and mark that many CPUs possible, clipped
 * to nr_cpu_ids.
 */
static void __init arm_smp_init_cpus(void)
{
	unsigned int cpu, cores;

	cores = (__raw_readl(MSM_APCS_GCC_BASE + 0x30)) & 0xF;

	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init ux500_smp_init_cpus(void)
{
	void __iomem *scu = scu_base_addr();
	unsigned int cpu, cores;

	/* Without an SCU mapping, assume a single core. */
	cores = scu ? scu_get_core_count(scu) : 1;

	/* sanity check */
	if (cores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			cores, nr_cpu_ids);
		cores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = get_core_count();

	/* NOTE(review): edb_putstr appears to be an early debug print
	 * helper defined elsewhere - confirm it is safe this early. */
	edb_putstr("smp_init_cpus\n");

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "hisik3: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init highbank_smp_init_cpus(void)
{
	unsigned int i, ncores;

	/* Ask the SCU how many cores are fitted. */
	ncores = scu_get_core_count(scu_base_addr);

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "highbank: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init socfpga_smp_init_cpus(void)
{
	unsigned int i, ncores;

	ncores = scu_get_core_count(socfpga_scu_base_addr);

	/*
	 * Bug fix: the original marked all ncores CPUs possible BEFORE
	 * its sanity check and then compared ncores against
	 * num_possible_cpus(), which at that point already reflected
	 * the first loop - so the check could never trigger, and a
	 * second, identical marking loop ran redundantly. Clip against
	 * nr_cpu_ids first, then mark the CPUs possible exactly once.
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("socfpga: no. of cores (%d) greater than configured maximum of %d - clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int cores = get_core_count();

	/* A zero count would leave no secondary CPUs; fall back to 1. */
	if (cores == 0) {
		printk(KERN_ERR
		       "OX820: strange CM count of 0? Default to 1\n");
		cores = 1;
	}

	if (cores > NR_CPUS) {
		printk(KERN_WARNING
		       "OX820: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n", cores, NR_CPUS);
		cores = NR_CPUS;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu = scu_base_addr();
	unsigned int cpu, cores;

	/* Without an SCU mapping, assume a single core. */
	cores = scu ? scu_get_core_count(scu) : 1;

	/* sanity check */
	if (cores == 0) {
		pr_err("mx6: strange CM count of 0? Default to 1\n");
		cores = 1;
	}

	if (cores > NR_CPUS) {
		pr_warning("mx6: no. of cores (%d) greater than configured "
			   "maximum of %d - clipping\n", cores, NR_CPUS);
		cores = NR_CPUS;
	}

	for (cpu = 0; cpu < cores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores;

	/* Never released */
	scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256);
	BUG_ON(!scu_base);

	/* Ask the SCU how many cores are fitted. */
	ncores = scu_get_core_count(scu_base);

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "OMAP4: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}