// ARM10C 20140215 static void __init exynos_smp_init_cpus(void) { void __iomem *scu_base = scu_base_addr(); // scu_base: 0xF8800000 unsigned int i, ncores; // read_cpuid_part_number(): 0x0000C0F0, ARM_CPU_PART_CORTEX_A9: 0xC090 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) ncores = scu_base ? scu_get_core_count(scu_base) : 1; else /* * CPU Nodes are passed thru DT and set_cpu_possible * is set by "arm_dt_init_cpu_maps". */ return; // return 수행 /* sanity check */ if (ncores > nr_cpu_ids) { pr_warn("SMP: %u cores greater than maximum (%u), clipping\n", ncores, nr_cpu_ids); ncores = nr_cpu_ids; } for (i = 0; i < ncores; i++) set_cpu_possible(i, true); }
/*
 * Return the number of cores reported by the SCU. The TWD local timer
 * registration (shmobile_twd_init) is also hooked onto this early
 * probe path, exactly as in the original.
 */
static unsigned int __init r8a7779_get_core_count(void)
{
	void __iomem *scu = scu_base_addr();

	shmobile_twd_init(&twd_local_timer);

	return scu_get_core_count(scu);
}
/*
 * Prepare secondary CPUs: validate the discovered core count, mark up
 * to max_cpus present, then (for >1 CPU) park the release pen, enable
 * the SCU and wake the secondaries in an SoC-specific manner.
 *
 * Fix: the original declared `unsigned int cpu = smp_processor_id();`
 * but never used it -- removed (smp_processor_id() has no side effect
 * this code relies on).
 */
static void __init brcm_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "MPCORE: strange CPU count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "MPCORE: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n", ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	printk(KERN_INFO "%d cores has been found\n", ncores);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		/* nobody is to be released from the pen yet */
		pen_release = -1;

		scu_enable(scu_base_addr());

		/* Wakeup other cores in an SoC-specific manner */
		plat_wake_secondary_cpu(max_cpus, platform_secondary_startup);
	}
}
/*
 * Build the CPU possible map. SoCs with known topology get a fixed
 * core count; everything else asks the SCU (or assumes 1 core when
 * no SCU is mapped).
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int cpu, ncores;

	if (soc_is_exynos4210() || soc_is_exynos4212() ||
	    soc_is_exynos5250())
		ncores = 2;
	else if (soc_is_exynos4412() || soc_is_exynos5410())
		ncores = 4;
	else
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* Never exceed the configured CPU limit. */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < ncores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
#ifdef NOT_FOR_L4
	void __iomem *scu_base = scu_base_addr();
#endif
	unsigned int i, ncores;

#ifdef NOT_FOR_L4
	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
#else
	/* NOTE(review): presumably the L4 layer set l4x_nr_cpus up
	 * earlier in boot -- verify against the l4x platform code. */
	ncores = l4x_nr_cpus;
#endif

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(l4x_raise_softirq);
}
static inline unsigned int get_core_count(void) { void __iomem *scu_base = scu_base_addr(); if (scu_base) return scu_get_core_count(scu_base); return 1; }
/*
 * Prepare secondaries: enable the SCU on Cortex-A9, then publish the
 * secondary startup address for each CPU to be booted.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (cpu = 1; cpu < max_cpus; ++cpu) {
		unsigned long phys_cpu = cpu_logical_map(cpu);
		unsigned long boot_addr =
			virt_to_phys(exynos4_secondary_startup);

		if (call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr))
			__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
	}
}
/*
 * Build the CPU possible map from the SCU core count, clamped to the
 * [1, NR_CPUS] range.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int cpu, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity checks */
	if (ncores == 0) {
		printk(KERN_ERR
		       "S5PV310: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "S5PV310: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n", ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (cpu = 0; cpu < ncores; cpu++)
		set_cpu_possible(cpu, true);
}
/*
 * hisik3 SMP prepare: validate the core count, mark CPUs present,
 * enable the SCU and publish the secondary startup address in the
 * boot flag register the BootMonitor polls.
 *
 * Fix: the "poke_milo addr" printk had no log level; bare printk()
 * defaults to the loglevel-less fallback -- made it KERN_INFO.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned long addr;
	int i;

	edb_trace(1);
	edb_putstr("smp_prepare_cpus\n");

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "hisik3: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "hisik3: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n", ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores) {
		WARN(1, "hisik3: smp max cpus should NOT more cores than exist\n");
		max_cpus = ncores;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	addr = (unsigned long) IO_ADDRESS(MEMORY_AXI_SECOND_CPU_BOOT_ADDR);

	printk(KERN_INFO "poke_milo addr 0x%lx at 0x%x\n", addr,
	       virt_to_phys(k3v2_secondary_startup));

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	writel(BSYM(virt_to_phys(k3v2_secondary_startup)), addr);

	wmb();
	flush_cache_all();
	edb_putstr("smp_prepare_cpus out\n");
}
/*
 * PM resume hook: on SMP kernels, restore bits 5/6 in the MPcore
 * private memory word and re-enable the SCU.
 */
static void wmt_pm_resume(void)
{
#ifdef CONFIG_SMP
	volatile unsigned int *priv =
		(volatile unsigned int *)MPCORE_PRIVATE_MEM;

	*priv |= BIT5 | BIT6;
	scu_enable(scu_base_addr());
#endif
}
/*
 * for arch/arm/kernel/smp.c:smp_prepare_cpus(unsigned int max_cpus)
 *
 * All this platform needs at prepare time is an enabled SCU.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	pr_debug("[%s] enter\n", __FUNCTION__);

	scu_enable(scu_base_addr());
}
/* * Setup the SCU */ static void scu_enable(void) { u32 scu_ctrl; void __iomem *scu_base = scu_base_addr(); scu_ctrl = __raw_readl(scu_base + SCU_CTRL); scu_ctrl |= 1; __raw_writel(scu_ctrl, scu_base + SCU_CTRL); }
/*
 * Prepare up to max_cpus CPUs for SMP boot: sanity-check the core
 * count, record the boot CPU's info, mark CPUs present, and -- when
 * more than one CPU will run -- set up the local timer, enable the
 * SCU and poke MILO to release the secondaries.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "Realview: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());
		poke_milo();
	}
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init msm8625_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/*
	 * Sanity check: clamp to nr_cpu_ids so set_cpu_possible() is
	 * never called past the configured maximum. Every other
	 * smp_init_cpus() implementation in this file performs this
	 * clipping; it was missing here.
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Read-modify-write the SCU register at offset 8 (CPU power status),
 * clearing the bits in 'clr' and setting the bits in 'set' under
 * scu_lock.
 *
 * NOTE(review): 'tmp' is not declared in this function, so it must be
 * a file-scope variable defined elsewhere -- confirm. The register
 * write is intentionally issued after the lock is released (per the
 * original comment below); verify that concurrent callers cannot race
 * on the shared 'tmp' between unlock and write.
 */
static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
{
	void __iomem *scu_base = scu_base_addr();

	spin_lock(&scu_lock);
	tmp = __raw_readl(scu_base + 8);
	tmp &= ~clr;
	tmp |= set;
	spin_unlock(&scu_lock);

	/* disable cache coherency after releasing the lock */
	__raw_writel(tmp, scu_base + 8);
}
/*
 * Core count from the SCU configuration register: the low two bits
 * hold (ncores - 1). Falls back to 1 when no SCU is mapped.
 */
static unsigned int __init get_core_count(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int ncores = 1;

	if (scu_base)
		ncores = (__raw_readl(scu_base + SCU_CONFIG) & 0x03) + 1;

	return ncores;
}
/*
 * Enable the SCU and publish the secondary startup entry point in the
 * system-wide flags register (SYSRAM). The boot monitor waits for a
 * soft interrupt, then branches the secondary CPU to this address.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long boot_addr;

	scu_enable(scu_base_addr());

	boot_addr = BSYM(virt_to_phys(exynos4_secondary_startup));
	__raw_writel(boot_addr, S5P_VA_SYSRAM);
}
/*
 * Prepare for SMP boot: enable the SCU (skipped on EXYNOS5250) and
 * hand CPU1 its startup address via its boot register.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (!soc_is_exynos5250())
		scu_enable(scu_base_addr());

	/* Tell the boot monitor where CPU1 should begin execution. */
	__raw_writel(virt_to_phys(exynos4_secondary_startup),
		     CPU1_BOOT_REG);
}
/*
 * Mark up to max_cpus CPUs present, enable the SCU and kick the
 * secondaries awake via the platform hook.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable(scu_base_addr());
	wakeup_secondary();
}
/*
 * WMT SMP prepare: set bits 5/6 in the MPcore private memory word,
 * enable the SCU, and publish the secondary startup address for CPU1.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	*(volatile unsigned int *)MPCORE_PRIVATE_MEM |= BIT5 | BIT6;

	scu_enable(scu_base_addr());

	/*
	 * The boot monitor waits until it receives a soft interrupt,
	 * then branches the secondary CPU to the address written here.
	 */
	__raw_writel(virt_to_phys(wmt_secondary_startup), CPU1_BOOT_REG);
}
/*
 * Enable the SCU and hand the secondary startup address to the
 * BootMonitor via the system-wide flags register; each secondary
 * branches there once it receives a soft interrupt.
 */
static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base_addr());

	__raw_writel(virt_to_phys(versatile_secondary_startup),
		     __io_address(REALVIEW_SYS_FLAGSSET));
}
/*
 * MSM8625 SMP prepare: mark up to max_cpus CPUs present, enable the
 * SCU, install the secondary boot vector at MSM8625_CPU_PHYS, then
 * program the per-core boot remapper/IPC registers for cores 1-3.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i, cpu, value;
	void __iomem *cpu_ptr;

	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	/* Map the boot area and plant the startup vector there. */
	cpu_ptr = ioremap_nocache(MSM8625_CPU_PHYS, SZ_8);
	if (!cpu_ptr) {
		pr_err("failed to ioremap for secondary cores\n");
		return;
	}

	msm8625_boot_vector_init(cpu_ptr,
			virt_to_phys(msm_secondary_startup));

	iounmap(cpu_ptr);

	/*
	 * Per-core remapper/IPC programming. NOTE(review): the magic
	 * offsets (0x30/0x34, 0x48/0x4C/0x50) and bits BIT(25)/BIT(26)
	 * address SoC config registers whose semantics are not visible
	 * here -- verify against the MSM8625 documentation.
	 */
	for_each_possible_cpu(cpu) {
		switch (cpu) {
		case 0:
			/* Boot CPU: nothing to program. */
			break;
		case 1:
			remapper_address(MSM8625_CPU_PHYS, 0x34);
			per_cpu_data(cpu, 0x0, 0x3c,
					MSM8625_INT_ACSR_MP_CORE_IPC1);
			enable_boot_remapper(BIT(26), 0x30);
			break;
		case 2:
			remapper_address((MSM8625_CPU_PHYS >> 16), 0x4C);
			per_cpu_data(cpu, 0x8, 0x50,
					MSM8625_INT_ACSR_MP_CORE_IPC2);
			enable_boot_remapper(BIT(25), 0x48);
			break;
		case 3:
			/* Core 3 ORs its address into the 0x4C register
			 * already written for core 2. */
			value = __raw_readl(MSM_CFG_CTL_BASE + 0x4C);
			remapper_address(value | MSM8625_CPU_PHYS, 0x4C);
			per_cpu_data(cpu, 0xC, 0x50,
					MSM8625_INT_ACSR_MP_CORE_IPC3);
			enable_boot_remapper(BIT(26), 0x48);
			break;
		}
	}
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init ux500_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int cpu, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* Never exceed the configured CPU limit. */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < ncores; cpu++)
		set_cpu_possible(cpu, true);
}
/*
 * Prepare secondaries: enable the SCU (skipped on EXYNOS5250/5440)
 * and publish the secondary startup address in each secondary CPU's
 * boot register; the boot monitor branches the CPU there once it
 * receives a soft interrupt.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	if (!(soc_is_exynos5250() || soc_is_exynos5440()))
		scu_enable(scu_base_addr());

	for (cpu = 1; cpu < max_cpus; ++cpu)
		__raw_writel(virt_to_phys(exynos4_secondary_startup),
			     cpu_boot_reg(cpu_logical_map(cpu)));
}
void __init smp_init_cpus(void) { void __iomem *scu_base = scu_base_addr(); unsigned int i, ncores; if (soc_is_exynos4210() || soc_is_exynos4212() || soc_is_exynos5250() || soc_is_exynos3250()) ncores = 2; else if (soc_is_exynos4412() || soc_is_exynos5410() || soc_is_exynos4415() || soc_is_exynos3470()) ncores = 4; else if (soc_is_exynos5260()) #ifdef CONFIG_EXYNOS5_MP ncores = NR_CPUS; #else ncores = read_cpuid_mpidr() & 0x100 ? 4 : 2; #endif else if (soc_is_exynos5420())
/*
 * r8a7779 SMP prepare: enable the SCU, install the reset vector from
 * headsmp.S, enable cache coherency for the boot CPU, initialise the
 * PM code, then power down the three secondary CPUs until they are
 * brought up individually.
 */
void __init r8a7779_smp_prepare_cpus(void)
{
	int cpu = cpu_logical_map(0);

	scu_enable(scu_base_addr());

	/* Map the reset vector (in headsmp.S) */
	__raw_writel(__pa(shmobile_secondary_vector), AVECR);

	/* enable cache coherency on CPU0 */
	modify_scu_cpu_psr(0, 3 << (cpu * 8));

	r8a7779_pm_init();

	/* power off secondary CPUs */
	r8a7779_platform_cpu_kill(1);
	r8a7779_platform_cpu_kill(2);
	r8a7779_platform_cpu_kill(3);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/*
	 * sanity check -- clipping is recoverable, so report it with
	 * pr_warn() rather than pr_err(), matching the sibling
	 * smp_init_cpus() implementations that print this exact
	 * message at warning level.
	 */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int cpu, ncores;

	ncores = scu_base ? scu_get_core_count(scu_base) : 1;

	/* sanity check */
	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	for (cpu = 0; cpu < ncores; cpu++)
		set_cpu_possible(cpu, true);
}
/*
 * Prepare for SMP boot: record the boot CPU's info, mark up to
 * max_cpus present, then (when more than one CPU will run) set up the
 * local timer, enable the SCU and publish the secondary startup
 * address in SYSRAM for the boot monitor.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();
	unsigned int boot_cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(boot_cpu);

	/* are we trying to boot more cores than exist? */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());

		/*
		 * The boot monitor waits until it receives a soft
		 * interrupt, then branches the secondary CPU to the
		 * address stored here.
		 */
		__raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)),
			     S5P_VA_SYSRAM);
	}
}
/*
 * Mark up to max_cpus CPUs present, enable the SCU and publish the
 * secondary startup address in SYSRAM; the boot monitor waits for a
 * soft interrupt, then branches the secondary CPU there.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable(scu_base_addr());

	__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
		     S5P_VA_SYSRAM);
}