/*
 * Locate the Cortex-A9 SCU through the device tree, enable it and mark
 * every core it reports as a possible CPU.
 *
 * Fix: the SCU mapping pointer was declared with a needless `static`
 * storage class; since the mapping is torn down with iounmap() before
 * returning, the static would retain a dangling pointer between calls.
 * It is an ordinary automatic local now.
 */
static void __init ux500_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *scu_base;
	unsigned int ncores;
	int i;

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (!np) {
		pr_err("No SCU base address\n");
		return;
	}

	scu_base = of_iomap(np, 0);
	of_node_put(np);
	if (!scu_base) {
		pr_err("No SCU remap\n");
		return;
	}

	/* Turn on coherency before reading the core count. */
	scu_enable(scu_base);

	ncores = scu_get_core_count(scu_base);
	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	/* Mapping only needed during this early setup. */
	iounmap(scu_base);
}
/*
 * Clear hardware registers after Apps powers up.
 *
 * Restores the apps-processor power-control registers to run state after
 * a power collapse. The sequence is SoC-specific; each register write is
 * followed by mb() so it completes before the next step.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
		__raw_writel(0, APPS_SECOP);
		mb();
		__raw_writel(0, APPS_PWRDOWN);
		mb();
		/* SPM state machine must be re-armed after power collapse. */
		msm_spm_reinit();
	} else if (cpu_is_msm8625()) {
		__raw_writel(0, APPS_PWRDOWN);
		mb();

		/* NOTE(review): power_collapsed appears to be a module-level
		 * flag set on the suspend path — confirm against caller. */
		if (power_collapsed) {
			/*
			 * enable the SCU while coming out of power
			 * collapse.
			 */
			scu_enable(MSM_SCU_BASE);
			/*
			 * Program the top csr to put the core1 into GDFS.
			 */
			configure_top_csr();
		}
	} else {
		/* Default path for other MSM parts. */
		__raw_writel(0, APPS_PWRDOWN);
		mb();
		__raw_writel(0, APPS_CLK_SLEEP_EN);
		mb();
	}
}
/*
 * Prepare a secondary CPU for bring-up: resolve the physical address of
 * the secondary startup code, enable snooping through the SCU (when one
 * is present), and poke the per-CPU clear/release mailboxes.
 *
 * Returns VMM_OK on success or the vmm_host_va2pa() error code.
 */
static int __init scu_cpu_prepare(unsigned int cpu)
{
	physical_addr_t startup_pa;
	int err;

	/* Translate the secondary entry point to a physical address. */
	err = vmm_host_va2pa((virtual_addr_t)&_start_secondary_nopen,
			     &startup_pa);
	if (err)
		return err;

	/* Enable snooping through SCU */
	if (scu_base)
		scu_enable((void *)scu_base);

	/* Reset the per-CPU clear mailbox, if one is configured. */
	if (clear_addr[cpu])
		vmm_writel(~0x0, (void *)clear_addr[cpu]);

	/* Hand the secondary its entry point via the release mailbox. */
	if (release_addr[cpu])
		vmm_writel((u32)startup_pa, (void *)release_addr[cpu]);

	return VMM_OK;
}
/*
 * Prepare secondary-CPU bring-up on OMAP4/OMAP5: enable the SCU (when
 * present), select the erratum-specific startup routine for OMAP4460,
 * and publish the secondary entry point for the ROM code.
 */
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
	void *startup_addr = omap4_secondary_startup;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Initialise the SCU and wake up the secondary core using
	 * wakeup_secondary().
	 */
	if (scu_base)
		scu_enable(scu_base);

	if (cpu_is_omap446x()) {
		/* 4460 needs its own startup path plus the GICD ROM-boot
		 * erratum workaround flag. */
		startup_addr = omap4460_secondary_startup;
		pm44xx_errata |= PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD;
	}

	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_auxcoreboot_addr(virt_to_phys(startup_addr));
	else
		/* NOTE(review): the non-secure path deliberately writes
		 * omap5_secondary_startup, not startup_addr — presumably
		 * only OMAP5 takes this branch; confirm before changing. */
		__raw_writel(virt_to_phys(omap5_secondary_startup),
			     base + OMAP_AUX_CORE_BOOT_1);
}
/*
 * Tegra smp_prepare_cpus hook: record CPU0 as initialised, install the
 * CPU reset handler, program the SCU control bits and enable the SCU.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	/* Always mark the boot CPU as initialized. */
	cpumask_set_cpu(0, to_cpumask(tegra_cpu_init_bits));

	if (max_cpus == 1)
		tegra_all_cpus_booted = true;

	/* If we're here, it means that more than one CPU was found by
	   smp_init_cpus() which also means that it did not initialize the
	   reset handler. Do it now before the secondary CPUs are started. */
	tegra_cpu_reset_handler_init();

#if defined(CONFIG_HAVE_ARM_SCU)
	{
		/* Set the standby/speculative-fill bits; only write them
		 * back while the SCU is still disabled (bit 0 clear) —
		 * scu_enable() below sets the enable bit itself. */
		u32 scu_ctrl = __raw_readl(scu_base) |
				1 << 3 | /* Enable speculative line fill*/
				1 << 5 | /* Enable IC standby */
				1 << 6; /* Enable SCU standby */
		if (!(scu_ctrl & 1))
			__raw_writel(scu_ctrl, scu_base);
	}
#endif
	scu_enable(scu_base);
}
/*
 * Zynq smp_prepare_cpus hook: map the zero page used for the secondary
 * CPU's long jump, mark CPUs present, and enable the SCU.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Remap the first three addresses at zero which are used
	 * for 32bit long jump for SMP. Look at zynq_cpu1_start()
	 */
#if defined(CONFIG_PHYS_OFFSET) && (CONFIG_PHYS_OFFSET != 0)
	zero = ioremap(0, 12);
	if (!zero) {
		printk(KERN_WARNING
			"!!!! BOOTUP jump vectors can't be used !!!!\n");
		/* Deliberate hang: without the jump vectors the secondary
		 * cannot be started and boot cannot proceed safely. */
		while (1)
			;
	}
#else
	/* The first three addresses at zero are already mapped */
	zero = (u8 *)CONFIG_PAGE_OFFSET;
#endif

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(SCU_PERIPH_BASE);
}
/*
 * OX820 smp_prepare_cpus hook: populate the present map, enable the SCU,
 * unmask the GIC for CPU1 and park the secondary's entry point in the
 * holding pen.
 *
 * Fix: removed a leftover debug printk(KERN_ERR "%s %d\n", __FUNCTION__,
 * __LINE__) that logged at error severity on every boot.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base);

	/*
	 * Enable gic interrupts on the secondary CPU so the interrupt that wakes
	 * it from WFI can be received
	 */
	writel(1, __io_address(OX820_GIC_CPUN_BASE_ADDR(1)));

	/* Write the address that we want the cpu to start at. */
	writel(virt_to_phys(ox820_secondary_startup), HOLDINGPEN_LOCATION);
	smp_wmb();

	/* Release the holding pen only after the address is visible. */
	writel(1, HOLDINGPEN_CPU);
	smp_wmb();
}
/*
 * Exynos resume callback: restore core registers, re-enable the SCU on
 * Cortex-A9 parts, and fall back to direct register restore when the
 * firmware resume op is unimplemented.
 */
static void exynos_pm_resume(void)
{
	u32 cpuid = read_cpuid_part();

	/* Early wakeup: central power state was never entered, skip the
	 * full restore and just clear the sleep marker. */
	if (exynos_pm_central_resume())
		goto early_wakeup;

	/* For release retention */
	exynos_pm_release_retention();

	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));

	/* Only Cortex-A9 based SoCs have an external SCU to re-enable. */
	if (cpuid == ARM_CPU_PART_CORTEX_A9)
		scu_enable(S5P_VA_SCU);

	/* Fall back to manual register restore when firmware does not
	 * implement the resume op. */
	if (call_firmware_op(resume) == -ENOSYS
	    && cpuid == ARM_CPU_PART_CORTEX_A9)
		exynos_cpu_restore_register();

early_wakeup:
	/* Clear SLEEP mode set in INFORM1 */
	pmu_raw_writel(0x0, S5P_INFORM1);
	exynos_set_delayed_reset_assertion(true);
}
/*
 * Ambarella smp_prepare_cpus hook: validate the boot-strap (BST) memory
 * region, mark CPUs present, enable the SCU, and publish each secondary
 * CPU's entry point and status word in the BST head.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;
	u32 bstadd;
	u32 bstsize;
	u32 *phead_address;

	if (get_ambarella_bstmem_info(&bstadd, &bstsize) != AMB_BST_MAGIC) {
		pr_err("Can't find SMP BST!\n");
		return;
	}

	phead_address = get_ambarella_bstmem_head();
	if (phead_address == (u32 *)AMB_BST_INVALID) {
		pr_err("Can't find SMP BST Head!\n");
		return;
	}

	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base);

	/* CPU0 is the boot CPU; only secondaries get an entry point. */
	for (i = 1; i < max_cpus; i++) {
		phead_address[PROCESSOR_START_0 + i] = BSYM(
			virt_to_phys(ambarella_secondary_startup));
		phead_address[PROCESSOR_STATUS_0 + i] = AMB_BST_START_COUNTER;
	}
	/* Flush so the (possibly non-coherent) secondaries see the head. */
	ambcache_flush_range((void *)(phead_address),
		AMBARELLA_BST_HEAD_CACHE_SIZE);

	smp_max_cpus = max_cpus;
}
/*
 * Prepare secondary cores: enable SCU coherency on Cortex-A9 parts and
 * publish the secondary startup address for every CPU past the boot CPU.
 */
static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long entry = virt_to_phys(exynos4_secondary_startup);
	int cpu;

	/* Only Cortex-A9 based SoCs have an external SCU to enable. */
	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (cpu = 1; cpu < max_cpus; cpu++) {
		unsigned long hw_cpu = cpu_logical_map(cpu);

		if (call_firmware_op(set_cpu_boot_addr, hw_cpu, entry))
			__raw_writel(entry, cpu_boot_reg(hw_cpu));
	}
}
/*
 * STi smp_prepare_cpus hook: enable the Cortex-A9 SCU (when present)
 * and write the secondary entry point into each CPU's release address
 * ("holding pen"), which may live in SBC DMEM or in RAM.
 *
 * Fix: of_iomap() can return NULL; the original passed the unchecked
 * pointer straight to scu_enable(), dereferencing NULL on mapping
 * failure.
 */
static void __init sti_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *scu_base;
	u32 __iomem *cpu_strt_ptr;
	u32 release_phys;
	int cpu;
	unsigned long entry_pa = virt_to_phys(sti_secondary_startup);

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (np) {
		scu_base = of_iomap(np, 0);
		of_node_put(np);
		if (scu_base)
			scu_enable(scu_base);
		else
			pr_err("%s: failed to map SCU registers\n", __func__);
	}

	if (max_cpus <= 1)
		return;

	for_each_possible_cpu(cpu) {
		np = of_get_cpu_node(cpu, NULL);
		if (!np)
			continue;

		if (of_property_read_u32(np, "cpu-release-addr",
						&release_phys)) {
			pr_err("CPU %d: missing or invalid cpu-release-addr "
				"property\n", cpu);
			continue;
		}

		/*
		 * holding pen is usually configured in SBC DMEM but can also be
		 * in RAM.
		 */
		if (!memblock_is_memory(release_phys))
			cpu_strt_ptr = ioremap(release_phys,
					       sizeof(release_phys));
		else
			cpu_strt_ptr =
				(u32 __iomem *)phys_to_virt(release_phys);

		__raw_writel(entry_pa, cpu_strt_ptr);

		/*
		 * wmb so that data is actually written
		 * before cache flush is done
		 */
		smp_wmb();
		sync_cache_w(cpu_strt_ptr);

		if (!memblock_is_memory(release_phys))
			iounmap(cpu_strt_ptr);
	}
}
/*
 * Hisilicon K3 smp_prepare_cpus hook: sanity-check the core count,
 * populate the present map, enable the SCU, and publish the secondary
 * startup address for the boot monitor.
 *
 * Fix: the diagnostic printk used %x for the result of virt_to_phys()
 * (a phys_addr_t, not an int) — a format/argument mismatch and UB.
 * It now prints through an unsigned long cast with %lx.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned long addr;
	int i;

	edb_trace(1);
	edb_putstr("smp_prepare_cpus\n");

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
			"hisik3: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
			"hisik3: no. of cores (%d) greater than configured "
			"maximum of %d - clipping\n", ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores) {
		WARN(1, "hisik3: smp max cpus should NOT more cores than exist\n");
		max_cpus = ncores;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	addr = (unsigned long) IO_ADDRESS(MEMORY_AXI_SECOND_CPU_BOOT_ADDR);

	printk("poke_milo addr 0x%lx at 0x%lx\n", addr,
		(unsigned long)virt_to_phys(k3v2_secondary_startup));

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	writel(BSYM(virt_to_phys(k3v2_secondary_startup)), addr);

	wmb();
	flush_cache_all();

	edb_putstr("smp_prepare_cpus out\n");
}
/*
 * Record CPU0 as already initialised and, when a Cortex-A9 SCU is
 * present, enable it so secondary cores come up coherent.
 */
static void __init tegra_smp_prepare_cpus(unsigned int max_cpus)
{
	/* Always mark the boot CPU (CPU0) as initialized. */
	cpumask_set_cpu(0, &tegra_cpu_init_mask);

	if (!scu_a9_has_base())
		return;

	scu_enable(IO_ADDRESS(scu_a9_get_base()));
}
/*
 * Mark each bootable core present and switch on the A9 MPCore SCU so
 * the secondary CPUs join the coherency domain.
 */
static void ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable(MMIO_P2V(A9_MPCORE_SCU));
}
/*
 * Resume hook: restore SMP coherency hardware after suspend by setting
 * BIT5|BIT6 in the MPCore private memory control word and re-enabling
 * the SCU. No-op on non-SMP builds.
 */
static void wmt_pm_resume(void)
{
#ifdef CONFIG_SMP
	volatile unsigned int *mpcore_priv =
		(volatile unsigned int *)MPCORE_PRIVATE_MEM;

	*mpcore_priv |= BIT5 | BIT6;
	scu_enable(scu_base_addr());
#endif
}
/*
 * for arch/arm/kernel/smp.c:smp_prepare_cpus(unsigned int max_cpus)
 *
 * The only platform work needed here is switching on the SCU so the
 * secondaries come up cache-coherent.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	pr_debug("[%s] enter\n", __FUNCTION__);

	scu_enable(scu_base_addr());
}
/*
 * RealView smp_prepare_cpus: clamp the requested CPU count to the cores
 * the core module reports, populate the present map, and — when booting
 * more than one CPU — enable the SCU and poke MILO with the secondary
 * entry point.
 *
 * NOTE(review): scu_enable() is called with no arguments here — this is
 * the legacy local helper form, not the smp_scu.c API; confirm against
 * the rest of this file.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "Realview: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

#ifdef CONFIG_LOCAL_TIMERS
	/*
	 * Enable the local timer for primary CPU. If the device is
	 * dummy (!CONFIG_LOCAL_TIMERS), it was already registers in
	 * realview_timer_init
	 */
	local_timer_setup();
#endif

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, cpu_present_map);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		scu_enable();
		poke_milo();
	}
}
/*
 * EMEV2 smp_prepare_cpus hook: switch on the SCU and enable cache
 * coherency for the boot CPU in the SCU power-status register.
 */
static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
{
	int boot_cpu = cpu_logical_map(0);

	scu_enable(scu_base);

	/* enable cache coherency on the boot CPU */
	modify_scu_cpu_psr(0, 3 << (boot_cpu * 8));
}
/*
 * EMEV2 smp_prepare_cpus hook: enable the SCU, hand the ROM loader the
 * secondary boot vector, and put the booting CPU's SCU power mode back
 * to normal so its cache traffic stays coherent. Order matters: the SCU
 * must be on before the power-mode write.
 */
static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(shmobile_scu_base);

	/* Tell ROM loader about our vector (in headsmp-scu.S) */
	emev2_set_boot_vector(__pa(shmobile_secondary_vector_scu));

	/* enable cache coherency on booting CPU */
	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
}
/*
 * Initialise the SCU and wake up the secondary core using
 * wakeup_secondary().
 *
 * Fix: guard the scu_enable() call on scu_base being non-NULL, matching
 * the sibling omap4_smp_prepare_cpus() implementation in this file —
 * scu_base comes from an earlier ioremap and may legitimately be NULL
 * on SCU-less parts.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (scu_base)
		scu_enable(scu_base);

	wakeup_secondary();
}
/*
 * RealView smp_prepare_cpus: clamp the requested CPU count to the cores
 * the core module reports, populate the present map, and — when booting
 * more than one CPU — set up the boot CPU's timer, enable the SCU and
 * poke MILO with the secondary entry point.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "Realview: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "Realview: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());
		poke_milo();
	}
}
/*
 * Bring the SCU online (when one is present) and then kick the
 * secondary core via wakeup_secondary().
 */
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
	/* scu_base may be NULL on SCU-less parts — skip enable then. */
	if (scu_base != NULL)
		scu_enable(scu_base);

	wakeup_secondary();
}
/*
 * Locate the Cortex-A9 SCU through the device tree and enable it so the
 * secondary cores come up cache-coherent.
 *
 * Fix: of_iomap() can fail and return NULL; the original passed the
 * unchecked pointer straight to scu_enable(), dereferencing NULL on
 * mapping failure.
 */
void __init sti_smp_prepare_cpus(unsigned int max_cpus)
{
	void __iomem *scu_base = NULL;
	struct device_node *np = of_find_compatible_node(
		NULL, NULL, "arm,cortex-a9-scu");

	if (np) {
		scu_base = of_iomap(np, 0);
		of_node_put(np);
	}

	if (scu_base)
		scu_enable(scu_base);
	else
		pr_err("%s: failed to map SCU registers\n", __func__);
}
/*
 * Rockchip smp_prepare_cpus hook: map the SCU, prepare the SMP trampoline
 * SRAM and the PMU, power up the SCU domain, enable the SCU, and make
 * sure every core except CPU0 is really powered off.
 *
 * Fix: each of_find_compatible_node() result was leaked — the device_node
 * reference is now dropped with of_node_put() once it is no longer used.
 */
static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *node;
	unsigned int i;

	node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
	if (!node) {
		pr_err("%s: missing scu\n", __func__);
		return;
	}

	scu_base_addr = of_iomap(node, 0);
	of_node_put(node);
	if (!scu_base_addr) {
		pr_err("%s: could not map scu registers\n", __func__);
		return;
	}

	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-smp-sram");
	if (!node) {
		pr_err("%s: could not find sram dt node\n", __func__);
		return;
	}

	if (rockchip_smp_prepare_sram(node)) {
		of_node_put(node);
		return;
	}
	of_node_put(node);

	node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
	if (!node) {
		pr_err("%s: could not find pmu dt node\n", __func__);
		return;
	}

	pmu_base_addr = of_iomap(node, 0);
	of_node_put(node);
	if (!pmu_base_addr) {
		pr_err("%s: could not map pmu registers\n", __func__);
		return;
	}

	/* enable the SCU power domain */
	pmu_set_power_domain(PMU_PWRDN_SCU, true);

	/*
	 * While the number of cpus is gathered from dt, also get the number
	 * of cores from the scu to verify this value when booting the cores.
	 */
	ncores = scu_get_core_count(scu_base_addr);

	scu_enable(scu_base_addr);

	/* Make sure that all cores except the first are really off */
	for (i = 1; i < ncores; i++)
		pmu_set_power_domain(0 + i, false);
}
/*
 * MT65xx smp_prepare_cpus hook: mark CPUs present, enable the SCU, and
 * publish the secondary entry point in the slave jump register.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable((void *)SCU_BASE);

	/* write the address of slave startup into the system-wide flags register */
	mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup),
			       SLAVE_JUMP_REG);
}
/*
 * SPEAr13xx smp_prepare_cpus hook: enable the SCU, then publish the
 * secondary startup address where the BootMonitor expects it.
 */
static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base);

	/*
	 * Write the address of secondary startup into the system-wide location
	 * (presently it is in SRAM). The BootMonitor waits until it receives a
	 * soft interrupt, and then the secondary CPU branches to this address.
	 */
	__raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
}
/*
 * EMEV2 smp_prepare_cpus hook: enable the SCU, register the shared
 * shmobile boot vector/handler with the ROM loader, and restore the
 * booting CPU's SCU power mode. The SCU must be enabled before the
 * power-mode write at the end.
 */
static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(shmobile_scu_base);

	/* Tell ROM loader about our vector (in headsmp-scu.S, headsmp.S) */
	emev2_set_boot_vector(__pa(shmobile_boot_vector));
	shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
	shmobile_boot_arg = (unsigned long)shmobile_scu_base;

	/* enable cache coherency on booting CPU */
	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
}
/*
 * Exynos4 smp_prepare_cpus hook: enable the SCU and publish the
 * secondary startup address in SYSRAM for the boot monitor.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long boot_entry;

	scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	boot_entry = BSYM(virt_to_phys(exynos4_secondary_startup));
	__raw_writel(boot_entry, S5P_VA_SYSRAM);
}
/*
 * Zynq smp_prepare_cpus hook: populate the present map and enable the
 * SCU so secondary cores come up coherent.
 */
static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable(zynq_scu_base);
}
/*
 * OMAP4 smp_prepare_cpus: clamp the requested CPU count to what the
 * hardware reports, populate the present map, and — when booting more
 * than one CPU — set up the boot CPU timer, enable the SCU and wake the
 * secondary core.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned int cpu = smp_processor_id();
	int i;

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "OMAP4: strange core count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "OMAP4: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n",
		       ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the SCU and wake up the secondary core using
		 * wakeup_secondary().
		 */
		scu_enable(scu_base);
		wakeup_secondary();
	}
}