void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;
	u32 bstadd;
	u32 bstsize;
	u32 *phead_address;

	if (get_ambarella_bstmem_info(&bstadd, &bstsize) != AMB_BST_MAGIC) {
		pr_err("Can't find SMP BST!\n");
		return;
	}

	phead_address = get_ambarella_bstmem_head();
	if (phead_address == (u32 *)AMB_BST_INVALID) {
		pr_err("Can't find SMP BST Head!\n");
		return;
	}

	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base);

	for (i = 1; i < max_cpus; i++) {
		phead_address[PROCESSOR_START_0 + i] =
			BSYM(virt_to_phys(ambarella_secondary_startup));
		phead_address[PROCESSOR_STATUS_0 + i] = AMB_BST_START_COUNTER;
	}
	ambcache_flush_range((void *)phead_address,
		AMBARELLA_BST_HEAD_CACHE_SIZE);

	smp_max_cpus = max_cpus;
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = get_core_count();
	unsigned long addr;
	int i;

	edb_trace(1);
	edb_putstr("smp_prepare_cpus\n");

	/* sanity check */
	if (ncores == 0) {
		printk(KERN_ERR
		       "hisik3: strange CM count of 0? Default to 1\n");
		ncores = 1;
	}

	if (ncores > NR_CPUS) {
		printk(KERN_WARNING
		       "hisik3: no. of cores (%d) greater than configured "
		       "maximum of %d - clipping\n", ncores, NR_CPUS);
		ncores = NR_CPUS;
	}

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores) {
		WARN(1, "hisik3: smp max cpus must not exceed the number of cores that exist\n");
		max_cpus = ncores;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	addr = (unsigned long)IO_ADDRESS(MEMORY_AXI_SECOND_CPU_BOOT_ADDR);
	printk("poke_milo addr 0x%lx at 0x%x\n", addr,
	       virt_to_phys(k3v2_secondary_startup));

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	writel(BSYM(virt_to_phys(k3v2_secondary_startup)), addr);
	wmb();
	flush_cache_all();

	edb_putstr("smp_prepare_cpus out\n");
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	writew(BSYM(virt_to_phys(vexpress_secondary_startup)) >> 16,
	       SECOND_START_ADDR_HI);
	writew(BSYM(virt_to_phys(vexpress_secondary_startup)),
	       SECOND_START_ADDR_LO);

	pen_release = cpu;

#if defined(CONFIG_MSTAR_STR_DBGMSG)
	{
		unsigned int *ptr = &pen_release;

		printk("pen_release = 0x%08x, addr = 0x%08x, pen_release ptr = 0x%08x\n",
		       pen_release, &pen_release, *ptr);
	}
#endif

	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

#if defined(CONFIG_MSTAR_STR_DBGMSG)
	{
		unsigned int *ptr = &pen_release;

		printk("pen_release = 0x%08x, addr = 0x%08x, pen_release ptr = 0x%08x\n",
		       pen_release, &pen_release, *ptr);
	}
#endif

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
		     S5P_VA_SYSRAM);
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	u32 timeout = 100000;
	u32 *phead_address;
	int retval = 0;

	spin_lock(&boot_lock);

	phead_address = get_ambarella_bstmem_head();
	if (phead_address == (u32 *)AMB_BST_INVALID) {
		pr_err("Can't find SMP BST Header!\n");
		retval = -EPERM;
		goto boot_secondary_exit;
	}

#ifdef CONFIG_OUTER_CACHE
	smp_l2_mode = outer_is_enabled();
	if (smp_l2_mode)
		ambcache_l2_disable_raw();
#endif

	phead_address[PROCESSOR_START_0 + cpu] =
		BSYM(virt_to_phys(ambarella_secondary_startup));
	phead_address[PROCESSOR_STATUS_0 + cpu] = AMB_BST_START_COUNTER;
	ambcache_flush_range((void *)phead_address,
		AMBARELLA_BST_HEAD_CACHE_SIZE);

	smp_cross_call(cpumask_of(cpu), 1);

	while (timeout) {
		ambcache_inv_range((void *)phead_address,
			AMBARELLA_BST_HEAD_CACHE_SIZE);
		if (phead_address[PROCESSOR_START_0 + cpu] == AMB_BST_INVALID)
			break;

		udelay(10);
		timeout--;
	}

	if (phead_address[PROCESSOR_STATUS_0 + cpu] > 0) {
		pr_err("CPU%d: spurious wakeup %d times.\n", cpu,
		       phead_address[PROCESSOR_STATUS_0 + cpu]);
	}

	if (phead_address[PROCESSOR_START_0 + cpu] != AMB_BST_INVALID) {
		pr_err("CPU%d: tmo[%d] [0x%08x].\n", cpu, timeout,
		       phead_address[PROCESSOR_START_0 + cpu]);
		retval = -EPERM;
	}

boot_secondary_exit:
	spin_unlock(&boot_lock);

	return retval;
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The BootMonitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	__raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)),
		     __io_address(REALVIEW_SYS_FLAGSSET));
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();
	unsigned int cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there is more than one CPU and let
	 * them know where to start.
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable((void __iomem *)PERI_ADDRESS(0x16000000)); /* SCU PA = 0x16000000 */

		/*
		 * Write the address of secondary startup into the
		 * system-wide flags register. The boot monitor waits
		 * until it receives a soft interrupt, and then the
		 * secondary CPU branches to this address.
		 */
		//printk("_Secondary_startup physical address = 0x%08x\n", BSYM(virt_to_phys(vexpress_secondary_startup)));
		//writew((BSYM(virt_to_phys(vexpress_secondary_startup)) >> 16), SECOND_START_ADDR_HI);
		writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
		writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
		       SECOND_START_ADDR);

		__cpuc_flush_kern_all();
	}
}
static void __init poke_milo(void)
{
	/* nobody is to be released from the pen yet */
	pen_release = -1;

	/*
	 * Write the address of secondary startup into the system-wide flags
	 * register. The BootMonitor waits for this register to become
	 * non-zero.
	 */
	__raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
		     __io_address(REALVIEW_SYS_FLAGSSET));

	mb();
}
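/*
 * For context only: a minimal sketch of the secondary-CPU side of the
 * holding-pen handshake that poke_milo() above sets up. This is the shape
 * platform_secondary_init() typically takes on RealView-style platforms;
 * the exact per-CPU GIC init call varies between kernel versions, so this
 * is an illustrative sketch, not the matching implementation.
 */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/* per-CPU GIC init would go here (exact call varies by kernel version) */

	/*
	 * Let the primary processor know we're out of the pen,
	 * then head off into the C entry point.
	 */
	pen_release = -1;
	smp_wmb();

	/*
	 * Synchronise with the boot thread: boot_secondary() holds
	 * boot_lock until it is ready for us to continue.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}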
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	ct_desc->smp_enable(max_cpus);

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
	writel(BSYM(virt_to_phys(versatile_secondary_startup)),
	       MMIO_P2V(V2M_SYS_FLAGSSET));
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();
	unsigned int cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(cpu);

	/* are we trying to boot more cores than exist? */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	/*
	 * Initialise the SCU if there is more than one CPU and let
	 * them know where to start.
	 */
	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		scu_enable(scu_base_addr());

		/*
		 * Write the address of secondary startup into the
		 * system-wide flags register. The boot monitor waits
		 * until it receives a soft interrupt, and then the
		 * secondary CPU branches to this address.
		 */
		__raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)),
			     S5P_VA_SYSRAM);
	}
}
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

	scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
		     S5P_VA_SYSRAM);
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);

	if (!(__raw_readl(S5P_ARM_CORE1_STATUS) & S5P_CORE_LOCAL_PWR_EN)) {
		__raw_writel(S5P_CORE_LOCAL_PWR_EN,
			     S5P_ARM_CORE1_CONFIGURATION);

		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while ((__raw_readl(S5P_ARM_CORE1_STATUS)
			& S5P_CORE_LOCAL_PWR_EN) != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();

		__raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
			     CPU1_BOOT_REG);
		gic_raise_softirq(cpumask_of(cpu), 1);

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
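/*
 * write_pen_release() is used by the boot_secondary() above but is not
 * shown in this collection. A minimal sketch of how it is commonly defined
 * alongside these platsmp.c routines (assumption: pen_release and the
 * cache maintenance helpers are available in the same file):
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();

	/* push the new value out so the not-yet-coherent secondary sees it */
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}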