/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}
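/*
 * For context, a minimal sketch of the reader side this dsb_sev() pairs
 * with: an observing CPU invalidates its copy of the state word, rereads
 * it, and sleeps in WFE between polls.  wait_for_cluster_state() is a
 * hypothetical helper written for illustration; it is not the mainline
 * MCPM API, which performs this wait partly in assembly.
 */
static int wait_for_cluster_state(unsigned int cluster, int wanted)
{
	int state;

	for (;;) {
		sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
		state = mcpm_sync.clusters[cluster].cluster;
		if (state == wanted)
			return state;
		wfe();		/* woken by the writer's dsb_sev() */
	}
}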
static void per_cpu_sw_state_wr(u32 cpu, int val)
{
	per_cpu(per_cpu_sw_state, cpu) = val;
	dmb();
	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	dsb_sev();
}
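/*
 * The matching read helper is expected to look roughly like the sketch
 * below: invalidate the cache line for the remote CPU's slot first, then
 * read the value, mirroring the sync_cache_w()/dsb_sev() on the write side.
 */
static int per_cpu_sw_state_rd(u32 cpu)
{
	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	return per_cpu(per_cpu_sw_state, cpu);
}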
static void __init wakeup_secondary(void)
{
	static struct clockdomain *cpu1_clkdm;
	static void __iomem *sar_base;

	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
	smp_wmb();

	sar_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
	__raw_writel(virt_to_phys(omap_secondary_startup),
		     sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);

	if (!cpu1_clkdm)
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	mb();
}
static void __init wakeup_secondary(void)
{
	void *startup_addr = omap_secondary_startup;
	void __iomem *base = omap_get_wakeupgen_base();

	if (cpu_is_omap446x()) {
		startup_addr = omap_secondary_startup_4460;
		pm44xx_errata |= PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD;
	}

	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_auxcoreboot_addr(virt_to_phys(startup_addr));
	else
		__raw_writel(virt_to_phys(omap5_secondary_startup),
			     base + OMAP_AUX_CORE_BOOT_1);

	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	mb();
}
static void __init wakeup_secondary(void)
{
#if defined(CHIPREG_BOOT_2ND_ADDR_OFFSET)
	void __iomem *chipRegBase;

	chipRegBase = IOMEM(KONA_CHIPREG_VA);

	/*
	 * Chip-it FPGA has problems writing to this address hence
	 * workaround
	 */
#ifdef CONFIG_MACH_HAWAII_FPGA
	writel((virt_to_phys(kona_secondary_startup) & (~0x3)) | 0x1,
	       chipRegBase + 0x1C4);
#else
	writel((virt_to_phys(kona_secondary_startup) & (~0x3)) | 0x1,
	       chipRegBase + CHIPREG_BOOT_2ND_ADDR_OFFSET);
#endif
	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	mb();
#endif
}
/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 * cluster can be torn down without disrupting this CPU.
 * To avoid deadlocks, this must be called before a CPU is powered down.
 * The CPU cache (SCTLR.C bit) is expected to be off.
 * However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}
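/*
 * A rough sketch of the earlier marker in the same teardown sequence, for
 * comparison: __mcpm_cpu_going_down() publishes CPU_GOING_DOWN before the
 * CPU cache is turned off, so it needs no dmb()/dsb_sev() pairing of its
 * own.  Shown here as an approximation of the MCPM helper, not a verbatim
 * copy.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}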
static int rk3036_sys_set_power_domain(enum pmu_power_domain pd, bool on)
{
	if (on) {
#ifdef CONFIG_SMP
		if (PD_CPU_1 == pd) {
			writel_relaxed(0x20000,
				       RK_CRU_VIRT + RK3036_CRU_SOFTRST0_CON);
			dsb();
			udelay(10);
			writel_relaxed(virt_to_phys(secondary_startup),
				       RK3036_IMEM_VIRT + 8);
			writel_relaxed(0xDEADBEAF, RK3036_IMEM_VIRT + 4);
			dsb_sev();
		}
#endif
	} else {
#ifdef CONFIG_SMP
		if (PD_CPU_1 == pd) {
			writel_relaxed(0x20002,
				       RK_CRU_VIRT + RK3036_CRU_SOFTRST0_CON);
			dsb();
		}
#endif
	}

	return 0;
}
static int __cpuinit omap4_boot_secondary(unsigned int cpu,
					  struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	flush_cache_all();
	smp_wmb();

	if (!cpu1_clkdm)
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted) {
		clkdm_wakeup(cpu1_clkdm);
		clkdm_allow_idle(cpu1_clkdm);
	} else {
		dsb_sev();
		booted = true;
	}

	gic_raise_softirq(cpumask_of(cpu), 0);

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
static int __cpuinit brcm_boot_secondary(unsigned int cpu,
					 struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);
	dsb_sev();

	/*
	 * Timeout set on purpose in jiffies so that on slow processors
	 * that must also have low HZ it will wait longer.
	 */
	timeout = jiffies + (HZ * 10);

	udelay(100);

	/*
	 * If the secondary CPU was waiting on WFE, it should
	 * be already watching <pen_release>, or it could be
	 * waiting in WFI, send it an IPI to be sure it wakes.
	 */
	if (pen_release != -1)
		tick_broadcast(cpumask_of(cpu));

	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
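/*
 * Sketch of the other half of the holding-pen handshake assumed above:
 * write_pen_release() publishes the value with the caches cleaned, and the
 * secondary's init hook clears the pen once it is running.  These are
 * approximate forms of the usual helpers, shown for illustration only.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void brcm_secondary_init(unsigned int cpu)
{
	/* let the primary processor know we're out of the pen */
	write_pen_release(-1);

	/* synchronise with the boot thread */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}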
int __cpuinit meson_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	printk("write pen_release: %d\n", cpu_logical_map(cpu));
	write_pen_release(cpu_logical_map(cpu));

#ifndef CONFIG_MESON_TRUSTZONE
	// check_and_rewrite_cpu_entry();
	meson_set_cpu_ctrl_addr(cpu,
		(const uint32_t)virt_to_phys(meson_secondary_startup));
	meson_set_cpu_power_ctrl(cpu, 1);

	/* wait for the firmware to consume and clear the ctrl address */
	timeout = jiffies + (10 * HZ);
	while (meson_get_cpu_ctrl_addr(cpu)) {
		if (!time_before(jiffies, timeout)) {
			spin_unlock(&boot_lock);
			return -EPERM;
		}
	}
#endif

	meson_secondary_set(cpu);
	dsb_sev();
	// smp_send_reschedule(cpu);

	timeout = jiffies + (10 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
static void __init wakeup_secondary(void)
{
	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	mb();
}
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int pcpu, pcluster, ret;
	extern void secondary_startup(void);

	cpu_to_pcpu(cpu, &pcpu, &pcluster);

	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
		 __func__, cpu, pcpu, pcluster);

	mcpm_set_entry_vector(pcpu, pcluster, NULL);

	ret = mcpm_cpu_power_up(pcpu, pcluster);
	if (ret)
		return ret;

	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	dsb_sev();
	return 0;
}
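/*
 * cpu_to_pcpu() is not part of this snippet; presumably it decodes the
 * logical CPU's MPIDR into (cpu, cluster) affinity levels, along the lines
 * of this sketch (compare the open-coded version in the next variant below).
 */
static void cpu_to_pcpu(unsigned int cpu,
			unsigned int *pcpu, unsigned int *pcluster)
{
	unsigned int mpidr = cpu_logical_map(cpu);

	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}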
static int __cpuinit mcpm_boot_secondary(unsigned int cpu,
					 struct task_struct *idle)
{
	unsigned int mpidr, pcpu, pcluster, ret;
	extern void secondary_startup(void);

	mpidr = cpu_logical_map(cpu);
	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
		 __func__, cpu, pcpu, pcluster);

	mcpm_set_entry_vector(pcpu, pcluster, NULL);

	ret = mcpm_cpu_power_up(pcpu, pcluster);
	if (ret)
		return ret;

	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	dsb_sev();
	return 0;
}
static void __init wakeup_secondary(void)
{
#if defined(CHIPREG_BOOT_2ND_ADDR_OFFSET)
	void __iomem *chipRegBase;

	chipRegBase = IOMEM(KONA_CHIPREG_VA);
	writel((virt_to_phys(kona_secondary_startup) & (~0x3)) | 0x1,
	       chipRegBase + CHIPREG_BOOT_2ND_ADDR_OFFSET);
	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	mb();
#endif
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static bool first = true;

	if (first) {
		unsigned long sz = 0x10;

		pmu_set_power_domain(PD_A9_1, false);

		memcpy(RK30_IMEM_BASE, rk30_sram_secondary_startup, sz);
		flush_icache_range((unsigned long)RK30_IMEM_BASE,
				   (unsigned long)RK30_IMEM_BASE + sz);
		outer_clean_range(0, sz);

		first = false;
	}

	dsb_sev();
	pmu_set_power_domain(PD_A9_1, true);

	return 0;
}
/**
 * rockchip_smp_prepare_sram - populate necessary sram block
 * Starting cores execute the code residing at the start of the on-chip sram
 * after power-on. Therefore make sure this sram region is reserved and
 * big enough. After this check, copy the trampoline code that directs the
 * core to the real startup code in ram into the sram-region.
 * @node: mmio-sram device node
 */
static int __init rockchip_smp_prepare_sram(struct device_node *node)
{
	unsigned int trampoline_sz = &rockchip_secondary_trampoline_end -
				     &rockchip_secondary_trampoline;
	struct resource res;
	unsigned int rsize;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("%s: could not get address for node %s\n",
		       __func__, node->full_name);
		return ret;
	}

	rsize = resource_size(&res);
	if (rsize < trampoline_sz) {
		pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
		       __func__, rsize, trampoline_sz);
		return -EINVAL;
	}

	sram_base_addr = of_iomap(node, 0);

	/* set the boot function for the sram code */
	rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);

	/* copy the trampoline to sram, that runs during startup of the core */
	memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
	flush_cache_all();
	outer_clean_range(0, trampoline_sz);

	dsb_sev();

	return 0;
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	int ret;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	watchdog_save();

	ret = exynos_power_up_cpu(cpu);
	if (ret) {
		spin_unlock(&boot_lock);
		return ret;
	}

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();

#ifdef CONFIG_ARM_TRUSTZONE
		if (!soc_is_exynos5410()) {
			if (soc_is_exynos4412())
				exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
			else
				exynos_smc(SMC_CMD_CPU1BOOT, 0, 0, 0);
		}
#endif
		__raw_writel(virt_to_phys(exynos4_secondary_startup),
			     cpu_boot_info[cpu].boot_base);

		watchdog_restore();

		if (soc_is_exynos5410())
			dsb_sev();
		else
			arm_send_ping_ipi(cpu);

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	smp_wmb();
	clean_dcache_area((void *)&pen_release, sizeof(pen_release));
	/* clean exactly one pen_release-sized region from the outer cache */
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
	dsb_sev();

	/*
	 * Timeout set on purpose in jiffies so that on slow processors
	 * that must also have low HZ it will wait longer.
	 */
	timeout = jiffies + 128;

	udelay(100);

	/*
	 * If the secondary CPU was waiting on WFE, it should
	 * be already watching <pen_release>, or it could be
	 * waiting in WFI, send it an IPI to be sure it wakes.
	 */
	if (pen_release != -1)
		smp_cross_call(cpumask_of(cpu));

	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	if (arch_is_coherent()) {
		outer_cache.inv_range = NULL;
		outer_cache.clean_range = NULL;
		outer_cache.flush_range = NULL;
		outer_cache.sync = NULL;
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 *    the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 *    it's wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup(cpu1_clkdm);
		omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
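/*
 * The GIC distributor helpers used in the erratum workaround above are not
 * part of this snippet.  As a rough approximation they amount to toggling
 * and polling the GICD control register; gic_dist_base_addr and
 * GIC_DIST_CTRL are assumed to be mapped/defined elsewhere, and the _sketch
 * names below are illustrative, not the platform's actual symbols.
 */
static void gic_dist_disable_sketch(void)
{
	if (gic_dist_base_addr)
		writel_relaxed(0x0, gic_dist_base_addr + GIC_DIST_CTRL);
}

static bool gic_dist_disabled_sketch(void)
{
	return !(readl_relaxed(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1);
}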
static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	struct device_node *np;

	np = of_find_matching_node(NULL, clk_ids);
	if (!np)
		return -ENODEV;

	clk_base = of_iomap(np, 0);
	if (!clk_base)
		return -ENOMEM;

	/*
	 * write the address of secondary startup into the clkc register
	 * at offset 0x2bC, then write the magic number 0x3CAF5D62 to the
	 * clkc register at offset 0x2b8, which is what boot rom code is
	 * waiting for. This would wake up the secondary core from WFE
	 */
#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
	__raw_writel(__pa_symbol(sirfsoc_secondary_startup),
		     clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);

#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
	__raw_writel(0x3CAF5D62,
		     clk_base + SIRFSOC_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();

	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting prima2_pen_release.
	 *
	 * Note that "prima2_pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	prima2_pen_release = cpu_logical_map(cpu);
	sync_cache_w(&prima2_pen_release);

	/*
	 * Send the secondary CPU SEV, thereby causing the boot monitor to read
	 * the JUMPADDR and WAKEMAGIC, and branch to the address found there.
	 */
	dsb_sev();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (prima2_pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return prima2_pen_release != -1 ? -ENOSYS : 0;
}
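/*
 * For completeness, a sketch of what the secondary side of this pen is
 * expected to do once the ROM has branched to sirfsoc_secondary_startup and
 * the kernel's secondary init hook runs: clear prima2_pen_release so the
 * wait loop above can exit.  Illustrative only.
 */
static void sirfsoc_secondary_init(unsigned int cpu)
{
	/* let the primary processor know we're out of the pen */
	prima2_pen_release = -1;
	smp_wmb();

	/* synchronise with the boot thread */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}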
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	flush_cache_all();
	smp_wmb();

	if (!cpu1_clkdm)
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. After the wakeup, CPU1 restores its
	 * clockdomain hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 *    the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 *    it's wakeup path.
		 */
		if (!cpu_is_omap443x()) {
			local_irq_disable();
			gic_dist_disable();
		}

		clkdm_wakeup(cpu1_clkdm);

		if (!cpu_is_omap443x()) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		clkdm_init_mpu1(cpu1_clkdm);
		dsb_sev();
		booted = true;
	}

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
static int exynos_power_up_cpu(unsigned int cpu)
{
	unsigned int timeout;
	unsigned int val;
	void __iomem *power_base;
#ifndef CONFIG_EXYNOS5_MP
	unsigned int cluster = (read_cpuid_mpidr() >> 8) & 0xf;
#endif
	unsigned int lpe_bits, lpe_bits_status, enabled = 0;

	power_base = cpu_boot_info[cpu].power_base;
	if (power_base == 0)
		return -EPERM;

	val = __raw_readl(power_base + 0x4);
	if (soc_is_exynos5260()) {
		if (val & 0x40000) {
			enabled = 1;
		} else {
			val = __raw_readl(power_base + 0x8);
			if (val & EXYNOS5_USE_SC_COUNTER) {
				val &= ~EXYNOS5_USE_SC_COUNTER;
				val |= EXYNOS5_USE_SC_FEEDBACK;
				__raw_writel(val, power_base + 0x8);
			}

			lpe_bits = 0x000F000F;
			lpe_bits_status = 0x4000F;
#ifndef CONFIG_ARM_TRUSTZONE
			__raw_writel(0, cpu_boot_info[cpu].boot_base);
#endif
		}
	} else {
		if (val & EXYNOS_CORE_LOCAL_PWR_EN) {
			enabled = 1;
		} else {
			lpe_bits = EXYNOS_CORE_LOCAL_PWR_EN;
			lpe_bits_status = EXYNOS_CORE_LOCAL_PWR_EN;
		}
	}

	if (!enabled) {
		__raw_writel(lpe_bits, power_base);

		/* wait max 10 ms until cpu is on */
		timeout = 10;
		while (timeout) {
			val = __raw_readl(power_base + 0x4);
			if ((val & lpe_bits) == lpe_bits_status)
				break;

			mdelay(1);
			timeout--;
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu%d power up failed", cpu);
			return -ETIMEDOUT;
		}
	}

#ifdef CONFIG_EXYNOS5_MP
	if (cpu < 4) {
#else
	if (cluster) {
#endif
		while (!__raw_readl(EXYNOS_PMU_SPARE2))
			udelay(10);

		udelay(10);

		if (soc_is_exynos5260()) {
			val = __raw_readl(power_base + 0x4);
			val |= (0xf << 8);
			__raw_writel(val, power_base + 0x4);

			pr_debug("cpu%d: SWRESET\n", cpu);
			__raw_writel((0x1 << 1), power_base + 0xc);
		} else {
			printk(KERN_DEBUG "cpu%d: SWRESET\n", cpu);

			val = ((1 << 20) | (1 << 8)) << cpu;
			__raw_writel(val, EXYNOS_SWRESET);
		}
	}

	return 0;
}
#else
#error "exynos_power_up_cpu() is not defined"
#endif

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	int ret;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

#ifdef CONFIG_WATCHDOG
	if (soc_is_exynos5250())
		watchdog_save();
#endif

#ifdef CONFIG_SOC_EXYNOS4415
	__raw_writel(0x0, cpu_boot_info[cpu].boot_base);
#endif

	ret = exynos_power_up_cpu(cpu);
	if (ret) {
		spin_unlock(&boot_lock);
		return ret;
	}

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();

#ifdef CONFIG_ARM_TRUSTZONE
		if (soc_is_exynos4210() || soc_is_exynos4212() ||
		    soc_is_exynos5250())
			exynos_smc(SMC_CMD_CPU1BOOT, 0, 0, 0);
		else if (soc_is_exynos4412() || soc_is_exynos4415())
			exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
#endif
		__raw_writel(virt_to_phys(exynos4_secondary_startup),
			     cpu_boot_info[cpu].boot_base);

#ifdef CONFIG_WATCHDOG
		if (soc_is_exynos5250())
			watchdog_restore();
#endif

		if (soc_is_exynos3250() || soc_is_exynos3470() ||
		    soc_is_exynos5410() || soc_is_exynos5420() ||
		    soc_is_exynos5260())
			dsb_sev();
		else
			arm_send_ping_ipi(cpu);

		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
static int rk3288_pmu_set_power_domain(enum pmu_power_domain pd, bool on)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_pd_lock, flags);

	if (rk3288_pmu_power_domain_is_on(pd) == on)
		goto out;

	if (!on) {
		/* if power down, idle request to NIU first */
		if (pd == PD_VIO) {
			SAVE_QOS(vio0_iep_qos, VIO0_IEP);
			SAVE_QOS(vio0_vip_qos, VIO0_VIP);
			SAVE_QOS(vio0_vop_qos, VIO0_VOP);
			SAVE_QOS(vio1_isp_r_qos, VIO1_ISP_R);
			SAVE_QOS(vio1_isp_w0_qos, VIO1_ISP_W0);
			SAVE_QOS(vio1_isp_w1_qos, VIO1_ISP_W1);
			SAVE_QOS(vio1_vop_qos, VIO1_VOP);
			SAVE_QOS(vio2_rga_r_qos, VIO2_RGA_R);
			SAVE_QOS(vio2_rga_w_qos, VIO2_RGA_W);
			rk3288_pmu_set_idle_request(IDLE_REQ_VIO, true);
		} else if (pd == PD_VIDEO) {
			SAVE_QOS(video_qos, VIDEO);
			rk3288_pmu_set_idle_request(IDLE_REQ_VIDEO, true);
		} else if (pd == PD_GPU) {
			SAVE_QOS(gpu_r_qos, GPU_R);
			SAVE_QOS(gpu_w_qos, GPU_W);
			rk3288_pmu_set_idle_request(IDLE_REQ_GPU, true);
		} else if (pd == PD_HEVC) {
			SAVE_QOS(hevc_r_qos, HEVC_R);
			SAVE_QOS(hevc_w_qos, HEVC_W);
			rk3288_pmu_set_idle_request(IDLE_REQ_HEVC, true);
		} else if (pd >= PD_CPU_1 && pd <= PD_CPU_3) {
			writel_relaxed(0x20002 << (pd - PD_CPU_1),
				       RK_CRU_VIRT + RK3288_CRU_SOFTRSTS_CON(0));
			dsb();
		}
	}

	rk3288_do_pmu_set_power_domain(pd, on);

	if (on) {
		/* if power up, idle request release to NIU */
		if (pd == PD_VIO) {
			rk3288_pmu_set_idle_request(IDLE_REQ_VIO, false);
			RESTORE_QOS(vio0_iep_qos, VIO0_IEP);
			RESTORE_QOS(vio0_vip_qos, VIO0_VIP);
			RESTORE_QOS(vio0_vop_qos, VIO0_VOP);
			RESTORE_QOS(vio1_isp_r_qos, VIO1_ISP_R);
			RESTORE_QOS(vio1_isp_w0_qos, VIO1_ISP_W0);
			RESTORE_QOS(vio1_isp_w1_qos, VIO1_ISP_W1);
			RESTORE_QOS(vio1_vop_qos, VIO1_VOP);
			RESTORE_QOS(vio2_rga_r_qos, VIO2_RGA_R);
			RESTORE_QOS(vio2_rga_w_qos, VIO2_RGA_W);
		} else if (pd == PD_VIDEO) {
			rk3288_pmu_set_idle_request(IDLE_REQ_VIDEO, false);
			RESTORE_QOS(video_qos, VIDEO);
		} else if (pd == PD_GPU) {
			rk3288_pmu_set_idle_request(IDLE_REQ_GPU, false);
			RESTORE_QOS(gpu_r_qos, GPU_R);
			RESTORE_QOS(gpu_w_qos, GPU_W);
		} else if (pd == PD_HEVC) {
			rk3288_pmu_set_idle_request(IDLE_REQ_HEVC, false);
			RESTORE_QOS(hevc_r_qos, HEVC_R);
			RESTORE_QOS(hevc_w_qos, HEVC_W);
		} else if (pd >= PD_CPU_1 && pd <= PD_CPU_3) {
			writel_relaxed(0x20000 << (pd - PD_CPU_1),
				       RK_CRU_VIRT + RK3288_CRU_SOFTRSTS_CON(0));
			dsb();
			udelay(10);
			writel_relaxed(virt_to_phys(secondary_startup),
				       RK3288_IMEM_VIRT + 8);
			writel_relaxed(0xDEADBEAF, RK3288_IMEM_VIRT + 4);
			dsb_sev();
		}
	}

out:
	spin_unlock_irqrestore(&pmu_pd_lock, flags);
	return 0;
}