Example #1
void mcpm_cpu_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_going_down, last_man;
	phys_reset_t phys_reset;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (WARN_ON_ONCE(!platform_ops))
	       return;
	BUG_ON(!irqs_disabled());

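	/* Switch to a 1:1 page table so this CPU can be reset safely. */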
	setup_mm_for_reboot();

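	/*
	 * Advertise this CPU as going down in the MCPM state machine
	 * before taking the lock, so a racing inbound CPU can see it.
	 */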
	__mcpm_cpu_going_down(cpu, cluster);
	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

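	/*
	 * The use count pairs power_up/power_down requests: 0 means we
	 * really go down, 1 means a power_up request raced ahead of us.
	 */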
	mcpm_cpu_use_count[cluster][cpu]--;
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
	       mcpm_cpu_use_count[cluster][cpu] != 1);
	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
	last_man = mcpm_cluster_unused(cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		platform_ops->cpu_powerdown_prepare(cpu, cluster);
		platform_ops->cluster_powerdown_prepare(cluster);
		arch_spin_unlock(&mcpm_lock);
		platform_ops->cluster_cache_disable();
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		if (cpu_going_down)
			platform_ops->cpu_powerdown_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
		/*
		 * If cpu_going_down is false here, that means a power_up
		 * request raced ahead of us.  Even if we do not want to
		 * shut this CPU down, the caller still expects execution
		 * to return through the system resume entry path, like
		 * when the WFI is aborted due to a new IRQ or the like.
		 * So let's continue with cache cleaning in all cases.
		 */
		platform_ops->cpu_cache_disable();
	}

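	/* Caches are clean: mark this CPU as fully down in the state machine. */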
	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (cpu_going_down)
		wfi();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * CPU might not be able to actually enter a powered down state
	 * with the WFI instruction if the power_up request has removed
	 * the required reset condition.  We must perform a re-entry in
	 * the kernel as if the power_up method just had deasserted reset
	 * on the CPU.
	 */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}
/*
 * mmp_pm_down - program the CPU to enter the selected low-power state
 *
 * @addr: pointer to the state index selected by the cpuidle governor
 *
 * Called from the cpuidle framework to take the CPU down into the
 * target state chosen by the governor.
 */
static void mmp_pm_down(unsigned long addr)
{
    int *idx = (int *)addr;
    int mpidr, cpu, cluster;
    bool skip_wfi = false, last_man = false;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

    __mcpm_cpu_going_down(cpu, cluster);

    arch_spin_lock(&mmp_lpm_lock);

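    /* Drop this CPU's use count; a concurrent power_up may have raised it. */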
    mmp_pm_use_count[cluster][cpu]--;

    if (mmp_pm_use_count[cluster][cpu] == 0) {
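        /*
         * Record every low-power state up to and including the
         * governor-selected index as reachable for this CPU
         * (bits 0..*idx of the mask).
         */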
        mmp_enter_lpm[cluster][cpu] = (1 << (*idx + 1)) - 1;
        *idx = mmp_idle->cpudown_state;
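        /*
         * If every other CPU in the cluster is already idle, notify
         * cluster PM and look for a coupled state deep enough for the
         * whole cluster, saving wakeup settings when required.
         */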
        if (cluster_is_idle(cluster)) {
            cpu_cluster_pm_enter();
            find_coupled_state(idx, cluster);
            if (*idx >= mmp_idle->wakeup_state &&
                    *idx < mmp_idle->l2_flush_state &&
                    mmp_idle->ops->save_wakeup) {
                mmp_wake_saved = 1;
                mmp_idle->ops->save_wakeup();
            }
            BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
            last_man = true;
        }
        if (mmp_idle->ops->set_pmu)
            mmp_idle->ops->set_pmu(cpu, *idx);
    } else if (mmp_pm_use_count[cluster][cpu] == 1) {
        /*
         * A power_up request raced ahead of us.  Even if we do not
         * want to shut this CPU down, the caller still expects a
         * return as if the WFI had been aborted, so continue with
         * cache cleaning but skip the actual WFI.
         */
        skip_wfi = true;
        *idx = INVALID_LPM;
    } else {
        BUG();
    }

    if (last_man && (*idx >= mmp_idle->cpudown_state) && (*idx != LPM_D2_UDR)) {
        cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_M2_OR_DEEPER_ENTER, *idx);
#ifdef CONFIG_VOLDC_STAT
        vol_dcstat_event(VLSTAT_LPM_ENTRY, *idx, 0);
        vol_ledstatus_event(*idx);
#endif
    }

    trace_pxa_cpu_idle(LPM_ENTRY(*idx), cpu, cluster);
    cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, *idx);

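    /*
     * Only the "last man" that wins the MCPM outbound critical section
     * may take the whole cluster down; everyone else powers down only
     * its own CPU.
     */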
    if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
        arch_spin_unlock(&mmp_lpm_lock);
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);
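        /* Use the deeper power-down path when the state requires an L2 flush. */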
        if (*idx >= mmp_idle->l2_flush_state)
            ca7_power_down_udr();
        else
            ca7_power_down();
    } else {
        arch_spin_unlock(&mmp_lpm_lock);
        __mcpm_cpu_down(cpu, cluster);
        ca7_power_down();
    }

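    /* Execute WFI unless a concurrent power_up raced ahead of us. */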
    if (!skip_wfi)
        cpu_do_idle();
}