/*
 * arm_enter_idle_state - Programs CPU to enter the specified state
 *
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int arm_enter_idle_state(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int idx)
{
	int ret = 0;
	struct timeval start_time, end_time;
	long usec_elapsed;

	if (cpuidle_debug)
		do_gettimeofday(&start_time);

	switch (idx) {
	case STANDBY:
		cpu_do_idle();
		break;
	case L_SLEEP:
		light_sleep_en();
		cpu_do_idle();
		light_sleep_dis();
		break;
	case CORE_PD:
		light_sleep_en();
		cpu_pm_enter();
		ret = cpu_suspend(idx);
		cpu_pm_exit();
		light_sleep_dis();
		break;
	case CLUSTER_PD:
		light_sleep_en();
		cpu_pm_enter();
		cpu_cluster_pm_enter();
		ret = cpu_suspend(idx);
		cpu_cluster_pm_exit();
		cpu_pm_exit();
		light_sleep_dis();
		break;
#ifdef CONFIG_ARCH_SCX35LT8
	case TOP_PD:
		light_sleep_en();
		cpu_pm_enter();
		cpu_cluster_pm_enter();
		ret = cpu_suspend(idx);
		cpu_cluster_pm_exit();
		cpu_pm_exit();
		light_sleep_dis();
		break;
#endif
	default:
		cpu_do_idle();
		WARN(1, "[CPUIDLE]: no such idle level: %d\n", idx);
	}
	if (cpuidle_debug) {
		do_gettimeofday(&end_time);
		usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
			(end_time.tv_usec - start_time.tv_usec);
		pr_info("[CPUIDLE] Enter idle state: %d, usec_elapsed = %ld\n",
				idx, usec_elapsed);
	}
	return ret ? -1 : idx;
}
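For reference, a callback like this is wired into the cpuidle core through the driver's state table. The sketch below is hypothetical: the driver name sprd_idle_driver, the latency numbers, and the assumption that STANDBY is index 0 are illustrative, not taken from the source; only arm_enter_idle_state and the state names come from the example above.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>

static struct cpuidle_driver sprd_idle_driver = {	/* hypothetical name */
	.name = "sprd_idle",
	.owner = THIS_MODULE,
	.states = {
		[STANDBY] = {
			.enter			= arm_enter_idle_state,
			.exit_latency		= 1,	/* illustrative numbers */
			.target_residency	= 1,
			.name			= "STANDBY",
			.desc			= "WFI",
		},
		/* L_SLEEP, CORE_PD, CLUSTER_PD (and TOP_PD) entries follow the same pattern */
	},
	.state_count = 1,	/* bump as the remaining states are filled in */
};

static int __init sprd_idle_init(void)
{
	return cpuidle_register(&sprd_idle_driver, NULL);
}
device_initcall(sprd_idle_init);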
Example #2
static int imx6sx_enter_wait(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	imx6q_set_lpm(WAIT_UNCLOCKED);

	switch (index) {
	case 1:
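		/* WAIT_UNCLOCKED was set above, so this WFI also gates the ARM clock */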
		cpu_do_idle();
		break;
	case 2:
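		/* deeper state: the ARM core is power-gated in LPM; resume path is v7_cpu_resume */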
		imx6_enable_rbc(true);
		imx_gpc_set_arm_power_in_lpm(true);
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Need to notify there is a cpu pm operation. */
		cpu_pm_enter();
		cpu_cluster_pm_enter();

		cpu_suspend(0, imx6sx_idle_finish);

		cpu_cluster_pm_exit();
		cpu_pm_exit();
		imx_gpc_set_arm_power_in_lpm(false);
		imx6_enable_rbc(false);
		break;
	default:
		break;
	}

	imx6q_set_lpm(WAIT_CLOCKED);

	return index;
}
Example #3
static int cpu_pm_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	ret = cpu_cluster_pm_enter();
	return ret;
}
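cpu_pm_enter() and cpu_cluster_pm_enter() only broadcast notifications; drivers whose context is lost across the low-power state register a CPU PM notifier to react to them. Below is a minimal, hypothetical consumer (all my_ctx_* names are made up for illustration):

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int my_ctx_notify(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:		/* this CPU's context is about to be lost */
		/* save per-CPU state (e.g. GIC CPU interface) here */
		break;
	case CPU_CLUSTER_PM_ENTER:	/* last CPU: cluster-shared context will be lost */
		/* save shared state (e.g. GIC distributor) here */
		break;
	case CPU_PM_EXIT:
	case CPU_CLUSTER_PM_EXIT:
		/* restore whatever the matching enter event saved */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_ctx_nb = {
	.notifier_call = my_ctx_notify,
};

static int __init my_ctx_init(void)
{
	return cpu_pm_register_notifier(&my_ctx_nb);
}
core_initcall(my_ctx_init);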
Example #4
File: pm.c Project: 1youhun1/linux
void tegra_idle_lp2_last(void)
{
	tegra_pmc_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

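	/*
	 * cpu_suspend() passes its first argument (here the
	 * virt-to-phys offset) to the tegra_sleep_cpu finisher.
	 */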
	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	restore_cpu_complex();
	cpu_cluster_pm_exit();
}
Example #5
static int highbank_pm_enter(suspend_state_t state)
{
	cpu_pm_enter();
	cpu_cluster_pm_enter(0);

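	/* after power-down, cpu 0 re-enters the kernel at cpu_resume */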
	highbank_set_cpu_jump(0, cpu_resume);
	cpu_suspend(0, highbank_suspend_finish);

	cpu_cluster_pm_exit(0);
	cpu_pm_exit();

	highbank_smc1(0x102, 0x1);
	if (scu_base_addr)
		scu_enable(scu_base_addr);
	return 0;
}
Example #6
static int cpu_pm_suspend(void)
{
	int ret;
	
#if 0	// Block temporarily until debug_exynos5260.c is applied.
#ifdef CONFIG_SEC_PM
	if (FLAG_T32_EN)
		goto out;
#endif
#endif

	ret = cpu_pm_enter();
	if (ret)
		return ret;

#if 0	// Block temporarily until debug_exynos5260.c is applied.
#ifdef CONFIG_SEC_PM
out:
#endif
#endif
	ret = cpu_cluster_pm_enter();
	return ret;
}
Example #7
/*
 * mmp_pm_down - Programs CPU to enter the specified state
 *
 * @addr: address of the state index selected by the cpuidle governor
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static void mmp_pm_down(unsigned long addr)
{
    int *idx = (int *)addr;
    int mpidr, cpu, cluster;
    bool skip_wfi = false, last_man = false;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

    __mcpm_cpu_going_down(cpu, cluster);

    arch_spin_lock(&mmp_lpm_lock);

    mmp_pm_use_count[cluster][cpu]--;

    if (mmp_pm_use_count[cluster][cpu] == 0) {
        mmp_enter_lpm[cluster][cpu] = (1 << (*idx + 1)) - 1;
        *idx = mmp_idle->cpudown_state;
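        /* last CPU going idle in this cluster: raise the cluster-level notification */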
        if (cluster_is_idle(cluster)) {
            cpu_cluster_pm_enter();
            find_coupled_state(idx, cluster);
            if (*idx >= mmp_idle->wakeup_state &&
                    *idx < mmp_idle->l2_flush_state &&
                    mmp_idle->ops->save_wakeup) {
                mmp_wake_saved = 1;
                mmp_idle->ops->save_wakeup();
            }
            BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
            last_man = true;
        }
        if (mmp_idle->ops->set_pmu)
            mmp_idle->ops->set_pmu(cpu, *idx);
    } else if (mmp_pm_use_count[cluster][cpu] == 1) {
        /*
         * A power_up request went ahead of us.
         * Even if we do not want to shut this CPU down,
         * the caller expects a certain state as if the WFI
         * was aborted.  So let's continue with cache cleaning.
         */
        skip_wfi = true;
        *idx = INVALID_LPM;
    } else
        BUG();

    if (last_man && (*idx >= mmp_idle->cpudown_state) && (*idx != LPM_D2_UDR)) {
        cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_M2_OR_DEEPER_ENTER, *idx);
#ifdef CONFIG_VOLDC_STAT
        vol_dcstat_event(VLSTAT_LPM_ENTRY, *idx, 0);
        vol_ledstatus_event(*idx);
#endif
    }

    trace_pxa_cpu_idle(LPM_ENTRY(*idx), cpu, cluster);
    cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, *idx);

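    /* Only the last man may take the whole cluster down; everyone else just powers down their own CPU. */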
    if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
        arch_spin_unlock(&mmp_lpm_lock);
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);
        if (*idx >= mmp_idle->l2_flush_state)
            ca7_power_down_udr();
        else
            ca7_power_down();
    } else {
        arch_spin_unlock(&mmp_lpm_lock);
        __mcpm_cpu_down(cpu, cluster);
        ca7_power_down();
    }

    if (!skip_wfi)
        cpu_do_idle();
}