Example #1
/**
 * next_valid_state - Find next valid C-state
 * @dev: cpuidle device
 * @state: Currently selected C-state
 *
 * If the current state is valid, it is returned back to the caller.
 * Else, this function searches for a lower c-state which is still
 * valid.
 *
 * A state is valid if the 'valid' field is enabled and
 * if it satisfies the enable_off_mode condition.
 */
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
					      struct cpuidle_state *curr)
{
	struct cpuidle_state *next = NULL;
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
	u32 mpu_deepest_state = PWRDM_POWER_RET;
	u32 core_deepest_state = PWRDM_POWER_RET;

	if (off_mode_enabled) {
		mpu_deepest_state = PWRDM_POWER_OFF;
		/*
		 * Erratum i583: valid for ES rev < ES1.2 on 3630.
		 * CORE OFF mode is not supported in a stable form, restrict
		 * instead the CORE state to RET.
		 */
		if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
			core_deepest_state = PWRDM_POWER_OFF;
	}

	/* Check if current state is valid */
	if ((cx->valid) &&
	    (cx->mpu_state >= mpu_deepest_state) &&
	    (cx->core_state >= core_deepest_state)) {
		return curr;
	} else {
		int idx = OMAP3_NUM_STATES - 1;

		/* Reach the current state starting at highest C-state */
		for (; idx >= 0; idx--) {
			if (&dev->states[idx] == curr) {
				next = &dev->states[idx];
				break;
			}
		}

		/* Should never hit this condition */
		WARN_ON(next == NULL);

		/*
		 * Drop to next valid state.
		 * Start search from the next (lower) state.
		 */
		idx--;
		for (; idx >= 0; idx--) {
			cx = cpuidle_get_statedata(&dev->states[idx]);
			if ((cx->valid) &&
			    (cx->mpu_state >= mpu_deepest_state) &&
			    (cx->core_state >= core_deepest_state)) {
				next = &dev->states[idx];
				break;
			}
		}
		/*
		 * C1 is always valid.
		 * So, no need to check for 'next==NULL' outside this loop.
		 */
	}

	return next;
}
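For context, next_valid_state() can only return something useful if the driver attached an omap3_idle_statedata pointer to each C-state at init time. The following is a minimal, hypothetical registration sketch (not taken from the example above), assuming the legacy cpuidle API in which each struct cpuidle_state carries a driver_data pointer set via cpuidle_set_statedata():

/* Hypothetical sketch: attach per-state data so that cpuidle_get_statedata()
 * in next_valid_state() can retrieve it later. Field initialisation is
 * illustrative; a real driver fills mpu_state/core_state per C-state.
 */
static struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];

static void omap3_idle_attach_statedata(struct cpuidle_device *dev)
{
	int i;

	for (i = 0; i < OMAP3_NUM_STATES; i++) {
		struct omap3_idle_statedata *cx = &omap3_idle_data[i];

		cx->valid = 1;	/* driver-specific validity flag checked above */
		cpuidle_set_statedata(&dev->states[i], cx);
	}
}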
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
						struct cpuidle_state *curr)
{
	struct cpuidle_state *next = NULL;
	struct omap3_processor_cx *cx;

	cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);

	/* Check if current state is valid */
	if (cx->valid) {
		return curr;
	} else {
		u8 idx = OMAP3_STATE_MAX;

		/*
		 * Reach the current state starting at highest C-state
		 */
		for (; idx >= OMAP3_STATE_C1; idx--) {
			if (&dev->states[idx] == curr) {
				next = &dev->states[idx];
				break;
			}
		}

		/*
		 * Should never hit this condition.
		 */
		WARN_ON(next == NULL);

		/*
		 * Drop to next valid state.
		 * Start search from the next (lower) state.
		 */
		idx--;
		for (; idx >= OMAP3_STATE_C1; idx--) {
			struct omap3_processor_cx *cx;

			cx = cpuidle_get_statedata(&dev->states[idx]);
			if (cx->valid) {
				next = &dev->states[idx];
				break;
			}
		}
		/*
		 * C1 and C2 are always valid.
		 * So, no need to check for 'next==NULL' outside this loop.
		 */
	}

	return next;
}
static int msm_cpuidle_enter(
	struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
{
	int ret = 0;
	int i;
	enum msm_pm_sleep_mode pm_mode;

	pm_mode = msm_pm_idle_enter(dev, drv, index);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state_usage *st_usage = &dev->states_usage[i];
		enum msm_pm_sleep_mode last_mode =
			(enum msm_pm_sleep_mode)cpuidle_get_statedata(st_usage);

		if (last_mode == pm_mode) {
			ret = i;
			break;
		}
	}

	local_irq_enable();

	return ret;
}
Example #4
static int msm_cpuidle_enter(
	struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
{
	int ret = 0;
	int i = 0;
	enum msm_pm_sleep_mode pm_mode;
	struct cpuidle_state_usage *st_usage = NULL;

#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	pm_mode = msm_pm_idle_prepare(dev, drv, index);
	dev->last_residency = msm_pm_idle_enter(pm_mode);
	for (i = 0; i < dev->state_count; i++) {
		st_usage = &dev->states_usage[i];
		if ((enum msm_pm_sleep_mode) cpuidle_get_statedata(st_usage)
		    == pm_mode) {
			ret = i;
			break;
		}
	}

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

	local_irq_enable();

	return ret;
}
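The index lookup in both MSM variants above only works if, at registration time, each state's driver data was set to its msm_pm_sleep_mode value. A hedged sketch of that setup is shown below; msm_cpuidle_mode_of() is a hypothetical helper standing in for however the real driver derives the mode for state i:

/* Illustrative sketch: store the sleep-mode enum as per-state driver data
 * so msm_cpuidle_enter() can map the mode returned by the PM core back to
 * a cpuidle state index.
 */
static void msm_cpuidle_init_statedata(struct cpuidle_device *dev)
{
	int i;

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state_usage *st_usage = &dev->states_usage[i];
		enum msm_pm_sleep_mode mode = msm_cpuidle_mode_of(i);	/* hypothetical helper */

		cpuidle_set_statedata(st_usage, (void *)mode);
	}
}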
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t  kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	
	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
Example #6
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	ktime_t  kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return 0;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return idle_time;
}
Example #7
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return 0;
	}

	acpi_state_timer_broadcast(pr, cx, 1);
	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;
	acpi_state_timer_broadcast(pr, cx, 0);

	return ticks_elapsed_in_us(t1, t2);
}
Example #8
/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	struct cpuidle_state *new_state = next_valid_state(dev, state);
	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
	u32 cam_state;
	struct omap3_processor_cx *cx;
	int ret;

	if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
		BUG_ON(!dev->safe_state);
		new_state = dev->safe_state;
		goto select_state;
	}

	cx = cpuidle_get_statedata(state);
	core_next_state = cx->core_state;

	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

	/*
	 * Prevent idle completely if CAM is active.
	 * CAM does not have wakeup capability in OMAP3.
	 */
	cam_state = pwrdm_read_pwrst(cam_pd);
	if (cam_state == PWRDM_POWER_ON) {
		new_state = dev->safe_state;
		goto select_state;
	}

	/*
	 * Prevent PER off if CORE is not in retention or off as this
	 * would disable PER wakeups completely.
	 */
	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if ((per_next_state == PWRDM_POWER_OFF) &&
	    (core_next_state > PWRDM_POWER_RET))
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);

select_state:
	dev->last_state = new_state;
	ret = omap3_enter_idle(dev, new_state);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
}
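omap3_enter_idle_bm() relies on dev->safe_state as the fallback whenever bus activity is detected, so the init code must point it at a state that never gates wakeups, typically C1. A minimal sketch, assuming the legacy cpuidle_device API used in this example:

/* Illustrative only: mark C1 (states[0]) as the bus-master-safe fallback
 * used by omap3_enter_idle_bm() when omap3_idle_bm_check() reports activity.
 */
static void omap3_idle_set_safe_state(struct cpuidle_device *dev)
{
	dev->safe_state = &dev->states[0];
}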
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	
	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
Example #10
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
                            struct cpuidle_state *state)
{
    struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
    struct timespec ts_preidle, ts_postidle, ts_idle;
    u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
    u32 saved_mpu_state;

    current_cx_state = *cx;

    /* Used to keep track of the total time in idle */
    getnstimeofday(&ts_preidle);

    local_irq_disable();
    local_fiq_disable();

    if (!enable_off_mode) {
        if (mpu_state < PWRDM_POWER_RET)
            mpu_state = PWRDM_POWER_RET;
        if (core_state < PWRDM_POWER_RET)
            core_state = PWRDM_POWER_RET;
    }

    if (omap_irq_pending() || need_resched())
        goto return_sleep_time;

    saved_mpu_state = pwrdm_read_next_pwrst(mpu_pd);
    pwrdm_set_next_pwrst(mpu_pd, mpu_state);
    pwrdm_set_next_pwrst(core_pd, core_state);

    if (cx->type == OMAP3_STATE_C1) {
        pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
        pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
    }

    /* Execute ARM wfi */
    omap_sram_idle();

    if (cx->type == OMAP3_STATE_C1) {
        pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
        pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
    }

    pwrdm_set_next_pwrst(mpu_pd, saved_mpu_state);

return_sleep_time:
    getnstimeofday(&ts_postidle);
    ts_idle = timespec_sub(ts_postidle, ts_preidle);

    local_irq_enable();
    local_fiq_enable();

    return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
Example #11
static int msm_cpuidle_enter(
	struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
{
	int ret = 0;
	int i = 0;
	enum msm_pm_sleep_mode pm_mode;
	struct cpuidle_state_usage *st_usage = NULL;
#ifdef CONFIG_MSM_SLEEP_STATS
	struct atomic_notifier_head *head =
			&__get_cpu_var(msm_cpuidle_notifiers);
#endif

	local_irq_disable();

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_ENTER, NULL);
#endif

#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	pm_mode = msm_pm_idle_prepare(dev, drv, index);
	dev->last_residency = msm_pm_idle_enter(pm_mode);

#ifdef CONFIG_SHSYS_CUST
	if (sh_pm_is_idle_pc()) {
		sh_pm_set_idle_pc_flg_off();
	}
#endif

	for (i = 0; i < dev->state_count; i++) {
		st_usage = &dev->states_usage[i];
		if ((enum msm_pm_sleep_mode) cpuidle_get_statedata(st_usage)
		    == pm_mode) {
			ret = i;
			break;
		}
	}

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_EXIT, NULL);
#endif

	local_irq_enable();

	return ret;
}
Example #12
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
Example #13
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/* &*&*&*BC1_110630: add cpu idle control flag */
	if (omap_irq_pending() || need_resched() || omap3_idle_bm_check())
		goto return_sleep_time;
	/* &*&*&*BC2_110630: add cpu idle control flag */

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
Example #14
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
				struct cpuidle_state *state)
{
	if (omap3_idle_bm_check()) {
		/* Someone's busy, pick a safe idle state. */
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		}
		else {
			struct omap3_processor_cx *cx;

			cx = cpuidle_get_statedata(state);
			omap_sram_idle(cx->mpu_state);
			return 0;
		}
	}
	return omap3_enter_idle(dev, state);
}
Example #15
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Deny idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle(false);

	/* Re-allow idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
Example #16
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
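All of the ACPI enter handlers in this collection fetch their struct acpi_processor_cx through cpuidle_get_statedata(). A simplified sketch of the setup step they assume, pairing each cpuidle_state_usage slot with an ACPI C-state descriptor (the real driver additionally filters unusable states and respects max_cstate):

/* Simplified sketch: attach the ACPI C-state descriptors so handlers such
 * as acpi_idle_enter_c1() can recover them via cpuidle_get_statedata().
 * Indexing is illustrative; the real driver skips invalid C-states.
 */
static void acpi_attach_cx_statedata(struct acpi_processor *pr,
				     struct cpuidle_device *dev)
{
	int i;

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state_usage *state_usage = &dev->states_usage[i];
		struct acpi_processor_cx *cx = &pr->power.states[i + 1];

		cpuidle_set_statedata(state_usage, cx);
	}
}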
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	
	/* Never reached */
	return 0;
}
Example #18
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
Example #19
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
Example #20
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

/* S[, 20120922, [email protected], PM from froyo. */
#if defined(CONFIG_PRODUCT_LGE_LU6800)
	/* Deny idle for C1 */
	if (te_cpu_idle_block == 1 || doing_wakeup == 1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}
#else
	/* Deny idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}
#endif
/* E], 20120922, [email protected], PM from froyo. */

	/* Execute ARM wfi */
	omap_sram_idle(false);

/* S[, 20120922, [email protected], PM from froyo. */
#if defined(CONFIG_PRODUCT_LGE_LU6800)
	/* Re-allow idle for C1 */
	if (te_cpu_idle_block == 1 || doing_wakeup == 1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}
#else
	/* Re-allow idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}
#endif
/* E], 20120922, [email protected], PM from froyo. */

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
Example #21
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	u8 cur_per_state, cur_neon_state, pre_neon_state, pre_per_state;
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 fclken_core, iclken_core, fclken_per, iclken_per;
	u32 sdrcpwr_val, sdrc_power_register = 0x0;
	int wakeup_latency;
	int core_sleep_flg = 0;
	u32 per_ctx_saved = 0;
	int ret = -1;
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	int idle_status = 0;
#endif

	local_irq_disable();
	local_fiq_disable();

	if (need_resched()) {
		local_irq_enable();
		local_fiq_enable();
		return 0;
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_start =
				 omap_32k_sync_timer_read();
#endif
	PM_PREPWSTST_MPU = 0xFF;
	PM_PREPWSTST_CORE = 0xFF;
	PM_PREPWSTST_NEON = 0xFF;
	PM_PREPWSTST_PER = 0xFF;

	cx = cpuidle_get_statedata(state);

	target_state.mpu_state = cx->mpu_state;
	target_state.core_state = cx->core_state;

	/* take a time marker for residency */
	getnstimeofday(&ts_preidle);

	if (cx->type == OMAP3_STATE_C0) {
		omap_sram_idle();
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	correct_target_state();
	wakeup_latency = cx->wakeup_latency;
	if (target_state.core_state != cx->core_state) {
		/* Currently, this can happen only for core_off */
		/* Adjust wakeup latency to that of core_cswr state */
		/* Hard coded now and needs to be made more generic */
		/* omap3_power_states[4] is CSWR for core */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t adjust, next, now = ktime_get();
		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
			(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			adjust = ktime_set(0, (wakeup_latency * 1000));
			next = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	prcm_get_power_domain_state(DOM_PER, &cur_per_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_neon_state);

	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per = CM_FCLKEN_PER;
	iclken_per = CM_ICLKEN_PER;

	/* If target state is core_off, save registers
	 * before changing anything
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers(&target_state);
		omap_uart_save_ctx(0);
		omap_uart_save_ctx(1);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.neon_state != PRCM_ON)) {

			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
		/* Facilitating SWSUP RET, from HWSUP mode */
			prcm_set_clock_domain_state(DOM_NEON,
					PRCM_NO_AUTO, PRCM_FALSE);
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
						PRCM_FORCE);
#endif
			prcm_force_power_domain_state(DOM_NEON,
				target_state.neon_state);
		}
#ifdef CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		*(scratchpad_restore_addr) = restore_pointer_address;
#endif
		if (target_state.core_state > PRCM_CORE_CSWR_MEMRET) {
			ret = omap3_save_secure_ram_context(
						target_state.core_state);
			if (ret)
				printk(KERN_ERR "omap3_save_secure_ram_context"
						" failed in idle %x\n", ret);
			if (core_off_notification != NULL)
				core_off_notification(PRCM_TRUE);
		}
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;

#ifdef CONFIG_OMAP_SMARTREFLEX
		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);
#endif
		/* Workaround for Silicon Errata 1.64 */
		if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
			if (CM_CLKOUT_CTRL & 0x80)
				CM_CLKOUT_CTRL &= ~(0x80);
		}

		prcm_set_core_domain_state(target_state.core_state);
		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;
		/* Disable UART-1,2 */
		CM_FCLKEN1_CORE &= ~0x6000;
		/* Disable HSUSB OTG ICLK explicitly*/
		CM_ICLKEN1_CORE &= ~0x10;
		/* Enabling IO_PAD capabilities */
		PM_WKEN_WKUP |= 0x100;
		if (cur_per_state == PRCM_ON && cx->type >= OMAP3_STATE_C3 &&
					!(CM_FCLKEN_PER & PER_FCLK_MASK)) {
			/* In ES3.1, Enable IO Daisy chain */
			if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0)) {
				PM_WKEN_WKUP |= 0x10000;
				/* Wait for daisy chain to be ready */
				while ((PM_WKST_WKUP & 0x10000) == 0x0)
					;
				/* clear the status */
				PM_WKST_WKUP &= ~0x10000;
			}
			omap3_save_per_context();
			prcm_set_power_domain_state(DOM_PER, PRCM_OFF,
							 PRCM_AUTO);
			per_ctx_saved = 1;
			CM_FCLKEN_PER = 0;
			CM_ICLKEN_PER = 0;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1)) {

			/* es3 series bug */
			sdrc_power_register = sdrc_read_reg(SDRC_POWER);
			sdrcpwr_val = sdrc_power_register &
						~(SDRC_PWR_AUTOCOUNT_MASK |
						SDRC_PWR_CLKCTRL_MASK);
			lock_scratchpad_sem();
			sdrcpwr_val |= 0x120;
			save_to_scratchpad(SCRATHPAD_SDRCPWR_OFFSET,
							sdrcpwr_val);
			unlock_scratchpad_sem();
		}
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_end = omap_32k_sync_timer_read();
	idle_status++;
#endif
	omap_sram_idle();

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1))
		sdrc_write_reg(sdrc_power_register, SDRC_POWER);
	}

restore:
	/* In case of ES3.1, disable IO daisy chain */
	if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0) && per_ctx_saved)
		PM_WKEN_WKUP &= ~(0x10000);

	/* Disabling IO_PAD capabilities */
	if (core_sleep_flg)
		PM_WKEN_WKUP &= ~(0x100);

	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);
#ifdef OMAP3_START_RNG
	/* Capture the PM_PREPWSTST_CORE to be used later
	 * for starting the RNG (Random Number Generator) */
	prepwst_core_rng = PM_PREPWSTST_CORE;
#endif

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef CONFIG_MPU_OFF
		/* On ES 2.0, if the scratchpad is populated with a valid
		 * pointer, warm reset does not work.
		 * So populate the scratchpad restore address only in
		 * cpuidle and suspend calls.
		 */
		*(scratchpad_restore_addr) = 0x0;
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			prcm_force_power_domain_state(DOM_NEON, cur_neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON,
				&pre_neon_state);

			if (pre_neon_state == PRCM_OFF)
				omap3_restore_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
							PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);

#ifdef CONFIG_OMAP_SMARTREFLEX
		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);
#endif

		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers(&target_state);
			prcm_restore_core_context(target_state.core_state);
			omap3_restore_core_settings();
		}
		/* Errata 1.4:
		 * The timer device gets idled when we cut the timer ICLK,
		 * which happens when we try to put CORE to RET.
		 * Wait Period = 2 timer interface clock cycles +
		 * 1 timer functional clock cycle
		 * Interface clock = L4 clock. For the computation, L4
		 * clock is assumed at 50MHz (worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557uSec
		 * Rounding off the delay value to a safer 50uSec
		 */
		omap_udelay(GPTIMER_WAIT_DELAY);
		CM_AUTOIDLE_WKUP &= ~(0x1);
		if (core_off_notification != NULL)
			core_off_notification(PRCM_FALSE);
	}

	if (cur_per_state == PRCM_ON) {
		CM_FCLKEN_PER = fclken_per;
		CM_ICLKEN_PER = iclken_per;
		prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
		if (pre_per_state == PRCM_OFF && per_ctx_saved) {
			if (enable_debug)
				per_off++;
			omap3_restore_per_context();
			post_uart_inactivity();
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_PER);
#endif
		}
	}

	pr_debug("MPU state:%x,CORE state:%x\n", PM_PREPWSTST_MPU,
							PM_PREPWSTST_CORE);
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	if (idle_status) {
		sw_latency_arr[swlat_arr_wrptr].wkup_end =
					omap_32k_sync_timer_read();
		sw_latency_arr[swlat_arr_wrptr].wkup_start =
					wakeup_start_32ksync;

		sw_latency_arr[swlat_arr_wrptr].cstate =
			((PM_PREPWSTST_MPU & 0x3) << 2) |
			 (PM_PREPWSTST_CORE & 0x3) |
			 (omap_readl(0x48306CB0) << 16);

		swlat_arr_wrptr++;

		if (swlat_arr_wrptr == SW_LATENCY_ARR_SIZE)
			swlat_arr_wrptr = 0;
	}
#endif

	local_irq_enable();
	local_fiq_enable();

#ifdef OMAP3_START_RNG
	if (!is_device_type_gp()) {
		/*Start RNG after interrupts are enabled
		 * and only when CORE OFF was successful
		 */
		if (!(prepwst_core_rng & 0x3)) {
			ret = omap3_start_rng();
			if (ret)
				printk(KERN_INFO"Failed to generate new"
						" RN in idle %x\n", ret);
			prepwst_core_rng = 0xFF;
		}
	}
#endif

	return (u32)timespec_to_ns(&ts_idle)/1000;
}
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return -EINVAL;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	
	sched_clock_idle_sleep_event();
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	
	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	
	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
Example #23
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
/**
 * omap4_enter_idle - Programs OMAP4 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the amount of time spent in the low power state.
 */
static int omap4_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap4_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 cpu1_state;
	int cpu_id = smp_processor_id();

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	/*
	 * Do only WFI for non-boot CPU(aux cores)
	 */
	if (dev->cpu) {
		wmb();
		DO_WFI();
		goto return_sleep_time;
	}

	/*
	 * Do only a WFI as long as CPU1 is online
	 */
	if (num_online_cpus() > 1) {
		wmb();
		DO_WFI();
		goto return_sleep_time;
	}

	/*
	 * Hold on till CPU1 hits OFF
	 */
	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
	if (cpu1_state != PWRDM_POWER_OFF) {
		wmb();
		DO_WFI();
		goto return_sleep_time;
	}

	if (cx->type > OMAP4_STATE_C1)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

#ifdef CONFIG_PM_DEBUG
	pwrdm_pre_transition();
#endif
	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
	omap4_set_pwrdm_state(mpu_pd, cx->mpu_state);
	pwrdm_set_logic_retst(core_pd, cx->core_logic_state);
	omap4_set_pwrdm_state(core_pd, cx->core_state);

	omap4_enter_sleep(dev->cpu, cx->cpu0_state);


	/* restore the MPU and CORE states to ON */
	omap4_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	omap4_set_pwrdm_state(core_pd, PWRDM_POWER_ON);

#ifdef CONFIG_PM_DEBUG
	pwrdm_post_transition();
#endif
	if (cx->type > OMAP4_STATE_C1)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();


	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();
	}

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Update device last_residency*/
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
	u32 cam_state = 0, dss_state = 0, per_state = 0;
	/* modified for mp3 current -- begin */
	u32 mpu_prev, core_prev = 0;
	int requested = cx->type;
	static int cam_deny = 0;
	u32 wkdep_per_value = omap_readl(0x483070C8);	/* PM_WKDEP_PER */

	current_cx_state = *cx;
	/* modified for mp3 current -- end */
	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Keep CAM domain active during ISP usecases */
	if (front_cam_in_use || back_cam_in_use || stream_on) {
		pwrdm_for_each_clkdm(cam_pd, _cpuidle_deny_idle);
		cam_deny = 1;
	}
/*
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}
*/
	if (dss_suspend_flag && audio_on) {
		omap_writel(wkdep_per_value & ~(1 << 1), 0x483070C8); /* PM_WKDEP_PER */
	}

	/* Execute ARM wfi */
	omap_sram_idle();
/*
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}
*/
	/* Re-allow CAM domain idle (kept active during ISP usecases) */
	if (cam_deny) {
		pwrdm_for_each_clkdm(cam_pd, _cpuidle_allow_idle);
		cam_deny = 0;
	}

	if (!dss_suspend_flag) {
		omap_writel(wkdep_per_value, 0x483070C8); /* PM_WKDEP_PER */
	}

	core_state = pwrdm_read_prev_pwrst(core_pd);
	mpu_state = pwrdm_read_prev_pwrst(mpu_pd);
	cam_state = pwrdm_read_prev_pwrst(cam_pd);
	dss_state = pwrdm_read_prev_pwrst(dss_pd);
	per_state = pwrdm_read_prev_pwrst(per_pd);

	/* printk(KERN_INFO "requested C%d, actual core=%d, mpu=%d cam = %d dss = %d per = %d\n", requested, core_state, mpu_state, cam_state); */

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	/* modified for mp3 current -- begin */
	mpu_prev = omap_readl(0x483069E8);
	mpu_prev &= 0x3;
	core_prev = omap_readl(0x48306AE8);
	core_prev &= 0x3;
	/* modified for mp3 current -- end */

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
Example #27
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
Example #28
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
Example #29
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state, core_state;
	u8 idx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	/*
	 * Check if the chosen idle state is valid.
	 * If no, drop down to a lower valid state.
	 *
	 * (Expects the lowest idle state to be always VALID).
	 */
	if (!cx->valid) {
		for (idx = (cx->type - 1); idx > OMAP3_STATE_C1; idx--) {
			if (omap3_power_states[idx].valid)
				break;
		}
		state = &(dev->states[idx]);
		dev->last_state = state ;

		cx = cpuidle_get_statedata(state);
	}

	current_cx_state = *cx;

	mpu_state = cx->mpu_state;
	core_state = cx->core_state;

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending())
		goto return_sleep_time;

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return (u32)timespec_to_ns(&ts_idle)/1000;
}
Example #30
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/*
	 * Don't allow PER to go to OFF in idle state
	 * transitions.
	 * This is a temporary fix for a display flashing issue
	 * which occurs when off mode is enabled
	 */

	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if (per_next_state == PWRDM_POWER_OFF)
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);


	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}