Example #1
static int omap3_pm_suspend(void)
{
	int sr1_status = 0;
	int sr2_status = 0;
	struct system_power_state trg_st;
	u32 suspend_state;
	int ret;

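	/* Disable SmartReflex on both channels for the suspend path and
	 * remember the returned status so each channel can be conditionally
	 * re-enabled on resume.
	 */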
	sr1_status = disable_smartreflex(SR1_ID);
	sr2_status = disable_smartreflex(SR2_ID);

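	/* Select the target power states: full OFF when off-mode support is
	 * built in, retention otherwise.
	 */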
#ifdef CONFIG_OMAP34XX_OFFMODE
	context_restore_update(DOM_PER);
	context_restore_update(DOM_CORE1);

	trg_st.mpu_state	= PRCM_MPU_OFF;
	suspend_state		= PRCM_OFF;
#else
	trg_st.mpu_state	= PRCM_MPU_CSWR_L2RET;
	suspend_state		= PRCM_RET;
#endif

#ifndef CONFIG_ARCH_OMAP3410
	trg_st.neon_state	= suspend_state;
	trg_st.gfx_state	= suspend_state;
#endif
	trg_st.iva2_state	= suspend_state;
	trg_st.dss_state	= suspend_state;
	trg_st.cam_state	= suspend_state;
	trg_st.per_state	= suspend_state;
#ifndef CONFIG_CORE_OFF
	/* If we keep CORE in RET instead of OFF, we also keep PER in RET. */
	trg_st.per_state	= PRCM_RET;
#endif
	trg_st.usbhost_state	= suspend_state;


#ifndef CONFIG_ARCH_OMAP3410
	if (trg_st.neon_state == PRCM_OFF)
		omap3_save_neon_context();
#endif
	if (trg_st.per_state == PRCM_OFF)
		omap3_save_per_context();

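	/* CORE goes to OFF only when CONFIG_CORE_OFF is set; otherwise it
	 * stays in CSWR with memory retained.
	 */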
#ifdef CONFIG_CORE_OFF
	trg_st.core_state	= PRCM_CORE_OFF;
#else
	trg_st.core_state	= PRCM_CORE_CSWR_MEMRET;
#endif

	if (trg_st.core_state == PRCM_CORE_CSWR_MEMRET) {
		memory_logic_res_setting(&trg_st);
	}

	if (trg_st.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_core_context(trg_st.core_state);
	}

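	/* Stop the 32kHz system timer across suspend; it is re-enabled just
	 * before returning.
	 */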
#ifdef CONFIG_OMAP_32K_TIMER
	omap_disable_system_timer();
#endif

#ifdef CONFIG_MACH_SIRLOIN_3630
	if (cpu_is_omap36xx() && is_omap3_rev_equal_to(OMAP3630_REV_ES1_1)) {
		/*
		 * As part of the JEDEC violation workaround for 3630 ES1.1,
		 * set the VDD2 voltage to 1.0875V (vsel == 39 / 0x27).
		 */
		if (trg_st.core_state >= PRCM_CORE_OFF) {
			u32 target_opp = prcm_get_current_vdd2_opp();

			sr_voltagescale(target_opp, target_opp, 0x27);
		}
	}
#endif

	/* prcm_set_chip_power_mode() will suspend the system. The function
	 * returns upon system wake-up.
	 */
	ret = prcm_set_chip_power_mode(&trg_st);
	if (ret != 0)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");

#ifndef CONFIG_ARCH_OMAP3410
	if (trg_st.neon_state == PRCM_OFF)
		omap3_restore_neon_context();
#endif
	if (trg_st.per_state == PRCM_OFF)
		omap3_restore_per_context();

#ifdef CONFIG_CORE_OFF
	omap3_restore_core_settings();
#else
	if (trg_st.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
		context_restore_update(DOM_CORE1);
#endif
		prcm_restore_core_context(trg_st.core_state);
	}

	if (trg_st.core_state > PRCM_CORE_CSWR_MEMRET) {
		restore_sram_functions();	/*restore sram data */
		omap_sram_idle_setup();
	}
#endif

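	/* Re-enable SmartReflex only on the channels that were active
	 * on entry.
	 */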
	if (sr1_status)
		enable_smartreflex(SR1_ID);
	if (sr2_status)
		enable_smartreflex(SR2_ID);

#ifdef CONFIG_OMAP_32K_TIMER
	omap_enable_system_timer();
#endif
	return 0;
}
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	u8 cur_per_state, cur_neon_state, pre_neon_state, pre_per_state;
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 fclken_core, iclken_core, fclken_per, iclken_per;
	u32 sdrcpwr_val, sdrc_power_register = 0x0;
	int wakeup_latency;
	int core_sleep_flg = 0;
	u32 per_ctx_saved = 0;
	int ret = -1;
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	int idle_status = 0;
#endif

	local_irq_disable();
	local_fiq_disable();

	if (need_resched()) {
		local_irq_enable();
		local_fiq_enable();
		return 0;
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_start =
				 omap_32k_sync_timer_read();
#endif
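	/* Reset the previous power state status registers so the state
	 * actually reached in this idle attempt can be read back after
	 * wake-up.
	 */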
	PM_PREPWSTST_MPU = 0xFF;
	PM_PREPWSTST_CORE = 0xFF;
	PM_PREPWSTST_NEON = 0xFF;
	PM_PREPWSTST_PER = 0xFF;

	cx = cpuidle_get_statedata(state);

	target_state.mpu_state = cx->mpu_state;
	target_state.core_state = cx->core_state;

	/* take a time marker for residency */
	getnstimeofday(&ts_preidle);

	if (cx->type == OMAP3_STATE_C0) {
		omap_sram_idle();
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	correct_target_state();
	wakeup_latency = cx->wakeup_latency;
	if (target_state.core_state != cx->core_state) {
		/* Currently, this can happen only for core_off. Adjust the
		 * wakeup latency to that of the core_cswr state. Hard coded
		 * for now and needs to be made more generic;
		 * omap3_power_states[4] is CSWR for core.
		 */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t adjust, next, now = ktime_get();
		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
			(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			adjust = ktime_set(0, (wakeup_latency * 1000));
			next = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

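	/* Remember the current PER and NEON power states and the CORE/PER
	 * clock enables so they can be restored after wake-up.
	 */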
	prcm_get_power_domain_state(DOM_PER, &cur_per_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_neon_state);

	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per = CM_FCLKEN_PER;
	iclken_per = CM_ICLKEN_PER;

	/* If the target state is core_off, save registers
	 * before changing anything.
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers(&target_state);
		omap_uart_save_ctx(0);
		omap_uart_save_ctx(1);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.neon_state != PRCM_ON)) {

			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET from HWSUP mode */
			prcm_set_clock_domain_state(DOM_NEON,
					PRCM_NO_AUTO, PRCM_FALSE);
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
						PRCM_FORCE);
#endif
			prcm_force_power_domain_state(DOM_NEON,
				target_state.neon_state);
		}
#ifdef CONFIG_MPU_OFF
		/* Populate the scratchpad restore address */
		*(scratchpad_restore_addr) = restore_pointer_address;
#endif
		if (target_state.core_state > PRCM_CORE_CSWR_MEMRET) {
			ret = omap3_save_secure_ram_context(
						target_state.core_state);
			if (ret)
				printk(KERN_ERR "omap3_save_secure_ram_context "
						"failed in idle %x\n", ret);
			if (core_off_notification != NULL)
				core_off_notification(PRCM_TRUE);
		}
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;


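		/* Disable SmartReflex while CORE is in a low-power state;
		 * it is re-enabled in the restore path below.
		 */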
#ifdef CONFIG_OMAP_SMARTREFLEX
		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);
#endif
		/* Workaround for Silicon Errata 1.64 */
		if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
			if (CM_CLKOUT_CTRL & 0x80)
				CM_CLKOUT_CTRL &= ~(0x80);
		}

		prcm_set_core_domain_state(target_state.core_state);
		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;
		/* Disable UART-1,2 */
		CM_FCLKEN1_CORE &= ~0x6000;
		/* Disable HSUSB OTG ICLK explicitly*/
		CM_ICLKEN1_CORE &= ~0x10;
		/* Enabling IO_PAD capabilities */
		PM_WKEN_WKUP |= 0x100;
		if (cur_per_state == PRCM_ON && cx->type >= OMAP3_STATE_C3 &&
					!(CM_FCLKEN_PER & PER_FCLK_MASK)) {
			/* In ES3.1, Enable IO Daisy chain */
			if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0)) {
				PM_WKEN_WKUP |= 0x10000;
				/* Wait for daisy chain to be ready */
				while ((PM_WKST_WKUP & 0x10000) == 0x0)
					;
				/* clear the status */
				PM_WKST_WKUP &= ~0x10000;
			}
			omap3_save_per_context();
			prcm_set_power_domain_state(DOM_PER, PRCM_OFF,
							 PRCM_AUTO);
			per_ctx_saved = 1;
			CM_FCLKEN_PER = 0;
			CM_ICLKEN_PER = 0;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1)) {

			/* Errata 1.142: SDRC not sending auto-refresh when
			 * OMAP wakes up from OFF mode (non-GP, ES3.x)
			 */
			sdrc_power_register = sdrc_read_reg(SDRC_POWER);
			sdrcpwr_val = sdrc_power_register &
						~(SDRC_PWR_AUTOCOUNT_MASK |
						SDRC_PWR_CLKCTRL_MASK);
			lock_scratchpad_sem();
			sdrcpwr_val |= 0x120;
			save_to_scratchpad(SCRATHPAD_SDRCPWR_OFFSET,
							sdrcpwr_val);
			unlock_scratchpad_sem();
		}
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_end = omap_32k_sync_timer_read();
	idle_status++;
#endif
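	/* Enter the low-power state from the SRAM idle routine; execution
	 * resumes here after wake-up.
	 */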
	omap_sram_idle();

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1))
			sdrc_write_reg(sdrc_power_register, SDRC_POWER);
	}

restore:
	/* In case of ES3.1, disable IO daisy chain */
	if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0) && per_ctx_saved)
		PM_WKEN_WKUP &= ~(0x10000);

	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);
#ifdef OMAP3_START_RNG
	/* Capture PM_PREPWSTST_CORE to be used later
	 * for starting the RNG (Random Number Generator).
	 */
	prepwst_core_rng = PM_PREPWSTST_CORE;
#endif

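	/* Restore the CORE clock enable registers saved on entry. */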
	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef CONFIG_MPU_OFF
		/* On ES 2.0, if the scratchpad is populated with a valid
		 * pointer, warm reset does not work.
		 * So populate the scratchpad restore address only in
		 * cpuidle and suspend calls.
		 */
		*(scratchpad_restore_addr) = 0x0;
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			prcm_force_power_domain_state(DOM_NEON, cur_neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON,
				&pre_neon_state);

			if (pre_neon_state == PRCM_OFF)
				omap3_restore_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
							PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);

#ifdef CONFIG_OMAP_SMARTREFLEX
		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);
#endif

		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers(&target_state);
			prcm_restore_core_context(target_state.core_state);
			omap3_restore_core_settings();
		}
		/* Errata 1.4
		 * The timer device gets idled when we cut the timer ICLK,
		 * which happens when we try to put CORE into RET.
		 * Wait Period = 2 timer interface clock cycles +
		 *               1 timer functional clock cycle
		 * Interface clock = L4 clock. For the computation, the L4
		 * clock is assumed to run at 50MHz (worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557us
		 * Rounding off the delay value to a safer 50us.
		 */
		omap_udelay(GPTIMER_WAIT_DELAY);
		CM_AUTOIDLE_WKUP &= ~(0x1);
		if (core_off_notification != NULL)
			core_off_notification(PRCM_FALSE);
	}

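	/* If PER was ON on entry, restore its clocks and, if the domain
	 * actually reached OFF, its saved context.
	 */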
	if (cur_per_state == PRCM_ON) {
		CM_FCLKEN_PER = fclken_per;
		CM_ICLKEN_PER = iclken_per;
		prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
		if (pre_per_state == PRCM_OFF && per_ctx_saved) {
			if (enable_debug)
				per_off++;
			omap3_restore_per_context();
			post_uart_inactivity();
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_PER);
#endif
		}
	}

	pr_debug("MPU state:%x, CORE state:%x\n", PM_PREPWSTST_MPU,
							PM_PREPWSTST_CORE);
	store_prepwst();

return_sleep_time:
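	/* Compute the idle residency and report it back to cpuidle. */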
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	if (idle_status) {
		sw_latency_arr[swlat_arr_wrptr].wkup_end =
					omap_32k_sync_timer_read();
		sw_latency_arr[swlat_arr_wrptr].wkup_start =
					wakeup_start_32ksync;

		sw_latency_arr[swlat_arr_wrptr].cstate =
			((PM_PREPWSTST_MPU & 0x3) << 2) |
			 (PM_PREPWSTST_CORE & 0x3) |
			 (omap_readl(0x48306CB0) << 16);

		swlat_arr_wrptr++;

		if (swlat_arr_wrptr == SW_LATENCY_ARR_SIZE)
			swlat_arr_wrptr = 0;
	}
#endif

	local_irq_enable();
	local_fiq_enable();

#ifdef OMAP3_START_RNG
	if (!is_device_type_gp()) {
		/* Start the RNG after interrupts are enabled
		 * and only when CORE OFF was successful.
		 */
		if (!(prepwst_core_rng & 0x3)) {
			ret = omap3_start_rng();
			if (ret)
				printk(KERN_INFO"Failed to generate new"
						" RN in idle %x\n", ret);
			prepwst_core_rng = 0xFF;
		}
	}
#endif

	return (u32)timespec_to_ns(&ts_idle)/1000;
}
Example #3
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	struct timespec ts_preidle;
	struct timespec ts_postidle;
	struct timespec ts_idle;

	/* Used for LPR mode DSS context save/restore. */
	u32 pm_wken_dss      = 0;
	u32 pm_pwstctrl_dss  = 0;
	u32 cm_clkstctrl_dss = 0;
	u32 cm_fclken_dss    = 0;
	u32 cm_iclken_dss    = 0;
	u32 cm_autoidle_dss  = 0;

	u32 fclken_core;
	u32 iclken_core;
	u32 fclken_per;
	u32 iclken_per;

	int wakeup_latency;

	struct system_power_state target_state;
	struct system_power_state cur_state;

#ifdef CONFIG_HW_SUP_TRANS
	u32 sleepdep_per;
	u32 wakedep_per;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */

	u32 sdrc_power_register = 0;
	int core_sleep_flg = 0;
	int got_console_lock = 0;

	/* Disable interrupts.
	 */
	local_irq_disable();
	local_fiq_disable();

	/* If a reschedule is pending, return immediately.
	 */
	if (need_resched()) {
		local_fiq_enable();
		local_irq_enable();
		return 0;
	}

	/* Reset previous power state registers.
	 */
	clear_prepwstst();

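	/* Program the wake-up sources for this idle attempt.
	 */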
	omap3_idle_setup_wkup_sources();

	/* Set up target state from state context provided by cpuidle.
	 */
	cx = cpuidle_get_statedata(state);
	target_state.mpu_state  = cx->mpu_state;
	target_state.core_state = cx->core_state;
	target_state.neon_state = 0; /* Avoid gcc warning. Will be set in
					adjust_target_states(). */

	/* take a time marker for residency.
	 */
	getnstimeofday(&ts_preidle);

	/* If the requested state is C1, we bail here...
	 */
	if (cx->type == OMAP3_STATE_C1) {
		omap_sram_idle(target_state.mpu_state);
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	/* Adjust PER and NEON domain target states as well as CORE domain
	 * target state depending on MPU/CORE setting, enable_off sysfs entry
	 * and PER timer status.
	 */
	adjust_target_states(&target_state);

	wakeup_latency = cx->wakeup_latency;
	/* NOTE:
	 *   We will never get the condition below as we are not supporting
	 *   CORE OFF right now. Keeping this code around for future reference.
	 */
	if (target_state.core_state != cx->core_state) {
		/* Currently, this can happen only for core_off. Adjust the
		 * wakeup latency to that of the core_cswr state. Hard coded
		 * for now and needs to be made more generic;
		 * omap3_power_states[4] is CSWR for core. */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t now = ktime_get();

		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
					(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			ktime_t adjust = ktime_set(0, (wakeup_latency * 1000));
			ktime_t next   = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;


	/* Remember the current power states and clock settings.
	 */
	prcm_get_power_domain_state(DOM_PER,  &cur_state.per_state);
	prcm_get_power_domain_state(DOM_CAM,  &cur_state.cam_state);
	prcm_get_power_domain_state(DOM_SGX,  &cur_state.sgx_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_state.neon_state);

	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per  = CM_FCLKEN_PER;
	iclken_per  = CM_ICLKEN_PER;

#ifdef CONFIG_HW_SUP_TRANS
	/* Facilitating SWSUP RET, from HWSUP mode */
	sleepdep_per = CM_SLEEPDEP_PER;
	wakedep_per  = PM_WKDEP_PER;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */

	/* If the target state is core_off, save registers before changing
	 * anything.
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers();
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;


	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.neon_state != PRCM_ON)) {

			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();

			prcm_transition_domain_to(DOM_NEON, target_state.neon_state);
		}
#ifdef _disabled_CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		scratchpad_set_restore_addr();
#endif
		/* TODO No support for OFF Mode yet
		if(target_state.core_state > PRCM_CORE_CSWR_MEMRET)
			omap3_save_secure_ram_context(target_state.core_state);
		*/
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;


	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;

		/* Lock the console to prevent potential access to UARTs.
		 */
		if (0 == try_acquire_console_sem()) {
			got_console_lock = 1;
		}

		/* Handle PER, CAM and SGX domains. */
		if ((cur_state.per_state == PRCM_ON) &&
		    (target_state.per_state != PRCM_ON)) {

			if (target_state.per_state == PRCM_OFF) {
				omap3_save_per_context();
			}

			prcm_transition_domain_to(DOM_PER, target_state.per_state);
		}
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_RET);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_RET);

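		/* Disable SmartReflex before putting CORE into its low-power
		 * state; it is re-enabled once CORE is back ACTIVE below.
		 */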
		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);

		prcm_set_core_domain_state(target_state.core_state);

		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;

		/* Disable HSUSB OTG ICLK explicitly*/
		CM_ICLKEN1_CORE &= ~0x10;

		/* Enabling GPT1 wake-up capabilities */
		PM_WKEN_WKUP |= 0x1;

		/* Errata 2.15
		 * Configure UARTs to ForceIdle. Otherwise they can prevent
		 * CORE RET.
		 */
		omap24xx_uart_set_force_idle();

		CM_ICLKEN1_CORE &= ~(1<<7); /* MAILBOXES */
		CM_ICLKEN1_CORE &= ~(1<<6); /* OMAPCTRL */

		/* If we are in LPR mode we need to set up DSS accordingly.
		 */
		if (omap2_disp_lpr_is_enabled()) {
			pm_wken_dss      = PM_WKEN_DSS;
			pm_pwstctrl_dss  = PM_PWSTCTRL_DSS;
			cm_clkstctrl_dss = CM_CLKSTCTRL_DSS;
			cm_fclken_dss    = CM_FCLKEN_DSS;
			cm_iclken_dss    = CM_ICLKEN_DSS;
			cm_autoidle_dss  = CM_AUTOIDLE_DSS;
			PM_WKEN_DSS      = 0x00000001;
			PM_PWSTCTRL_DSS  = 0x00030107;
			CM_CLKSTCTRL_DSS = 0x00000003;
			CM_FCLKEN_DSS    = 0x00000001;
			CM_ICLKEN_DSS    = 0x00000001;
			CM_AUTOIDLE_DSS  = 0x00000001;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL |= 0x18; /* set sysclk to stop */
#endif /* #ifdef CONFIG_DISABLE_HFCLK */


	DEBUG_STATE_CAPTURE();


	/* Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes-up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0)) {
		sdrc_power_register = SDRC_POWER_REG;
		SDRC_POWER_REG &= ~(SDRC_PWR_AUTOCOUNT_MASK | SDRC_PWR_CLKCTRL_MASK);
		SDRC_POWER_REG |= 0x120;
		if (target_state.core_state == PRCM_CORE_OFF)
			save_scratchpad_contents();
	}

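	/* Enter the low-power state from the SRAM idle routine; execution
	 * resumes here after wake-up.
	 */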
	omap_sram_idle(target_state.mpu_state);

	/* Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes-up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0))
		SDRC_POWER_REG = sdrc_power_register;

restore:
#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL &= ~0x18;
#endif /* #ifdef CONFIG_DISABLE_HFCLK */
	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef _disabled_CONFIG_MPU_OFF
		/* On ES 2.0, if scratchpad is populated with valid pointer,
		 * warm reset does not work So populate scratchpad restore
		 * address only in cpuidle and suspend calls
		*/
		scratchpad_clr_restore_addr();
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);

		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			u8 pre_state;

			prcm_force_power_domain_state(DOM_NEON, cur_state.neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON, &pre_state);

			if (pre_state == PRCM_OFF) {
				omap3_restore_neon_context();
			}

#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, POWER_DOMAIN_ON,
								PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		u8 pre_per_state;

		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);

		omap24xx_uart_clr_force_idle();

		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);

		/* Turn PER back ON if it was ON before idle.
		 */
		if (cur_state.per_state == PRCM_ON) {
			prcm_force_power_domain_state(DOM_PER, cur_state.per_state);

			CM_ICLKEN_PER = iclken_per;
			CM_FCLKEN_PER = fclken_per;

			prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
			if (pre_per_state == PRCM_OFF) {
				omap3_restore_per_context();
#ifdef CONFIG_OMAP34XX_OFFMODE
				context_restore_update(DOM_PER);
#endif
			}

#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET, from HWSUP mode */
			CM_SLEEPDEP_PER = sleepdep_per;
			PM_WKDEP_PER    = wakedep_per;
			prcm_set_power_domain_state(DOM_PER, PRCM_ON, PRCM_AUTO);
#endif
		}

		/* Restore CAM and SGX. */
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_ON);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_ON);

		/* If we lost CORE context, restore it.
		 */
		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers();
			prcm_restore_core_context(target_state.core_state);
#ifdef CONFIG_CORE_OFF
			omap3_restore_core_settings();
#endif
		}

		/* Restore DSS settings */
		if (omap2_disp_lpr_is_enabled()) {
			PM_WKEN_DSS      = pm_wken_dss;
			PM_PWSTCTRL_DSS  = pm_pwstctrl_dss;
			CM_CLKSTCTRL_DSS = cm_clkstctrl_dss;
			CM_FCLKEN_DSS    = cm_fclken_dss;
			CM_ICLKEN_DSS    = cm_iclken_dss;
			CM_AUTOIDLE_DSS  = cm_autoidle_dss;
		}

		/* At this point CORE and PER domain are back. We can release
		 * the console if we have it.
		 */
		if (got_console_lock) {
			release_console_sem();
		}

#ifdef CONFIG_OMAP_32K_TIMER
		/* Errata 1.4
		 * If a General Purpose Timer (GPTimer) is in posted mode
		 * (TSIRC.POSTED=1), due to internal resynchronizations, values
		 * read in TCRR, TCAR1 and TCAR2 registers right after the
		 * timer interface clock (L4) goes from stopped to active may
		 * not return the expected values. The most common event
		 * leading to this situation occurs upon wake up from idle.
		 *
		 * Software has to wait at least (2 timer interface clock
		 * cycles + 1 timer functional clock cycle) after L4 clock
		 * wakeup before reading TCRR, TCAR1 or TCAR2 registers for
		 * GPTimers in POSTED internal synchronization mode, and
		 * before reading the WCRR register of the watchdog timers. The
		 * same workaround must be applied before reading CR and
		 * 32KSYNCNT_REV registers of the synctimer module.
		 *
		 * Wait Period = 2 timer interface clock cycles +
		 *               1 timer functional clock cycle
		 * Interface clock  = L4 clock (50MHz worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557us
		 * Rounding off the delay value to a safer 50us.
		 */
		udelay(GPTIMER_WAIT_DELAY);
#endif

		/* Disable autoidling of GPT1.
		 */
		CM_AUTOIDLE_WKUP &= ~(0x1);
	}

	DPRINTK("MPU state:%x, CORE state:%x\n",
			PM_PREPWSTST_MPU, PM_PREPWSTST_CORE);

	/* Do wakeup event checks. */
	post_uart_activity();

	/* Update stats for sysfs entries.
	 */
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
#if defined(CONFIG_SYSFS) && defined(DEBUG_BAIL_STATS)
	ts_last_wake_up = ts_postidle;
#endif
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));


	DEBUG_STATE_PRINT(core_sleep_flg);


	local_irq_enable();
	local_fiq_enable();

	return (u32)timespec_to_ns(&ts_idle)/1000;
}