コード例 #1
0
ファイル: clock34xx.c プロジェクト: 274914765/C
/*
 * omap2_clk_init - boot-time clock framework initialization for OMAP2/3.
 *
 * Selects the CPU-specific rate mask and clock flags (only the 34xx
 * path is currently active; the 24xx multiboot path is stubbed out),
 * registers every on-chip clock whose flags match this CPU, recomputes
 * the root clock rates, prints the resulting base frequencies and
 * enables the clocks that must stay on from boot.
 *
 * Returns 0 on success.
 */
int __init omap2_clk_init(void)
{
    /* struct prcm_config *prcm; */
    struct clk **clkp;
    /* u32 clkrate; */
    u32 cpu_clkflg = 0;	/* BUG FIX: was read uninitialized below when
				 * cpu_is_omap34xx() is false */

    /* REVISIT: Ultimately this will be used for multiboot */
#if 0
    if (cpu_is_omap242x()) {
        cpu_mask = RATE_IN_242X;
        cpu_clkflg = CLOCK_IN_OMAP242X;
        clkp = onchip_24xx_clks;
    } else if (cpu_is_omap2430()) {
        cpu_mask = RATE_IN_243X;
        cpu_clkflg = CLOCK_IN_OMAP243X;
        clkp = onchip_24xx_clks;
    }
#endif
    if (cpu_is_omap34xx()) {
        cpu_mask = RATE_IN_343X;
        cpu_clkflg = CLOCK_IN_OMAP343X;
        clkp = onchip_34xx_clks;	/* re-initialized by the loop below */

        /*
         * Update this if there are further clock changes between ES2
         * and production parts
         */
        if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
            /* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
            cpu_clkflg |= CLOCK_IN_OMAP3430ES1;
        } else {
            cpu_mask |= RATE_IN_3430ES2;
            cpu_clkflg |= CLOCK_IN_OMAP3430ES2;
        }
    }

    clk_init(&omap2_clk_functions);

    /* Register only the clocks valid for this silicon revision.
     * With cpu_clkflg == 0 (non-34xx CPU) nothing is registered. */
    for (clkp = onchip_34xx_clks;
         clkp < onchip_34xx_clks + ARRAY_SIZE(onchip_34xx_clks);
         clkp++) {
        if ((*clkp)->flags & cpu_clkflg)
            clk_register(*clkp);
    }

    /* REVISIT: Not yet ready for OMAP3 */
#if 0
    /* Check the MPU rate set by bootloader */
    clkrate = omap2_get_dpll_rate_24xx(&dpll_ck);
    for (prcm = rate_table; prcm->mpu_speed; prcm++) {
        if (!(prcm->flags & cpu_mask))
            continue;
        if (prcm->xtal_speed != sys_ck.rate)
            continue;
        if (prcm->dpll_speed <= clkrate)
             break;
    }
    curr_prcm_set = prcm;
#endif

    recalculate_root_clocks();

    printk(KERN_INFO "Clocking rate (Crystal/DPLL/ARM core): "
           "%ld.%01ld/%ld/%ld MHz\n",
           (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
           (core_ck.rate / 1000000), (arm_fck.rate / 1000000));

    /*
     * Only enable those clocks we will need, let the drivers
     * enable other clocks as necessary
     */
    clk_enable_init_clocks();

    /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
    /* REVISIT: not yet ready for 343x */
#if 0
    vclk = clk_get(NULL, "virt_prcm_set");
    sclk = clk_get(NULL, "sys_ck");
#endif
    return 0;
}
コード例 #2
0
/*
 * omap3_enter_idle - cpuidle "enter" hook for OMAP3 C-states.
 * @dev:   cpuidle device for this CPU
 * @state: C-state selected by the cpuidle governor
 *
 * Programs the MPU, NEON, CORE and PER power domains to the targets
 * carried in the state's driver data, idles via omap_sram_idle(), then
 * restores domain states, clock enables and any saved context on the
 * way out.  Pending-interrupt checks cause early bail-out through the
 * "return_sleep_time" or "restore" labels.
 *
 * Returns the idle residency in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	u8 cur_per_state, cur_neon_state, pre_neon_state, pre_per_state;
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 fclken_core, iclken_core, fclken_per, iclken_per;
	u32 sdrcpwr_val, sdrc_power_register = 0x0;
	int wakeup_latency;
	int core_sleep_flg = 0;
	u32 per_ctx_saved = 0;
	int ret = -1;
#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	int idle_status = 0;
#endif

	local_irq_disable();
	local_fiq_disable();

	/* Don't idle at all if a reschedule is already due. */
	if (need_resched()) {
		local_irq_enable();
		local_fiq_enable();
		return 0;
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_start =
				 omap_32k_sync_timer_read();
#endif
	/* Reset previous-power-state registers so the post-idle values
	 * reflect only this sleep attempt. */
	PM_PREPWSTST_MPU = 0xFF;
	PM_PREPWSTST_CORE = 0xFF;
	PM_PREPWSTST_NEON = 0xFF;
	PM_PREPWSTST_PER = 0xFF;

	cx = cpuidle_get_statedata(state);

	target_state.mpu_state = cx->mpu_state;
	target_state.core_state = cx->core_state;

	/* take a time marker for residency */
	getnstimeofday(&ts_preidle);

	if (cx->type == OMAP3_STATE_C0) {
		omap_sram_idle();
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	correct_target_state();
	wakeup_latency = cx->wakeup_latency;
	if (target_state.core_state != cx->core_state) {
		/* Currently, this can happen only for core_off */
		/* Adjust wakeup latency to that of core_cswr state */
		/* Hard coded now and needs to be made more generic */
		/* omap3_power_states[4] is CSWR for core */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t adjust, next, now = ktime_get();
		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
			(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			adjust = ktime_set(0, (wakeup_latency * 1000));
			next = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	prcm_get_power_domain_state(DOM_PER, &cur_per_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_neon_state);

	/* Snapshot clock-enable registers so they can be restored verbatim. */
	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per = CM_FCLKEN_PER;
	iclken_per = CM_ICLKEN_PER;

	/* If target state if core_off, save registers
	 * before changing anything
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers(&target_state);
		omap_uart_save_ctx(0);
		omap_uart_save_ctx(1);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;

	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.neon_state != PRCM_ON)) {

			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET, from HWSUP mode */
			prcm_set_clock_domain_state(DOM_NEON,
					PRCM_NO_AUTO, PRCM_FALSE);
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
						PRCM_FORCE);
#endif
			prcm_force_power_domain_state(DOM_NEON,
				target_state.neon_state);
		}
#ifdef CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		*(scratchpad_restore_addr) = restore_pointer_address;
#endif
		if (target_state.core_state > PRCM_CORE_CSWR_MEMRET) {
			ret = omap3_save_secure_ram_context(
						target_state.core_state);
			if (ret)
				printk(KERN_ERR "omap3_save_secure_ram_context"
						"failed in idle %x\n", ret);
			if (core_off_notification != NULL)
				core_off_notification(PRCM_TRUE);
		}
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;


#ifdef CONFIG_OMAP_SMARTREFLEX
		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);
#endif
		/* Workaround for Silicon Errata 1.64 */
		if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
			if (CM_CLKOUT_CTRL & 0x80)
				CM_CLKOUT_CTRL &= ~(0x80);
		}

		prcm_set_core_domain_state(target_state.core_state);
		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;
		/* Disable UART-1,2 */
		CM_FCLKEN1_CORE &= ~0x6000;
		/* Disable HSUSB OTG ICLK explicitly*/
		CM_ICLKEN1_CORE &= ~0x10;
		/* Enabling IO_PAD capabilities */
		PM_WKEN_WKUP |= 0x100;
		if (cur_per_state == PRCM_ON && cx->type >= OMAP3_STATE_C3 &&
					!(CM_FCLKEN_PER & PER_FCLK_MASK)) {
			/* In ES3.1, Enable IO Daisy chain */
			if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0)) {
				PM_WKEN_WKUP |= 0x10000;
				/* Wait for daisy chain to be ready */
				while ((PM_WKST_WKUP & 0x10000) == 0x0)
					;
				/* clear the status */
				PM_WKST_WKUP &= ~0x10000;
			}
			omap3_save_per_context();
			prcm_set_power_domain_state(DOM_PER, PRCM_OFF,
							 PRCM_AUTO);
			per_ctx_saved = 1;
			CM_FCLKEN_PER = 0;
			CM_ICLKEN_PER = 0;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1)) {

			/* es3 series bug */
			sdrc_power_register = sdrc_read_reg(SDRC_POWER);
			sdrcpwr_val = sdrc_power_register &
						~(SDRC_PWR_AUTOCOUNT_MASK |
						SDRC_PWR_CLKCTRL_MASK);
			lock_scratchpad_sem();
			sdrcpwr_val |= 0x120;
			save_to_scratchpad(SCRATHPAD_SDRCPWR_OFFSET,
							sdrcpwr_val);
			unlock_scratchpad_sem();
		}
	}

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	sw_latency_arr[swlat_arr_wrptr].sleep_end = omap_32k_sync_timer_read();
	idle_status++;
#endif
	omap_sram_idle();

	if (target_state.core_state == PRCM_CORE_OFF) {
		if (!is_device_type_gp() &&
				is_sil_rev_greater_than(OMAP3430_REV_ES2_1))
		sdrc_write_reg(sdrc_power_register, SDRC_POWER);
	}

restore:
	/* In case of ES3.1, disable IO daisy chain */
	if (is_sil_rev_greater_than(OMAP3430_REV_ES3_0) && per_ctx_saved)
		PM_WKEN_WKUP &= ~(0x10000);

	/* Disabling IO_PAD capabilities.
	 * BUG FIX: this clear used to appear twice - once guarded by
	 * core_sleep_flg and once unconditionally right after.  The
	 * guarded copy was redundant; a single unconditional clear
	 * preserves the previous behavior on every path. */
	PM_WKEN_WKUP &= ~(0x100);
#ifdef OMAP3_START_RNG
	/*Capture the PM_PREPWSTST_CORE to be used later
	* for starting the RNG (Random Number Generator)*/
	prepwst_core_rng = PM_PREPWSTST_CORE;
#endif

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef CONFIG_MPU_OFF
		/* On ES 2.0, if scratchpad is populated with valid
		* pointer, warm reset does not work
		* So populate scratchpad restore address only in
		* cpuidle and suspend calls
		*/
		*(scratchpad_restore_addr) = 0x0;
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);
		if ((cur_neon_state == PRCM_ON) &&
			(target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			prcm_force_power_domain_state(DOM_NEON, cur_neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON,
				&pre_neon_state);

			if (pre_neon_state == PRCM_OFF)
				omap3_restore_neon_context();

#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, PRCM_ON,
							PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);

#ifdef CONFIG_OMAP_SMARTREFLEX
		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);
#endif

		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers(&target_state);
			prcm_restore_core_context(target_state.core_state);
			omap3_restore_core_settings();
		}
		/* Errata 1.4
		* if the timer device gets idled which is when we
		* are cutting the timer ICLK which is when we try
		* to put Core to RET.
		* Wait Period = 2 timer interface clock cycles +
		* 1 timer functional clock cycle
		* Interface clock = L4 clock. For the computation L4
		* clock is assumed at 50MHz (worst case).
		* Functional clock = 32KHz
		* Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557uSec
		* Roundingoff the delay value to a safer 50uSec
		*/
		omap_udelay(GPTIMER_WAIT_DELAY);
		CM_AUTOIDLE_WKUP &= ~(0x1);
		if (core_off_notification != NULL)
			core_off_notification(PRCM_FALSE);
	}

	if (cur_per_state == PRCM_ON) {
		CM_FCLKEN_PER = fclken_per;
		CM_ICLKEN_PER = iclken_per;
		prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
		if (pre_per_state == PRCM_OFF && per_ctx_saved) {
			if (enable_debug)
				per_off++;
			omap3_restore_per_context();
			post_uart_inactivity();
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_PER);
#endif
		}
	}

	pr_debug("MPU state:%x,CORE state:%x\n", PM_PREPWSTST_MPU,
							PM_PREPWSTST_CORE);
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C1)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));

#ifdef CONFIG_ENABLE_SWLATENCY_MEASURE
	if (idle_status) {
		sw_latency_arr[swlat_arr_wrptr].wkup_end =
					omap_32k_sync_timer_read();
		sw_latency_arr[swlat_arr_wrptr].wkup_start =
					wakeup_start_32ksync;

		sw_latency_arr[swlat_arr_wrptr].cstate =
			((PM_PREPWSTST_MPU & 0x3) << 2) |
			 (PM_PREPWSTST_CORE & 0x3) |
			 (omap_readl(0x48306CB0) << 16);

		swlat_arr_wrptr++;

		if (swlat_arr_wrptr == SW_LATENCY_ARR_SIZE)
			swlat_arr_wrptr = 0;
	}
#endif

	local_irq_enable();
	local_fiq_enable();

#ifdef OMAP3_START_RNG
	if (!is_device_type_gp()) {
		/*Start RNG after interrupts are enabled
		 * and only when CORE OFF was successful
		 */
		if (!(prepwst_core_rng & 0x3)) {
			ret = omap3_start_rng();
			if (ret)
				printk(KERN_INFO"Failed to generate new"
						" RN in idle %x\n", ret);
			prepwst_core_rng = 0xFF;
		}
	}
#endif

	return (u32)timespec_to_ns(&ts_idle)/1000;
}
コード例 #3
0
ファイル: pm_cpuidle.c プロジェクト: mozyg/kernel
/*
 * omap3_enter_idle - cpuidle "enter" hook for OMAP3 C-states (variant
 * without CORE OFF support).
 * @dev:   cpuidle device for this CPU
 * @state: C-state selected by the cpuidle governor
 *
 * Programs MPU/NEON/CORE/PER (plus CAM and SGX) power domains to the
 * targets in the state's driver data, idles via omap_sram_idle(), and
 * restores domain states, clock enables and saved context afterwards.
 * Pending-interrupt checks cause early bail-out via the
 * "return_sleep_time"/"restore" labels.
 *
 * Returns the idle residency in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx;
	struct timespec ts_preidle;
	struct timespec ts_postidle;
	struct timespec ts_idle;

	/* Used for LPR mode DSS context save/restore. */
	u32 pm_wken_dss      = 0;
	u32 pm_pwstctrl_dss  = 0;
	u32 cm_clkstctrl_dss = 0;
	u32 cm_fclken_dss    = 0;
	u32 cm_iclken_dss    = 0;
	u32 cm_autoidle_dss  = 0;

	/* Snapshots of the CORE/PER clock-enable registers, restored
	 * verbatim on the way out. */
	u32 fclken_core;
	u32 iclken_core;
	u32 fclken_per;
	u32 iclken_per;

	int wakeup_latency;

	struct system_power_state target_state;
	struct system_power_state cur_state;

#ifdef CONFIG_HW_SUP_TRANS
	u32 sleepdep_per;
	u32 wakedep_per;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */

	u32 sdrc_power_register = 0;
	int core_sleep_flg = 0;
	int got_console_lock = 0;

	/* Disable interrupts.
	 */
	local_irq_disable();
	local_fiq_disable();

	/* If need resched - return immediately
	 */ 
	if( need_resched()) {
		local_fiq_enable();
		local_irq_enable();
		return 0;
	}

	/* Reset previous power state registers.
	 */
	clear_prepwstst();

	omap3_idle_setup_wkup_sources ();
	
	/* Set up target state from state context provided by cpuidle.
	 */
	cx = cpuidle_get_statedata(state);
	target_state.mpu_state  = cx->mpu_state;
	target_state.core_state = cx->core_state;
	target_state.neon_state = 0; /* Avoid gcc warning. Will be set in
					adjust_target_states(). */

	/* take a time marker for residency.
	 */
	getnstimeofday(&ts_preidle);

	/* If the requested state is the shallowest one in this table
	 * (OMAP3_STATE_C1), just idle the SRAM path and bail here...
	 * NOTE(review): the original comment said "C0" but the code
	 * tests OMAP3_STATE_C1 - the state numbering in this driver
	 * evidently starts at C1.
	 */
	if (cx->type == OMAP3_STATE_C1) {
		omap_sram_idle(target_state.mpu_state);
		goto return_sleep_time;
	}

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_sleep_event(); /* about to enter deep idle */

	/* Adjust PER and NEON domain target states as well as CORE domain
	 * target state depending on MPU/CORE setting, enable_off sysfs entry
	 * and PER timer status.
	 */
	adjust_target_states(&target_state);

	wakeup_latency = cx->wakeup_latency;
	/* NOTE:
	 *   We will never get the condition below as we are not supporting
	 *   CORE OFF right now. Keeping this code around for future reference.
	 */
	if (target_state.core_state != cx->core_state) {
		/* Currently, this can happen only for core_off. Adjust wakeup
		 * latency to that of core_cswr state. Hard coded now and needs
		 * to be made more generic omap3_power_states[4] is CSWR for
		 * core */
		wakeup_latency = omap3_power_states[4].wakeup_latency;
	}

	/* Reprogram next wake up tick to adjust for wake latency */
	if (wakeup_latency > 1000) {
		struct tick_device *d = tick_get_device(smp_processor_id());
		ktime_t now = ktime_get();

		if (ktime_to_ns(ktime_sub(d->evtdev->next_event, now)) >
					(wakeup_latency * 1000 + NSEC_PER_MSEC)) {
			ktime_t adjust = ktime_set(0, (wakeup_latency * 1000));
			ktime_t next   = ktime_sub(d->evtdev->next_event, adjust);
			clockevents_program_event(d->evtdev, next, now);
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;


	/* Remember the current power states and clock settings.
	 */
	prcm_get_power_domain_state(DOM_PER,  &cur_state.per_state);
	prcm_get_power_domain_state(DOM_CAM,  &cur_state.cam_state);
	prcm_get_power_domain_state(DOM_SGX,  &cur_state.sgx_state);
	prcm_get_power_domain_state(DOM_NEON, &cur_state.neon_state);

	fclken_core = CM_FCLKEN1_CORE;
	iclken_core = CM_ICLKEN1_CORE;
	fclken_per  = CM_FCLKEN_PER;
	iclken_per  = CM_ICLKEN_PER;

#ifdef CONFIG_HW_SUP_TRANS
	/* Facilitating SWSUP RET, from HWSUP mode */
	sleepdep_per = CM_SLEEPDEP_PER;
	wakedep_per  = PM_WKDEP_PER;
#endif /* #ifdef CONFIG_HW_SUP_TRANS */

	/* If target state if core_off, save registers before changing
	 * anything.
	 */
	if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
		prcm_save_registers();
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto return_sleep_time;


	/* Program MPU and NEON to target state */
	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.neon_state != PRCM_ON)) {

			if (target_state.neon_state == PRCM_OFF)
				omap3_save_neon_context();

			prcm_transition_domain_to(DOM_NEON, target_state.neon_state);
		}
#ifdef _disabled_CONFIG_MPU_OFF
		/* Populate scratchpad restore address */
		scratchpad_set_restore_addr();
#endif
		/* TODO No support for OFF Mode yet
		if(target_state.core_state > PRCM_CORE_CSWR_MEMRET)
			omap3_save_secure_ram_context(target_state.core_state);
		*/
		prcm_set_mpu_domain_state(target_state.mpu_state);
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;


	/* Program CORE and PER to target state */
	if (target_state.core_state > PRCM_CORE_ACTIVE) {
		/* Log core sleep attempt */
		core_sleep_flg = 1;

		/* Lock the console to prevent potential access to UARTs.
		 */
		if (0 == try_acquire_console_sem()) {
			got_console_lock = 1;
		}

		/* Handle PER, CAM and SGX domains. */
		if ((cur_state.per_state == PRCM_ON) &&
		    (target_state.per_state != PRCM_ON)) {

			if (target_state.per_state == PRCM_OFF) {
				omap3_save_per_context();
			}

			prcm_transition_domain_to(DOM_PER, target_state.per_state);
		}
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_RET);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_RET);

		disable_smartreflex(SR1_ID);
		disable_smartreflex(SR2_ID);

		prcm_set_core_domain_state(target_state.core_state);

		/* Enable Autoidle for GPT1 explicitly - Errata 1.4 */
		CM_AUTOIDLE_WKUP |= 0x1;

		/* Disable HSUSB OTG ICLK explicitly*/
		CM_ICLKEN1_CORE &= ~0x10;

		/* Enabling GPT1 wake-up capabilities */
		PM_WKEN_WKUP |= 0x1;

		/* Errata 2.15
		 * Configure UARTs to ForceIdle. Otherwise they can prevent
		 * CORE RET.
		 */
		omap24xx_uart_set_force_idle();

		CM_ICLKEN1_CORE &= ~(1<<7); /* MAILBOXES */
		CM_ICLKEN1_CORE &= ~(1<<6); /* OMAPCTRL */

		/* If we are in LPR mode we need to set up DSS accordingly.
		 */
		if (omap2_disp_lpr_is_enabled()) {
			pm_wken_dss      = PM_WKEN_DSS;
			pm_pwstctrl_dss  = PM_PWSTCTRL_DSS;
			cm_clkstctrl_dss = CM_CLKSTCTRL_DSS;
			cm_fclken_dss    = CM_FCLKEN_DSS;
			cm_iclken_dss    = CM_ICLKEN_DSS;
			cm_autoidle_dss  = CM_AUTOIDLE_DSS;
			PM_WKEN_DSS      = 0x00000001;
			PM_PWSTCTRL_DSS  = 0x00030107;
			CM_CLKSTCTRL_DSS = 0x00000003;
			CM_FCLKEN_DSS    = 0x00000001;
			CM_ICLKEN_DSS    = 0x00000001;
			CM_AUTOIDLE_DSS  = 0x00000001;
		}
	}

	/* Check for pending interrupts. If there is an interrupt, return */
	if (INTCPS_PENDING_IRQ0 | INTCPS_PENDING_IRQ1 | INTCPS_PENDING_IRQ2)
		goto restore;

#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL |= 0x18; /* set sysclk to stop */
#endif /* #ifdef CONFIG_DISABLE_HFCLK */


	DEBUG_STATE_CAPTURE();


	/* Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes-up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0)) {
		sdrc_power_register = SDRC_POWER_REG;
		SDRC_POWER_REG &= ~(SDRC_PWR_AUTOCOUNT_MASK | SDRC_PWR_CLKCTRL_MASK);
		SDRC_POWER_REG |= 0x120;
		if (target_state.core_state == PRCM_CORE_OFF)
			save_scratchpad_contents();
	}

	omap_sram_idle(target_state.mpu_state);

	/* Errata 1.142:
	 * SDRC not sending auto-refresh when OMAP wakes-up from OFF mode
	 */
	if (!is_device_type_gp() && is_sil_rev_equal_to(OMAP3430_REV_ES3_0))
		SDRC_POWER_REG = sdrc_power_register;

restore:
#ifdef CONFIG_DISABLE_HFCLK
	PRM_CLKSRC_CTRL &= ~0x18;
#endif /* #ifdef CONFIG_DISABLE_HFCLK */
	/* Disabling IO_PAD capabilities */
	PM_WKEN_WKUP &= ~(0x100);

	CM_FCLKEN1_CORE = fclken_core;
	CM_ICLKEN1_CORE = iclken_core;

	if (target_state.mpu_state > PRCM_MPU_ACTIVE) {
#ifdef _disabled_CONFIG_MPU_OFF
		/* On ES 2.0, if scratchpad is populated with valid pointer,
		 * warm reset does not work So populate scratchpad restore
		 * address only in cpuidle and suspend calls
		*/
		scratchpad_clr_restore_addr();
#endif
		prcm_set_mpu_domain_state(PRCM_MPU_ACTIVE);

		if ((cur_state.neon_state == PRCM_ON) &&
		    (target_state.mpu_state > PRCM_MPU_INACTIVE)) {
			u8 pre_state;

			prcm_force_power_domain_state(DOM_NEON, cur_state.neon_state);
			prcm_get_pre_power_domain_state(DOM_NEON, &pre_state);

			if (pre_state == PRCM_OFF) {
				omap3_restore_neon_context();
			}

#ifdef CONFIG_HW_SUP_TRANS
			prcm_set_power_domain_state(DOM_NEON, POWER_DOMAIN_ON,
								PRCM_AUTO);
#endif
		}
	}

	/* Continue core restoration part, only if Core-Sleep is attempted */
	if ((target_state.core_state > PRCM_CORE_ACTIVE) && core_sleep_flg) {
		u8 pre_per_state;

		prcm_set_core_domain_state(PRCM_CORE_ACTIVE);

		omap24xx_uart_clr_force_idle();

		enable_smartreflex(SR1_ID);
		enable_smartreflex(SR2_ID);

		/* Turn PER back ON if it was ON before idle.
		 */
		if (cur_state.per_state == PRCM_ON) {
			prcm_force_power_domain_state(DOM_PER, cur_state.per_state);

			CM_ICLKEN_PER = iclken_per;
			CM_FCLKEN_PER = fclken_per;

			prcm_get_pre_power_domain_state(DOM_PER, &pre_per_state);
			if (pre_per_state == PRCM_OFF) {
				omap3_restore_per_context();
#ifdef CONFIG_OMAP34XX_OFFMODE
				context_restore_update(DOM_PER);
#endif
			}

#ifdef CONFIG_HW_SUP_TRANS
			/* Facilitating SWSUP RET, from HWSUP mode */
			CM_SLEEPDEP_PER = sleepdep_per;
			PM_WKDEP_PER    = wakedep_per;
			prcm_set_power_domain_state(DOM_PER, PRCM_ON, PRCM_AUTO);
#endif
		}

		/* Restore CAM and SGX. */
		if (PRCM_ON == cur_state.cam_state)
			prcm_transition_domain_to(DOM_CAM, PRCM_ON);
		if (PRCM_ON == cur_state.sgx_state)
			prcm_transition_domain_to(DOM_SGX, PRCM_ON);

		/* If we lost CORE context, restore it.
		 */
		if (target_state.core_state >= PRCM_CORE_OSWR_MEMRET) {
#ifdef CONFIG_OMAP34XX_OFFMODE
			context_restore_update(DOM_CORE1);
#endif
			prcm_restore_registers();
			prcm_restore_core_context(target_state.core_state);
#ifdef CONFIG_CORE_OFF
			omap3_restore_core_settings();
#endif
		}

		/* Restore DSS settings */
		if (omap2_disp_lpr_is_enabled()) {
			PM_WKEN_DSS      = pm_wken_dss;
			PM_PWSTCTRL_DSS  = pm_pwstctrl_dss;
			CM_CLKSTCTRL_DSS = cm_clkstctrl_dss;
			CM_FCLKEN_DSS    = cm_fclken_dss;
			CM_ICLKEN_DSS    = cm_iclken_dss;
			CM_AUTOIDLE_DSS  = cm_autoidle_dss;
		}

		/* At this point CORE and PER domain are back. We can release
		 * the console if we have it.
		 */
		if (got_console_lock) {
			release_console_sem();
		}

#ifdef CONFIG_OMAP_32K_TIMER
		/* Errata 1.4
		 * If a General Purpose Timer (GPTimer) is in posted mode
		 * (TSIRC.POSTED=1), due to internal resynchronizations, values
		 * read in TCRR, TCAR1 and TCAR2 registers right after the
		 * timer interface clock (L4) goes from stopped to active may
		 * not return the expected values. The most common event
		 * leading to this situation occurs upon wake up from idle.
		 *
		 * Software has to wait at least (2 timer interface clock
		 * cycles + 1 timer functional clock cycle) after L4 clock
		 * wakeup before reading TCRR, TCAR1 or TCAR2 registers for
		 * GPTimers in POSTED internal synchro- nization mode, and
		 * before reading WCRR register of the Watchdog timers . The
		 * same workaround must be applied before reading CR and
		 * 32KSYNCNT_REV registers of the synctimer module.
		 *
		 * Wait Period = 2 timer interface clock cycles +
		 *               1 timer functional clock cycle
		 * Interface clock  = L4 clock (50MHz worst case).
		 * Functional clock = 32KHz
		 * Wait Period = 2*10^-6/50 + 1/32768 = 0.000030557 = 30.557us
		 * Rounding off the delay value to a safer 50us.
		 */
		udelay(GPTIMER_WAIT_DELAY);
#endif

		/* Disable autoidling of GPT1.
		 */
		CM_AUTOIDLE_WKUP &= ~(0x1);
	}

	DPRINTK("MPU state:%x, CORE state:%x\n",
			PM_PREPWSTST_MPU, PM_PREPWSTST_CORE);

	/* Do wakeup event checks */
	post_uart_activity();

	/* Update stats for sysfs entries.
	 */
	store_prepwst();

return_sleep_time:
	getnstimeofday(&ts_postidle);
#if defined(CONFIG_SYSFS) && defined(DEBUG_BAIL_STATS)
	ts_last_wake_up = ts_postidle;
#endif
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	if (cx->type > OMAP3_STATE_C2)
		sched_clock_idle_wakeup_event(timespec_to_ns(&ts_idle));


	DEBUG_STATE_PRINT(core_sleep_flg);


	local_irq_enable();
	local_fiq_enable();

	return (u32)timespec_to_ns(&ts_idle)/1000;
}
コード例 #4
0
/*
 * omap_mmc_probe - probe one OMAP HSMMC controller instance.
 * @pdev: platform device describing the controller
 *
 * Maps the controller registers, acquires interface/functional (and,
 * on 2430, debounce) clocks, configures bus width and voltage
 * capabilities from platform data, installs the controller and
 * card-detect IRQ handlers and registers the mmc_host.
 *
 * Returns 0 on success or a negative errno; everything acquired before
 * a failure is released on the goto-based unwind path.
 */
static int __init omap_mmc_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct mmc_omap_host *host = NULL;
	struct resource *res;
	int ret = 0, irq;
	u32 hctl, capa;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "No Slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, res->end - res->start + 1,
							pdev->name);
	if (res == NULL)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto err;
	}

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dev->dma_mask = &pdata->dma_mask;
	host->dma_ch	= -1;
	host->irq	= irq;
	host->id	= pdev->id;
	host->slot_id	= 0;
	host->mapbase	= res->start;
	host->base	= ioremap(host->mapbase, SZ_4K);
	if (!host->base) {
		/* BUG FIX: ioremap() result was previously used unchecked */
		ret = -ENOMEM;
		goto err;
	}
	mmc->ops	= &mmc_omap_ops;
	mmc->f_min	= 400000;
	mmc->f_max	= 52000000;

	sema_init(&host->sem, 1);
	spin_lock_init(&host->clk_lock);
	spin_lock_init(&host->inits_lock);

	init_timer(&host->inact_timer);
	host->inact_timer.function = mmc_omap_inact_timer;
	host->inact_timer.data = (unsigned long) host;

	host->iclk = clk_get(&pdev->dev, "mmchs_ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		host->iclk = NULL;
		goto err1;
	}
	host->fclk = clk_get(&pdev->dev, "mmchs_fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		clk_put(host->iclk);
		goto err1;
	}

	ret = mmc_clk_try_enable(host);
	if (ret != 0) {
		/* BUG FIX: this path previously left ret == 0, so probe
		 * reported success even though the clocks never came up. */
		clk_put(host->iclk);
		clk_put(host->fclk);
		goto err1;
	}

	if (cpu_is_omap2430()) {
		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
		/*
		 * MMC can still work without debounce clock.
		 */
		if (IS_ERR(host->dbclk))
			dev_dbg(mmc_dev(host->mmc),
				"Failed to get debounce clock\n");
		else
			if (clk_enable(host->dbclk) != 0)
				dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
							" clk failed\n");
			else
				host->dbclk_enabled = 1;
	}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	mmc->max_phys_segs = 1;
	mmc->max_hw_segs = 1;
#endif
	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mmc->ocr_avail = mmc_slot(host).ocr_mask;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;

	if (pdata->slots[host->slot_id].wire8) {
		if (is_sil_rev_equal_to(OMAP3430_REV_ES3_0))
			mmc->caps |= MMC_CAP_8_BIT_DATA;
		else
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}
	if (pdata->slots[host->slot_id].wire4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	hctl = SDVS30;
	capa = VS30 | VS18;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	/* Set to SIDLE & Auto Idle mode */
	OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SIDLE_AUTOIDLE);

	/* Set SD bus power bit */
	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	/* Request IRQ for MMC operations */
	ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
		goto err_irq;
	}

	/* Request IRQ for card detect */
	if ((mmc_slot(host).card_detect_irq) && (mmc_slot(host).card_detect)) {
		ret = request_irq(mmc_slot(host).card_detect_irq,
				  omap_mmc_cd_handler,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
					  | IRQF_DISABLED,
				  mmc_hostname(mmc), host);
		if (ret) {
			dev_dbg(mmc_dev(host->mmc),
				"Unable to grab MMC CD IRQ\n");
			goto err_irq_cd;
		}
	} else if (mmc_slot(host).card_detect) {
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	}

	INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect);
	if (pdata->init != NULL) {
		if (pdata->init(&pdev->dev) != 0) {
			dev_dbg(mmc_dev(host->mmc),
				"Unable to configure MMC IRQs\n");
			goto err_irq_cd_init;
		}
	}

	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);

	platform_set_drvdata(pdev, host);
	mmc_add_host(mmc);

	if (host->pdata->slots[host->slot_id].name != NULL) {
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
	if (mmc_slot(host).card_detect_irq && mmc_slot(host).card_detect &&
			host->pdata->slots[host->slot_id].get_cover_state) {
		ret = device_create_file(&mmc->class_dev, &dev_attr_cover_switch);
		if (ret < 0)
			goto err_cover_switch;
	}

	return 0;

err_cover_switch:
	/* BUG FIX: undo the attribute that actually succeeded (slot_name),
	 * not the cover_switch attribute whose creation just failed. */
	if (host->pdata->slots[host->slot_id].name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_slot_name:
	mmc_remove_host(mmc);
err_irq_cd_init:
	/* NOTE(review): on the err_slot_name fall-through the CD IRQ may
	 * never have been requested; pre-existing hazard, left as-is. */
	free_irq(mmc_slot(host).card_detect_irq, host);
err_irq_cd:
	free_irq(host->irq, host);
err_irq:
	mmc_clk_try_disable(host);
	clk_put(host->fclk);
	clk_put(host->iclk);
	if (host->dbclk_enabled) {
		clk_disable(host->dbclk);
		clk_put(host->dbclk);
	}

err1:
	iounmap(host->base);
err:
	/* BUG FIX: log via the platform device; host (and host->mmc) is
	 * NULL here when mmc_alloc_host() failed. */
	dev_dbg(&pdev->dev, "Probe Failed\n");
	release_mem_region(res->start, res->end - res->start + 1);
	if (host)
		mmc_free_host(mmc);
	return ret;
}
コード例 #5
0
ファイル: hsmmc.c プロジェクト: smartassfox/archos_kernel_27
/*
 * hsmmc_set_power - switch the MMC1 slot supply on or off.
 * @dev:      slot device (unused here)
 * @slot:     slot index (unused here; this handler serves MMC1)
 * @power_on: nonzero to power up, zero to power down
 * @vdd:      MMC_VDD_* bit index selecting the target voltage
 *
 * Sequences the OMAP PBIAS/DEVCONF control registers and the T2
 * (TWL4030) VMMC1/VSIM regulators for a power-up or power-down of the
 * slot, per the OMAP34xx PBIAS programming model.
 *
 * Returns 0 on success, a nonzero resource_request() code when the
 * VSIM request fails, or 1 on any other failure.
 * NOTE(review): the "return 1" error convention predates this change
 * and is kept for caller compatibility; a negative errno would be the
 * usual kernel style - confirm callers before changing it.
 */
static int hsmmc_set_power(struct device *dev, int slot, int power_on,
				int vdd)
{
	u32 vdd_sel = 0, devconf = 0, reg = 0;
	int ret = 0;

	/* REVISIT: Using address directly till the control.h defines
	 * are settled.
	 */
#if defined(CONFIG_ARCH_OMAP2430)
	#define OMAP2_CONTROL_PBIAS 0x490024A0
#else
	#define OMAP2_CONTROL_PBIAS 0x48002520
#endif

	if (power_on) {
		if (cpu_is_omap24xx())
			devconf = omap_readl(OMAP2_CONTROL_DEVCONF1);
		else
			devconf = omap_readl(OMAP3_CONTROL_DEVCONF0);

		switch (1 << vdd) {
		case MMC_VDD_33_34:
		case MMC_VDD_32_33:
			vdd_sel = VSEL_3V;
			if (cpu_is_omap24xx())
				devconf |= OMAP2_CONTROL_DEVCONF1_ACTOV;
			break;
		case MMC_VDD_165_195:
			vdd_sel = VSEL_18V;
			if (cpu_is_omap24xx())
				devconf &= ~OMAP2_CONTROL_DEVCONF1_ACTOV;
			break;
		default:
			/* Unsupported voltage: leave vdd_sel at 0,
			 * matching the previous fall-out behavior. */
			break;
		}

		if (cpu_is_omap24xx())
			omap_writel(devconf, OMAP2_CONTROL_DEVCONF1);
		else
			omap_writel(devconf | OMAP2_CONTROL_DEVCONF0_LBCLK,
				    OMAP3_CONTROL_DEVCONF0);

		/* PBIAS power-up sequence: software control, bias off. */
		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg |= OMAP2_CONTROL_PBIAS_SCTRL;
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg &= ~OMAP2_CONTROL_PBIAS_PWRDNZ;
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg |= OMAP2_CONTROL_PBIAS_SCTRL1;
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg &= ~OMAP2_CONTROL_PBIAS_PWRDNZ1;
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		ret = resource_request(rhandlemmc1, (vdd_sel == VSEL_3V ?
			T2_VMMC1_3V00 : T2_VMMC1_1V85));
		if (ret != 0)
			goto err;

		/* Enable VSIM to support MMC 8-bit on ES2 */
		if (is_sil_rev_greater_than(OMAP3430_REV_ES1_0)) {
			ret = resource_request(rhandlevsim,
				(vdd_sel == VSEL_3V ?
				T2_VSIM_3V00 : T2_VSIM_1V80));
			if (ret != 0) {
				/* BUG FIX: don't leak the VMMC1 resource
				 * acquired just above on this error path. */
				resource_release(rhandlemmc1);
				return ret;
			}
		}
		/* 100ms delay required for PBIAS configuration */
		msleep(100);
		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg = (vdd_sel == VSEL_18V) ?
			(((reg | 0x0606) & ~0x1) & ~(1<<8))
			: (reg | 0x0707);
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		return ret;

	} else {
		/* Power OFF */

		/* For MMC1, Toggle PBIAS before every power up sequence */
		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg &= ~OMAP2_CONTROL_PBIAS_PWRDNZ;
		omap_writel(reg, OMAP2_CONTROL_PBIAS);

		if (rhandlemmc1 != NULL) {
			ret = resource_release(rhandlemmc1);
			if (ret != 0)
				goto err;
		}

		if (is_sil_rev_greater_than(OMAP3430_REV_ES1_0)
					 && (rhandlevsim != NULL)) {
			ret = resource_release(rhandlevsim);
			if (ret != 0)
				goto err;
		}

		if (is_sil_rev_equal_to(OMAP3430_REV_ES3_0)) {
			ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
				LDO_CLR, VSIM_DEV_GRP);
			if (ret)
				goto err;
			ret = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
				LDO_CLR, VSIM_DEDICATED);
			if (ret)
				goto err;
		}

		/* 100ms delay required for PBIAS configuration */
		msleep(100);
		reg = omap_readl(OMAP2_CONTROL_PBIAS);
		reg |= (OMAP2_CONTROL_PBIAS_VMODE |
			OMAP2_CONTROL_PBIAS_VMODE1 |
			OMAP2_CONTROL_PBIAS_PWRDNZ |
			OMAP2_CONTROL_PBIAS_PWRDNZ1 |
			OMAP2_CONTROL_PBIAS_SCTRL |
			OMAP2_CONTROL_PBIAS_SCTRL1);
		omap_writel(reg, OMAP2_CONTROL_PBIAS);
	}

	return 0;

err:
	return 1;
}