/**
 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 * @cpu : CPU ID
 * @power_state: CPU low power state.
 *
 * Programs the dying CPU's powerdomain to @power_state and runs the
 * low-power finish sequence.  Returns -ENXIO on OMAP4430 ES1.0 (no
 * low-power support), 0 otherwise.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int cpu_state = 0;

	/* OMAP4430 ES1.0 silicon does not support this transition */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	/* Non-zero state tells the suspend finisher to fully power off */
	if (power_state == PWRDM_POWER_OFF)
		cpu_state = 1;

	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, virt_to_phys(pm_info->secondary_startup));
	scu_pwrst_prepare(cpu, power_state);

	/*
	 * CPU never returns back if targeted power state is OFF mode.
	 * CPU ONLINE follows normal CPU ONLINE path via
	 * omap_secondary_startup().
	 */
	omap4_finish_suspend(cpu_state);

	/* Came back (state was not OFF): restore the next-state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
	return 0;
}
/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
	struct clockdomain *clkd = mpu_pwrdm->pwrdm_clkdms[0];
	u32 ret;

	/* GP devices have no secure RAM to save */
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		return;

	/*
	 * MPU next state must be set to POWER_ON temporarily,
	 * otherwise the WFI executed inside the ROM code
	 * will hang the system.
	 */
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
	omap2_clkdm_deny_idle(clkd);
	ret = _omap_save_secure_sram((u32 *)__pa(omap3_secure_ram_storage));
	pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
	omap2_clkdm_allow_idle(clkd);

	/* Following is for error tracking, it should not happen */
	if (ret) {
		printk(KERN_ERR "save_secure_sram() returns %08x\n", ret);
		while (1)
			;
	}
}
/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 *
 * Returns the value of omap3_enter_idle() for the state actually used.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	struct cpuidle_state *new_state = next_valid_state(dev, state);
	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
	u32 cam_state;
	struct omap3_processor_cx *cx;
	int ret;

	/* Fall back to the safe state when bus-master activity is pending */
	if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
		BUG_ON(!dev->safe_state);
		new_state = dev->safe_state;
		goto select_state;
	}

	cx = cpuidle_get_statedata(state);
	core_next_state = cx->core_state;

	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

	/*
	 * Prevent idle completely if CAM is active.
	 * CAM does not have wakeup capability in OMAP3.
	 */
	cam_state = pwrdm_read_pwrst(cam_pd);
	if (cam_state == PWRDM_POWER_ON) {
		new_state = dev->safe_state;
		goto select_state;
	}

	/*
	 * Prevent PER off if CORE is not in retention or off as this
	 * would disable PER wakeups completely.
	 */
	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if ((per_next_state == PWRDM_POWER_OFF) &&
	    (core_next_state > PWRDM_POWER_RET))
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);

select_state:
	dev->last_state = new_state;
	ret = omap3_enter_idle(dev, new_state);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
}
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Returns the time actually spent idle, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
	u32 saved_mpu_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	/* Without off-mode support, cap both domains at RETENTION */
	if (!enable_off_mode) {
		if (mpu_state < PWRDM_POWER_RET)
			mpu_state = PWRDM_POWER_RET;
		if (core_state < PWRDM_POWER_RET)
			core_state = PWRDM_POWER_RET;
	}

	/* Bail out early if there is already work to do */
	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	saved_mpu_state = pwrdm_read_next_pwrst(mpu_pd);
	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/* Deny idle for C1 so the domains stay active across the wfi */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

	/* Restore the MPU next-state that was programmed before this idle */
	pwrdm_set_next_pwrst(mpu_pd, saved_mpu_state);

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
/*
 * prcm_setup_regs - set up default PRCM power states and wakeup events.
 * Programs CORE/MPU retention targets, force-powers down DSP and GFX,
 * enables per-clockdomain setup, and configures voltage/clock setup
 * times and wake-up sources.
 */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/* Enable PRCM autoidle */
	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
				OMAP2_PRCM_SYSCONFIG_OFFSET);

	/* CORE memory banks retain their contents during RETENTION */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/* MPU: preserve logic state, next state RETENTION */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */
	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(gfx_clkdm);

	/* Apply per-clockdomain setup and keep MPU woken by WKUP events */
	clkdm_for_each(omap_pm_clkdms_setup, NULL);
	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

#ifdef CONFIG_SUSPEND
	omap_pm_suspend = omap2_enter_full_retention;
#endif

	/* Number of 32 kHz clock cycles for sys_clk stabilisation */
	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_VOLTSETUP_OFFSET);
	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
				OMAP24XX_MEMRETCTRL_MASK |
				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events: GPIOs and GPT1 */
	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
				WKUP_MOD, PM_WKEN);
}
/*
 * Program the CPUx powerdomain's next (target) power state.
 * (The previous header comment said "previous power state"; the code
 * has always programmed the NEXT state via pwrdm_set_next_pwrst().)
 */
static inline void set_cpu_next_pwrst(unsigned int cpu_id,
				      unsigned int power_state)
{
	struct omap4_cpu_pm_info *info = &per_cpu(omap4_pm_info, cpu_id);

	pwrdm_set_next_pwrst(info->pwrdm, power_state);
}
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Returns the time spent idle, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/*
	 * Vendor change (BC1_110630): additionally skip the wfi when the
	 * bus-master activity check reports pending activity.
	 */
	if (omap_irq_pending() || need_resched() || omap3_idle_bm_check())
		goto return_sleep_time;

	/* Deny idle for C1 so both domains stay active across the wfi */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	/* Re-allow idle for C1 */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
/*
 * am33xx_pm_init - initialize AM33xx power management support
 *
 * Looks up the power/clock domains and the wkup_m3 co-processor needed
 * for suspend, and registers the platform suspend ops when deep sleep
 * is possible.
 *
 * Returns 0 on success, -ENODEV when not on AM33xx hardware, -EINVAL
 * when the MPU omap_device is missing, or the wkup_m3_init() error code.
 */
static int __init am33xx_pm_init(void)
{
	/*
	 * Fix: initialize to 0.  Without CONFIG_SUSPEND nothing below
	 * assigns ret, and the original code returned it uninitialized.
	 */
	int ret = 0;

	if (!cpu_is_am33xx())
		return -ENODEV;

	pr_info("Power Management for AM33XX family\n");

#ifdef CONFIG_SUSPEND

#ifdef CONFIG_TI_PM_DISABLE_VT_SWITCH
	/* Keep the VT from switching during suspend/resume */
	pm_set_vt_switch(0);
#endif

	(void) clkdm_for_each(clkdms_setup, NULL);

	/* CEFUSE domain should be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (cefuse_pwrdm == NULL)
		printk(KERN_ERR "Failed to get cefuse_pwrdm\n");
	else
		pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF);

	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	if (gfx_pwrdm == NULL)
		printk(KERN_ERR "Failed to get gfx_pwrdm\n");

	gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm");
	if (gfx_l3_clkdm == NULL)
		printk(KERN_ERR "Failed to get gfx_l3_clkdm\n");

	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");
	if (gfx_l4ls_clkdm == NULL)
		printk(KERN_ERR "Failed to get gfx_l4ls_gfx_clkdm\n");

	mpu_dev = omap_device_get_by_hwmod_name("mpu");
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

	/* The wkup_m3 co-processor performs the actual low-power entry */
	ret = wkup_m3_init();
	if (ret) {
		pr_err("Could not initialise WKUP_M3. "
			"Power management will be compromised\n");
		enable_deep_sleep = false;
	}

	if (enable_deep_sleep)
		suspend_set_ops(&am33xx_pm_ops);

#endif /* CONFIG_SUSPEND */

	return ret;
}
/*
 * This sets pwrdm state (other than mpu & core. Currently only ON &
 * RET are supported.
 *
 * Walks down from @state to the nearest state the powerdomain supports
 * and programs it as the next power state.  If the domain is currently
 * below ON, either a hardware low-power state change is requested (when
 * supported) or the clockdomain is force-woken first and its hwsup mode
 * restored afterwards.
 *
 * Returns 0 on success or when no change was needed, -EINVAL for a bad
 * powerdomain pointer, or the pwrdm_set_next_pwrst() error code.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = -1;
	int ret = 0;
	int hwsup = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	/* Step down to the closest supported power state */
	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		if ((pwrdm_read_pwrst(pwrdm) > state) &&
			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			sleep_switch = LOWPOWERSTATE_SWITCH;
		} else {
			/* Remember hwsup mode so it can be restored below */
			hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			sleep_switch = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		pr_err("%s: unable to set state of powerdomain: %s\n",
		       __func__, pwrdm->name);
		goto err;
	}

	switch (sleep_switch) {
	case FORCEWAKEUP_SWITCH:
		if (hwsup)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
		break;
	case LOWPOWERSTATE_SWITCH:
		pwrdm_set_lowpwrstchange(pwrdm);
		break;
	default:
		return ret;
	}

	pwrdm_state_switch(pwrdm);
err:
	return ret;
}
/*
 * This sets pwrdm state (other than mpu & core. Currently only ON &
 * RET are supported.
 *
 * Walks down from @state to the nearest supported state, programs it as
 * the next power state, and force-wakes the clockdomain first when the
 * domain is currently below ON (unless the hardware supports a direct
 * low-power state change).
 *
 * Returns 0 on success or when no change was needed, -EINVAL for a bad
 * powerdomain pointer, or the pwrdm_set_next_pwrst() error code.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	/*
	 * NOTE(review): if FORCEWAKEUP_SWITCH happens to be defined as 0,
	 * this initial value aliases it and the switch below would run the
	 * FORCEWAKEUP branch even when no wakeup was done — later kernels
	 * initialize this to -1; verify the macro values.
	 */
	int sleep_switch = 0;
	int ret = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	/* Step down to the closest supported power state */
	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		if ((pwrdm_read_pwrst(pwrdm) > state) &&
			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			/* Hardware can switch low-power states directly */
			sleep_switch = LOWPOWERSTATE_SWITCH;
		} else {
			/* Otherwise force-wake the clockdomain first */
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			pwrdm_wait_transition(pwrdm);
			sleep_switch = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
		       pwrdm->name);
		goto err;
	}

	switch (sleep_switch) {
	case FORCEWAKEUP_SWITCH:
		if (pwrdm->pwrdm_clkdms[0]->flags & CLKDM_CAN_ENABLE_AUTO)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
		break;
	case LOWPOWERSTATE_SWITCH:
		pwrdm_set_lowpwrstchange(pwrdm);
		break;
	default:
		return ret;
	}

	pwrdm_wait_transition(pwrdm);
	pwrdm_state_switch(pwrdm);
err:
	return ret;
}
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Returns the time spent idle, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/* Bail out early if there is already work pending */
	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Deny idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle(false);

	/* Re-allow idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
/*
 * omap2_enter_full_retention - program full-chip RETENTION and suspend.
 * Programs MPU to retention, quiesces USB/GPIO, runs the SRAM suspend
 * sequence (skipped if an IRQ is already pending), then restores clocks
 * and acknowledges wake-up events.  Always returns 0.
 */
static int omap2_enter_full_retention(void)
{
	u32 l;

	/* Drop our reference so the oscillator may stop in retention */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/* MPU to RETENTION, preserving logic state */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround: force USB into standby */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	/* One last check for pending IRQs to avoid sleeping needlessly */
	if (omap_irq_pending())
		goto no_sleep;

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

no_sleep:
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	return 0;
}
/*
 * am33xx_pm_suspend - AM33xx platform suspend entry.
 * Parks CPGMAC/USB/GPMC in a known idle state, puts the GFX domain to
 * sleep, then suspends via the SRAM idle routine.  Returns the
 * cpu_suspend() result (also recorded in core_suspend_stat).
 */
static int am33xx_pm_suspend(void)
{
	int state, ret = 0;
	struct omap_hwmod *cpgmac_oh, *gpmc_oh, *usb_oh;

	/*
	 * Cycle these hwmods through enable/idle so they are left in a
	 * known idle state before suspending.
	 */
	cpgmac_oh = omap_hwmod_lookup("cpgmac0");
	usb_oh = omap_hwmod_lookup("usb_otg_hs");
	gpmc_oh = omap_hwmod_lookup("gpmc");

	omap_hwmod_enable(cpgmac_oh);
	omap_hwmod_enable(usb_oh);
	omap_hwmod_enable(gpmc_oh);

	omap_hwmod_idle(cpgmac_oh);
	omap_hwmod_idle(usb_oh);
	omap_hwmod_idle(gpmc_oh);

	if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_sleep(gfx_l3_clkdm);
		clkdm_sleep(gfx_l4ls_clkdm);
	}

	/* Try to put GFX to sleep */
	if (gfx_pwrdm)
		pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF);
	else
		pr_err("Could not program GFX to low power state\n");

	/*
	 * Gate the MPU clock around the actual suspend; 0x2 presumably
	 * re-enables MODULEMODE — confirm against the AM335x TRM.
	 */
	writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL);
	ret = cpu_suspend(0, am33xx_do_sram_idle);
	writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL);

	/* Report whether GFX actually reached its low power state */
	if (gfx_pwrdm) {
		state = pwrdm_read_pwrst(gfx_pwrdm);
		if (state != PWRDM_POWER_OFF)
			pr_err("GFX domain did not transition to low power state\n");
		else
			pr_info("GFX domain entered low power state\n");
	}

	/* XXX: Why do we need to wakeup the clockdomains? */
	if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_wakeup(gfx_l3_clkdm);
		clkdm_wakeup(gfx_l4ls_clkdm);
	}

	core_suspend_stat = ret;

	return ret;
}
/*
 * This sets pwrdm state (other than mpu & core. Currently only ON &
 * RET are supported.
 *
 * The requested state is first lowered to the nearest state the domain
 * supports; if the domain currently sits below ON, the clockdomain is
 * force-woken (or a direct low-power state change is used when the
 * hardware supports it) before the next-state is programmed.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 pwrst)
{
	u8 cur, next;
	int method = -1, ret = 0, was_hwsup = 0;

	if (!pwrdm || IS_ERR(pwrdm))
		return -EINVAL;

	/* Fall back to the nearest lower state the domain supports */
	for (; !(pwrdm->pwrsts & (1 << pwrst)); pwrst--)
		if (pwrst == PWRDM_POWER_OFF)
			return 0;

	next = pwrdm_read_next_pwrst(pwrdm);
	if (next == pwrst)
		return 0;

	cur = pwrdm_read_pwrst(pwrdm);
	if (cur < PWRDM_POWER_ON) {
		if ((cur > pwrst) &&
		    (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			method = LOWPOWERSTATE_SWITCH;
		} else {
			was_hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			method = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, pwrst);
	if (ret)
		pr_err("%s: unable to set power state of powerdomain: %s\n",
		       __func__, pwrdm->name);

	if (method == FORCEWAKEUP_SWITCH) {
		/* Restore the clockdomain's pre-wakeup idle behaviour */
		if (was_hwsup)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
	} else if (method == LOWPOWERSTATE_SWITCH) {
		pwrdm_set_lowpwrstchange(pwrdm);
		pwrdm_wait_transition(pwrdm);
		pwrdm_state_switch(pwrdm);
	}

	return ret;
}
/* * FIXME: This function should be called before entering off-mode after * OMAP3 secure services have been accessed. Currently it is only called * once during boot sequence, but this works as we are not using secure * services. */ static void omap3_save_secure_ram_context(void) { u32 ret; int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { /* * MPU next state must be set to POWER_ON temporarily, * otherwise the WFI executed inside the ROM code * will hang the system. */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ret = _omap_save_secure_sram((u32 *) __pa(omap3_secure_ram_storage)); pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { printk(KERN_ERR "save_secure_sram() returns %08x\n", ret); while (1) ; } } }
/*
 * This sets pwrdm state (other than mpu & core. Currently only ON &
 * RET are supported. Function is assuming that clkdm doesn't have
 * hw_sup mode enabled.
 *
 * Returns 0 on success or when no change was needed, -EINVAL for a bad
 * powerdomain pointer, or the pwrdm_set_next_pwrst() error code.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = 0;	/* set when the clkdm had to be force-woken */
	int ret = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	/* Walk down to the nearest power state the domain supports */
	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	/*
	 * If the domain is currently below ON, wake its clockdomain so
	 * the new next-state programming can take effect.
	 */
	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
		sleep_switch = 1;
		pwrdm_wait_transition(pwrdm);
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
		       pwrdm->name);
		goto err;
	}

	if (sleep_switch) {
		/* Let the clockdomain idle again and log the transition */
		omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		pwrdm_wait_transition(pwrdm);
		pwrdm_state_switch(pwrdm);
	}

err:
	return ret;
}
/*
 * omap2_enter_full_retention - attempt full-chip RETENTION.
 * Programs MPU to retention, quiesces GPIO/UART/console, runs the SRAM
 * suspend sequence, then restores clocks and acknowledges wake-up
 * events on resume.
 */
static void omap2_enter_full_retention(void)
{
	u32 l;
	struct timespec ts_preidle, ts_postidle, ts_idle;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode.
	 */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	if (omap2_pm_debug) {
		omap2_pm_dump(0, 0, 0);
		getnstimeofday(&ts_preidle);
	}

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Block console output in case it is on one of the OMAP UARTs */
	if (!is_suspending())
		if (!console_trylock())
			goto no_sleep;

	omap_uart_prepare_idle(0);
	omap_uart_prepare_idle(1);
	omap_uart_prepare_idle(2);

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

	omap_uart_resume_idle(2);
	omap_uart_resume_idle(1);
	omap_uart_resume_idle(0);

	if (!is_suspending())
		console_unlock();

no_sleep:
	if (omap2_pm_debug) {
		unsigned long long tmp;

		getnstimeofday(&ts_postidle);
		ts_idle = timespec_sub(ts_postidle, ts_preidle);
		/*
		 * NOTE(review): timespec_to_ns() already yields ns;
		 * multiplying by NSEC_PER_USEC looks like it should be a
		 * division — confirm the scale omap2_pm_dump() expects.
		 */
		tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
		omap2_pm_dump(0, 1, tmp);
	}
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
}
/* * OMAP4 MPUSS Low Power Entry Function * * The purpose of this function is to manage low power programming * of OMAP4 MPUSS subsystem * Paramenters: * cpu : CPU ID * power_state: Targetted Low power state. * * MPUSS Low power states * The basic rule is that the MPUSS power domain must be at the higher or * equal power state (state that consume more power) than the higher of the * two CPUs. For example, it is illegal for system power to be OFF, while * the power of one or both of the CPU is DORMANT. When an illegal state is * entered, then the hardware behavior is unpredictable. * * MPUSS state for the context save * save_state = * 0 - Nothing lost and no need to save: MPUSS INACTIVE * 1 - CPUx L1 and logic lost: MPUSS CSWR * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF */ void omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) { unsigned int save_state, wakeup_cpu; if (cpu > NR_CPUS) return; /* * Low power state not supported on ES1.0 silicon */ if (omap_rev() == OMAP4430_REV_ES1_0) { wmb(); do_wfi(); return; } switch (power_state) { case PWRDM_POWER_ON: case PWRDM_POWER_INACTIVE: save_state = 0; break; case PWRDM_POWER_OFF: save_state = 1; setup_wakeup_routine(cpu); save_local_timers(cpu); break; case PWRDM_POWER_RET: /* * CPUx CSWR is invalid hardware state. 
Additionally * CPUx OSWR doesn't give any gain vs CPUxOFF and * hence not supported */ default: /* Invalid state */ pr_debug("Invalid CPU low power state\n"); return; } /* * MPUSS book keeping should be executed by master * CPU only which is the last CPU to go down */ if (cpu) goto cpu_prepare; /* * Check MPUSS next state and save GIC if needed * GIC lost during MPU OFF and OSWR */ pwrdm_clear_all_prev_pwrst(mpuss_pd); if (omap4_device_off_read_next_state() && (omap_type() != OMAP2_DEVICE_TYPE_GP)) { /* FIXME: Check if this can be optimised */ save_secure_all(); save_ivahd_tesla_regs(); save_l3instr_regs(); save_state = 3; goto cpu_prepare; } switch (pwrdm_read_next_pwrst(mpuss_pd)) { case PWRDM_POWER_ON: case PWRDM_POWER_INACTIVE: /* No need to save MPUSS context */ break; case PWRDM_POWER_RET: /* MPUSS OSWR, logic lost */ if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) { if (omap_type() != OMAP2_DEVICE_TYPE_GP) { save_gic_wakeupgen_secure(); save_l3instr_regs(); } else { save_gic(); omap4_wakeupgen_save(); } save_state = 2; } break; case PWRDM_POWER_OFF: /* MPUSS OFF */ if (omap_type() != OMAP2_DEVICE_TYPE_GP) { save_secure_ram(); save_gic_wakeupgen_secure(); save_ivahd_tesla_regs(); save_l3instr_regs(); } else { save_gic(); omap4_wakeupgen_save(); } save_state = 3; break; default: /* Fall through */ ; } /* * Program the CPU targeted state */ cpu_prepare: clear_cpu_prev_pwrst(cpu); if (cpu) pwrdm_set_next_pwrst(cpu1_pwrdm, power_state); else pwrdm_set_next_pwrst(cpu0_pwrdm, power_state); scu_pwrst_prepare(cpu, power_state); /* * Call low level routine to enter to * targeted power state */ __omap4_cpu_suspend(cpu, save_state); wakeup_cpu = hard_smp_processor_id(); /* * Restore the CPUx and mpuss power state to ON otherwise * CPUx power domain can transitions to programmed low power * state while doing WFI outside the low powe code. 
On HS devices, * CPUx can do WFI outside idle thread which can result in * power domain domain transition if the previous state was * programmed to OFF/RET. */ if (wakeup_cpu) { pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON); } else { pwrdm_set_next_pwrst(cpu0_pwrdm, PWRDM_POWER_ON); pwrdm_set_next_pwrst(mpuss_pd, PWRDM_POWER_ON); } /* * Check the CPUx previous power state */ if (read_cpu_prev_pwrst(wakeup_cpu) == PWRDM_POWER_OFF) { cpu_init(); restore_mmu_table_entry(); restore_local_timers(wakeup_cpu); } /* * Check MPUSS previous power state and enable * GIC if needed. */ switch (pwrdm_read_prev_pwrst(mpuss_pd)) { case PWRDM_POWER_ON: /* No need to restore */ break; case PWRDM_POWER_RET: /* FIXME: * if (pwrdm_read_prev_logic_pwrst(mpuss_pd) == PWRDM_POWER_OFF) */ if (omap_readl(0x4a306324) == PWRDM_POWER_OFF) break; case PWRDM_POWER_OFF: /* * Enable GIC distributor */ if (!wakeup_cpu) { if ((omap_type() == OMAP2_DEVICE_TYPE_GP) && omap4_device_off_read_prev_state()) { restore_gic(); omap4_wakeupgen_restore(); } enable_gic_distributor(); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { restore_ivahd_tesla_regs(); restore_l3instr_regs(); } } /* * Enable GIC cpu inrterface */ enable_gic_cpu_interface(); break; default: ; } }
/*
 * prcm_setup_regs - configure default PRCM power states and wake events.
 * Programs CORE/MPU retention targets, force-powers down DSP and GFX,
 * enables hardware-supervised idle for all clockdomains, and sets up
 * voltage/clock setup times and wake-up sources.
 */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/*
	 * Enable autoidle
	 * XXX This should be handled by hwmod code or PRCM init code
	 */
	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
				OMAP2_PRCM_SYSCONFIG_OFFSET);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	/* Set CORE powerdomain's next power state to RETENTION */
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */
	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	clkdm_sleep(gfx_clkdm);

	/* Enable hardware-supervised idle for all clkdms */
	clkdm_for_each(clkdms_setup, NULL);
	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

	/*
	 * REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation
	 */
	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_VOLTSETUP_OFFSET);
	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
				OMAP24XX_MEMRETCTRL_MASK |
				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
				WKUP_MOD, PM_WKEN);
}
/** * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function * The purpose of this function is to manage low power programming * of OMAP4 MPUSS subsystem * @cpu : CPU ID * @power_state: Low power state. * * MPUSS states for the context save: * save_state = * 0 - Nothing lost and no need to save: MPUSS INACTIVE * 1 - CPUx L1 and logic lost: MPUSS CSWR * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF */ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) { struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); unsigned int save_state = 0; unsigned int wakeup_cpu; if (omap_rev() == OMAP4430_REV_ES1_0) return -ENXIO; switch (power_state) { case PWRDM_POWER_ON: case PWRDM_POWER_INACTIVE: save_state = 0; break; case PWRDM_POWER_OFF: save_state = 1; break; case PWRDM_POWER_RET: default: /* * CPUx CSWR is invalid hardware state. Also CPUx OSWR * doesn't make much scense, since logic is lost and $L1 * needs to be cleaned because of coherency. This makes * CPUx OSWR equivalent to CPUX OFF and hence not supported */ WARN_ON(1); return -ENXIO; } pwrdm_pre_transition(NULL); /* * Check MPUSS next state and save interrupt controller if needed. * In MPUSS OSWR or device OFF, interrupt controller contest is lost. */ mpuss_clear_prev_logic_pwrst(); if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) && (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF)) save_state = 2; cpu_clear_prev_logic_pwrst(cpu); pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); scu_pwrst_prepare(cpu, power_state); l2x0_pwrst_prepare(cpu, save_state); /* * Call low level function with targeted low power state. */ cpu_suspend(save_state, omap4_finish_suspend); /* * Restore the CPUx power state to ON otherwise CPUx * power domain can transitions to programmed low power * state while doing WFI outside the low powe code. 
On * secure devices, CPUx does WFI which can result in * domain transition */ wakeup_cpu = smp_processor_id(); pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); pwrdm_post_transition(NULL); return 0; }
/** * omap3_enter_idle - Programs OMAP3 to enter the specified state * @dev: cpuidle device * @state: The target state to be programmed * * Called from the CPUidle framework to program the device to the * specified target state selected by the governor. */ static int omap3_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { struct omap3_processor_cx *cx = cpuidle_get_statedata(state); struct timespec ts_preidle, ts_postidle, ts_idle; u32 mpu_state = cx->mpu_state, core_state = cx->core_state, cam_state = 0, dss_state = 0, per_state = 0; /* modified for mp3 current -- begin */ u32 mpu_prev,core_prev =0 ; current_cx_state = *cx; int requested=cx->type; static int cam_deny = 0; u32 wkdep_per_value = 0; wkdep_per_value = omap_readl(0x483070C8); /* modified for mp3 current -- end*/ /* Used to keep track of the total time in idle */ getnstimeofday(&ts_preidle); local_irq_disable(); local_fiq_disable(); pwrdm_set_next_pwrst(mpu_pd, mpu_state); pwrdm_set_next_pwrst(core_pd, core_state); if (omap_irq_pending() || need_resched()) goto return_sleep_time; /* Keep CAM domain active during ISP usecases */ if(( front_cam_in_use || back_cam_in_use || (stream_on)) ){ pwrdm_for_each_clkdm(cam_pd, _cpuidle_deny_idle); cam_deny = 1 ; } /* if (cx->type == OMAP3_STATE_C1) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); } */ if (dss_suspend_flag && audio_on) { omap_writel(wkdep_per_value & ~(1<<1) ,0x483070C8 );//PM_WKDEP_PER } /* Execute ARM wfi */ omap_sram_idle(); /* if (cx->type == OMAP3_STATE_C1) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); } */ /* Keep CAM domain active during ISP usecases */ if(cam_deny){ pwrdm_for_each_clkdm(cam_pd, _cpuidle_allow_idle); cam_deny = 0; } if(!dss_suspend_flag){ omap_writel(wkdep_per_value, 0x483070C8); //PM_WKDEP_PER } core_state = pwrdm_read_prev_pwrst(core_pd); mpu_state = pwrdm_read_prev_pwrst(mpu_pd); cam_state = 
pwrdm_read_prev_pwrst(cam_pd); dss_state = pwrdm_read_prev_pwrst(dss_pd); per_state = pwrdm_read_prev_pwrst(per_pd); //printk(KERN_INFO "requested C%d, actual core=%d, mpu=%d cam = %d dss = %d per = %d \n", requested, core_state, mpu_state,cam_state); return_sleep_time: getnstimeofday(&ts_postidle); ts_idle = timespec_sub(ts_postidle, ts_preidle); /* modified for mp3 current -- begin */ mpu_prev = omap_readl(0x483069E8); mpu_prev = mpu_prev & 0x3 ; core_prev = omap_readl(0x48306AE8); core_prev = core_prev & 0x3 ; /* modified for mp3 current -- end */ local_irq_enable(); local_fiq_enable(); return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; }
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 *
 * Returns the time spent idle, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/*
	 * LGE vendor change (20120922, "PM from froyo"): gate the
	 * deny-idle on display/wakeup flags rather than the C1 state.
	 * NOTE(review): the flags may change across the wfi (e.g.
	 * doing_wakeup flipping), which could leave deny/allow
	 * unbalanced — verify.
	 */
#if defined(CONFIG_PRODUCT_LGE_LU6800)
	/* Deny idle for C1 */
	if (te_cpu_idle_block == 1 || doing_wakeup == 1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}
#else
	/* Deny idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}
#endif

	/* Execute ARM wfi */
	omap_sram_idle(false);

#if defined(CONFIG_PRODUCT_LGE_LU6800)
	/* Re-allow idle for C1 */
	if (te_cpu_idle_block == 1 || doing_wakeup == 1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}
#else
	/* Re-allow idle for C1 */
	if (state == &dev->states[0]) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}
#endif

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}
/*
 * prcm_setup_regs - one-time PRCM setup for OMAP24xx power management.
 *
 * Programs autoidle, wakeup dependencies, CORE/MPU retention targets,
 * forces DSP/GFX off, enables hardware-supervised clockdomain control,
 * clock autoidle for all modules, PLL/APLL autoidle, voltage-transition
 * timing and wake-up event sources.  Register write order follows the
 * OMAP24xx PRCM programming model; do not reorder casually.
 */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/* Enable autoidle */
	prm_write_mod_reg(OMAP24XX_AUTOIDLE, OCP_MOD,
			  OMAP24XX_PRM_SYSCONFIG_OFFSET);

	/* Set all domain wakeup dependencies */
	prm_write_mod_reg(OMAP_EN_WKUP_MASK, MPU_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP24XX_DSP_MOD, PM_WKDEP);
	prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
	prm_write_mod_reg(0, CORE_MOD, PM_WKDEP);
	if (cpu_is_omap2430())
		prm_write_mod_reg(0, OMAP2430_MDM_MOD, PM_WKDEP);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	/* Set CORE powerdomain's next power state to RETENTION */
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */
	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(gfx_clkdm);

	/* Enable clockdomain hardware-supervised control for all clkdms */
	clkdm_for_each(_pm_clkdm_enable_hwsup, NULL);

	/* Enable clock autoidle for all domains */
	cm_write_mod_reg(OMAP24XX_AUTO_CAM |
			 OMAP24XX_AUTO_MAILBOXES |
			 OMAP24XX_AUTO_WDT4 |
			 OMAP2420_AUTO_WDT3 |
			 OMAP24XX_AUTO_MSPRO |
			 OMAP2420_AUTO_MMC |
			 OMAP24XX_AUTO_FAC |
			 OMAP2420_AUTO_EAC |
			 OMAP24XX_AUTO_HDQ |
			 OMAP24XX_AUTO_UART2 |
			 OMAP24XX_AUTO_UART1 |
			 OMAP24XX_AUTO_I2C2 |
			 OMAP24XX_AUTO_I2C1 |
			 OMAP24XX_AUTO_MCSPI2 |
			 OMAP24XX_AUTO_MCSPI1 |
			 OMAP24XX_AUTO_MCBSP2 |
			 OMAP24XX_AUTO_MCBSP1 |
			 OMAP24XX_AUTO_GPT12 |
			 OMAP24XX_AUTO_GPT11 |
			 OMAP24XX_AUTO_GPT10 |
			 OMAP24XX_AUTO_GPT9 |
			 OMAP24XX_AUTO_GPT8 |
			 OMAP24XX_AUTO_GPT7 |
			 OMAP24XX_AUTO_GPT6 |
			 OMAP24XX_AUTO_GPT5 |
			 OMAP24XX_AUTO_GPT4 |
			 OMAP24XX_AUTO_GPT3 |
			 OMAP24XX_AUTO_GPT2 |
			 OMAP2420_AUTO_VLYNQ |
			 OMAP24XX_AUTO_DSS,
			 CORE_MOD, CM_AUTOIDLE1);
	cm_write_mod_reg(OMAP24XX_AUTO_UART3 |
			 OMAP24XX_AUTO_SSI |
			 OMAP24XX_AUTO_USB,
			 CORE_MOD, CM_AUTOIDLE2);
	cm_write_mod_reg(OMAP24XX_AUTO_SDRC |
			 OMAP24XX_AUTO_GPMC |
			 OMAP24XX_AUTO_SDMA,
			 CORE_MOD, CM_AUTOIDLE3);
	cm_write_mod_reg(OMAP24XX_AUTO_PKA |
			 OMAP24XX_AUTO_AES |
			 OMAP24XX_AUTO_RNG |
			 OMAP24XX_AUTO_SHA |
			 OMAP24XX_AUTO_DES,
			 CORE_MOD, OMAP24XX_CM_AUTOIDLE4);

	cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI, OMAP24XX_DSP_MOD,
			 CM_AUTOIDLE);

	/* Put DPLL and both APLLs into autoidle mode */
	cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
			 (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
			 (0x03 << OMAP24XX_AUTO_54M_SHIFT),
			 PLL_MOD, CM_AUTOIDLE);

	cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL |
			 OMAP24XX_AUTO_WDT1 |
			 OMAP24XX_AUTO_MPU_WDT |
			 OMAP24XX_AUTO_GPIOS |
			 OMAP24XX_AUTO_32KSYNC |
			 OMAP24XX_AUTO_GPT1,
			 WKUP_MOD, CM_AUTOIDLE);

	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation */
	prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
			  OMAP24XX_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
			  OMAP24XX_PRCM_VOLTSETUP_OFFSET);
	prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT |
			  (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
			  OMAP24XX_MEMRETCTRL |
			  (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
			  (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
			  OMAP24XX_GR_MOD, OMAP24XX_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	prm_write_mod_reg(OMAP24XX_EN_GPIOS | OMAP24XX_EN_GPT1,
			  WKUP_MOD, PM_WKEN);
}
/** * power_domain_test - Test the power domain APIs * * Test the power domain APIs for all power domains * */ void power_domain_test() { int bank, i; int val = -EINVAL; static struct powerdomain *p, *pwrdm; for (i = 0; powerdomains_omap[i] != NULL; i++) { p = powerdomains_omap[i]; pwrdm = pwrdm_lookup(p->name); if (pwrdm) printk(KERN_INFO "PWR DM No%d = %s\n", i, pwrdm->name); else printk(KERN_INFO "PWR DM %s not supported\n", p->name); } /* i starts from 1 as gfx_pwrdm not supported in ES3.1.1 */ for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_add_clkdm(p = powerdomains_omap[i], &dummy_clkdm); if (val == 0) printk(KERN_INFO "Clock Domain Registered for %s\n", p->name); else if (val == -EINVAL) printk(KERN_ERR "Clock Domain Register FAILED!!! for" " %s\n", p->name); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_del_clkdm(p = powerdomains_omap[i], &dummy_clkdm); if (val == 0) printk(KERN_INFO "Clock Domain Unregistered for %s\n", p->name); else if (val == -EINVAL) printk(KERN_ERR "Clock Domain Unregister FAILED!!! for" " %s\n", p->name); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_get_mem_bank_count(p = powerdomains_omap[i]); printk(KERN_INFO "Bnk Cnt for %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_logic_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "PwrState of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_logic_retst(p = powerdomains_omap[i], PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Logic RET State OFF for %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported for %s\n", p->name); else printk(KERN_ERR "Set Logic RET State OFF FAILED!!!" 
" with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_logic_retst(p = powerdomains_omap[i], PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Logic RET State RET for %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported for %s\n", p->name); else printk(KERN_ERR "Logic RET State RET FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "PwrState of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to OFF\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to OFF FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to RET\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to RET FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_ON); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to ON\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "ON not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to ON FAILED!!!" 
" with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_next_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "Next Power State of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "Current Power State of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_onst(p = powerdomains_omap[i], bank, PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Memory ON State OFF for %s" " Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported" " for %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State OFF FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_onst(p = powerdomains_omap[i], bank, PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Memory ON State RET for %s" " Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported" " for %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State RET FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_retst(p = powerdomains_omap[i], bank, PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Memory RET State OFF for" " %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported for" " %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State OFF FAILED!!!" 
" with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_retst(p = powerdomains_omap[i], bank, PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Memory RET State RET for" " %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported for" " %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "MEM PWRST Set FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_read_mem_pwrst(p = powerdomains_omap[i], bank); if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else if (val == -EINVAL) printk(KERN_ERR "MEM PWRST Read FAILED!!!" " with value %d\n", val); else printk(KERN_INFO "MEM PWRST for bank %d of" " %s = %d\n", bank, p->name, val); } } }
/*
 * omap2_enter_full_retention - put the OMAP2 system into full retention.
 *
 * Drops the oscillator reference, clears stale wake-up events, programs
 * MPU for retention, then jumps to the SRAM suspend code.  On wake-up,
 * restores clocks/GPIO state and acknowledges/masks PRCM wake events.
 * The register write ordering here is part of the hardware sequence;
 * do not reorder.  Always returns 0.
 */
static int omap2_enter_full_retention(void)
{
	u32 l;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode.
	 */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

no_sleep:
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD,
				OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	return 0;
}
/** * omap3_enter_idle - Programs OMAP3 to enter the specified state * @dev: cpuidle device * @state: The target state to be programmed * * Called from the CPUidle framework to program the device to the * specified target state selected by the governor. */ static int omap3_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { struct omap3_processor_cx *cx = cpuidle_get_statedata(state); struct timespec ts_preidle, ts_postidle, ts_idle; u32 core_next_state, per_next_state = 0, per_saved_state = 0; u32 mpu_state = cx->mpu_state, core_state = cx->core_state; current_cx_state = *cx; /* Used to keep track of the total time in idle */ getnstimeofday(&ts_preidle); local_irq_disable(); local_fiq_disable(); pwrdm_set_next_pwrst(mpu_pd, mpu_state); pwrdm_set_next_pwrst(core_pd, core_state); /* * Don't allow PER to go to OFF in idle state * transitions. * This is a tempory fix for display flashing issue * which occurs when off mode is enabled */ per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); if (per_next_state == PWRDM_POWER_OFF) per_next_state = PWRDM_POWER_RET; /* Are we changing PER target state? */ if (per_next_state != per_saved_state) pwrdm_set_next_pwrst(per_pd, per_next_state); if (omap_irq_pending() || need_resched()) goto return_sleep_time; if (cx->type == OMAP3_STATE_C1) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); } /* Execute ARM wfi */ omap_sram_idle(); if (cx->type == OMAP3_STATE_C1) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); } return_sleep_time: getnstimeofday(&ts_postidle); ts_idle = timespec_sub(ts_postidle, ts_preidle); /* Restore original PER state if it was modified */ if (per_next_state != per_saved_state) pwrdm_set_next_pwrst(per_pd, per_saved_state); local_irq_enable(); local_fiq_enable(); return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC; }
/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.  If the chosen
 * state has been marked invalid, falls back to the next lower valid
 * state before programming the hardware.
 *
 * Returns the time spent idle, in microseconds.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state, core_state;
	u8 idx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	/*
	 * Check if the chosen idle state is valid.
	 * If no, drop down to a lower valid state.
	 *
	 * (Expects the lowest idle state to be always VALID --
	 * if the scan finds nothing valid, idx lands on C1.)
	 */
	if (!cx->valid) {
		for (idx = (cx->type - 1); idx > OMAP3_STATE_C1; idx--) {
			if (omap3_power_states[idx].valid)
				break;
		}
		/* Report the substituted state back to the governor */
		state = &(dev->states[idx]);
		dev->last_state = state;
		cx = cpuidle_get_statedata(state);
	}

	current_cx_state = *cx;
	mpu_state = cx->mpu_state;
	core_state = cx->core_state;

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	/* Skip the wfi entirely if an interrupt is already pending */
	if (omap_irq_pending())
		goto return_sleep_time;

	/* Deny idle for C1 */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	/* Re-allow idle for C1 */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	/* ns -> us */
	return (u32)timespec_to_ns(&ts_idle)/1000;
}
/*
 * prcm_setup_regs - one-time PRCM setup for OMAP24xx power management.
 *
 * Programs autoidle, CORE/MPU retention targets, forces DSP/GFX off,
 * configures clockdomain wakeup dependencies and hardware-supervised
 * idle, clock autoidle for all modules, PLL/APLL autoidle,
 * voltage-transition timing and wake-up event sources.  Register write
 * order follows the OMAP24xx PRCM programming model; do not reorder.
 */
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/* Enable autoidle */
	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
				OMAP2_PRCM_SYSCONFIG_OFFSET);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	/* Set CORE powerdomain's next power state to RETENTION */
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */
	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(gfx_clkdm);

	/*
	 * Clear clockdomain wakeup dependencies and enable
	 * hardware-supervised idle for all clkdms
	 */
	clkdm_for_each(clkdms_setup, NULL);
	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);

	/* Enable clock autoidle for all domains */
	omap2_cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
			       OMAP24XX_AUTO_MAILBOXES_MASK |
			       OMAP24XX_AUTO_WDT4_MASK |
			       OMAP2420_AUTO_WDT3_MASK |
			       OMAP24XX_AUTO_MSPRO_MASK |
			       OMAP2420_AUTO_MMC_MASK |
			       OMAP24XX_AUTO_FAC_MASK |
			       OMAP2420_AUTO_EAC_MASK |
			       OMAP24XX_AUTO_HDQ_MASK |
			       OMAP24XX_AUTO_UART2_MASK |
			       OMAP24XX_AUTO_UART1_MASK |
			       OMAP24XX_AUTO_I2C2_MASK |
			       OMAP24XX_AUTO_I2C1_MASK |
			       OMAP24XX_AUTO_MCSPI2_MASK |
			       OMAP24XX_AUTO_MCSPI1_MASK |
			       OMAP24XX_AUTO_MCBSP2_MASK |
			       OMAP24XX_AUTO_MCBSP1_MASK |
			       OMAP24XX_AUTO_GPT12_MASK |
			       OMAP24XX_AUTO_GPT11_MASK |
			       OMAP24XX_AUTO_GPT10_MASK |
			       OMAP24XX_AUTO_GPT9_MASK |
			       OMAP24XX_AUTO_GPT8_MASK |
			       OMAP24XX_AUTO_GPT7_MASK |
			       OMAP24XX_AUTO_GPT6_MASK |
			       OMAP24XX_AUTO_GPT5_MASK |
			       OMAP24XX_AUTO_GPT4_MASK |
			       OMAP24XX_AUTO_GPT3_MASK |
			       OMAP24XX_AUTO_GPT2_MASK |
			       OMAP2420_AUTO_VLYNQ_MASK |
			       OMAP24XX_AUTO_DSS_MASK,
			       CORE_MOD, CM_AUTOIDLE1);
	omap2_cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
			       OMAP24XX_AUTO_SSI_MASK |
			       OMAP24XX_AUTO_USB_MASK,
			       CORE_MOD, CM_AUTOIDLE2);
	omap2_cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
			       OMAP24XX_AUTO_GPMC_MASK |
			       OMAP24XX_AUTO_SDMA_MASK,
			       CORE_MOD, CM_AUTOIDLE3);
	omap2_cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
			       OMAP24XX_AUTO_AES_MASK |
			       OMAP24XX_AUTO_RNG_MASK |
			       OMAP24XX_AUTO_SHA_MASK |
			       OMAP24XX_AUTO_DES_MASK,
			       CORE_MOD, OMAP24XX_CM_AUTOIDLE4);

	omap2_cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
			       CM_AUTOIDLE);

	/* Put DPLL and both APLLs into autoidle mode */
	omap2_cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
			       (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
			       (0x03 << OMAP24XX_AUTO_54M_SHIFT),
			       PLL_MOD, CM_AUTOIDLE);

	omap2_cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
			       OMAP24XX_AUTO_WDT1_MASK |
			       OMAP24XX_AUTO_MPU_WDT_MASK |
			       OMAP24XX_AUTO_GPIOS_MASK |
			       OMAP24XX_AUTO_32KSYNC_MASK |
			       OMAP24XX_AUTO_GPT1_MASK,
			       WKUP_MOD, CM_AUTOIDLE);

	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation */
	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
				OMAP2_PRCM_VOLTSETUP_OFFSET);
	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
				OMAP24XX_MEMRETCTRL_MASK |
				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK |
				OMAP24XX_EN_GPT1_MASK,
				WKUP_MOD, PM_WKEN);
}
/*
 * Initialise OMAP4 MPUSS
 *
 * Fills in the per-CPU PM info (SAR RAM save areas and powerdomain
 * handles) for CPU0/CPU1, looks up the MPUSS powerdomain, records the
 * device type in SAR scratchpad for the low-level wakeup code, and
 * saves the L2 cache controller context.
 *
 * Returns 0 on success, -ENODEV on OMAP4430 ES1.0 (PM unsupported)
 * or when a required powerdomain lookup fails.
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	sar_base = omap4_get_sar_ram_base();

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	/* Same again for CPU1 with its own SAR offsets */
	pm_info = &per_cpu(omap4_pm_info, 0x1);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	/* Save device type on scratchpad for low level code to use */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		__raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
	else
		__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);

	save_l2x0_context();

	return 0;
}
/*
 * omap4_boot_secondary - release a secondary CPU out of reset/WFE.
 *
 * Writes the boot state into AuxCoreBoot0 (via the secure API or the
 * WakeupGen mirror), applies the SGI-wakeup and GIC-distributor
 * errata workarounds when re-onlining CPU1 after a hotplug, and then
 * sends the wakeup IPI.  Serialised against the secondary's startup
 * path via boot_lock.  Always returns 0.
 */
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Cached across calls: lookups are done once, 'booted' tells a
	 * first boot apart from a hotplug re-online. */
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * it's wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup_nolock(cpu1_clkdm);
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle_nolock(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			/* Spin until CPU1's wakeup path re-enables GICD */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		/* First boot: CPU1 is parked in WFE -- just signal it */
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}