/*
 * am33xx_pm_init - set up AM33XX power management (legacy boot path)
 *
 * Looks up the power/clock domains used by the PM code, programs the
 * CEFUSE domain to power off after boot, and initialises the WKUP-M3
 * coprocessor.  Suspend ops are registered only if the M3 firmware came
 * up, since deep sleep depends on it.
 *
 * Returns 0 on success, -ENODEV on a non-AM33xx SoC, -EINVAL when the
 * MPU omap_device is missing, or the wkup_m3_init() error code.
 */
static int __init am33xx_pm_init(void)
{
	/*
	 * Fix: 'ret' was uninitialised and returned as-is when
	 * CONFIG_SUSPEND is disabled (undefined behaviour).
	 */
	int ret = 0;

	if (!cpu_is_am33xx())
		return -ENODEV;

	pr_info("Power Management for AM33XX family\n");

#ifdef CONFIG_SUSPEND

#ifdef CONFIG_TI_PM_DISABLE_VT_SWITCH
	/* Skip the console VT switch across suspend/resume. */
	pm_set_vt_switch(0);
#endif

	(void) clkdm_for_each(clkdms_setup, NULL);

	/* CEFUSE domain should be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (cefuse_pwrdm == NULL)
		printk(KERN_ERR "Failed to get cefuse_pwrdm\n");
	else
		pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF);

	/* Failed lookups below are only logged; PM continues degraded. */
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	if (gfx_pwrdm == NULL)
		printk(KERN_ERR "Failed to get gfx_pwrdm\n");

	gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm");
	if (gfx_l3_clkdm == NULL)
		printk(KERN_ERR "Failed to get gfx_l3_clkdm\n");

	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");
	if (gfx_l4ls_clkdm == NULL)
		printk(KERN_ERR "Failed to get gfx_l4ls_gfx_clkdm\n");

	mpu_dev = omap_device_get_by_hwmod_name("mpu");
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

	ret = wkup_m3_init();
	if (ret) {
		pr_err("Could not initialise WKUP_M3. "
		       "Power management will be compromised\n");
		enable_deep_sleep = false;
	}

	if (enable_deep_sleep)
		suspend_set_ops(&am33xx_pm_ops);

#endif /* CONFIG_SUSPEND */

	return ret;
}
/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
	int i, count = 0;
	struct omap3_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	/* Cache the powerdomains consulted by the idle-enter paths. */
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	omap_init_power_states();
	cpuidle_register_driver(&omap3_idle_driver);

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	/* Copy only the valid C-states into the device's compact table. */
	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cx = &omap3_power_states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;
		cpuidle_set_statedata(state, cx);
		state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
		state->target_residency = cx->threshold;
		state->flags = cx->flags;
		/* Bus-master-checking states take the _bm entry path. */
		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
			omap3_enter_idle_bm : omap3_enter_idle;
		if (cx->type == OMAP3_STATE_C1)
			dev->safe_state = state;
		sprintf(state->name, "C%d", count+1);
		/*
		 * NOTE(review): strncpy() does not guarantee NUL
		 * termination if cx->desc is exactly CPUIDLE_DESC_LEN
		 * long — confirm descriptions are shorter.
		 */
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		count++;
	}

	if (!count)
		return -EINVAL;
	dev->state_count = count;

	/* Cap the deepest reachable state per the off-mode policy. */
	if (enable_off_mode)
		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
	else
		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
			__func__);
		return -EIO;
	}

	return 0;
}
/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	int cpu_id, i, count = 0;
	struct omap4_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	/* Powerdomains referenced by the idle-enter paths. */
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");

	omap_init_power_states();
	cpuidle_register_driver(&omap4_idle_driver);

	/* Build and register a per-CPU state table for each online CPU. */
	for_each_cpu(cpu_id, cpu_online_mask) {
		pr_err("CPUidle for CPU%d registered\n", cpu_id);
		dev = &per_cpu(omap4_idle_dev, cpu_id);
		dev->cpu = cpu_id;
		count = 0;
		/* Copy only the valid C-states into the compact table. */
		for (i = OMAP4_STATE_C1; i < OMAP4_MAX_STATES; i++) {
			cx = &omap4_power_states[i];
			state = &dev->states[count];

			if (!cx->valid)
				continue;
			cpuidle_set_statedata(state, cx);
			state->exit_latency =
				cx->sleep_latency + cx->wakeup_latency;
			state->target_residency = cx->threshold;
			state->flags = cx->flags;
			if (cx->type == OMAP4_STATE_C1)
				dev->safe_state = state;
			/* Bus-master-checking states use the _bm path. */
			state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
				omap4_enter_idle_bm : omap4_enter_idle;
			sprintf(state->name, "C%d", count+1);
			/*
			 * NOTE(review): strncpy() may leave state->desc
			 * unterminated when cx->desc is exactly
			 * CPUIDLE_DESC_LEN long — confirm.
			 */
			strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
			count++;
		}

		if (!count)
			return -EINVAL;
		dev->state_count = count;

		if (cpuidle_register_device(dev)) {
			printk(KERN_ERR "%s: CPUidle register device failed\n",
				__func__);
			return -EIO;
		}
	}
	/*
	 * NOTE(review): the function body appears truncated here in this
	 * extraction — the expected trailing "return 0;" and closing brace
	 * are missing.  Confirm against the original file.
	 */
/*
 * mapphone_panel_init - register the Mapphone display devices
 *
 * Requests the panel reset / power-supply / HDMI-5V GPIOs, registers the
 * DSS manager and panel platform devices and initialises the display
 * subsystem.  All GPIO failures unwind only the GPIOs that were actually
 * acquired.
 */
void __init mapphone_panel_init(void)
{
	int ret;

	dss_pwrdm = pwrdm_lookup("dss_pwrdm");
	if (!dss_pwrdm)
		pr_info("%s: Not found dss_pwrdm\n", __func__);

	if (mapphone_dt_panel_init())
		printk(KERN_INFO "panel: using non-dt configuration\n");

	mapphone_panel_get_fb_info();
	omapfb_set_platform_data(&mapphone_fb_data);

	ret = gpio_request(mapphone_panel_data.reset_gpio, "display reset");
	if (ret) {
		printk(KERN_ERR "failed to get display reset gpio\n");
		return;	/* nothing acquired yet, nothing to free */
	}

	if (mapphone_displ_pwr_sup_en != 0) {
		printk(KERN_INFO "DT: display power supply en = %d\n",
			mapphone_displ_pwr_sup_en);

		ret = gpio_request(mapphone_displ_pwr_sup_en,
			"LCD-pwr_sup_en");
		if (ret) {
			printk(KERN_ERR "failed to req for LCD-pwr_sup_en\n");
			goto failed_reset;
		}
		gpio_direction_output(mapphone_displ_pwr_sup_en, 1);
	}

	if (mapphone_feature_hdmi && mapphone_hdmi_5v_enable != 0) {
		ret = gpio_request(mapphone_hdmi_5v_enable, "HDMI-5V-En");
		if (ret) {
			printk(KERN_ERR "Failed hdmi 5v en gpio request\n");
			/*
			 * Fix: the old code jumped to a label that freed
			 * mapphone_hdmi_5v_enable even though its request
			 * just failed, and it leaked the power-supply GPIO.
			 */
			goto failed_pwr_sup;
		} else {
			printk(KERN_DEBUG "Enabing hdmi 5v gpio\n");
			gpio_direction_output(mapphone_hdmi_5v_enable, 1);
			gpio_set_value(mapphone_hdmi_5v_enable, 0);
		}
		platform_device_register(&omap_dssmgr_device);
	} else {
		/* Remove HDTV from the DSS device list */
		mapphone_dss_data.num_devices--;
	}

	platform_device_register(&omap_panel_device);
	omap_display_init(&mapphone_dss_data);
	return;

failed_pwr_sup:
	if (mapphone_displ_pwr_sup_en != 0)
		gpio_free(mapphone_displ_pwr_sup_en);
failed_reset:
	gpio_free(mapphone_panel_data.reset_gpio);
}
/*
 * omap4_mpuss_init - early MPU-subsystem PM setup (GIC/SAR variant)
 *
 * Computes the number of shared peripheral interrupts from the GIC
 * distributor, records the SAR BANK3 base used to save GIC context,
 * caches the CPU/MPUSS powerdomains and, on HS devices, allocates the
 * secure-RAM save area and tags the device type in SAR RAM.
 */
void __init omap4_mpuss_init(void)
{
	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	max_spi_irq = readl(gic_dist_base_addr + GIC_DIST_CTR) & 0x1f;
	max_spi_irq = (max_spi_irq + 1) * 32;
	if (max_spi_irq > max(1020, NR_IRQS))
		max_spi_irq = max(1020, NR_IRQS);

	/* The first 32 interrupt IDs are private (SGI/PPI), not SPIs. */
	max_spi_irq = (max_spi_irq - 32);
	max_spi_reg = max_spi_irq / 32;

	/*
	 * GIC needs to be saved in SAR_BANK3
	 */
	sar_bank3_base = sar_ram_base + SAR_BANK3_OFFSET;

	cpu0_pwrdm = pwrdm_lookup("cpu0_pwrdm");
	cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!cpu0_pwrdm || !cpu1_pwrdm || !mpuss_pd)
		pr_err("Failed to get lookup for CPUx/MPUSS pwrdm's\n");

	/*
	 * NOTE(review): clk_get() returns ERR_PTR() on failure but the
	 * result is never checked here — verify callers tolerate that.
	 */
	l3_main_3_ick = clk_get(NULL, "l3_main_3_ick");

	/*
	 * Check the OMAP type and store it to scratchpad
	 */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/* Memory not released */
		secure_ram = dma_alloc_coherent(NULL, OMAP4_SECURE_RAM_STORAGE,
			(dma_addr_t *)&omap4_secure_ram_phys, GFP_KERNEL);
		if (!secure_ram)
			pr_err("Unable to allocate secure ram storage\n");
		writel(0x1, sar_ram_base + OMAP_TYPE_OFFSET);
	} else {
		writel(0x0, sar_ram_base + OMAP_TYPE_OFFSET);
	}
}
/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
	struct cpuidle_device *dev;
	struct omap3_idle_statedata *cx;

	/* Powerdomains consulted by the idle-enter paths. */
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	cpuidle_register_driver(&omap3_idle_driver);
	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	/* C1 . MPU WFI + Core active */
	cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
	(&dev->states[0])->enter = omap3_enter_idle;
	dev->safe_state = &dev->states[0];
	cx->valid = 1;	/* C1 is always valid */
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C2 . MPU WFI + Core inactive */
	cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C3 . MPU CSWR + Core inactive */
	cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_ON;

	/* C4 . MPU OFF + Core inactive */
	cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_ON;

	/* C5 . MPU RET + Core RET */
	cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_RET;

	/* C6 . MPU OFF + Core RET */
	cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_RET;

	/* C7 . MPU OFF + Core OFF */
	cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
	/*
	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
	 * enable OFF mode in a stable form for previous revisions.
	 * We disable C7 state as a result.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
		cx->valid = 0;
		pr_warn("%s: core off state C7 disabled due to i583\n",
			__func__);
	}
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_OFF;

	dev->state_count = OMAP3_NUM_STATES;
	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
		       __func__);
		return -EIO;
	}

	return 0;
}
/*
 * omap4_boot_secondary - wake a secondary CPU during SMP bring-up
 *
 * Signals the secondary core through AuxCoreBoot0 (secure API or the
 * wakeupgen register), forces the CPU1 clock/power domains awake to
 * work around the SGI-wakeup limitation, applies the ROM/GICD erratum
 * workaround when required, and sends the wakeup IPI.  Always returns 0.
 */
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* Cached lazily on first boot; persists across hotplug cycles. */
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap4_secondary_startup() routine will hold the secondary core till
	 * the AuxCoreBoot1 register is updated with cpu state
	 * A barrier is added to ensure that write buffer is drained
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGI(Software Generated Interrupts) are not wakeup capable
	 * from low power states. This is known limitation on OMAP4 and
	 * needs to be worked around by using software forced clockdomain
	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
	 * software force wakeup. The clockdomain is then put back to
	 * hardware supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J
	 * Section :
	 * 4.3.4.2 Power States of CPU0 and CPU1
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * it's wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup(cpu1_clkdm);
		omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			/* Spin until CPU1 has re-enabled the distributor. */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
/*
 * omap2_pm_init - initialise OMAP2 power management
 *
 * Prints the PRCM revision, caches the power/clock domain and clock
 * handles used by the PM code, programs the PRCM registers, copies the
 * sleep/wakeup routines into SRAM and installs the idle handler.
 *
 * Returns 0 on success, or -ENODEV when a required clock is missing.
 */
int __init omap2_pm_init(void)
{
	u32 prcm_rev;

	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
	prcm_rev = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
	printk(KERN_INFO "PRCM revision %d.%d\n",
	       (prcm_rev >> 4) & 0x0f, prcm_rev & 0x0f);

	/* Cache the powerdomain handles; a failed lookup is only logged. */
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL)
		pr_err("PM: mpu_pwrdm not found\n");

	core_pwrdm = pwrdm_lookup("core_pwrdm");
	if (core_pwrdm == NULL)
		pr_err("PM: core_pwrdm not found\n");

	/* Cache the clockdomain handles the same way. */
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	if (mpu_clkdm == NULL)
		pr_err("PM: mpu_clkdm not found\n");

	wkup_clkdm = clkdm_lookup("wkup_clkdm");
	if (wkup_clkdm == NULL)
		pr_err("PM: wkup_clkdm not found\n");

	dsp_clkdm = clkdm_lookup("dsp_clkdm");
	if (dsp_clkdm == NULL)
		pr_err("PM: dsp_clkdm not found\n");

	gfx_clkdm = clkdm_lookup("gfx_clkdm");
	if (gfx_clkdm == NULL)
		pr_err("PM: gfx_clkdm not found\n");

	/* These clocks are mandatory; bail out when either is missing. */
	osc_ck = clk_get(NULL, "osc_ck");
	if (IS_ERR(osc_ck)) {
		printk(KERN_ERR "could not get osc_ck\n");
		return -ENODEV;
	}

	if (cpu_is_omap242x()) {
		emul_ck = clk_get(NULL, "emul_ck");
		if (IS_ERR(emul_ck)) {
			printk(KERN_ERR "could not get emul_ck\n");
			clk_put(osc_ck);
			return -ENODEV;
		}
	}

	prcm_setup_regs();

	/*
	 * The sleep/wakeup routines must live in SRAM: that is the only
	 * memory the MPU can see when it wakes up.
	 */
	omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
					 omap24xx_idle_loop_suspend_sz);
	omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
					    omap24xx_cpu_suspend_sz);

	arm_pm_idle = omap2_pm_idle;

	return 0;
}
/*
 * am33xx_pm_init - set up AM33XX/AM43XX power management (DT boot path)
 *
 * Allocates the PM context, maps the EMIF, looks up the suspend
 * powerdomains, selects SoC-specific PM ops, derives memory-type WFI
 * flags from the EMIF SDRAM config, reads VTT-toggle/IO-isolation
 * settings from the wkup-m3 DT node, turns off the CEFUSE domain and
 * registers the WKUP-M3 ops.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __init am33xx_pm_init(void)
{
	struct powerdomain *cefuse_pwrdm;
#ifdef CONFIG_CPU_PM
	int ret;
	u32 temp;
	struct device_node *np;
#endif /* CONFIG_CPU_PM */

	if (!soc_is_am33xx() && !soc_is_am43xx())
		return -ENODEV;

#ifdef CONFIG_CPU_PM
	am33xx_pm = kzalloc(sizeof(*am33xx_pm), GFP_KERNEL);
	if (!am33xx_pm) {
		pr_err("Memory allocation failed\n");
		ret = -ENOMEM;
		return ret;
	}

	ret = am33xx_map_emif();
	if (ret) {
		pr_err("PM: Could not ioremap EMIF\n");
		goto err;
	}
#ifdef CONFIG_SUSPEND
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm)) {
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Code paths for each SoC are nearly the same but set ops
	 * handle differences during init, pre-suspend, and post-suspend
	 */
	if (soc_is_am33xx())
		am33xx_pm->ops = &am33xx_ops;
	else if (soc_is_am43xx())
		am33xx_pm->ops = &am43xx_ops;

	ret = am33xx_pm->ops->init();
	if (ret)
		goto err;
#endif /* CONFIG_SUSPEND */

	/* Determine Memory Type */
	temp = readl(am33xx_emif_base + EMIF_SDRAM_CONFIG);
	temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
	/* Parameters to pass to assembly code */
	susp_params.wfi_flags = 0;
	susp_params.emif_addr_virt = am33xx_emif_base;
	susp_params.dram_sync = am33xx_dram_sync;
	switch (temp) {
	case MEM_TYPE_DDR2:
		susp_params.wfi_flags |= WFI_MEM_TYPE_DDR2;
		break;
	case MEM_TYPE_DDR3:
		susp_params.wfi_flags |= WFI_MEM_TYPE_DDR3;
		break;
	}
	susp_params.wfi_flags |= WFI_SELF_REFRESH;
	susp_params.wfi_flags |= WFI_SAVE_EMIF;
	susp_params.wfi_flags |= WFI_DISABLE_EMIF;
	susp_params.wfi_flags |= WFI_WAKE_M3;

	am33xx_pm->ipc.reg4 = temp & MEM_TYPE_MASK;

	np = of_find_compatible_node(NULL, NULL, "ti,am3353-wkup-m3");
	if (np) {
		if (of_find_property(np, "ti,needs-vtt-toggle", NULL) &&
		    (!(of_property_read_u32(np, "ti,vtt-gpio-pin",
							&temp)))) {
			/*
			 * Fix: 'temp' is u32, so the previous
			 * "temp >= 0 && temp <= 31" test was always true
			 * on the left-hand side (-Wtype-limits); only the
			 * upper bound is meaningful.
			 */
			if (temp <= 31)
				am33xx_pm->ipc.reg4 |=
					((1 << VTT_STAT_SHIFT) |
					(temp << VTT_GPIO_PIN_SHIFT));
			else
				pr_warn("PM: Invalid VTT GPIO(%d) pin\n",
								temp);
		}
		if (of_find_property(np, "ti,set-io-isolation", NULL))
			am33xx_pm->ipc.reg4 |=
				(1 << IO_ISOLATION_STAT_SHIFT);
	}

#ifdef CONFIG_SUSPEND
	ret = am33xx_setup_sleep_sequence();
	if (ret) {
		pr_err("Error fetching I2C sleep/wake sequence\n");
		goto err;
	}
#endif /* CONFIG_SUSPEND */
#endif /* CONFIG_CPU_PM */

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	/* CEFUSE domain can be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (cefuse_pwrdm)
		omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
	else
		pr_err("PM: Failed to get cefuse_pwrdm\n");

#ifdef CONFIG_CPU_PM
	am33xx_pm->state = M3_STATE_RESET;

	wkup_m3_set_ops(&am33xx_wkup_m3_ops);

	/* m3 may have already loaded but ops were not set yet,
	 * manually invoke */
	if (wkup_m3_is_valid())
		am33xx_m3_fw_ready_cb();
#endif /* CONFIG_CPU_PM */

	return 0;

#ifdef CONFIG_CPU_PM
err:
	kfree(am33xx_pm);
	return ret;
#endif /* CONFIG_CPU_PM */
}
/*
 * Initialise OMAP4 MPUSS
 *
 * Fills in the per-CPU SAR save addresses and powerdomain handles,
 * clears previous power states, programs CPU0/CPU1/MPUSS to ON, tags
 * the device type in SAR RAM and saves the L2 cache controller context.
 * Returns 0 on success or -ENODEV on unsupported silicon / failed
 * powerdomain lookups.
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	/* ES1.0 cannot do CPU PM at all — refuse early. */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	sar_base = omap4_get_sar_ram_base();

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	/* Save device type on scratchpad for low level code to use */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		__raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
	else
		__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);

	save_l2x0_context();

	return 0;
}
/*
 * Initialise OMAP4 MPUSS
 *
 * Multi-SoC variant: SAR RAM is only mapped on OMAP44xx, so every SAR
 * access is guarded by a sar_base check.  Also wires up the per-SoC
 * suspend/resume entry points and the CPU-context register offset.
 * Returns 0 on success or -ENODEV on unsupported silicon / failed
 * powerdomain lookups.
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	/* ES1.0 cannot do CPU PM at all — refuse early. */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	/* Only OMAP44xx has SAR RAM; other SoCs leave sar_base NULL. */
	if (cpu_is_omap44xx())
		sar_base = omap4_get_sar_ram_base();

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
		pm_info->wkup_sar_addr = sar_base +
					CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
		pm_info->wkup_sar_addr = sar_base +
					CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	if (sar_base) {
		/* Save device type on scratchpad for low level code to use */
		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ?
			1 : 0, sar_base + OMAP_TYPE_OFFSET);
		save_l2x0_context();
	}

	/* Per-SoC low-power entry points and context-register offsets. */
	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	/* 4460 uses a different secondary-startup path after hotplug. */
	if (cpu_is_omap446x())
		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

	return 0;
}
/*
 * omap2_pm_init - initialise OMAP2 power management
 *
 * Bails out on non-OMAP24xx parts, prints the PRCM revision, caches the
 * power/clock domain and clock handles used by the PM code, programs
 * the PRCM registers, notes whether the STI console is active, copies
 * the sleep/wakeup routines into SRAM and installs the idle handler.
 *
 * Returns 0 on success, or -ENODEV on the wrong SoC / missing clocks.
 */
static int __init omap2_pm_init(void)
{
	u32 prcm_rev;

	if (!cpu_is_omap24xx())
		return -ENODEV;

	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
	prcm_rev = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
	printk(KERN_INFO "PRCM revision %d.%d\n",
	       (prcm_rev >> 4) & 0x0f, prcm_rev & 0x0f);

	/* Cache the powerdomain handles; a failed lookup is only logged. */
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL)
		pr_err("PM: mpu_pwrdm not found\n");

	core_pwrdm = pwrdm_lookup("core_pwrdm");
	if (core_pwrdm == NULL)
		pr_err("PM: core_pwrdm not found\n");

	/* Cache the clockdomain handles the same way. */
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	if (mpu_clkdm == NULL)
		pr_err("PM: mpu_clkdm not found\n");

	wkup_clkdm = clkdm_lookup("wkup_clkdm");
	if (wkup_clkdm == NULL)
		pr_err("PM: wkup_clkdm not found\n");

	dsp_clkdm = clkdm_lookup("dsp_clkdm");
	if (dsp_clkdm == NULL)
		pr_err("PM: dsp_clkdm not found\n");

	gfx_clkdm = clkdm_lookup("gfx_clkdm");
	if (gfx_clkdm == NULL)
		pr_err("PM: gfx_clkdm not found\n");

	/* These clocks are mandatory; bail out when either is missing. */
	osc_ck = clk_get(NULL, "osc_ck");
	if (IS_ERR(osc_ck)) {
		printk(KERN_ERR "could not get osc_ck\n");
		return -ENODEV;
	}

	if (cpu_is_omap242x()) {
		emul_ck = clk_get(NULL, "emul_ck");
		if (IS_ERR(emul_ck)) {
			printk(KERN_ERR "could not get emul_ck\n");
			clk_put(osc_ck);
			return -ENODEV;
		}
	}

	prcm_setup_regs();

	/* Remember whether the STI console is in use. */
	{
		const struct omap_sti_console_config *sti;

		sti = omap_get_config(OMAP_TAG_STI_CONSOLE,
				      struct omap_sti_console_config);
		if (sti != NULL && sti->enable)
			sti_console_enabled = 1;
	}

	/*
	 * The sleep/wakeup routines must live in SRAM: that is the only
	 * memory the MPU can see when it wakes up.
	 */
	if (cpu_is_omap24xx()) {
		omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
						 omap24xx_idle_loop_suspend_sz);
		omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
						    omap24xx_cpu_suspend_sz);
	}

	arm_pm_idle = omap2_pm_idle;

	return 0;
}
/*
 * mapphone_panel_init - register the Mapphone display devices
 *
 * Requests the panel reset GPIO, the per-supply enable GPIOs and the
 * optional LVDS/colour/backlight/write-protect GPIOs (each present only
 * when its number is >= 0), applies the OMAP4 HDMI DDC pull-up erratum
 * workaround, then registers the DSS manager/panel platform devices.
 *
 * Fixes over the previous version: the cleanup cascade now frees only
 * GPIOs that were actually requested (optional GPIOs may be < 0), and
 * the unreachable failed_hdmi_5v label/free was removed.
 */
void __init mapphone_panel_init(void)
{
	struct mapphone_dsi_panel_pwr_supply *supply;
	struct mapphone_dsi_panel_data *panel_data =
		(struct mapphone_dsi_panel_data *)mapphone_lcd_device.data;
	int ret;
	int i;
	int num_gpio_handled = 0;

	dss_pwrdm = pwrdm_lookup("dss_pwrdm");
	if (!dss_pwrdm)
		pr_info("%s: Not found dss_pwrdm\n", __func__);

	if (mapphone_dt_panel_init())
		PANELINFO(": using non-dt configuration\n");

	mapphone_panel_get_fb_info();
	omapfb_set_platform_data(&mapphone_fb_data);

	ret = gpio_request(mapphone_panel_data.reset_gpio, "display reset");
	if (ret) {
		PANELERR("failed to get display reset gpio\n");
		return;	/* nothing acquired yet, nothing to free */
	}
	gpio_direction_output(mapphone_panel_data.reset_gpio, 1);

	/* Enable every configured display power supply. */
	for (i = 0; i < panel_data->num_pwr_supply; i++) {
		supply = &(panel_data->disp_vol_supply[i]);
		if (supply->en_gpio != 0) {
			ret = gpio_request(supply->en_gpio, "LCD-pwr_sup_en");
			if (ret) {
				PANELERR("Supply %d, failed to req for "
					"LCD-pwr_sup_en\n", i);
				num_gpio_handled = i;
				goto failed_pwr_supply;
			}
			gpio_direction_output(supply->en_gpio,
				supply->en_gpio_value);
		}
	}
	num_gpio_handled = panel_data->num_pwr_supply;

	if (mapphone_displ_lvds_cabc_en >= 0) {
		ret = gpio_request(mapphone_displ_lvds_cabc_en,
			"LCD-lvds_cabc_en");
		if (ret) {
			printk(KERN_ERR "Failed LCD-lvds_cabc_en req\n");
			goto failed_req_lvds_en;
		}
		gpio_direction_output(mapphone_displ_lvds_cabc_en, 1);
	}

	if (mapphone_displ_color_en >= 0) {
		ret = gpio_request(mapphone_displ_color_en, "LCD-color_en");
		if (ret) {
			printk(KERN_ERR "Failed LCD-color_en req\n");
			goto failed_req_color_en;
		}
		gpio_direction_output(mapphone_displ_color_en, 1);
	}

	if (mapphone_displ_lcd_bl_pwm >= 0) {
		ret = gpio_request(mapphone_displ_lcd_bl_pwm,
			"LCD-lcd_bl_pwm");
		if (ret) {
			printk(KERN_ERR "Failed LCD-lcd_bl_pwm req\n");
			goto failed_req_lcd_bl_pwm;
		}
		gpio_direction_output(mapphone_displ_lcd_bl_pwm, 0);
	}

	if (mapphone_displ_lvds_wp_g >= 0) {
		ret = gpio_request(mapphone_displ_lvds_wp_g, "LCD-lvds_wp_g");
		if (ret) {
			printk(KERN_ERR "Failed LCD-lvds_wp_g req\n");
			goto failed_req_lvds_wp_g;
		}
		gpio_direction_output(mapphone_displ_lvds_wp_g, 1);
	}

	if (mapphone_displ_lvds_wp_e >= 0) {
		ret = gpio_request(mapphone_displ_lvds_wp_e, "LCD-lvds_wp_e");
		if (ret) {
			printk(KERN_ERR "Failed LCD-lvds_wp_e req\n");
			goto failed_req_lvds_wp_e;
		}
		gpio_direction_output(mapphone_displ_lvds_wp_e, 1);
	}

	if (mapphone_feature_hdmi) {
		/* Set the bits to disable "internal pullups" for the DDC
		 * clk and data lines. This is required for ES2.3 parts
		 * and beyond. If these are not set EDID reads fails.
		 */
		if (cpu_is_omap44xx()) {
			omap_writel(HDMI_CONTROL_I2C_1_DDC_PU_DIS,
				HDMI_CONTROL_I2C_1_REG);
		}
		platform_device_register(&omap_dssmgr_device);
	} else {
		/* Remove HDTV from the DSS device list */
		mapphone_dss_data.num_devices--;
	}

	platform_device_register(&omap_panel_device);
	omap_display_init(&mapphone_dss_data);
	return;

	/*
	 * Unwind in reverse order of acquisition.  Each optional GPIO is
	 * freed only when it was configured (>= 0), since a negative
	 * number means its request block was skipped entirely.
	 */
failed_req_lvds_wp_e:
	if (mapphone_displ_lvds_wp_g >= 0)
		gpio_free(mapphone_displ_lvds_wp_g);
failed_req_lvds_wp_g:
	if (mapphone_displ_lcd_bl_pwm >= 0)
		gpio_free(mapphone_displ_lcd_bl_pwm);
failed_req_lcd_bl_pwm:
	if (mapphone_displ_color_en >= 0)
		gpio_free(mapphone_displ_color_en);
failed_req_color_en:
	if (mapphone_displ_lvds_cabc_en >= 0)
		gpio_free(mapphone_displ_lvds_cabc_en);
failed_req_lvds_en:
failed_pwr_supply:
	for (i = 0; i < num_gpio_handled; i++) {
		supply = &(panel_data->disp_vol_supply[i]);
		if (supply->en_gpio != 0)
			gpio_free(supply->en_gpio);
	}
	gpio_free(mapphone_panel_data.reset_gpio);
}
/** * power_domain_test - Test the power domain APIs * * Test the power domain APIs for all power domains * */ void power_domain_test() { int bank, i; int val = -EINVAL; static struct powerdomain *p, *pwrdm; for (i = 0; powerdomains_omap[i] != NULL; i++) { p = powerdomains_omap[i]; pwrdm = pwrdm_lookup(p->name); if (pwrdm) printk(KERN_INFO "PWR DM No%d = %s\n", i, pwrdm->name); else printk(KERN_INFO "PWR DM %s not supported\n", p->name); } /* i starts from 1 as gfx_pwrdm not supported in ES3.1.1 */ for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_add_clkdm(p = powerdomains_omap[i], &dummy_clkdm); if (val == 0) printk(KERN_INFO "Clock Domain Registered for %s\n", p->name); else if (val == -EINVAL) printk(KERN_ERR "Clock Domain Register FAILED!!! for" " %s\n", p->name); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_del_clkdm(p = powerdomains_omap[i], &dummy_clkdm); if (val == 0) printk(KERN_INFO "Clock Domain Unregistered for %s\n", p->name); else if (val == -EINVAL) printk(KERN_ERR "Clock Domain Unregister FAILED!!! for" " %s\n", p->name); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_get_mem_bank_count(p = powerdomains_omap[i]); printk(KERN_INFO "Bnk Cnt for %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_logic_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "PwrState of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_logic_retst(p = powerdomains_omap[i], PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Logic RET State OFF for %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported for %s\n", p->name); else printk(KERN_ERR "Set Logic RET State OFF FAILED!!!" 
" with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_logic_retst(p = powerdomains_omap[i], PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Logic RET State RET for %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported for %s\n", p->name); else printk(KERN_ERR "Logic RET State RET FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "PwrState of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to OFF\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to OFF FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to RET\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to RET FAILED!!!" " with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_set_next_pwrst(p = powerdomains_omap[i], PWRDM_POWER_ON); if (val == 0) printk(KERN_INFO "Next PWRST for %s Set to ON\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "ON not supported for %s\n", p->name); else printk(KERN_ERR "Next PWRST Set to ON FAILED!!!" 
" with value %d\n", val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_next_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "Next Power State of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { val = pwrdm_read_pwrst(p = powerdomains_omap[i]); printk(KERN_INFO "Current Power State of %s = %d\n", p->name, val); } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_onst(p = powerdomains_omap[i], bank, PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Memory ON State OFF for %s" " Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported" " for %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State OFF FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_onst(p = powerdomains_omap[i], bank, PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Memory ON State RET for %s" " Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported" " for %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State RET FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_retst(p = powerdomains_omap[i], bank, PWRDM_POWER_OFF); if (val == 0) printk(KERN_INFO "Memory RET State OFF for" " %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "OFF State not supported for" " %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "Memory ON State OFF FAILED!!!" 
" with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_set_mem_retst(p = powerdomains_omap[i], bank, PWRDM_POWER_RET); if (val == 0) printk(KERN_INFO "Memory RET State RET for" " %s Set\n", p->name); else if (val == -EINVAL) printk(KERN_INFO "RET State not supported for" " %s\n", p->name); else if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else printk(KERN_ERR "MEM PWRST Set FAILED!!!" " with value %d\n", val); } } for (i = 1; powerdomains_omap[i] != NULL; i++) { for (bank = 0; bank < PWRDM_MAX_MEM_BANKS; bank++) { val = pwrdm_read_mem_pwrst(p = powerdomains_omap[i], bank); if (val == -EEXIST) printk(KERN_ERR "Memory Bank %d not present" " for %s\n", bank, p->name); else if (val == -EINVAL) printk(KERN_ERR "MEM PWRST Read FAILED!!!" " with value %d\n", val); else printk(KERN_INFO "MEM PWRST for bank %d of" " %s = %d\n", bank, p->name, val); } } }
/*
 * SAR RAM used to save and restore the HW
 * context in low power modes
 *
 * Maps SAR RAM and every module whose registers the SAR save phase
 * reads, pre-saves BANK3 on GP devices, overwrites the secure EMIF
 * SDRAM_CONFIG2 shadow registers and caches the L3INIT powerdomain and
 * USB host/TLL clocks needed during the save phase.
 *
 * Returns 0 on success or -ENODEV on non-OMAP44xx parts.
 */
static int __init omap4_sar_ram_init(void)
{
	void __iomem *secure_ctrl_mod;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_8K);
	BUG_ON(!sar_ram_base);

	/*
	 * All these are static mappings so ioremap() will
	 * just return with mapped VA
	 */
	omap4_sar_modules[EMIF1_INDEX] = ioremap(OMAP44XX_EMIF1, SZ_1M);
	BUG_ON(!omap4_sar_modules[EMIF1_INDEX]);
	omap4_sar_modules[EMIF2_INDEX] = ioremap(OMAP44XX_EMIF2, SZ_1M);
	BUG_ON(!omap4_sar_modules[EMIF2_INDEX]);
	omap4_sar_modules[DMM_INDEX] = ioremap(OMAP44XX_DMM_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[DMM_INDEX]);
	omap4_sar_modules[CM1_INDEX] = ioremap(OMAP4430_CM1_BASE, SZ_8K);
	BUG_ON(!omap4_sar_modules[CM1_INDEX]);
	omap4_sar_modules[CM2_INDEX] = ioremap(OMAP4430_CM2_BASE, SZ_8K);
	BUG_ON(!omap4_sar_modules[CM2_INDEX]);
	omap4_sar_modules[C2C_INDEX] = ioremap(OMAP44XX_C2C_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[C2C_INDEX]);
	omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX] =
	    ioremap(OMAP443X_CTRL_BASE, SZ_4K);
	BUG_ON(!omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX]);
	omap4_sar_modules[L3_CLK1_INDEX] = ioremap(L3_44XX_BASE_CLK1, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK1_INDEX]);
	omap4_sar_modules[L3_CLK2_INDEX] = ioremap(L3_44XX_BASE_CLK2, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK2_INDEX]);
	omap4_sar_modules[L3_CLK3_INDEX] = ioremap(L3_44XX_BASE_CLK3, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK3_INDEX]);
	omap4_sar_modules[USBTLL_INDEX] = ioremap(OMAP44XX_USBTLL_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[USBTLL_INDEX]);
	omap4_sar_modules[UHH_INDEX] = ioremap(OMAP44XX_UHH_CONFIG_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[UHH_INDEX]);
	omap4_sar_modules[L4CORE_INDEX] = ioremap(L4_44XX_PHYS, SZ_4M);
	BUG_ON(!omap4_sar_modules[L4CORE_INDEX]);
	omap4_sar_modules[L4PER_INDEX] = ioremap(L4_PER_44XX_PHYS, SZ_4M);
	BUG_ON(!omap4_sar_modules[L4PER_INDEX]);

	/*
	 * SAR BANK3 contains all firewall settings and it's saved through
	 * secure API on HS device. On GP device these registers are
	 * meaningless but still needs to be saved. Otherwise Auto-restore
	 * phase DMA takes an abort. Hence save these contents only once
	 * in init to avoid the issue while waking up from device OFF
	 */
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		save_sar_bank3();
	/*
	 * Overwrite EMIF1/EMIF2
	 * SECURE_EMIF1_SDRAM_CONFIG2_REG
	 * SECURE_EMIF2_SDRAM_CONFIG2_REG
	 */
	secure_ctrl_mod = ioremap(OMAP4_CTRL_MODULE_WKUP, SZ_4K);
	BUG_ON(!secure_ctrl_mod);
	__raw_writel(0x10, secure_ctrl_mod +
		     OMAP4_CTRL_SECURE_EMIF1_SDRAM_CONFIG2_REG);
	__raw_writel(0x10, secure_ctrl_mod +
		     OMAP4_CTRL_SECURE_EMIF2_SDRAM_CONFIG2_REG);
	wmb();
	iounmap(secure_ctrl_mod);

	/*
	 * L3INIT PD and clocks are needed for SAR save phase
	 */
	l3init_pwrdm = pwrdm_lookup("l3init_pwrdm");
	if (!l3init_pwrdm)
		pr_err("Failed to get l3init_pwrdm\n");

	/*
	 * Fix: clk_get() returns ERR_PTR() on failure, never NULL, so
	 * the old "if (!clk)" checks could never fire — use IS_ERR().
	 */
	usb_host_ck = clk_get(NULL, "usb_host_hs_fck");
	if (IS_ERR(usb_host_ck))
		pr_err("Could not get usb_host_ck\n");

	usb_tll_ck = clk_get(NULL, "usb_tll_hs_ick");
	if (IS_ERR(usb_tll_ck))
		pr_err("Could not get usb_tll_ck\n");

	return 0;
}
/*
 * omap2_pm_init - initialize OMAP2 power management at boot
 *
 * Looks up the power/clock domains used by the PM code, grabs the
 * oscillator (and, on 242x, emulation) clocks, programs the PRCM,
 * copies the low-power assembly routines into SRAM, and finally
 * registers the suspend ops and the custom idle handler.
 *
 * Returns 0 on success, -ENODEV on a non-OMAP24xx CPU or when a
 * required clock is missing.  Missing power/clock domains are only
 * logged; init continues without them.
 */
static int __init omap2_pm_init(void)
{
	u32 l;	/* raw PRCM revision register value */

	/* Guard for multi-OMAP kernels: bail out on other SoCs. */
	if (!cpu_is_omap24xx())
		return -ENODEV;

	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
	/* Revision is BCD-style: high nibble major, low nibble minor. */
	printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);

	/* Look up important powerdomains */
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (!mpu_pwrdm)
		pr_err("PM: mpu_pwrdm not found\n");

	core_pwrdm = pwrdm_lookup("core_pwrdm");
	if (!core_pwrdm)
		pr_err("PM: core_pwrdm not found\n");

	/* Look up important clockdomains */
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	if (!mpu_clkdm)
		pr_err("PM: mpu_clkdm not found\n");

	wkup_clkdm = clkdm_lookup("wkup_clkdm");
	if (!wkup_clkdm)
		pr_err("PM: wkup_clkdm not found\n");

	dsp_clkdm = clkdm_lookup("dsp_clkdm");
	if (!dsp_clkdm)
		pr_err("PM: dsp_clkdm not found\n");

	gfx_clkdm = clkdm_lookup("gfx_clkdm");
	if (!gfx_clkdm)
		pr_err("PM: gfx_clkdm not found\n");

	/* clk_get() failure is reported via ERR_PTR, hence IS_ERR(). */
	osc_ck = clk_get(NULL, "osc_ck");
	if (IS_ERR(osc_ck)) {
		printk(KERN_ERR "could not get osc_ck\n");
		return -ENODEV;
	}

	/* emul_ck only exists on the 242x variant. */
	if (cpu_is_omap242x()) {
		emul_ck = clk_get(NULL, "emul_ck");
		if (IS_ERR(emul_ck)) {
			printk(KERN_ERR "could not get emul_ck\n");
			/* Release osc_ck taken above before failing. */
			clk_put(osc_ck);
			return -ENODEV;
		}
	}

	prcm_setup_regs();

	/* Hack to prevent MPU retention when STI console is enabled. */
	{
		const struct omap_sti_console_config *sti;

		sti = omap_get_config(OMAP_TAG_STI_CONSOLE,
				      struct omap_sti_console_config);
		if (sti != NULL && sti->enable)
			sti_console_enabled = 1;
	}

	/*
	 * We copy the assembler sleep/wakeup routines to SRAM.
	 * These routines need to be in SRAM as that's the only
	 * memory the MPU can see when it wakes up.
	 */
	if (cpu_is_omap24xx()) {
		omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
						 omap24xx_idle_loop_suspend_sz);

		omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
						    omap24xx_cpu_suspend_sz);
	}

	/* Register last, once everything above is in place. */
	suspend_set_ops(&omap_pm_ops);
	pm_idle = omap2_pm_idle;

	return 0;
}
/*
 * am33xx_pm_init - initialize AM33xx power management at boot
 *
 * Captures the board/memory/CPU-revision parameters the suspend path
 * needs, programs the post-boot power-domain states, and brings up the
 * WKUP_M3 coprocessor.  Suspend ops are registered only when deep sleep
 * remains viable.
 *
 * Returns 0 on success (including when CONFIG_SUSPEND is disabled),
 * -ENODEV on non-AM33xx, -EINVAL when the MPU device is missing, or
 * the wkup_m3_init() error code.
 */
static int __init am33xx_pm_init(void)
{
	/*
	 * Must be initialized: when CONFIG_SUSPEND is not set, nothing
	 * below assigns ret before the final "return ret" (previously an
	 * uninitialized-read).
	 */
	int ret = 0;
#ifdef CONFIG_SUSPEND
	void __iomem *base;
	u32 reg;
	/* int, not u32: compared against the -EINVAL error code below. */
	int evm_id;
#endif

	if (!cpu_is_am33xx())
		return -ENODEV;

	pr_info("Power Management for AM33XX family\n");

#ifdef CONFIG_SUSPEND

#ifdef CONFIG_TI_PM_DISABLE_VT_SWITCH
	pm_set_vt_switch(0);
#endif

	/* Read SDRAM_CONFIG register to determine Memory Type */
	base = am33xx_get_ram_base();
	reg = readl(base + EMIF4_0_SDRAM_CONFIG);
	reg = (reg & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
	suspend_cfg_param_list[MEMORY_TYPE] = reg;

	/*
	 * vtp_ctrl register value for DDR2 and DDR3 as suggested
	 * by h/w team
	 */
	if (reg == MEM_TYPE_DDR2)
		suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR2;
	else
		suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR3;

	/* Get Board Id; 0xff marks "unknown board" for the M3 firmware. */
	evm_id = am335x_evm_get_id();
	if (evm_id != -EINVAL)
		suspend_cfg_param_list[EVM_ID] = evm_id;
	else
		suspend_cfg_param_list[EVM_ID] = 0xff;

	/* CPU Revision */
	reg = omap_rev();
	if (reg == AM335X_REV_ES2_0)
		suspend_cfg_param_list[CPU_REV] = CPU_REV_2;
	else
		suspend_cfg_param_list[CPU_REV] = CPU_REV_1;

	(void) clkdm_for_each(clkdms_setup, NULL);

	/* CEFUSE domain should be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (cefuse_pwrdm == NULL)
		pr_err("Failed to get cefuse_pwrdm\n");
	else
		pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF);

	/* Lookup failures below are logged but non-fatal. */
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	if (gfx_pwrdm == NULL)
		pr_err("Failed to get gfx_pwrdm\n");

	per_pwrdm = pwrdm_lookup("per_pwrdm");
	if (per_pwrdm == NULL)
		pr_err("Failed to get per_pwrdm\n");

	gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm");
	if (gfx_l3_clkdm == NULL)
		pr_err("Failed to get gfx_l3_clkdm\n");

	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");
	if (gfx_l4ls_clkdm == NULL)
		pr_err("Failed to get gfx_l4ls_gfx_clkdm\n");

	/* The MPU device is mandatory for the suspend path. */
	mpu_dev = omap_device_get_by_hwmod_name("mpu");
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

	ret = wkup_m3_init();
	if (ret) {
		/* Without the M3 firmware, deep sleep cannot be offered. */
		pr_err("Could not initialise WKUP_M3. "
		       "Power management will be compromised\n");
		enable_deep_sleep = false;
	}

	if (enable_deep_sleep)
		suspend_set_ops(&am33xx_pm_ops);

#endif /* CONFIG_SUSPEND */

	return ret;
}