/**
 * omap4_prm_deassert_hardreset - deassert a submodule hardreset line and wait
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @shift: register bit shift corresponding to the reset line to deassert
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success,
 * -EINVAL upon an argument error, -EEXIST if the submodule was already out
 * of reset, or -EBUSY if the submodule did not exit reset promptly.
 */
int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift)
{
	u32 mask;
	void __iomem *rstst_reg;
	int c;

	if (!cpu_is_omap44xx() || !rstctrl_reg)
		return -EINVAL;

	rstst_reg = rstctrl_reg + OMAP4_RST_CTRL_ST_OFFSET;

	mask = 1 << shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap4_prm_read_bits_shift(rstctrl_reg, mask) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap4_prm_rmw_reg_bits(0xffffffff, mask, rstst_reg);
	/* de-assert the reset control line */
	omap4_prm_rmw_reg_bits(mask, 0, rstctrl_reg);
	/* wait for the reset status to be set */
	omap_test_timeout(omap4_prm_read_bits_shift(rstst_reg, mask),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}
/**
 * omap4_prm_assert_hardreset - assert the HW reset line of a submodule
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @shift: register bit shift corresponding to the reset line to assert
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * place the submodule into reset.  Returns 0 upon success or -EINVAL
 * upon an argument error.
 */
int omap4_prm_assert_hardreset(void __iomem *rstctrl_reg, u8 shift)
{
	u32 mask;

	if (!cpu_is_omap44xx() || !rstctrl_reg)
		return -EINVAL;

	mask = 1 << shift;
	omap4_prm_rmw_reg_bits(mask, mask, rstctrl_reg);

	return 0;
}
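/*
 * Usage sketch (hypothetical): cycle one hard-reset line of a processor IP
 * with the two helpers above.  The RM_RSTCTRL pointer and reset-line shift
 * are placeholders supplied by the caller, not values taken from this file.
 */
static int example_cycle_hardreset(void __iomem *rstctrl, u8 shift)
{
	int ret;

	/* put the submodule into reset before reprogramming it */
	ret = omap4_prm_assert_hardreset(rstctrl, shift);
	if (ret)
		return ret;

	/* ... load firmware, program the boot address, etc. ... */

	/* release the reset line and wait for the PRCM to confirm */
	ret = omap4_prm_deassert_hardreset(rstctrl, shift);
	if (ret == -EEXIST)
		ret = 0;	/* the line was already de-asserted */

	return ret;
}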
/**
 * omap4_noncore_dpll_mn_bypass - put a non-CORE DPLL into MN bypass
 * @clk: struct clk * of the DPLL to bypass
 *
 * Program the DPLL_EN field of @clk's control register for MN bypass and
 * wait for the DPLL to report bypass status.  On success the clock is
 * reparented to its bypass clock and its rate is updated.  Returns 0 upon
 * success, -EINVAL if @clk is invalid or does not support MN bypass, or
 * -ENODEV if the DPLL did not enter bypass.
 */
int omap4_noncore_dpll_mn_bypass(struct clk *clk)
{
	int i, ret = 0;
	u32 reg;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!(clk->dpll_data->modes & (1 << DPLL_MN_BYPASS)))
		return -EINVAL;

	pr_debug("%s: configuring DPLL %s for MN bypass\n",
		 __func__, clk->name);

	/* protect the DPLL during programming; usecount++ */
	clk_enable(dd->clk_bypass);

	omap4_prm_rmw_reg_bits(dd->enable_mask,
			       (DPLL_MN_BYPASS << __ffs(dd->enable_mask)),
			       dd->control_reg);

	/* wait for DPLL to enter bypass */
	for (i = 0; i < 1000000; i++) {
		reg = __raw_readl(dd->idlest_reg) & dd->mn_bypass_st_mask;
		if (reg)
			break;
	}

	if (reg) {
		if (clk->usecount) {
			/* DPLL is actually needed right now; usecount++ */
			clk_enable(dd->clk_bypass);
			clk_disable(clk->parent);
		}
		pr_err("%s: reparenting %s to %s, and setting old rate %lu to new rate %lu\n",
		       __func__, clk->name, dd->clk_bypass->name,
		       clk->rate, dd->clk_bypass->rate);
		clk_reparent(clk, dd->clk_bypass);
		clk->rate = dd->clk_bypass->rate;
	} else {
		ret = -ENODEV;
	}

	/* done programming, no need to protect DPLL; usecount-- */
	clk_disable(dd->clk_bypass);

	return ret;
}
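/*
 * Usage sketch (hypothetical): drop DPLL_IVA into MN bypass via the helper
 * above.  The clock name is assumed from the rest of this file; a real
 * caller would keep the clk reference around and handle errors properly.
 */
static void example_iva_mn_bypass(void)
{
	struct clk *dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");

	if (!dpll_iva_ck)
		return;

	if (omap4_noncore_dpll_mn_bypass(dpll_iva_ck))
		pr_warn("%s: DPLL_IVA did not enter MN bypass\n", __func__);
}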
/**
 * omap4_dpll_low_power_cascade_exit - leave the low power DPLL cascade
 *
 * Undo the low power DPLL cascading configuration: relock DPLL_MPU,
 * DPLL_IVA and DPLL_PER, restore the saved bypass clock parents and
 * divider settings, restore the CORE clock rates, and allow the ABE clock
 * domain and DPLLs to idle again.  Returns 0 on success or a negative
 * error code on failure.
 */
int omap4_dpll_low_power_cascade_exit(void)
{
	int ret = 0;
	struct clk *sys_clkin_ck;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct cpufreq_policy *cp;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	sys_clkin_ck = clk_get(NULL, "sys_clkin_ck");
	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	if (!dpll_abe_ck || !dpll_mpu_ck || !div_mpu_hs_clk || !dpll_iva_ck ||
	    !div_iva_hs_clk || !iva_hsd_byp_clk_mux_ck || !dpll_core_m2_ck ||
	    !dpll_abe_m3x2_ck || !div_core_ck || !dpll_core_x2_ck ||
	    !core_hsd_byp_clk_mux_ck || !dpll_core_m5x2_ck ||
	    !l4_wkup_clk_mux_ck || !lp_clk_div_ck || !pmd_stm_clock_mux_ck ||
	    !pmd_trace_clk_mux_ck || !dpll_core_m6x2_ck || !sys_clkin_ck ||
	    !dpll_core_ck || !l3_div_ck || !l4_div_ck || !dpll_per_ck ||
	    !func_48m_fclk || !per_hsd_byp_clk_mux_ck || !per_hs_clk_div_ck) {
		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	if (delayed_work_pending(&lpmode_work))
		cancel_delayed_work_sync(&lpmode_work);

	if (!omap4_lpmode)
		return 0;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* lock DPLL_MPU */
	ret = clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
	if (ret)
		pr_err("%s: DPLL_MPU failed to relock\n", __func__);

	/* lock DPLL_IVA */
	ret = clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
	if (ret)
		pr_err("%s: DPLL_IVA failed to relock\n", __func__);

	/* lock DPLL_PER */
	ret = clk_set_rate(dpll_per_ck, state.dpll_per_ck_rate);
	if (ret)
		pr_err("%s: DPLL_PER failed to relock\n", __func__);

	/* restore bypass clock rates */
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				      (1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				      (1 << state.div_iva_hs_clk_div)));

	/* restore DPLL_IVA bypass clock */
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck,
			     state.iva_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_IVA bypass clock\n",
		       __func__);

	/* restore DPLL_PER bypass clock */
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck,
			     state.per_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_PER bypass clock\n",
		       __func__);

	/* restore CORE clock rates */
	ret = clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
					 (1 << state.div_core_ck_div)));
	omap4_prm_rmw_reg_bits(dpll_core_m2_ck->clksel_mask,
			       state.dpll_core_m2_div,
			       dpll_core_m2_ck->clksel_reg);
	ret |= clk_set_rate(dpll_core_m5x2_ck,
			    (dpll_core_m5x2_ck->parent->rate /
			     state.dpll_core_m5x2_ck_div));
	ret |= clk_set_rate(dpll_core_ck, state.dpll_core_ck_rate);
	if (ret)
		pr_err("%s: failed to restore CORE clock rates\n", __func__);

	/* drive DPLL_CORE bypass clock from SYS_CK (CLKINP) */
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck,
			     state.core_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed restoring DPLL_CORE bypass clock parent\n",
		       __func__);

	/* WA: allow DPLL_ABE_M3X2 clock to auto-gate */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0, dpll_abe_m3x2_ck->clksel_reg);

	/* allow ABE clock domain to idle again */
	omap2_clkdm_allow_idle(abe_44xx_clkdm);

	/* allow DPLL_ABE & DPLL_CORE to idle again */
	omap3_dpll_allow_idle(dpll_core_ck);
	omap3_dpll_allow_idle(dpll_abe_ck);

	/* DPLLs are configured, so let SYS_CLK idle again */
	__raw_writel(0, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* restore CLKREQ behavior */
	__raw_writel(state.clkreqctrl, OMAP4430_PRM_CLKREQCTRL);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*
	ret = clk_set_parent(pmd_stm_clock_mux_ck,
			     state.pmd_stm_clock_mux_ck_parent);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck,
			      state.pmd_trace_clk_mux_ck_parent);
	if (ret)
		pr_debug("%s: failed restoring parent to PMD clocks\n",
			 __func__);
	*/

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	recalculate_root_clocks();

	omap4_lpmode = false;

out:
	return ret;
}
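/*
 * Sketch (hypothetical) of how the enter/exit pair might be driven.  The
 * functions above refer to an lpmode_work delayed work and an omap4_lpmode
 * flag defined elsewhere; the handler, the 5 second delay and the entry
 * condition below are illustrative stand-ins, not the actual trigger code.
 */
static void example_lpmode_worker(struct work_struct *work)
{
	if (omap4_dpll_low_power_cascade_enter())
		pr_warn("%s: failed to enter DPLL cascade\n", __func__);
}

static DECLARE_DELAYED_WORK(example_lpmode_work, example_lpmode_worker);

static void example_request_lpmode(bool audio_only_use_case)
{
	if (audio_only_use_case)
		/* defer entry so brief idle periods do not thrash the DPLLs */
		schedule_delayed_work(&example_lpmode_work, 5 * HZ);
	else
		omap4_dpll_low_power_cascade_exit();
}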
/**
 * omap4_dpll_low_power_cascade_enter - configure system for low power DPLL
 * cascade
 *
 * The low power DPLL cascading scheme is a way to have a mostly functional
 * system running with only one locked DPLL and all of the others in bypass.
 * While this might be useful for many use cases, the primary target is low
 * power audio playback.  The steps to enter this state are roughly:
 *
 * Reparent DPLL_ABE so that it is fed by SYS_32K_CK
 * Set magical REGM4XEN bit so DPLL_ABE MN dividers are multiplied by four
 * Lock DPLL_ABE at 196.608MHz and bypass DPLL_CORE, DPLL_MPU & DPLL_IVA
 * Reparent DPLL_CORE so that it is fed by DPLL_ABE
 * Reparent DPLL_MPU & DPLL_IVA so that they are fed by DPLL_CORE
 */
int omap4_dpll_low_power_cascade_enter(void)
{
	int ret = 0;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct device *mpu_dev;
	struct cpufreq_policy *cp;
	struct omap_opp *opp;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	if (!dpll_abe_ck || !dpll_mpu_ck || !div_mpu_hs_clk || !dpll_iva_ck ||
	    !div_iva_hs_clk || !iva_hsd_byp_clk_mux_ck || !dpll_core_m2_ck ||
	    !dpll_abe_m3x2_ck || !div_core_ck || !dpll_core_x2_ck ||
	    !core_hsd_byp_clk_mux_ck || !dpll_core_m5x2_ck ||
	    !l4_wkup_clk_mux_ck || !lp_clk_div_ck || !pmd_stm_clock_mux_ck ||
	    !pmd_trace_clk_mux_ck || !dpll_core_m6x2_ck || !dpll_core_ck ||
	    !dpll_per_ck || !func_48m_fclk || !per_hsd_byp_clk_mux_ck ||
	    !per_hs_clk_div_ck) {
		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	omap4_lpmode = true;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* prevent DPLL_ABE & DPLL_CORE from idling */
	omap3_dpll_deny_idle(dpll_abe_ck);
	omap3_dpll_deny_idle(dpll_core_ck);

	/* put ABE clock domain in SW_WKUP */
	omap2_clkdm_wakeup(abe_44xx_clkdm);

	/* WA: prevent DPLL_ABE_M3X2 clock from auto-gating */
	omap4_prm_rmw_reg_bits(BIT(8), BIT(8), dpll_abe_m3x2_ck->clksel_reg);

	/* drive DPLL_CORE bypass clock from DPLL_ABE (CLKINPULOW) */
	state.core_hsd_byp_clk_mux_ck_parent = core_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck, dpll_abe_m3x2_ck);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_CORE bypass clock to ABE_M3X2\n",
		       __func__);
		goto core_bypass_clock_reparent_fail;
	} else {
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
			 __func__);
	}

	/*
	 * bypass DPLL_CORE, configure EMIF for the new rate
	 * CORE_CLK = CORE_X2_CLK
	 */
	state.dpll_core_ck_rate = dpll_core_ck->rate;

	state.div_core_ck_div =
		omap4_prm_read_bits_shift(div_core_ck->clksel_reg,
					  div_core_ck->clksel_mask);

	state.l3_div_ck_div =
		omap4_prm_read_bits_shift(l3_div_ck->clksel_reg,
					  l3_div_ck->clksel_mask);

	state.l4_div_ck_div =
		omap4_prm_read_bits_shift(l4_div_ck->clksel_reg,
					  l4_div_ck->clksel_mask);

	state.dpll_core_m5x2_ck_div =
		omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
					  dpll_core_m5x2_ck->clksel_mask);

	state.dpll_core_m2_div =
		omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
					  dpll_core_m2_ck->clksel_mask);

	ret = clk_set_rate(div_core_ck, dpll_core_m5x2_ck->rate);
	ret |= clk_set_rate(dpll_core_ck, LP_196M_RATE);
	ret |= clk_set_rate(dpll_core_m5x2_ck, dpll_core_x2_ck->rate);
	if (ret) {
		pr_err("%s: failed setting CORE clock rates\n", __func__);
		goto core_clock_set_rate_fail;
	} else {
		pr_debug("%s: CORE clock rates set for DPLL cascading\n",
			 __func__);
	}

	/* divide MPU/IVA bypass clocks by 2 (for when we bypass DPLL_CORE) */
	state.div_mpu_hs_clk_div =
		omap4_prm_read_bits_shift(div_mpu_hs_clk->clksel_reg,
					  div_mpu_hs_clk->clksel_mask);
	state.div_iva_hs_clk_div =
		omap4_prm_read_bits_shift(div_iva_hs_clk->clksel_reg,
					  div_iva_hs_clk->clksel_mask);
	clk_set_rate(div_mpu_hs_clk, div_mpu_hs_clk->parent->rate);
	clk_set_rate(div_iva_hs_clk, div_iva_hs_clk->parent->rate / 2);

	/* select CLKINPULOW (div_iva_hs_clk) as DPLL_IVA bypass clock */
	state.iva_hsd_byp_clk_mux_ck_parent = iva_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck, div_iva_hs_clk);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_IVA bypass clock to CLKINPULOW\n",
		       __func__);
		goto iva_bypass_clk_reparent_fail;
	} else {
		pr_debug("%s: reparented DPLL_IVA bypass clock to CLKINPULOW\n",
			 __func__);
	}

	/* select CLKINPULOW (per_hs_clk_div_ck) as DPLL_PER bypass clock */
	state.per_hsd_byp_clk_mux_ck_parent = per_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck, per_hs_clk_div_ck);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_PER bypass clock to CLKINPULOW\n",
		       __func__);
		goto per_bypass_clk_reparent_fail;
	} else {
		pr_debug("%s: reparented DPLL_PER bypass clock to CLKINPULOW\n",
			 __func__);
	}

	/* bypass DPLL_MPU */
	state.dpll_mpu_ck_rate = dpll_mpu_ck->rate;
	ret = clk_set_rate(dpll_mpu_ck,
			   dpll_mpu_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_MPU failed to enter Low Power bypass\n",
		       __func__);
		goto dpll_mpu_bypass_fail;
	} else {
		pr_debug("%s: DPLL_MPU entered Low Power bypass\n", __func__);
	}

	/* bypass DPLL_IVA */
	state.dpll_iva_ck_rate = dpll_iva_ck->rate;
	ret = clk_set_rate(dpll_iva_ck,
			   dpll_iva_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_IVA failed to enter Low Power bypass\n",
		       __func__);
		goto dpll_iva_bypass_fail;
	} else {
		pr_debug("%s: DPLL_IVA entered Low Power bypass\n", __func__);
	}

	/* bypass DPLL_PER */
	state.dpll_per_ck_rate = dpll_per_ck->rate;
	ret = clk_set_rate(dpll_per_ck,
			   dpll_per_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_PER failed to enter Low Power bypass\n",
		       __func__);
		goto dpll_per_bypass_fail;
	} else {
		pr_debug("%s: DPLL_PER entered Low Power bypass\n", __func__);
	}

	/* drive L4_WKUP from the ABE LP clock so SYS_CLK is free to idle */
	__raw_writel(1, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* never de-assert CLKREQ while in DPLL cascading scheme */
	state.clkreqctrl = __raw_readl(OMAP4430_PRM_CLKREQCTRL);
	__raw_writel(0x4, OMAP4430_PRM_CLKREQCTRL);

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*
	state.pmd_stm_clock_mux_ck_parent = pmd_stm_clock_mux_ck->parent;
	state.pmd_trace_clk_mux_ck_parent = pmd_trace_clk_mux_ck->parent;
	ret = clk_set_parent(pmd_stm_clock_mux_ck, dpll_core_m6x2_ck);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck, dpll_core_m6x2_ck);
	if (ret)
		pr_err("%s: failed reparenting PMD clocks to ABE LP clock\n",
		       __func__);
	else
		pr_debug("%s: reparented PMD clocks to ABE LP clock\n",
			 __func__);
	omap2_clkdm_allow_idle(emu_sys_44xx_clkdm);
	*/

	recalculate_root_clocks();

	goto out;

dpll_per_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				      (1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
per_bypass_clk_reparent_fail:
	clk_set_parent(per_hsd_byp_clk_mux_ck,
		       state.per_hsd_byp_clk_mux_ck_parent);
dpll_iva_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				      (1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
dpll_mpu_bypass_fail:
	omap4_lpmode = false;
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				      (1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
iva_bypass_clk_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
		       state.iva_hsd_byp_clk_mux_ck_parent);
core_clock_set_rate_fail:
	/* FIXME make this follow the sequence below */
	clk_set_rate(dpll_core_m5x2_ck, (dpll_core_m5x2_ck->parent->rate /
					 state.dpll_core_m5x2_ck_div));
	clk_set_rate(dpll_core_ck, (dpll_core_ck->parent->rate /
				    state.dpll_core_m2_div));
	clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				   state.div_core_ck_div));
core_bypass_clock_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
		       state.iva_hsd_byp_clk_mux_ck_parent);
	/* WA: allow DPLL_ABE_M3X2 clock to auto-gate again */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0, dpll_abe_m3x2_ck->clksel_reg);
	omap2_clkdm_allow_idle(abe_44xx_clkdm);
	omap3_dpll_allow_idle(dpll_abe_ck);
	omap3_dpll_allow_idle(dpll_core_ck);
out:
	return ret;
}
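/*
 * Worked example (illustrative): where the 196.608 MHz figure in the
 * comment block above comes from.  Assuming the usual DPLL_ABE programming
 * for 32 kHz operation (M = 750, N = 0, REGM4XEN set); these M/N values are
 * an assumption based on the description above, not values read from this
 * file:
 *
 *	32768 Hz * 750               = 24576000 Hz
 *	* 4 (REGM4XEN)               = 98304000 Hz
 *	* 2 (X2 output of the DPLL)  = 196608000 Hz, i.e. 196.608 MHz
 */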
/**
 * omap4_core_dpll_set_rate - set the rate for the CORE DPLL
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Program the CORE DPLL, including handling of EMIF frequency changes on
 * the M2 divider.  Returns 0 on success, otherwise a negative error code.
 */
int omap4_core_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0, m2_div, m5_div;
	u32 mask, reg;
	u32 shadow_freq_cfg1 = 0, shadow_freq_cfg2 = 0;
	struct clk *new_parent;
	struct dpll_data *dd;

	if (!clk || !rate)
		return -EINVAL;

	if (!clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (rate == clk->rate)
		return 0;

	/* enable reference and bypass clocks */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	if (!dpll_core_m2_ck)
		dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	if (!dpll_core_m5x2_ck)
		dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	if (!gpmc_ick)
		gpmc_ick = clk_get(NULL, "gpmc_ick");

	/* Make sure MEMIF clkdm is in SW_WKUP & GPMC clocks are active */
	omap2_clkdm_wakeup(l3_emif_clkdm);
	omap2_clk_enable(gpmc_ick);

	/* FIXME set m3, m6 & m7 rates here? */

	/* check for bypass rate */
	if (rate == dd->clk_bypass->rate &&
	    clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)) {
		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
		 * parameters in EMIF shadow registers for bypass clock rate
		 * divided by 2
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 2 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
						   dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program CM_DIV_M2_DPLL_CORE.DPLL_CLKOUT_DIV for divide by
		 * two and put DPLL_CORE into LP Bypass
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
						   dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOW_POWER_BYPASS <<
			 OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
		 * parameters in EMIF shadow registers for rate divided
		 * by 2.
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * FIXME skipping bypass part of omap3_noncore_dpll_program.
		 * also x-loader's configure_core_dpll_no_lock bypasses
		 * DPLL_CORE directly through CM_CLKMODE_DPLL_CORE via MN
		 * bypass; no shadow register necessary!
		 */
		mask = (dd->mult_mask | dd->div1_mask);
		reg = (dd->last_rounded_m << __ffs(dd->mult_mask)) |
		      ((dd->last_rounded_n - 1) << __ffs(dd->div1_mask));

		/* program mn divider values */
		omap4_prm_rmw_reg_bits(mask, reg, dd->mult_div1_reg);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 1 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
						   dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program DPLL_CORE_M2_DIV with same value as the one already
		 * in direct register and lock DPLL_CORE
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
						   dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_ref;
	}

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1) &
			    OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* clear GPMC_FREQ_UPDATE bit */
	shadow_freq_cfg2 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG2);
	shadow_freq_cfg2 &= ~1;
	__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

	/*
	 * Switch the parent clock in the hierarchy, and make sure that the
	 * new parent's usecount is correct.  Note: we enable the new parent
	 * before disabling the old to avoid any unnecessary hardware
	 * disable->enable transitions.
	 */
	if (clk->usecount) {
		omap2_clk_enable(new_parent);
		omap2_clk_disable(clk->parent);
	}
	clk_reparent(clk, new_parent);
	clk->rate = rate;

	/* disable reference and bypass clocks */
	omap2_clk_disable(dd->clk_bypass);
	omap2_clk_disable(dd->clk_ref);

	/* Configure MEMIF domain back to HW_WKUP & let GPMC clocks idle */
	omap2_clkdm_allow_idle(l3_emif_clkdm);
	omap2_clk_disable(gpmc_ick);

	/*
	 * FIXME PRCM functional spec says we should set GPMC_FREQ_UPDATE bit
	 * here, but we're not even handling CM_SHADOW_FREQ_CONFIG2 at all.
	 */
	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
		       __func__);
		return -1;
	}

	return 0;
}
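/*
 * Usage sketch (hypothetical): assuming omap4_core_dpll_set_rate is wired up
 * as the CORE DPLL's set_rate operation, it is reached through the generic
 * clock API.  The clock name matches the rest of this file; the helper and
 * the target rate are illustrative only.
 */
static int example_retarget_core_dpll(unsigned long target_rate)
{
	struct clk *dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	long rounded;

	if (!dpll_core_ck)
		return -ENODEV;

	/* round first so the cached MN values in dpll_data are up to date */
	rounded = clk_round_rate(dpll_core_ck, target_rate);
	if (rounded <= 0)
		return -EINVAL;

	return clk_set_rate(dpll_core_ck, rounded);
}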