/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk *clk)
{
	int r;
	u8 ai;

	if (clk == &dpll3_ck)
		return -EINVAL;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);
	else
		omap3_dpll_deny_idle(clk);

	return r;
}
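/*
 * For reference: both the bypass and lock paths above funnel through
 * _omap3_dpll_write_clken(), which is not shown in this collection.
 * A minimal sketch, assuming the usual dpll_data layout with an
 * enable_mask covering the CLKEN field of the DPLL control register:
 */
static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
{
	const struct dpll_data *dd = clk->dpll_data;
	u32 v;

	/* replace the CLKEN field with the requested mode bits */
	v = __raw_readl(dd->control_reg);
	v &= ~dd->enable_mask;
	v |= clken_bits << __ffs(dd->enable_mask);
	__raw_writel(v, dd->control_reg);
}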
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk->name);

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((__raw_readl(dd->idlest_reg) & dd->idlest_mask) == state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
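/*
 * For reference: the readiness wait used by the helpers above.  A
 * minimal sketch, assuming an idlest_reg/idlest_mask pair in
 * dpll_data and a bounded busy-wait; the exact routine varies by
 * kernel version:
 */
#define MAX_DPLL_WAIT_TRIES	1000000

static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
{
	const struct dpll_data *dd = clk->dpll_data;
	int i = 0;

	state <<= __ffs(dd->idlest_mask);

	/* poll IDLEST until the DPLL reports the requested state */
	while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
	       i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk->name, state ? "locked" : "bypassed");
		return -EINVAL;
	}

	pr_debug("clock: %s transition to '%s' in %d loops\n",
		 clk->name, state ? "locked" : "bypassed", i);

	return 0;
}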
/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If the DPLL does not support low-power stop, return
 * -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
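/*
 * For reference: the autoidle save/restore in these helpers relies on
 * omap3_dpll_autoidle_read().  A minimal sketch of the older struct
 * clk-based flavor, assuming autoidle_reg/autoidle_mask in dpll_data
 * (newer kernels take a struct clk_hw_omap * instead):
 */
u32 omap3_dpll_autoidle_read(struct clk *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	v = __raw_readl(dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}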
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk *clk)
{
	u8 ai;
	int r;

	pr_debug("clock: locking DPLL %s\n", clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}
void __init omap3_clk_lock_dpll5(void)
{
	struct clk *dpll5_clk;
	struct clk *dpll5_m2_clk;

	dpll5_clk = clk_get(NULL, "dpll5_ck");
	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
	clk_enable(dpll5_clk);

	/* Enable autoidle to allow it to enter low power bypass */
	omap3_dpll_allow_idle(dpll5_clk);

	/* Program dpll5_m2_clk divider for no division */
	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
	clk_enable(dpll5_m2_clk);
	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);

	clk_disable(dpll5_m2_clk);
	clk_disable(dpll5_clk);
}
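/*
 * Note: DPLL5 feeds the 120 MHz USBHOST/USBTLL functional clock, so
 * DPLL5_FREQ_FOR_USBHOST is assumed to be defined along these lines,
 * as in the OMAP3 clock headers:
 */
#define DPLL5_FREQ_FOR_USBHOST		120000000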
static void dump_omap34xx_clocks(void)
{
	struct clk **c;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29))
	struct vdd_prcm_config *t1 = vdd1_rate_table;
	struct vdd_prcm_config *t2 = vdd2_rate_table;

	/* self-assignment keeps "unused variable" warnings quiet */
	t1 = t1;
	t2 = t2;
#else

	/* touch these clock helpers with dummy arguments so the build
	 * breaks early if a newer kernel drops them */
	omap3_dpll_allow_idle(0);
	omap3_dpll_deny_idle(0);
	omap3_dpll_autoidle_read(0);
	omap3_clk_recalc(0);
	omap3_followparent_recalc(0);
	omap3_propagate_rate(0);
	omap3_table_recalc(0);
	omap3_round_to_table_rate(0, 0);
	omap3_select_table_rate(0, 0);
#endif

	for (c = ONCHIP_CLKS; c < ONCHIP_CLKS + ARRAY_SIZE(ONCHIP_CLKS); c++)
	{
		struct clk *cp = *c, *copy;
		unsigned long rate;
		copy = clk_get(NULL, cp->name);
		if (IS_ERR(copy))	/* clk_get() returns ERR_PTR, never NULL */
			continue;
		rate = clk_get_rate(copy);
		if (rate < 1000000)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu KHz (%lu Hz)", __func__, cp->name, rate/1000, rate));
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: clock %s is %lu MHz (%lu Hz)", __func__, cp->name, rate/1000000, rate));
		}
	}
}
/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop. Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk *clk)
{
	u8 ai;

	if (clk == &dpll3_ck)
		return -EINVAL;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk->name);

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);
	else
		omap3_dpll_deny_idle(clk);

	return 0;
}
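/*
 * For reference: the CLKEN mode values programmed by the helpers
 * above, as defined for OMAP3 DPLLs (the modes bitmask is tested
 * with 1 << mode):
 */
#define DPLL_LOW_POWER_STOP	0x1
#define DPLL_LOW_POWER_BYPASS	0x5
#define DPLL_LOCKED		0x7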
int omap4_dpll_low_power_cascade_exit(void)
{
	int ret = 0;
	struct clk *sys_clkin_ck;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct cpufreq_policy *cp;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	sys_clkin_ck = clk_get(NULL, "sys_clkin_ck");
	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	/* clk_get() returns ERR_PTR on failure, never NULL */
	if (IS_ERR(dpll_abe_ck) || IS_ERR(dpll_mpu_ck) ||
	    IS_ERR(div_mpu_hs_clk) || IS_ERR(dpll_iva_ck) ||
	    IS_ERR(div_iva_hs_clk) || IS_ERR(iva_hsd_byp_clk_mux_ck) ||
	    IS_ERR(dpll_core_m2_ck) || IS_ERR(dpll_abe_m3x2_ck) ||
	    IS_ERR(div_core_ck) || IS_ERR(dpll_core_x2_ck) ||
	    IS_ERR(core_hsd_byp_clk_mux_ck) || IS_ERR(dpll_core_m5x2_ck) ||
	    IS_ERR(l4_wkup_clk_mux_ck) || IS_ERR(lp_clk_div_ck) ||
	    IS_ERR(pmd_stm_clock_mux_ck) || IS_ERR(pmd_trace_clk_mux_ck) ||
	    IS_ERR(dpll_core_m6x2_ck) || IS_ERR(sys_clkin_ck) ||
	    IS_ERR(dpll_core_ck) || IS_ERR(l3_div_ck) || IS_ERR(l4_div_ck) ||
	    IS_ERR(dpll_per_ck) || IS_ERR(func_48m_fclk) ||
	    IS_ERR(per_hsd_byp_clk_mux_ck) || IS_ERR(per_hs_clk_div_ck)) {
		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	if (delayed_work_pending(&lpmode_work))
		cancel_delayed_work_sync(&lpmode_work);

	if (!omap4_lpmode)
		return 0;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* lock DPLL_MPU */
	ret = clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
	if (ret)
		pr_err("%s: DPLL_MPU failed to relock\n", __func__);

	/* lock DPLL_IVA */
	ret = clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
	if (ret)
		pr_err("%s: DPLL_IVA failed to relock\n", __func__);

	/* lock DPLL_PER */
	ret = clk_set_rate(dpll_per_ck, state.dpll_per_ck_rate);
	if (ret)
		pr_err("%s: DPLL_PER failed to relock\n", __func__);

	/* restore bypass clock rates */
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				(1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));

	/* restore DPLL_IVA bypass clock */
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_IVA bypass clock\n",
				__func__);

	/* restore DPLL_PER bypass clock */
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck,
			state.per_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_PER bypass clock\n",
				__func__);

	/* restore CORE clock rates */
	ret = clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				(1 << state.div_core_ck_div)));
	omap4_prm_rmw_reg_bits(dpll_core_m2_ck->clksel_mask,
			state.dpll_core_m2_div,
			dpll_core_m2_ck->clksel_reg);
	ret |=  clk_set_rate(dpll_core_m5x2_ck,
			(dpll_core_m5x2_ck->parent->rate /
			 state.dpll_core_m5x2_ck_div));
	ret |= clk_set_rate(dpll_core_ck, state.dpll_core_ck_rate);
	if (ret)
		pr_err("%s: failed to restore CORE clock rates\n", __func__);

	/* restore DPLL_CORE bypass clock parent (normally SYS_CK / CLKINP) */
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck,
			state.core_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed restoring DPLL_CORE bypass clock parent\n",
				__func__);

	/* WA: allow DPLL_ABE_M3X2 clock to auto-gate */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0,
			dpll_abe_m3x2_ck->clksel_reg);

	/* allow ABE clock domain to idle again */
	omap2_clkdm_allow_idle(abe_44xx_clkdm);

	/* allow DPLL_ABE & DPLL_CORE to idle again */
	omap3_dpll_allow_idle(dpll_core_ck);
	omap3_dpll_allow_idle(dpll_abe_ck);

	/* DPLLs are configured, so let SYSCK idle again */
	__raw_writel(0, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* restore CLKREQ behavior */
	__raw_writel(state.clkreqctrl, OMAP4430_PRM_CLKREQCTRL);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*ret =  clk_set_parent(pmd_stm_clock_mux_ck,
			state.pmd_stm_clock_mux_ck_parent);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck,
			state.pmd_trace_clk_mux_ck_parent);
	if (ret)
		pr_debug("%s: failed restoring parent to PMD clocks\n",
				__func__);*/

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	recalculate_root_clocks();

	omap4_lpmode = false;

out:
	return ret;
}
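/*
 * A hypothetical sketch (not from the source) of the bookkeeping
 * struct implied by the state.* accesses in the enter/exit pair;
 * the field names are taken from the uses above and below:
 */
static struct {
	unsigned long dpll_mpu_ck_rate;
	unsigned long dpll_iva_ck_rate;
	unsigned long dpll_per_ck_rate;
	unsigned long dpll_core_ck_rate;
	u32 div_mpu_hs_clk_div;
	u32 div_iva_hs_clk_div;
	u32 div_core_ck_div;
	u32 l3_div_ck_div;
	u32 l4_div_ck_div;
	u32 dpll_core_m2_div;
	u32 dpll_core_m5x2_ck_div;
	u32 clkreqctrl;
	struct clk *core_hsd_byp_clk_mux_ck_parent;
	struct clk *iva_hsd_byp_clk_mux_ck_parent;
	struct clk *per_hsd_byp_clk_mux_ck_parent;
	struct clk *pmd_stm_clock_mux_ck_parent;
	struct clk *pmd_trace_clk_mux_ck_parent;
} state;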
/**
 * omap4_dpll_low_power_cascade - configure system for low power DPLL cascade
 *
 * The low power DPLL cascading scheme is a way to have a mostly functional
 * system running with only one locked DPLL and all of the others in bypass.
 * While this might be useful for many use cases, the primary target is low
 * power audio playback.  The steps to enter this state are roughly:
 *
 * Reparent DPLL_ABE so that it is fed by SYS_32K_CK
 * Set magical REGM4XEN bit so DPLL_ABE MN dividers are multiplied by four
 * Lock DPLL_ABE at 196.608MHz and bypass DPLL_CORE, DPLL_MPU & DPLL_IVA
 * Reparent DPLL_CORE so that is fed by DPLL_ABE
 * Reparent DPLL_MPU & DPLL_IVA so that they are fed by DPLL_CORE
 */
int omap4_dpll_low_power_cascade_enter(void)
{
	int ret = 0;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct device *mpu_dev;
	struct cpufreq_policy *cp;
	struct omap_opp *opp;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	/* clk_get() returns ERR_PTR on failure, never NULL */
	if (IS_ERR(dpll_abe_ck) || IS_ERR(dpll_mpu_ck) ||
	    IS_ERR(div_mpu_hs_clk) || IS_ERR(dpll_iva_ck) ||
	    IS_ERR(div_iva_hs_clk) || IS_ERR(iva_hsd_byp_clk_mux_ck) ||
	    IS_ERR(dpll_core_m2_ck) || IS_ERR(dpll_abe_m3x2_ck) ||
	    IS_ERR(div_core_ck) || IS_ERR(dpll_core_x2_ck) ||
	    IS_ERR(core_hsd_byp_clk_mux_ck) || IS_ERR(dpll_core_m5x2_ck) ||
	    IS_ERR(l4_wkup_clk_mux_ck) || IS_ERR(lp_clk_div_ck) ||
	    IS_ERR(pmd_stm_clock_mux_ck) || IS_ERR(pmd_trace_clk_mux_ck) ||
	    IS_ERR(dpll_core_m6x2_ck) || IS_ERR(dpll_core_ck) ||
	    IS_ERR(l3_div_ck) || IS_ERR(l4_div_ck) || IS_ERR(dpll_per_ck) ||
	    IS_ERR(func_48m_fclk) || IS_ERR(per_hsd_byp_clk_mux_ck) ||
	    IS_ERR(per_hs_clk_div_ck)) {
		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	omap4_lpmode = true;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* prevent DPLL_ABE & DPLL_CORE from idling */
	omap3_dpll_deny_idle(dpll_abe_ck);
	omap3_dpll_deny_idle(dpll_core_ck);

	/* put ABE clock domain SW_WKUP */
	omap2_clkdm_wakeup(abe_44xx_clkdm);

	/* WA: prevent DPLL_ABE_M3X2 clock from auto-gating */
	omap4_prm_rmw_reg_bits(BIT(8), BIT(8),
			dpll_abe_m3x2_ck->clksel_reg);

	/* drive DPLL_CORE bypass clock from DPLL_ABE (CLKINPULOW) */
	state.core_hsd_byp_clk_mux_ck_parent = core_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck, dpll_abe_m3x2_ck);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_CORE bypass clock to ABE_M3X2\n",
				__func__);
		goto core_bypass_clock_reparent_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/*
	 * bypass DPLL_CORE, configure EMIF for the new rate
	 * CORE_CLK = CORE_X2_CLK
	 */
	state.dpll_core_ck_rate = dpll_core_ck->rate;

	state.div_core_ck_div =
		omap4_prm_read_bits_shift(div_core_ck->clksel_reg,
				div_core_ck->clksel_mask);

	state.l3_div_ck_div =
		omap4_prm_read_bits_shift(l3_div_ck->clksel_reg,
				l3_div_ck->clksel_mask);

	state.l4_div_ck_div =
		omap4_prm_read_bits_shift(l4_div_ck->clksel_reg,
				l4_div_ck->clksel_mask);

	state.dpll_core_m5x2_ck_div =
		omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

	state.dpll_core_m2_div =
		omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

	ret =  clk_set_rate(div_core_ck, dpll_core_m5x2_ck->rate);
	ret |= clk_set_rate(dpll_core_ck, LP_196M_RATE);
	ret |= clk_set_rate(dpll_core_m5x2_ck, dpll_core_x2_ck->rate);
	if (ret) {
		pr_err("%s: failed setting CORE clock rates\n", __func__);
		goto core_clock_set_rate_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/* set MPU/IVA bypass clock rates (parent rate, and parent rate / 2)
	 * for use while DPLL_CORE is bypassed */
	state.div_mpu_hs_clk_div =
		omap4_prm_read_bits_shift(div_mpu_hs_clk->clksel_reg,
				div_mpu_hs_clk->clksel_mask);
	state.div_iva_hs_clk_div =
		omap4_prm_read_bits_shift(div_iva_hs_clk->clksel_reg,
				div_iva_hs_clk->clksel_mask);
	clk_set_rate(div_mpu_hs_clk, div_mpu_hs_clk->parent->rate);
	clk_set_rate(div_iva_hs_clk, div_iva_hs_clk->parent->rate / 2);

	/* select CLKINPULOW (div_iva_hs_clk) as DPLL_IVA bypass clock */
	state.iva_hsd_byp_clk_mux_ck_parent = iva_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck, div_iva_hs_clk);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);
		goto iva_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);

	/* select CLKINPULOW (per_hs_clk_div_ck) as DPLL_PER bypass clock */
	state.per_hsd_byp_clk_mux_ck_parent = per_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck, per_hs_clk_div_ck);
	if (ret) {
		pr_debug("%s: failed reparenting DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);
		goto per_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);

	/* bypass DPLL_MPU */
	state.dpll_mpu_ck_rate = dpll_mpu_ck->rate;
	ret = clk_set_rate(dpll_mpu_ck,
			dpll_mpu_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_MPU failed to enter Low Power bypass\n",
				__func__);
		goto dpll_mpu_bypass_fail;
	} else
		pr_debug("%s: DPLL_MPU entered Low Power bypass\n", __func__);

	/* bypass DPLL_IVA */
	state.dpll_iva_ck_rate = dpll_iva_ck->rate;
	ret = clk_set_rate(dpll_iva_ck,
			dpll_iva_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_IVA failed to enter Low Power bypass\n",
				__func__);
		goto dpll_iva_bypass_fail;
	} else
		pr_debug("%s: DPLL_IVA entered Low Power bypass\n", __func__);

	/* bypass DPLL_PER */
	state.dpll_per_ck_rate = dpll_per_ck->rate;
	ret = clk_set_rate(dpll_per_ck,
			dpll_per_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_debug("%s: DPLL_PER failed to enter Low Power bypass\n",
				__func__);
		goto dpll_per_bypass_fail;
	} else
		pr_debug("%s: DPLL_PER entered Low Power bypass\n",__func__);

	/* drive L4_WKUP from the LP clock instead of SYS_CK (undone on exit) */
	__raw_writel(1, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* never de-assert CLKREQ while in DPLL cascading scheme */
	state.clkreqctrl = __raw_readl(OMAP4430_PRM_CLKREQCTRL);
	__raw_writel(0x4, OMAP4430_PRM_CLKREQCTRL);

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*state.pmd_stm_clock_mux_ck_parent = pmd_stm_clock_mux_ck->parent;
	state.pmd_trace_clk_mux_ck_parent = pmd_trace_clk_mux_ck->parent;
	ret =  clk_set_parent(pmd_stm_clock_mux_ck, dpll_core_m6x2_ck);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck, dpll_core_m6x2_ck);
	if (ret)
		pr_err("%s: failed reparenting PMD clocks to ABE LP clock\n",
				__func__);
	else
		pr_debug("%s: reparented PMD clocks to ABE LP clock\n",
				__func__);

	omap2_clkdm_allow_idle(emu_sys_44xx_clkdm);*/

	recalculate_root_clocks();

	goto out;

dpll_per_bypass_fail:
per_bypass_clk_reparent_fail:
	clk_set_parent(per_hsd_byp_clk_mux_ck,
			state.per_hsd_byp_clk_mux_ck_parent);
dpll_iva_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
dpll_mpu_bypass_fail:
	omap4_lpmode = false;
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				(1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
iva_bypass_clk_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
core_clock_set_rate_fail:
	/* FIXME make this follow the sequence below */
	clk_set_rate(dpll_core_m5x2_ck, (dpll_core_m5x2_ck->parent->rate /
				state.dpll_core_m5x2_ck_div));
	clk_set_rate(dpll_core_ck, (dpll_core_ck->parent->rate /
				state.dpll_core_m2_div));
	clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				state.div_core_ck_div));
core_bypass_clock_reparent_fail:
	/* undo the DPLL_ABE_M3X2 auto-gating workaround, as in the exit path */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0,
			dpll_abe_m3x2_ck->clksel_reg);
	omap2_clkdm_allow_idle(abe_44xx_clkdm);
	omap3_dpll_allow_idle(dpll_abe_ck);
	omap3_dpll_allow_idle(dpll_core_ck);
out:
	return ret;
}
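/*
 * Hypothetical usage, not from the source: a power-management policy
 * hook might enter the cascade only while low-rate audio playback is
 * the sole activity, and leave it again on any other wakeup.
 */
static void lp_audio_policy_update(bool only_low_power_audio)
{
	int ret;

	if (only_low_power_audio)
		ret = omap4_dpll_low_power_cascade_enter();
	else
		ret = omap4_dpll_low_power_cascade_exit();

	if (ret)
		pr_warn("lp_audio: cascade transition failed (%d)\n", ret);
}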
/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk_hw_omap * of DPLL to set
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock.  Always returns 0; failures in the bypass and
 * lock steps are not propagated.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction. Jitter correction applicable for OMAP343X
	 * only since freqsel field is no longer present on other devices.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 - DPLL controller can get stuck while transitioning
	 * to a power saving state. Software must ensure the DPLL can not
	 * transition to a low power state while changing M/N values.
	 * Easiest way to accomplish this is to prevent DPLL autoidle
	 * before doing the M/N re-program.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* OCP barrier */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
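/*
 * For context: a simplified, hypothetical sketch of how a set_rate
 * implementation typically drives the helpers above (freqsel
 * computation and error handling elided; details vary by kernel
 * version):
 */
static int noncore_dpll_set_rate_sketch(struct clk_hw_omap *clk,
					unsigned long rate,
					unsigned long parent_rate)
{
	struct dpll_data *dd = clk->dpll_data;
	u16 freqsel = 0;

	/* target rate equals the bypass rate: no need to lock */
	if (rate == parent_rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return _omap3_noncore_dpll_bypass(clk);

	/*
	 * The rate is assumed to have been rounded already (e.g. via
	 * omap2_dpll_round_rate()), filling in dd->last_rounded_m and
	 * dd->last_rounded_n for omap3_noncore_dpll_program() to use.
	 */
	return omap3_noncore_dpll_program(clk, freqsel);
}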