Example #1
/**
 * omap4_dpll_low_power_cascade - configure system for low power DPLL cascade
 *
 * The low power DPLL cascading scheme is a way to have a mostly functional
 * system running with only one locked DPLL and all of the others in bypass.
 * While this might be useful for many use cases, the primary target is low
 * power audio playback.  The steps to enter this state are roughly:
 *
 * Reparent DPLL_ABE so that it is fed by SYS_32K_CK
 * Set magical REGM4XEN bit so DPLL_ABE MN dividers are multiplied by four
 * Lock DPLL_ABE at 196.608MHz and bypass DPLL_CORE, DPLL_MPU & DPLL_IVA
 * Reparent DPLL_CORE so that it is fed by DPLL_ABE
 * Reparent DPLL_MPU & DPLL_IVA so that they are fed by DPLL_CORE
 */
int omap4_dpll_low_power_cascade_enter(void)
{
	int ret = 0;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	if (!dpll_abe_ck || !dpll_mpu_ck || !div_mpu_hs_clk || !dpll_iva_ck ||
		!div_iva_hs_clk || !iva_hsd_byp_clk_mux_ck || !dpll_core_m2_ck
		|| !dpll_abe_m3x2_ck || !div_core_ck || !dpll_core_x2_ck ||
		!core_hsd_byp_clk_mux_ck || !dpll_core_m5x2_ck ||
		!l4_wkup_clk_mux_ck || !lp_clk_div_ck || !pmd_stm_clock_mux_ck
		|| !pmd_trace_clk_mux_ck || !dpll_core_m6x2_ck ||
		!dpll_core_ck || !dpll_per_ck || !func_48m_fclk
		|| !per_hsd_byp_clk_mux_ck || !per_hs_clk_div_ck) {

		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	omap4_lpmode = true;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* prevent DPLL_ABE & DPLL_CORE from idling */
	omap3_dpll_deny_idle(dpll_abe_ck);
	omap3_dpll_deny_idle(dpll_core_ck);

	/* put ABE clock domain SW_WKUP */
	omap2_clkdm_wakeup(abe_44xx_clkdm);

	/* WA: prevent DPLL_ABE_M3X2 clock from auto-gating */
	omap4_prm_rmw_reg_bits(BIT(8), BIT(8),
			dpll_abe_m3x2_ck->clksel_reg);

	/* drive DPLL_CORE bypass clock from DPLL_ABE (CLKINPULOW) */
	state.core_hsd_byp_clk_mux_ck_parent = core_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck, dpll_abe_m3x2_ck);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_CORE bypass clock to ABE_M3X2\n",
				__func__);
		goto core_bypass_clock_reparent_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/*
	 * bypass DPLL_CORE, configure EMIF for the new rate
	 * CORE_CLK = CORE_X2_CLK
	 */
	state.dpll_core_ck_rate = dpll_core_ck->rate;

	state.div_core_ck_div =
		omap4_prm_read_bits_shift(div_core_ck->clksel_reg,
				div_core_ck->clksel_mask);

	state.l3_div_ck_div =
		omap4_prm_read_bits_shift(l3_div_ck->clksel_reg,
				l3_div_ck->clksel_mask);

	state.l4_div_ck_div =
		omap4_prm_read_bits_shift(l4_div_ck->clksel_reg,
				l4_div_ck->clksel_mask);

	state.dpll_core_m5x2_ck_div =
		omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

	state.dpll_core_m2_div =
		omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

	ret = clk_set_rate(div_core_ck, dpll_core_m5x2_ck->rate);
	ret |= clk_set_rate(dpll_core_ck, LP_196M_RATE);
	ret |= clk_set_rate(dpll_core_m5x2_ck, dpll_core_x2_ck->rate);
	if (ret) {
		pr_err("%s: failed setting CORE clock rates\n", __func__);
		goto core_clock_set_rate_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/* divide MPU/IVA bypass clocks by 2 (for when we bypass DPLL_CORE) */
	state.div_mpu_hs_clk_div =
		omap4_prm_read_bits_shift(div_mpu_hs_clk->clksel_reg,
				div_mpu_hs_clk->clksel_mask);
	state.div_iva_hs_clk_div =
		omap4_prm_read_bits_shift(div_iva_hs_clk->clksel_reg,
				div_iva_hs_clk->clksel_mask);
	clk_set_rate(div_mpu_hs_clk, div_mpu_hs_clk->parent->rate);
	clk_set_rate(div_iva_hs_clk, div_iva_hs_clk->parent->rate / 2);

	/* select CLKINPULOW (div_iva_hs_clk) as DPLL_IVA bypass clock */
	state.iva_hsd_byp_clk_mux_ck_parent = iva_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck, div_iva_hs_clk);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);
		goto iva_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);

	/* select CLKINPULOW (per_hs_clk_div_ck) as DPLL_PER bypass clock */
	state.per_hsd_byp_clk_mux_ck_parent = per_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck, per_hs_clk_div_ck);
	if (ret) {
		pr_debug("%s: failed reparenting DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);
		goto per_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);

	/* bypass DPLL_MPU */
	state.dpll_mpu_ck_rate = dpll_mpu_ck->rate;
	ret = clk_set_rate(dpll_mpu_ck,
			dpll_mpu_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_MPU failed to enter Low Power bypass\n",
				__func__);
		goto dpll_mpu_bypass_fail;
	} else
		pr_debug("%s: DPLL_MPU entered Low Power bypass\n", __func__);

	/* bypass DPLL_IVA */
	state.dpll_iva_ck_rate = dpll_iva_ck->rate;
	ret = clk_set_rate(dpll_iva_ck,
			dpll_iva_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_IVA failed to enter Low Power bypass\n",
				__func__);
		goto dpll_iva_bypass_fail;
	} else
		pr_debug("%s: DPLL_IVA entered Low Power bypass\n", __func__);

	/* bypass DPLL_PER */
	state.dpll_per_ck_rate = dpll_per_ck->rate;
	ret = clk_set_rate(dpll_per_ck,
			dpll_per_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_debug("%s: DPLL_PER failed to enter Low Power bypass\n",
				__func__);
		goto dpll_per_bypass_fail;
	} else
		pr_debug("%s: DPLL_PER entered Low Power bypass\n",__func__);

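	/*
	 * CM_L4_WKUP_CLKSEL = 1 selects the ABE-derived LP clock
	 * (lp_clk_div_ck) instead of SYS_CLK as the L4_WKUP clock source;
	 * the exit path writes 0 to switch back.
	 */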
	__raw_writel(1, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* never de-assert CLKREQ while in DPLL cascading scheme */
	state.clkreqctrl = __raw_readl(OMAP4430_PRM_CLKREQCTRL);
	__raw_writel(0x4, OMAP4430_PRM_CLKREQCTRL);

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*state.pmd_stm_clock_mux_ck_parent = pmd_stm_clock_mux_ck->parent;
	state.pmd_trace_clk_mux_ck_parent = pmd_trace_clk_mux_ck->parent;
	ret =  clk_set_parent(pmd_stm_clock_mux_ck, dpll_core_m6x2_ck);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck, dpll_core_m6x2_ck);
	if (ret)
		pr_err("%s: failed reparenting PMD clocks to ABE LP clock\n",
				__func__);
	else
		pr_debug("%s: reparented PMD clocks to ABE LP clock\n",
				__func__);

	omap2_clkdm_allow_idle(emu_sys_44xx_clkdm);*/

	recalculate_root_clocks();

	goto out;

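	/*
	 * Error handling: the labels below fall through into one another,
	 * undoing the steps above roughly in reverse order (see the FIXME
	 * on the CORE restore sequence).
	 */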
dpll_per_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
per_bypass_clk_reparent_fail:
	clk_set_parent(per_hsd_byp_clk_mux_ck,
			state.per_hsd_byp_clk_mux_ck_parent);
dpll_iva_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
dpll_mpu_bypass_fail:
	omap4_lpmode = false;
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				(1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
iva_bypass_clk_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
core_clock_set_rate_fail:
	/* FIXME make this follow the sequence below */
	clk_set_rate(dpll_core_m5x2_ck, (dpll_core_m5x2_ck->parent->rate /
				state.dpll_core_m5x2_ck_div));
	clk_set_rate(dpll_core_ck, (dpll_core_ck->parent->rate /
				state.dpll_core_m2_div));
	clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				state.div_core_ck_div));
core_bypass_clock_reparent_fail:
	clk_set_parent(core_hsd_byp_clk_mux_ck,
			state.core_hsd_byp_clk_mux_ck_parent);
	/* undo the DPLL_ABE_M3X2 auto-gating WA */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0,
			dpll_abe_m3x2_ck->clksel_reg);
	omap2_clkdm_allow_idle(abe_44xx_clkdm);
	omap3_dpll_allow_idle(dpll_abe_ck);
	omap3_dpll_allow_idle(dpll_core_ck);
out:
	return ret;
}
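The exit path below cancels a delayed work item called lpmode_work before undoing the cascade, which suggests entry is normally scheduled from a workqueue once the system has been idle for a while. The following is only a sketch of how that wiring might look; the handler name, the delay value and the notifier hook are assumptions, not part of the code above:

/*
 * Sketch only: one plausible way to drive the enter path.  The handler
 * name and the 5 second delay are assumptions for illustration.
 */
static void lpmode_work_handler(struct work_struct *work)
{
	if (omap4_dpll_low_power_cascade_enter())
		pr_debug("%s: could not enter DPLL cascading\n", __func__);
}
static DECLARE_DELAYED_WORK(lpmode_work, lpmode_work_handler);

/* e.g. from an idle or audio-playback notifier: */
/* schedule_delayed_work(&lpmode_work, msecs_to_jiffies(5000)); */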
Example #2
int omap4_dpll_low_power_cascade_exit(void)
{
	int ret = 0;
	struct clk *sys_clkin_ck;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	sys_clkin_ck = clk_get(NULL, "sys_clkin_ck");
	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	if (!dpll_abe_ck || !dpll_mpu_ck || !div_mpu_hs_clk || !dpll_iva_ck ||
		!div_iva_hs_clk || !iva_hsd_byp_clk_mux_ck || !dpll_core_m2_ck
		|| !dpll_abe_m3x2_ck || !div_core_ck || !dpll_core_x2_ck ||
		!core_hsd_byp_clk_mux_ck || !dpll_core_m5x2_ck ||
		!l4_wkup_clk_mux_ck || !lp_clk_div_ck || !pmd_stm_clock_mux_ck
		|| !pmd_trace_clk_mux_ck || !dpll_core_m6x2_ck
		|| !sys_clkin_ck || !dpll_core_ck || !l3_div_ck || !l4_div_ck
		|| !dpll_per_ck || !func_48m_fclk || !per_hsd_byp_clk_mux_ck
		|| !per_hs_clk_div_ck) {
		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	if (delayed_work_pending(&lpmode_work))
		cancel_delayed_work_sync(&lpmode_work);

	if (!omap4_lpmode)
		return 0;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* lock DPLL_MPU */
	ret = clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
	if (ret)
		pr_err("%s: DPLL_MPU failed to relock\n", __func__);

	/* lock DPLL_IVA */
	ret = clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
	if (ret)
		pr_err("%s: DPLL_IVA failed to relock\n", __func__);

	/* lock DPLL_PER */
	ret = clk_set_rate(dpll_per_ck, state.dpll_per_ck_rate);
	if (ret)
		pr_err("%s: DPLL_PER failed to relock\n", __func__);

	/* restore bypass clock rates */
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				(1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));

	/* restore DPLL_IVA bypass clock */
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_IVA bypass clock\n",
				__func__);

	/* restore DPLL_PER bypass clock */
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck,
			state.per_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed to restore DPLL_PER bypass clock\n",
				__func__);

	/* restore CORE clock rates */
	ret = clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				(1 << state.div_core_ck_div)));
	omap4_prm_rmw_reg_bits(dpll_core_m2_ck->clksel_mask,
			state.dpll_core_m2_div,
			dpll_core_m2_ck->clksel_reg);
	ret |= clk_set_rate(dpll_core_m5x2_ck,
			(dpll_core_m5x2_ck->parent->rate /
			 state.dpll_core_m5x2_ck_div));
	ret |= clk_set_rate(dpll_core_ck, state.dpll_core_ck_rate);
	if (ret)
		pr_err("%s: failed to restore CORE clock rates\n", __func__);

	/* drive DPLL_CORE bypass clock from SYS_CK (CLKINP) */
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck,
			state.core_hsd_byp_clk_mux_ck_parent);
	if (ret)
		pr_err("%s: failed restoring DPLL_CORE bypass clock parent\n",
				__func__);

	/* WA: allow DPLL_ABE_M3X2 clock to auto-gate */
	omap4_prm_rmw_reg_bits(BIT(8), 0x0,
			dpll_abe_m3x2_ck->clksel_reg);

	/* allow ABE clock domain to idle again */
	omap2_clkdm_allow_idle(abe_44xx_clkdm);

	/* allow DPLL_ABE & DPLL_CORE to idle again */
	omap3_dpll_allow_idle(dpll_core_ck);
	omap3_dpll_allow_idle(dpll_abe_ck);

	/* DPLLs are configured, so let SYSCK idle again */
	__raw_writel(0, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* restore CLKREQ behavior */
	__raw_writel(state.clkreqctrl, OMAP4430_PRM_CLKREQCTRL);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*ret =  clk_set_parent(pmd_stm_clock_mux_ck,
			state.pmd_stm_clock_mux_ck_parent);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck,
			state.pmd_trace_clk_mux_ck_parent);
	if (ret)
		pr_debug("%s: failed restoring parent to PMD clocks\n",
				__func__);*/

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	recalculate_root_clocks();

	omap4_lpmode = false;

out:
	return ret;
}
static int sr_dev_init(struct omap_hwmod *oh, void *user)
{
	struct omap_sr_data *sr_data;
	struct omap_sr_dev_data *sr_dev_data;
	struct omap_device *od;
	char *name = "smartreflex";
	static int i;

	sr_data = kzalloc(sizeof(struct omap_sr_data), GFP_KERNEL);
	if (!sr_data) {
		pr_err("%s: Unable to allocate memory for %s sr_data.Error!\n",
			__func__, oh->name);
		return -ENOMEM;
	}

	sr_dev_data = (struct omap_sr_dev_data *)oh->dev_attr;
	if (unlikely(!sr_dev_data)) {
		pr_err("%s: dev atrribute is NULL\n", __func__);
		kfree(sr_data);
		goto exit;
	}

	/*
	 * OMAP3430 ES3.1 chips come from the factory with the eFuse burnt
	 * with the parameters required for full functionality of the
	 * SmartReflex AVS feature (ntarget values, SENNENABLE and
	 * SENPENABLE), so enable SR AVS at boot time if this is an
	 * OMAP3430 ES3.1 chip.
	 */
	if (cpu_is_omap343x()) {
		if (omap_rev() == OMAP3430_REV_ES3_1)
			sr_data->enable_on_init = true;
		else
			sr_data->enable_on_init = false;
	} else {
		sr_data->enable_on_init = false;
	}
	sr_data->device_enable = omap_device_enable;
	sr_data->device_shutdown = omap_device_shutdown;
	sr_data->device_idle = omap_device_idle;
	sr_data->voltdm = omap_voltage_domain_get(sr_dev_data->vdd_name);
	if (IS_ERR(sr_data->voltdm)) {
		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
			__func__, sr_dev_data->vdd_name);
		kfree(sr_data);
		goto exit;
	}
	sr_dev_data->volts_supported = omap_voltage_get_volttable(
			sr_data->voltdm, &sr_dev_data->volt_data);
	if (!sr_dev_data->volts_supported) {
		pr_warning("%s: No Voltage table registerd fo VDD%d."
			"Something really wrong\n\n", __func__, i + 1);
		kfree(sr_data);
		goto exit;
	}
	sr_set_nvalues(sr_dev_data, sr_data);
	od = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data),
			       omap_sr_latency,
			       ARRAY_SIZE(omap_sr_latency), 0);
	if (IS_ERR(od)) {
		pr_warning("%s: Could not build omap_device for %s: %s.\n\n",
			__func__, name, oh->name);
		kfree(sr_data);
	}
exit:
	i++;
	return 0;
}
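sr_dev_init() is written as an omap_hwmod iterator callback: each call handles one "smartreflex" hwmod and bumps the static instance counter, and it deliberately returns 0 even on its error paths so iteration over the remaining hwmods continues if one instance fails to set up. A sketch of how such a callback is typically wired up is shown below; the wrapper name and the initcall level are assumptions for illustration:

/*
 * Sketch: run sr_dev_init() once for every hwmod of class "smartreflex".
 * The wrapper name and the initcall level are assumptions.
 */
static int __init sr_class_device_init(void)
{
	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
}
late_initcall(sr_class_device_init);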