Example #1
/*
 * omap4_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 *
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns the status/error value reported by the secure service
 */
u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret;
	u32 param[5];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	/* Look up the clockdomain only once */
	if (!l4_secure_clkdm)
		l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");

	/* Put l4 secure in SW_WKUP so that its modules are accessible */
	omap2_clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));

	ret = omap_smc2(idx, flag, __pa(param));

	/* Restore HW_SUP so that the module can idle */
	omap2_clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
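A minimal caller sketch for the dispatcher above; the HAL index, the flag value, and the wrapper itself are hypothetical placeholders, not constants taken from the code shown here.

#define HYP_HAL_API_INDEX	0x00	/* hypothetical HAL API index */
#define HYP_FLAG_NORMAL		0x00	/* hypothetical criticality flag */

static u32 hyp_call_secure_service(u32 a, u32 b)
{
	/* two of the four argument slots carry valid data here */
	return omap4_secure_dispatcher(HYP_HAL_API_INDEX, HYP_FLAG_NORMAL,
				       2, a, b, 0, 0);
}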
Example #2
static int omap2_clkdm_clk_enable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	/*
	 * The CLKDM_MISSING_IDLE_REPORTING flag documentation has
	 * more details on the unpleasant problem this is working
	 * around
	 */
	if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING &&
	    !(clkdm->flags & CLKDM_CAN_FORCE_SLEEP)) {
		_enable_hwsup(clkdm);
		return 0;
	}

	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
				clkdm->clktrctrl_mask);

	if (hwsup) {
		/* Disable HW transitions when we are changing deps */
		_disable_hwsup(clkdm);
		_clkdm_add_autodeps(clkdm);
		_enable_hwsup(clkdm);
	} else {
		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
			omap2_clkdm_wakeup(clkdm);
	}

	return 0;
}
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2 divider.  The
 * M2 divider is used to clock external DDR, and its reconfiguration on a
 * frequency change is handled by a hardware sequencer driven by the PRCM and
 * the EMIF through shadow registers.  If the specified rate matches
 * DPLL_CORE's bypass clock rate, the DPLL is put into Low-Power Bypass.
 * Returns a negative int on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Cache the look-up so it is not repeated on every call */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");

	/* put MEMIF domain in SW_WKUP & increment usecount for clks */
	omap2_clkdm_wakeup(l3_emif_clkdm);

	/*
	 * maybe program core m5 divider here
	 * definitely program m3, m6 & m7 dividers here
	 */

	/*
	 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
	 * parameters in EMIF shadow registers for validrate divided
	 * by 2.
	 */
	omap_emif_setup_registers(validrate / 2, LPDDR2_VOLTAGE_STABLE);

	/*
	 * program DPLL_CORE_M2_DIV with same value as the one already
	 * in direct register and lock DPLL_CORE
	 */
	shadow_freq_cfg1 =
		(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
		(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
		(1 << OMAP4430_DLL_RESET_SHIFT) |
		(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
					& OMAP4430_FREQ_UPDATE_MASK) == 0),
			MAX_FREQ_UPDATE_TIMEOUT, i);

	/* put MEMIF clkdm back to HW_AUTO & decrement usecount for clks */
	omap2_clkdm_allow_idle(l3_emif_clkdm);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	return 0;
}
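A hedged sketch of how a caller might reach this set_rate hook through the generic clock API, assuming dpll_core_m2_ck is wired up so that clk_set_rate() lands in omap4_core_dpll_m2_set_rate(); the wrapper and the target rate passed in are illustrative only.

static int hyp_scale_core_m2(unsigned long target_rate)
{
	struct clk *core_m2;
	long rounded;
	int err;

	core_m2 = clk_get(NULL, "dpll_core_m2_ck");
	if (IS_ERR_OR_NULL(core_m2))
		return -ENODEV;

	/* round first: the hook above rejects rates it cannot match exactly */
	rounded = clk_round_rate(core_m2, target_rate);
	if (rounded <= 0) {
		clk_put(core_m2);
		return -EINVAL;
	}

	err = clk_set_rate(core_m2, rounded);
	if (err)
		pr_err("%s: CORE M2 rate change to %ld failed (%d)\n",
		       __func__, rounded, err);

	clk_put(core_m2);
	return err;
}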
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	struct clockdomain *cpu1_clkdm;
	static bool booted;
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update AuxCoreBoot0 with the boot state for the secondary core.
	 * The omap_secondary_startup() routine will hold the secondary core
	 * until the AuxCoreBoot1 register is updated with the CPU state.
	 * A barrier is added to ensure that the write buffer is drained.
	 */
	omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	flush_cache_all();
	smp_wmb();

	/*
	 * The SGI isn't wakeup capable from low power states.  This is a
	 * known limitation and is worked around here by a software forced
	 * clockdomain wake-up.  After the wakeup, the CPU restores the
	 * clockdomain to hw_auto.  On the first boot this is not needed,
	 * since the PM init code initialises the CPUx clockdomain to
	 * hw_auto mode.
	 */
	if (booted) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		omap2_clkdm_wakeup(cpu1_clkdm);
		smp_cross_call(cpumask_of(cpu));
	} else {
		set_event();
		booted = true;
	}

	/*
	 * Now the secondary core is starting up: let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #5
static void save_sar_bank3(void)
{
	struct clockdomain *l4_secure_clkdm;

	/*
	 * Not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
		return;
	}

	l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");
	omap2_clkdm_wakeup(l4_secure_clkdm);

	sar_save(NB_REGS_CONST_SETS_RAM3_HW, SAR_BANK3_OFFSET, sar_ram3_layout);

	omap2_clkdm_allow_idle(l4_secure_clkdm);
}
Example #6
/*
 * This sets the pwrdm state (other than mpu & core).  Currently only ON &
 * RET are supported.  The function assumes that the clkdm doesn't have
 * hw_sup mode enabled.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = 0;
	int ret = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
		sleep_switch = 1;
		pwrdm_wait_transition(pwrdm);
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
		       pwrdm->name);
		goto err;
	}

	if (sleep_switch) {
		omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		pwrdm_wait_transition(pwrdm);
		pwrdm_state_switch(pwrdm);
	}

err:
	return ret;
}
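A minimal caller sketch for the helper above, assuming pwrdm_lookup() and the PWRDM_POWER_RET constant from the power-domain framework; the domain name passed in is a placeholder chosen by the caller.

static int hyp_put_domain_in_ret(const char *pwrdm_name)
{
	struct powerdomain *pwrdm;

	pwrdm = pwrdm_lookup(pwrdm_name);
	if (!pwrdm)
		return -ENODEV;

	/* omap_set_pwrdm_state() falls back to a shallower state if needed */
	return omap_set_pwrdm_state(pwrdm, PWRDM_POWER_RET);
}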
Example #7
/*
 * omap4_sar_save - save the context to SAR_RAM1 and SAR_RAM2 as per
 * sar_ram1_layout and sar_ram2_layout for the device OFF mode
 */
int omap4_sar_save(void)
{
	struct clockdomain *l3_init_clkdm;

	/*
	 * Not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
		return 0;
	}

	if (omap4_sar_enable_check() < 0) {
		WARN(1, "%s: SAR enable check failed!\n", __func__);
		return -EBUSY;
	}

	l3_init_clkdm = clkdm_lookup("l3_init_clkdm");
	omap2_clkdm_wakeup(l3_init_clkdm);
	/*
	 * SAR bits and clocks need to be enabled
	 */
	pwrdm_enable_hdwr_sar(l3init_pwrdm);
	clk_enable(usb_host_ck);
	clk_enable(usb_tll_ck);

	/* Save SAR BANK1 */
	sar_save(NB_REGS_CONST_SETS_RAM1_HW, SAR_BANK1_OFFSET, sar_ram1_layout);

	clk_disable(usb_host_ck);
	clk_disable(usb_tll_ck);
	pwrdm_disable_hdwr_sar(l3init_pwrdm);

	omap2_clkdm_allow_idle(l3_init_clkdm);

	/* Save SAR BANK2 */
	sar_save(NB_REGS_CONST_SETS_RAM2_HW, SAR_BANK2_OFFSET, sar_ram2_layout);

	return 0;
}
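A hedged sketch of a suspend-path caller, assuming the SAR backup is taken just before the platform programs device OFF; the surrounding hook is hypothetical.

static int hyp_prepare_device_off(void)
{
	int ret;

	/* back up SAR RAM before the domains lose context */
	ret = omap4_sar_save();
	if (ret) {
		pr_err("%s: SAR save failed (%d), skipping device OFF\n",
		       __func__, ret);
		return ret;
	}

	/* ... continue with the rest of the device OFF sequence ... */
	return 0;
}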
Example #8
static int omap2_clkdm_clk_enable(struct clockdomain *clkdm)
{
	bool hwsup = false;

	if (!clkdm->clktrctrl_mask)
		return 0;

	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
				clkdm->clktrctrl_mask);

	if (hwsup) {
		/* Disable HW transitions when we are changing deps */
		_disable_hwsup(clkdm);
		_clkdm_add_autodeps(clkdm);
		_enable_hwsup(clkdm);
	} else {
		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
			omap2_clkdm_wakeup(clkdm);
	}

	return 0;
}
/**
 * omap4_set_freq_update - set the FREQ_UPDATE bit
 *
 * Programs the CM shadow registers to update EMIF parameters.  In a few use
 * cases only a few registers need to be updated using the PRCM freq update
 * sequence: the EMIF read-idle control and zq-config need to be updated for
 * temperature alerts and voltage changes.
 * Returns -1 on error and 0 on success.
 */
int omap4_set_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;

	/* Cache the look-up so it is not repeated on every call */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");

	/* Configures MEMIF domain in SW_WKUP */
	omap2_clkdm_wakeup(l3_emif_clkdm);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			   (1 << OMAP4430_FREQ_UPDATE_SHIFT);
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Put the MEMIF domain back to HW_AUTO */
	omap2_clkdm_allow_idle(l3_emif_clkdm);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update failed\n",	__func__);
		return -1;
	}

	return 0;
}
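All of the examples on this page share the same bracket: look up the clockdomain once, force it to SW_WKUP while its registers are being accessed, then hand it back to hardware-supervised idle. Below is a generic sketch of that pattern; the clockdomain name, the cached pointer, and the wrapper function are placeholders, not taken from the examples above.

static struct clockdomain *hyp_clkdm;	/* cached across calls */

static void hyp_access_idle_capable_module(void)
{
	if (!hyp_clkdm)
		hyp_clkdm = clkdm_lookup("some_clkdm");	/* placeholder name */
	if (!hyp_clkdm)
		return;

	omap2_clkdm_wakeup(hyp_clkdm);		/* force SW_WKUP */

	/* ... access the module's registers here ... */

	omap2_clkdm_allow_idle(hyp_clkdm);	/* back to HW_AUTO */
}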
Example #10
/**
 * omap4_dpll_low_power_cascade - configure system for low power DPLL cascade
 *
 * The low power DPLL cascading scheme is a way to have a mostly functional
 * system running with only one locked DPLL and all of the others in bypass.
 * While this might be useful for many use cases, the primary target is low
 * power audio playback.  The steps to enter this state are roughly:
 *
 * Reparent DPLL_ABE so that it is fed by SYS_32K_CK
 * Set magical REGM4XEN bit so DPLL_ABE MN dividers are multiplied by four
 * Lock DPLL_ABE at 196.608MHz and bypass DPLL_CORE, DPLL_MPU & DPLL_IVA
 * Reparent DPLL_CORE so that it is fed by DPLL_ABE
 * Reparent DPLL_MPU & DPLL_IVA so that they are fed by DPLL_CORE
 */
int omap4_dpll_low_power_cascade_enter(void)
{
	int ret = 0;
	struct clk *dpll_abe_ck, *dpll_abe_m3x2_ck;
	struct clk *dpll_mpu_ck, *div_mpu_hs_clk;
	struct clk *dpll_iva_ck, *div_iva_hs_clk, *iva_hsd_byp_clk_mux_ck;
	struct clk *dpll_per_ck, *func_48m_fclk;
	struct clk *per_hsd_byp_clk_mux_ck, *per_hs_clk_div_ck;
	struct clk *dpll_core_ck, *dpll_core_x2_ck;
	struct clk *dpll_core_m2_ck, *dpll_core_m5x2_ck, *dpll_core_m6x2_ck;
	struct clk *core_hsd_byp_clk_mux_ck;
	struct clk *div_core_ck, *l3_div_ck, *l4_div_ck;
	struct clk *l4_wkup_clk_mux_ck, *lp_clk_div_ck;
	struct clk *pmd_stm_clock_mux_ck, *pmd_trace_clk_mux_ck;
	struct clockdomain *emu_sys_44xx_clkdm, *abe_44xx_clkdm;
	struct device *mpu_dev;
	struct cpufreq_policy *cp;
	struct omap_opp *opp;
	struct voltagedomain *vdd_mpu, *vdd_iva, *vdd_core;

	dpll_abe_ck = clk_get(NULL, "dpll_abe_ck");
	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	div_mpu_hs_clk = clk_get(NULL, "div_mpu_hs_clk");
	dpll_iva_ck = clk_get(NULL, "dpll_iva_ck");
	div_iva_hs_clk = clk_get(NULL, "div_iva_hs_clk");
	iva_hsd_byp_clk_mux_ck = clk_get(NULL, "iva_hsd_byp_clk_mux_ck");
	dpll_core_ck = clk_get(NULL, "dpll_core_ck");
	dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	dpll_core_m6x2_ck = clk_get(NULL, "dpll_core_m6x2_ck");
	dpll_abe_m3x2_ck = clk_get(NULL, "dpll_abe_m3x2_ck");
	dpll_core_x2_ck = clk_get(NULL, "dpll_core_x2_ck");
	core_hsd_byp_clk_mux_ck = clk_get(NULL, "core_hsd_byp_clk_mux_ck");
	div_core_ck = clk_get(NULL, "div_core_ck");
	l4_wkup_clk_mux_ck = clk_get(NULL, "l4_wkup_clk_mux_ck");
	lp_clk_div_ck = clk_get(NULL, "lp_clk_div_ck");
	pmd_stm_clock_mux_ck = clk_get(NULL, "pmd_stm_clock_mux_ck");
	pmd_trace_clk_mux_ck = clk_get(NULL, "pmd_trace_clk_mux_ck");
	l3_div_ck = clk_get(NULL, "l3_div_ck");
	l4_div_ck = clk_get(NULL, "l4_div_ck");
	dpll_per_ck = clk_get(NULL, "dpll_per_ck");
	func_48m_fclk = clk_get(NULL, "func_48m_fclk");
	per_hsd_byp_clk_mux_ck = clk_get(NULL, "per_hsd_byp_clk_mux_ck");
	per_hs_clk_div_ck = clk_get(NULL, "per_hs_clk_div_ck");

	emu_sys_44xx_clkdm = clkdm_lookup("emu_sys_44xx_clkdm");
	abe_44xx_clkdm = clkdm_lookup("abe_clkdm");

	if (!dpll_abe_ck || !dpll_mpu_ck || !div_mpu_hs_clk || !dpll_iva_ck ||
		!div_iva_hs_clk || !iva_hsd_byp_clk_mux_ck || !dpll_core_m2_ck
		|| !dpll_abe_m3x2_ck || !div_core_ck || !dpll_core_x2_ck ||
		!core_hsd_byp_clk_mux_ck || !dpll_core_m5x2_ck ||
		!l4_wkup_clk_mux_ck || !lp_clk_div_ck || !pmd_stm_clock_mux_ck
		|| !pmd_trace_clk_mux_ck || !dpll_core_m6x2_ck ||
		!dpll_core_ck || !dpll_per_ck || !func_48m_fclk
		|| !per_hsd_byp_clk_mux_ck || !per_hs_clk_div_ck) {

		pr_warn("%s: failed to get all necessary clocks\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	omap4_lpmode = true;

	/* look up the three scalable voltage domains */
	vdd_mpu = omap_voltage_domain_get("mpu");
	vdd_iva = omap_voltage_domain_get("iva");
	vdd_core = omap_voltage_domain_get("core");

	/* disable SR adaptive voltage scaling while changing freq */
	omap_smartreflex_disable(vdd_mpu);
	omap_smartreflex_disable(vdd_iva);
	omap_smartreflex_disable(vdd_core);

	/* prevent DPLL_ABE & DPLL_CORE from idling */
	omap3_dpll_deny_idle(dpll_abe_ck);
	omap3_dpll_deny_idle(dpll_core_ck);

	/* put ABE clock domain SW_WKUP */
	omap2_clkdm_wakeup(abe_44xx_clkdm);

	/* WA: prevent DPLL_ABE_M3X2 clock from auto-gating */
	omap4_prm_rmw_reg_bits(BIT(8), BIT(8),
			dpll_abe_m3x2_ck->clksel_reg);

	/* drive DPLL_CORE bypass clock from DPLL_ABE (CLKINPULOW) */
	state.core_hsd_byp_clk_mux_ck_parent = core_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(core_hsd_byp_clk_mux_ck, dpll_abe_m3x2_ck);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_CORE bypass clock to ABE_M3X2\n",
				__func__);
		goto core_bypass_clock_reparent_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/*
	 * bypass DPLL_CORE, configure EMIF for the new rate
	 * CORE_CLK = CORE_X2_CLK
	 */
	state.dpll_core_ck_rate = dpll_core_ck->rate;

	state.div_core_ck_div =
		omap4_prm_read_bits_shift(div_core_ck->clksel_reg,
				div_core_ck->clksel_mask);

	state.l3_div_ck_div =
		omap4_prm_read_bits_shift(l3_div_ck->clksel_reg,
				l3_div_ck->clksel_mask);

	state.l4_div_ck_div =
		omap4_prm_read_bits_shift(l4_div_ck->clksel_reg,
				l4_div_ck->clksel_mask);

	state.dpll_core_m5x2_ck_div =
		omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

	state.dpll_core_m2_div =
		omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

	ret =  clk_set_rate(div_core_ck, dpll_core_m5x2_ck->rate);
	ret |= clk_set_rate(dpll_core_ck, LP_196M_RATE);
	ret |= clk_set_rate(dpll_core_m5x2_ck, dpll_core_x2_ck->rate);
	if (ret) {
		pr_err("%s: failed setting CORE clock rates\n", __func__);
		goto core_clock_set_rate_fail;
	} else
		pr_debug("%s: DPLL_CORE bypass clock reparented to ABE_M3X2\n",
				__func__);

	/* divide MPU/IVA bypass clocks by 2 (for when we bypass DPLL_CORE) */
	state.div_mpu_hs_clk_div =
		omap4_prm_read_bits_shift(div_mpu_hs_clk->clksel_reg,
				div_mpu_hs_clk->clksel_mask);
	state.div_iva_hs_clk_div =
		omap4_prm_read_bits_shift(div_iva_hs_clk->clksel_reg,
				div_iva_hs_clk->clksel_mask);
	clk_set_rate(div_mpu_hs_clk, div_mpu_hs_clk->parent->rate);
	clk_set_rate(div_iva_hs_clk, div_iva_hs_clk->parent->rate / 2);

	/* select CLKINPULOW (div_iva_hs_clk) as DPLL_IVA bypass clock */
	state.iva_hsd_byp_clk_mux_ck_parent = iva_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(iva_hsd_byp_clk_mux_ck, div_iva_hs_clk);
	if (ret) {
		pr_err("%s: failed reparenting DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);
		goto iva_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_IVA bypass clock to CLKINPULOW\n",
				__func__);

	/* select CLKINPULOW (per_hs_clk_div_ck) as DPLL_PER bypass clock */
	state.per_hsd_byp_clk_mux_ck_parent = per_hsd_byp_clk_mux_ck->parent;
	ret = clk_set_parent(per_hsd_byp_clk_mux_ck, per_hs_clk_div_ck);
	if (ret) {
		pr_debug("%s: failed reparenting DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);
		goto per_bypass_clk_reparent_fail;
	} else
		pr_debug("%s: reparented DPLL_PER bypass clock to CLKINPULOW\n",
				__func__);

	/* bypass DPLL_MPU */
	state.dpll_mpu_ck_rate = dpll_mpu_ck->rate;
	ret = clk_set_rate(dpll_mpu_ck,
			dpll_mpu_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_MPU failed to enter Low Power bypass\n",
				__func__);
		goto dpll_mpu_bypass_fail;
	} else
		pr_debug("%s: DPLL_MPU entered Low Power bypass\n", __func__);

	/* bypass DPLL_IVA */
	state.dpll_iva_ck_rate = dpll_iva_ck->rate;
	ret = clk_set_rate(dpll_iva_ck,
			dpll_iva_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_err("%s: DPLL_IVA failed to enter Low Power bypass\n",
				__func__);
		goto dpll_iva_bypass_fail;
	} else
		pr_debug("%s: DPLL_IVA entered Low Power bypass\n", __func__);

	/* bypass DPLL_PER */
	state.dpll_per_ck_rate = dpll_per_ck->rate;
	ret = clk_set_rate(dpll_per_ck,
			dpll_per_ck->dpll_data->clk_bypass->rate);
	if (ret) {
		pr_debug("%s: DPLL_PER failed to enter Low Power bypass\n",
				__func__);
		goto dpll_per_bypass_fail;
	} else
		pr_debug("%s: DPLL_PER entered Low Power bypass\n",__func__);

	__raw_writel(1, OMAP4430_CM_L4_WKUP_CLKSEL);

	/* never de-assert CLKREQ while in DPLL cascading scheme */
	state.clkreqctrl = __raw_readl(OMAP4430_PRM_CLKREQCTRL);
	__raw_writel(0x4, OMAP4430_PRM_CLKREQCTRL);

	/* re-enable SR adaptive voltage scaling */
	omap_smartreflex_enable(vdd_mpu);
	omap_smartreflex_enable(vdd_iva);
	omap_smartreflex_enable(vdd_core);

	/* drive PM debug clocks from CORE_M6X2 and allow the clkdm to idle */
	/*state.pmd_stm_clock_mux_ck_parent = pmd_stm_clock_mux_ck->parent;
	state.pmd_trace_clk_mux_ck_parent = pmd_trace_clk_mux_ck->parent;
	ret =  clk_set_parent(pmd_stm_clock_mux_ck, dpll_core_m6x2_ck);
	ret |= clk_set_parent(pmd_trace_clk_mux_ck, dpll_core_m6x2_ck);
	if (ret)
		pr_err("%s: failed reparenting PMD clocks to ABE LP clock\n",
				__func__);
	else
		pr_debug("%s: reparented PMD clocks to ABE LP clock\n",
				__func__);

	omap2_clkdm_allow_idle(emu_sys_44xx_clkdm);*/

	recalculate_root_clocks();

	goto out;

dpll_per_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
per_bypass_clk_reparent_fail:
	clk_set_parent(per_hsd_byp_clk_mux_ck,
			state.per_hsd_byp_clk_mux_ck_parent);
dpll_iva_bypass_fail:
	clk_set_rate(div_iva_hs_clk, (div_iva_hs_clk->parent->rate /
				(1 << state.div_iva_hs_clk_div)));
	clk_set_rate(dpll_iva_ck, state.dpll_iva_ck_rate);
dpll_mpu_bypass_fail:
	omap4_lpmode = false;
	clk_set_rate(div_mpu_hs_clk, (div_mpu_hs_clk->parent->rate /
				(1 << state.div_mpu_hs_clk_div)));
	clk_set_rate(dpll_mpu_ck, state.dpll_mpu_ck_rate);
iva_bypass_clk_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
core_clock_set_rate_fail:
	/* FIXME make this follow the sequence below */
	clk_set_rate(dpll_core_m5x2_ck, (dpll_core_m5x2_ck->parent->rate /
				state.dpll_core_m5x2_ck_div));
	clk_set_rate(dpll_core_ck, (dpll_core_ck->parent->rate /
				state.dpll_core_m2_div));
	clk_set_rate(div_core_ck, (div_core_ck->parent->rate /
				state.div_core_ck_div));
core_bypass_clock_reparent_fail:
	clk_set_parent(iva_hsd_byp_clk_mux_ck,
			state.iva_hsd_byp_clk_mux_ck_parent);
	omap4_prm_rmw_reg_bits(BIT(8), BIT(8),
			dpll_abe_m3x2_ck->clksel_reg);
	omap2_clkdm_allow_idle(abe_44xx_clkdm);
	omap3_dpll_allow_idle(dpll_abe_ck);
	omap3_dpll_allow_idle(dpll_core_ck);
out:
	return ret;
}
Example #11
/**
 * omap4_core_dpll_set_rate - set the rate for the CORE DPLL
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Program the CORE DPLL, including handling of EMIF frequency changes on the
 * M2 divider.  Returns 0 on success, otherwise a negative error code.
 */
int omap4_core_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0, m2_div, m5_div;
	u32 mask, reg;
	u32 shadow_freq_cfg1 = 0, shadow_freq_cfg2 = 0;
	struct clk *new_parent;
	struct dpll_data *dd;

	if (!clk || !rate)
		return -EINVAL;

	if (!clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (rate == clk->rate)
		return 0;

	/* enable reference and bypass clocks */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	/* Cache the look-ups so they are not repeated on every call */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	if (!dpll_core_m2_ck)
		dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	if (!dpll_core_m5x2_ck)
		dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	if (!gpmc_ick)
		gpmc_ick = clk_get(NULL, "gpmc_ick");

	/* Make sure MEMIF clkdm is in SW_WKUP & GPMC clocks are active */
	omap2_clkdm_wakeup(l3_emif_clkdm);
	omap2_clk_enable(gpmc_ick);

	/* FIXME set m3, m6 & m7 rates here? */

	/* check for bypass rate */
	if (rate == dd->clk_bypass->rate &&
			clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)) {
		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
		 * parameters in EMIF shadow registers for bypass clock rate
		 * divided by 2
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 2 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);

		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program CM_DIV_M2_DPLL_CORE.DPLL_CLKOUT_DIV for divide by
		 * two and put DPLL_CORE into LP Bypass
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOW_POWER_BYPASS <<
			 OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
		 * parameters in EMIF shadow registers for rate divided
		 * by 2.
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * FIXME skipping bypass part of omap3_noncore_dpll_program.
		 * also x-loader's configure_core_dpll_no_lock bypasses
		 * DPLL_CORE directly through CM_CLKMODE_DPLL_CORE via MN
		 * bypass; no shadow register necessary!
		 */

		mask = (dd->mult_mask | dd->div1_mask);
		reg  = (dd->last_rounded_m << __ffs(dd->mult_mask)) |
			((dd->last_rounded_n - 1) << __ffs(dd->div1_mask));

		/* program mn divider values */
		omap4_prm_rmw_reg_bits(mask, reg, dd->mult_div1_reg);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 1 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);

		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program DPLL_CORE_M2_DIV with same value as the one already
		 * in direct register and lock DPLL_CORE
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_ref;
	}

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
					& OMAP4430_FREQ_UPDATE_MASK) == 0),
			MAX_FREQ_UPDATE_TIMEOUT, i);

	/* clear GPMC_FREQ_UPDATE bit */
	shadow_freq_cfg2 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG2);
	shadow_freq_cfg2 &= ~1;
	__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

	/*
	 * Switch the parent clock in the hierarchy, and make sure that the
	 * new parent's usecount is correct.  Note: we enable the new parent
	 * before disabling the old to avoid any unnecessary hardware
	 * disable->enable transitions.
	 */
	if (clk->usecount) {
		omap2_clk_enable(new_parent);
		omap2_clk_disable(clk->parent);
	}
	clk_reparent(clk, new_parent);
	clk->rate = rate;

	/* disable reference and bypass clocks */
	omap2_clk_disable(dd->clk_bypass);
	omap2_clk_disable(dd->clk_ref);

	/* Put the MEMIF domain back to HW_AUTO & let the GPMC clocks idle */
	omap2_clkdm_allow_idle(l3_emif_clkdm);
	omap2_clk_disable(gpmc_ick);

	/*
	 * FIXME PRCM functional spec says we should set GPMC_FREQ_UPDATE bit
	 * here, but we're not even handling CM_SHADOW_FREQ_CONFIG2 at all.
	 */

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	return 0;
}
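In the legacy (pre-common-clk) OMAP clock framework, a set_rate hook like the one above is wired into the clock's definition in the clock data file. The sketch below is an assumption about how that wiring might look; the parent/DPLL data block and the recalc helper are named only for illustration, and most initializers are omitted.

/* hedged sketch only: not the actual clock data definition */
static struct clk dpll_core_ck = {
	.name		= "dpll_core_ck",
	.dpll_data	= &dpll_core_dd,		/* assumed DPLL data block */
	.set_rate	= &omap4_core_dpll_set_rate,	/* hook shown above */
	.recalc		= &omap3_dpll_recalc,		/* assumed recalc helper */
};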