Example #1
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
	u32 v, field_val, validrate, new_div = 0;

	if (!clk->clksel_mask)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	field_val = omap2_divisor_to_clksel(clk, new_div);
	if (field_val == ~0)
		return -EINVAL;

	v = __raw_readl(clk->clksel_reg);
	v &= ~clk->clksel_mask;
	v |= field_val << __ffs(clk->clksel_mask);
	__raw_writel(v, clk->clksel_reg);
	v = __raw_readl(clk->clksel_reg); /* OCP barrier */

	clk->rate = clk->parent->rate / new_div;

	_omap2xxx_clk_commit(clk);

	return 0;
}
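The register update above is a plain read-modify-write of the clksel bit field: clear the field with its mask, then shift the encoded divider up to the mask's lowest set bit. A small standalone sketch of just that bit manipulation, with made-up register contents and a minimal stand-in for the kernel's __ffs(), is:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned int lowest_set_bit(uint32_t mask)
{
	unsigned int i = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	uint32_t reg = 0xABCD1234;		/* pretend register contents */
	uint32_t clksel_mask = 0x00000F00;	/* hypothetical field, bits 11:8 */
	uint32_t field_val = 0x6;		/* encoded divider to program */

	reg &= ~clksel_mask;				/* clear the field */
	reg |= field_val << lowest_set_bit(clksel_mask);	/* insert the new value */

	printf("new register value: 0x%08X\n", (unsigned)reg);	/* prints 0xABCD1634 */
	return 0;
}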
Example #2
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
    u32 field_mask, field_val, reg_val, validrate, new_div = 0;
    void __iomem *div_addr;

    validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
    if (validrate != rate)
        return -EINVAL;

    div_addr = omap2_get_clksel(clk, &field_mask);
    if (!div_addr)
        return -EINVAL;

    field_val = omap2_divisor_to_clksel(clk, new_div);
    if (field_val == ~0)
        return -EINVAL;

    reg_val = __raw_readl(div_addr);
    reg_val &= ~field_mask;
    reg_val |= (field_val << __ffs(field_mask));
    __raw_writel(reg_val, div_addr);
    wmb();

    clk->rate = clk->parent->rate / new_div;

    if ((clk->flags & DELAYED_APP) && cpu_is_omap24xx()) {
        __raw_writel(OMAP24XX_VALID_CONFIG, OMAP24XX_PRCM_CLKCFG_CTRL);
        wmb();
    }

    return 0;
}
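Example #1 hides the OMAP24xx commit step behind _omap2xxx_clk_commit(), while Example #2 spells it out inline. Based on that inline block, a sketch of what such a commit helper could look like (the real helper may use the PRM accessor API instead of a raw register write) is:

/*
 * Sketch only: mirrors the inline DELAYED_APP block from Example #2.
 * On OMAP24xx, clocks flagged DELAYED_APP only take effect once
 * VALID_CONFIG has been written to PRCM_CLKCFG_CTRL.
 */
static void _omap2xxx_clk_commit(struct clk *clk)
{
	if (!cpu_is_omap24xx())
		return;

	if (!(clk->flags & DELAYED_APP))
		return;

	__raw_writel(OMAP24XX_VALID_CONFIG, OMAP24XX_PRCM_CLKCFG_CTRL);
	wmb();	/* make sure the commit write has been posted */
}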
Example #3
/**
 * omap2_clksel_round_rate() - find rounded rate for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 *
 * This function is intended to be called only by the clock framework.
 * Finds the best target rate based on the source clock and the possible
 * divider rates.  The divider array must be sorted with the smallest
 * divider first.
 *
 * Returns the rounded clock rate, or 0xffffffff on error.
 */
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 new_div, new_mul;

	return omap2_clksel_round_rate_div(clk, target_rate,
					   &new_div, &new_mul);
}
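For clocks that use these clksel dividers, the generic clk API ends up here: clk_round_rate() lands in omap2_clksel_round_rate() and clk_set_rate() in omap2_clksel_set_rate(). A hypothetical driver-side caller, with the clock name "dss1_fck" chosen purely for illustration, might look like:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical caller sketch; "dss1_fck" is only an example name. */
static int example_pick_rate(struct device *dev, unsigned long wanted)
{
	struct clk *fck;
	long rounded;
	int ret;

	fck = clk_get(dev, "dss1_fck");
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	rounded = clk_round_rate(fck, wanted);	/* what can the divider give us? */
	if (rounded <= 0) {
		clk_put(fck);
		return -EINVAL;
	}

	ret = clk_set_rate(fck, rounded);	/* programs the clksel divider */
	clk_put(fck);

	return ret;
}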
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2 divider.
 * The M2 divider clocks the external DDR, and its reconfiguration on a
 * frequency change is handled by a hardware sequencer, driven by the
 * PRCM and EMIF through shadow registers.  If the requested rate matches
 * DPLL_CORE's bypass clock rate, the DPLL is put into Low-Power Bypass.
 * Returns negative int on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Cache the clockdomain lookup so it is not repeated on every call */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");

	/* put MEMIF domain in SW_WKUP & increment usecount for clks */
	omap2_clkdm_wakeup(l3_emif_clkdm);

	/*
	 * maybe program core m5 divider here
	 * definitely program m3, m6 & m7 dividers here
	 */

	/*
	 * DDR clock = DPLL_CORE_M2_CK / 2.  Program EMIF timing
	 * parameters in EMIF shadow registers for validrate divided
	 * by 2.
	 */
	omap_emif_setup_registers(validrate / 2, LPDDR2_VOLTAGE_STABLE);

	/*
	 * program DPLL_CORE_M2_DIV with same value as the one already
	 * in direct register and lock DPLL_CORE
	 */
	shadow_freq_cfg1 =
		(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
		(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
		(1 << OMAP4430_DLL_RESET_SHIFT) |
		(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
					& OMAP4430_FREQ_UPDATE_MASK) == 0),
			MAX_FREQ_UPDATE_TIMEOUT, i);

	/* put MEMIF clkdm back to HW_AUTO & decrement usecount for clks */
	omap2_clkdm_allow_idle(l3_emif_clkdm);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	return 0;
}
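omap_test_timeout() above is a bounded busy-wait: it polls the condition up to MAX_FREQ_UPDATE_TIMEOUT times and leaves the number of iterations in i, which is why i == MAX_FREQ_UPDATE_TIMEOUT afterwards means FREQ_UPDATE never cleared. A sketch of such a helper, assuming the real macro (defined in the OMAP platform headers) follows the same shape, is:

/*
 * Sketch of a bounded-poll helper in the spirit of omap_test_timeout():
 * evaluate @cond up to @timeout times, delaying 1 us between attempts,
 * and leave the number of iterations used in @index (udelay() comes
 * from <linux/delay.h>).
 */
#define test_timeout_sketch(cond, timeout, index)		\
({								\
	for ((index) = 0; (index) < (timeout); (index)++) {	\
		if (cond)					\
			break;					\
		udelay(1);					\
	}							\
})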
/**
 * omap3_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Program the DPLL M2 divider with the rounded target rate.  Returns
 * -EINVAL upon error, or 0 upon success.
 */
static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	u32 new_div = 0;
	unsigned long validrate, sdrcrate;
	struct omap_sdrc_params *sp;

	if (!clk || !rate)
		return -EINVAL;

	if (clk != &dpll3_m2_ck)
		return -EINVAL;

	if (rate == clk->rate)
		return 0;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	sdrcrate = sdrc_ick.rate;
	if (rate > clk->rate)
		sdrcrate <<= ((rate / clk->rate) - 1);
	else
		sdrcrate >>= ((clk->rate / rate) - 1);

	sp = omap2_sdrc_get_params(sdrcrate);
	if (!sp)
		return -EINVAL;

	pr_info("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
		validrate);
	pr_info("clock: SDRC timing params used: %08x %08x %08x\n",
		sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);

	/* REVISIT: SRAM code doesn't support other M2 divisors yet */
	WARN_ON(new_div != 1 && new_div != 2);

	/* REVISIT: Add SDRC_MR changing to this code also */
	omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
				  sp->actim_ctrlb, new_div);

	return 0;
}
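The SDRC rate scaling above works on integer ratios: when the CORE DPLL rate doubles, sdrcrate is shifted left by one, and when it halves, shifted right by one (in practice the M2 divider only moves between 1 and 2, per the WARN_ON in the code above). A standalone check of that arithmetic with made-up rates:

#include <stdio.h>

int main(void)
{
	unsigned long cur = 332000000UL;	/* hypothetical current DPLL3 M2 rate */
	unsigned long sdrc = 166000000UL;	/* hypothetical current SDRC rate */
	unsigned long new_rate = 664000000UL;	/* doubling the DPLL output */
	unsigned long scaled = sdrc;

	if (new_rate > cur)
		scaled <<= ((new_rate / cur) - 1);	/* ratio 2 -> shift left by 1 */
	else
		scaled >>= ((cur / new_rate) - 1);

	/* Doubling the DPLL rate doubles the SDRC rate: prints 332000000 */
	printf("scaled SDRC rate: %lu\n", scaled);
	return 0;
}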
/**
 * omap3_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Program the DPLL M2 divider with the rounded target rate.  Returns
 * -EINVAL upon error, or 0 upon success.
 */
int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	u32 new_div = 0;
	u32 unlock_dll = 0;
	u32 c;
	unsigned long validrate, sdrcrate, _mpurate;
	struct omap_sdrc_params *sdrc_cs0;
	struct omap_sdrc_params *sdrc_cs1;
	int ret;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	sdrcrate = sdrc_ick_p->rate;
	if (rate > clk->rate)
		sdrcrate <<= ((rate / clk->rate) >> 1);
	else
		sdrcrate >>= ((clk->rate / rate) >> 1);
Example #7
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
	u32 field_val, validrate, new_div = 0;

	if (!clk->clksel || !clk->clksel_mask)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	field_val = _divisor_to_clksel(clk, new_div);
	if (field_val == ~0)
		return -EINVAL;

	_write_clksel_reg(clk, field_val);

	clk->rate = clk->parent->rate / new_div;

	pr_debug("clock: %s: set rate to %ld\n", clk->name, clk->rate);

	return 0;
}
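Example #7 factors the register access out into _divisor_to_clksel() and _write_clksel_reg(). Going by the inline sequence in Example #1, the write helper would be a sketch along these lines, including the read-back that acts as an OCP barrier:

/*
 * Sketch only: the same read-modify-write sequence Example #1 performs
 * inline, with a read-back acting as an OCP barrier so the write has
 * reached the module before the caller continues.
 */
static void _write_clksel_reg(struct clk *clk, u32 field_val)
{
	u32 v;

	v = __raw_readl(clk->clksel_reg);
	v &= ~clk->clksel_mask;
	v |= field_val << __ffs(clk->clksel_mask);
	__raw_writel(v, clk->clksel_reg);

	v = __raw_readl(clk->clksel_reg);	/* OCP barrier */
}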
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2
 * divider.  The M2 divider clocks the external DDR, and its
 * reconfiguration on a frequency change is handled by a hardware
 * sequencer, driven by the PRCM and EMIF through shadow registers.
 * Returns -EINVAL or -1 on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
    int i = 0;
    u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
    unsigned long flags;

    if (!clk || !rate)
        return -EINVAL;

    validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
    if (validrate != rate)
        return -EINVAL;

    /* Cache the clockdomain lookup so it is not repeated on every call */
    if (!l3_emif_clkdm) {
        l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
        if (!l3_emif_clkdm) {
            pr_err("%s: clockdomain lookup failed\n", __func__);
            return -EINVAL;
        }
    }

    spin_lock_irqsave(&l3_emif_lock, flags);

    /*
     * Errata ID: i728
     *
     * DESCRIPTION:
     *
     * If during a small window the following three events occur:
     *
     * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
     * 2) A frequency change is requested (CM_SHADOW_FREQ_CONFIG1
     *    FREQ_UPDATE set to 1)
     * 3) An OCP access is requested
     *
     * then the clock on the DDR interface becomes unstable.
     *
     * WORKAROUND:
     *
     * Prevent event 1) while event 2) is happening.
     *
     * Disable the self-refresh when requesting a frequency change.
     * Before requesting a frequency change, program
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
     * (omap_emif_frequency_pre_notify)
     *
     * When the frequency change is completed, reprogram
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
     * (omap_emif_frequency_post_notify)
     */
    omap_emif_frequency_pre_notify();

    /* Put the MEMIF clock domain in SW_WKUP */
    clkdm_wakeup(l3_emif_clkdm);

    /*
     * Program the EMIF timing parameters in the EMIF shadow registers
     * for the targeted DDR clock.
     * DDR clock = core_dpll_m2 / 2
     */
    omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

    /*
     * FREQ_UPDATE sequence:
     * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
     *	after CORE DPLL lock)
     * - DLL_RESET=1 (DLL must be reset upon frequency change)
     * - DPLL_CORE_M2_DIV with same value as the one already
     *	in direct register
     * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
     * - FREQ_UPDATE=1 (to start HW sequence)
     */
    shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
                       (new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
                       (DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
                       (1 << OMAP4430_FREQ_UPDATE_SHIFT);
    shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
    __raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

    /* wait for the configuration to be applied */
    omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
                        & OMAP4430_FREQ_UPDATE_MASK) == 0),
                      MAX_FREQ_UPDATE_TIMEOUT, i);

    /* Put the MEMIF clock domain back to HW_AUTO */
    clkdm_allow_idle(l3_emif_clkdm);

    /* Re-enable DDR self refresh */
    omap_emif_frequency_post_notify();

    spin_unlock_irqrestore(&l3_emif_lock, flags);

    if (i == MAX_FREQ_UPDATE_TIMEOUT) {
        pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
               __func__);
        return -1;
    }

    /* Record the new clock rate */
    clk->rate = validrate;

    return 0;
}
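The i728 workaround in the example above boils down to two EMIF writes that bracket the frequency change: force REG_LP_MODE (EMIF_PWR_MGMT_CTRL[10:8]) to 0x0 before raising FREQ_UPDATE, and back to 0x2 once it completes. A conceptual sketch of the two notify helpers, with a placeholder register offset and I/O base (the real implementation lives in the OMAP EMIF driver), could be:

/*
 * Conceptual sketch of the i728 bracket: the register offset and the
 * ioremap()'d base pointer are placeholders, not the real EMIF driver.
 */
#define EMIF_PWR_MGMT_CTRL	0x38		/* hypothetical offset */
#define REG_LP_MODE_MASK	(0x7 << 8)	/* EMIF_PWR_MGMT_CTRL[10:8] */
#define LP_MODE_DISABLED	(0x0 << 8)
#define LP_MODE_SELF_REFRESH	(0x2 << 8)

static void __iomem *emif_base;			/* mapped elsewhere via ioremap() */

static void emif_set_lp_mode(u32 mode)
{
	u32 v = __raw_readl(emif_base + EMIF_PWR_MGMT_CTRL);

	v &= ~REG_LP_MODE_MASK;
	v |= mode;
	__raw_writel(v, emif_base + EMIF_PWR_MGMT_CTRL);
}

/* Before raising FREQ_UPDATE: keep the DDR out of self-refresh (i728). */
void omap_emif_frequency_pre_notify(void)
{
	emif_set_lp_mode(LP_MODE_DISABLED);
}

/* After the frequency change completes: re-enable self-refresh. */
void omap_emif_frequency_post_notify(void)
{
	emif_set_lp_mode(LP_MODE_SELF_REFRESH);
}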
Example #9
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2
 * divider.  The M2 divider clocks the external DDR, and its
 * reconfiguration on a frequency change is handled by a hardware
 * sequencer, driven by the PRCM and EMIF through shadow registers.
 * Returns -EINVAL or -1 on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
	unsigned long flags;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Cache the clockdomain lookup so it is not repeated on every call */
	if (!l3_emif_clkdm) {
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
		if (!l3_emif_clkdm) {
			pr_err("%s: clockdomain lookup failed\n", __func__);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&l3_emif_lock, flags);

	/* Put the MEMIF clock domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/*
	 * Program the EMIF timing parameters in the EMIF shadow registers
	 * for the targeted DDR clock.
	 * DDR clock = core_dpll_m2 / 2
	 */
	omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - DLL_RESET=1 (DLL must be reset upon frequency change)
	 * - DPLL_CORE_M2_DIV with same value as the one already
	 *	in direct register
	 * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
			(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Put the MEMIF clock domain back to HW_AUTO */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	/* Record the new clock rate */
	clk->rate = validrate;

	return 0;
}