/**
 * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being enabled
 *
 * Increment the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 1, the clockdomain will be "enabled."
 * Only needed for clocks that don't use omap2_dflt_clk_enable() as
 * their enable function pointer.  Passes along the return value of
 * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
 * clockdomain, or 0 if clock framework-based clockdomain control is
 * not implemented.
 */
int omap2_clkops_enable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return -EINVAL;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return 0;
	}

	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);

	return ret;
}
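/*
 * Illustrative only: a minimal clk_ops that routes enable/disable
 * straight through the clockdomain helper above.  The matching
 * omap2_clkops_disable_clkdm() disable hook is assumed to exist
 * alongside it; the ops name below is made up for this sketch.
 */
static const struct clk_ops clkdm_only_clk_ops = {
	.enable		= omap2_clkops_enable_clkdm,
	.disable	= omap2_clkops_disable_clkdm,
};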
/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw * of the DPLL to enable
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power bypass, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, r);
			return r;
		}
	}

	parent = __clk_get_parent(hw->clk);

	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
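/*
 * Sketch of the mode decision above, pulled out as a helper for
 * clarity.  The helper name is invented; the real code keeps the
 * rate comparison inline.
 */
static bool dpll_should_bypass(struct clk_hw *hw, struct dpll_data *dd)
{
	/*
	 * Running at the bypass clock's rate means there is nothing to
	 * multiply, so the DPLL should enter (or stay in) bypass rather
	 * than relock.
	 */
	return __clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass);
}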
Example #3
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	/* If clockdomain supports hardware control, enable it */
	if (clk->clkdm)
		clkdm_allow_idle(clk->clkdm);

	return 0;

oce_err3:
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
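/*
 * Usage sketch (function name hypothetical): with the recursion
 * above, one call on a leaf clock is enough to bring up the whole
 * branch, and only the first user of each clock touches the
 * hardware.  Per the kerneldoc, the caller is expected to hold the
 * clockfw_lock spinlock around these calls.
 */
static int example_use_leaf_clock(struct clk *leaf)
{
	int ret;

	ret = omap2_clk_enable(leaf);	/* parents/clockdomains enabled on demand */
	if (ret)
		return ret;

	/* ... access the IP block fed by 'leaf' ... */

	omap2_clk_disable(leaf);	/* tears the branch back down */
	return 0;
}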
Example #4
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, get_cpu());
		put_cpu();
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
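/*
 * Sketch of the clkdm_control kill switch tested above: assumed to
 * be a file-scope flag, default true, that an SoC init path can
 * clear when the clock framework must not touch clockdomains.  The
 * setter name here is hypothetical.
 */
static bool clkdm_control = true;

void example_disable_clkdm_control(void)
{
	clkdm_control = false;
}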
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  We first call into the OMAP
 * clockdomain code to "enable" the corresponding clockdomain if this
 * is the first enabled user of the clockdomain.  Then program the
 * hardware to enable the clock.  Then wait for the IP block that uses
 * this clock to leave idle (if applicable).  Returns the error value
 * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
 * if @hw has a null clock enable_reg, or zero upon success.
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, ret);
			return ret;
		}
	}

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       __clk_get_name(hw->clk));
		ret = -EINVAL;
		goto err;
	}

	/* FIXME should not have INVERT_ENABLE bit here */
	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}
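/*
 * Illustrative helper for the read-modify-write above (the helper
 * name is invented): compute the new enable-register value, honouring
 * INVERT_ENABLE where the enable bit is active-low.
 */
static u32 example_enable_bit_value(u32 v, const struct clk_hw_omap *clk)
{
	if (clk->flags & INVERT_ENABLE)
		return v & ~(1 << clk->enable_bit);	/* active-low: clear to enable */

	return v | (1 << clk->enable_bit);		/* active-high: set to enable */
}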
/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk_hw_omap * of the DPLL to set
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div;
	u32 v;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	if (dd->sink_clkdm)
		clkdm_clk_enable(dd->sink_clkdm, clk->hw.clk);

	/*
	 * Set jitter correction.  Jitter correction is applicable to
	 * OMAP343X only, since the freqsel field is no longer present
	 * on other devices.
	 */
	if (cpu_is_omap343x()) {
		v = omap2_clk_readl(clk, dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = omap2_clk_readl(clk, dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	omap2_clk_writel(v, clk, dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = omap2_clk_readl(clk, dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	if (dd->sink_clkdm)
		clkdm_clk_disable(dd->sink_clkdm, clk->hw.clk);

	return 0;
}
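/*
 * Sketch of the M/N field packing used above (helper name invented):
 * __ffs(mask) yields the shift of a field's least significant bit, so
 * each value is inserted by clearing the field and OR-ing it in at
 * that shift.  Note the divider field is programmed as N - 1.
 */
static u32 example_pack_dpll_mn(u32 v, const struct dpll_data *dd, u16 m, u8 n)
{
	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= (u32)m << __ffs(dd->mult_mask);
	v |= (u32)(n - 1) << __ffs(dd->div1_mask);

	return v;
}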