Example 1
/**
 * omap2_clk_disable - disable a clock, if the system is not using it
 * @clk: struct clk * to disable
 *
 * Decrements the usecount on struct clk @clk.  If there are no users
 * left, call the clkops-specific clock disable function to disable it
 * in hardware.  If the clock is part of a clockdomain (which they all
 * should be), request that the clockdomain be disabled.  (It too has
 * a usecount, and so will not be disabled in the hardware until it no
 * longer has any users.)  If the clock has a parent clock (most of
 * them do), then call ourselves, recursing on the parent clock.  This
 * can cause an entire branch of the clock tree to be powered off by
 * simply disabling one clock.  Intended to be called with the clockfw_lock
 * spinlock held.  No return value.
 */
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount "
		     "already 0?", clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	clk->usecount--;

	if (clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	if (clk->parent)
		omap2_clk_disable(clk->parent);
}
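
The disable path above only touches hardware once the usecount reaches zero, and it pairs with omap2_clk_enable() shown in Example 4. A minimal sketch of how a caller might serialize the pair with the clockfw_lock spinlock mentioned in the kerneldoc; the wrapper names are hypothetical and the lock is declared locally only to keep the sketch self-contained:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(clockfw_lock);	/* in the real framework this lock already exists */

static int example_clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_enable(clk);		/* recurses up the parent chain */
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}

static void example_clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);
	omap2_clk_disable(clk);			/* drops the usecount, may gate the whole branch */
	spin_unlock_irqrestore(&clockfw_lock, flags);
}

Every example_clk_enable() call must be balanced by exactly one example_clk_disable() call; otherwise the usecount never returns to zero and the clock stays enabled.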
Example 2
/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware, and call into the OMAP
 * clockdomain code to "disable" the corresponding clockdomain if all
 * clocks/hwmods in that clockdomain are now disabled.  No return
 * value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (!clk->enable_reg) {
		/*
		 * 'independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}
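
The hardware update above is a single-bit read-modify-write, with INVERT_ENABLE deciding whether the enable bit is active-high or active-low. A standalone sketch of just that polarity handling, using an illustrative flag value rather than the kernel's definition:

#include <stdint.h>
#include <stdio.h>

#define INVERT_ENABLE	(1u << 0)	/* illustrative flag value, not the kernel's */

/* Return the register value that disables a clock gated by a single bit. */
static uint32_t clk_disable_val(uint32_t reg, unsigned int enable_bit, unsigned int flags)
{
	if (flags & INVERT_ENABLE)
		return reg | (1u << enable_bit);	/* active-low enable: set the bit to disable */
	return reg & ~(1u << enable_bit);		/* active-high enable: clear the bit to disable */
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)clk_disable_val(0x00000042, 6, 0));		/* bit 6 cleared */
	printf("0x%08x\n", (unsigned int)clk_disable_val(0x00000002, 6, INVERT_ENABLE));	/* bit 6 set */
	return 0;
}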
Example 3
/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @hw: struct clk_hw * of the DPLL to disable
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  This function is
 * intended for use in struct clkops.  No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	_omap3_noncore_dpll_stop(clk);
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}
Example 4
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	/* If clockdomain supports hardware control, enable it */
	if (clk->clkdm)
		clkdm_allow_idle(clk->clkdm);

	return 0;

oce_err3:
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
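
The error path uses cascading goto labels so that each failure point unwinds exactly the steps that had already succeeded, in reverse order. A self-contained sketch of the same idiom with hypothetical step and undo functions:

#include <stdio.h>

/* Stand-ins for three dependent enable steps; the last one is made to fail. */
static int step_parent(void)   { return 0; }
static int step_clkdm(void)    { return 0; }
static int step_hw(void)       { return -1; }
static void undo_parent(void)  { puts("undo parent"); }
static void undo_clkdm(void)   { puts("undo clkdm"); }

static int enable_all(void)
{
	int ret;

	ret = step_parent();
	if (ret)
		goto err_parent;

	ret = step_clkdm();
	if (ret)
		goto err_clkdm;

	ret = step_hw();
	if (ret)
		goto err_hw;

	return 0;

err_hw:
	undo_clkdm();	/* undo in the reverse order of the enables */
err_clkdm:
	undo_parent();
err_parent:
	return ret;
}

int main(void)
{
	printf("enable_all() = %d\n", enable_all());
	return 0;
}

In the function above, the extra oce_err1 label additionally undoes the unconditional usecount increment that happened before any of the three steps.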
Example 5
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, get_cpu());
		put_cpu();
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
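
This variant differs from Example 4 mainly in the clkdm_control guard and in tracing with get_cpu()/put_cpu() instead of a bare smp_processor_id(), which keeps the CPU id stable and avoids the "using smp_processor_id() in preemptible code" warning when the caller can be preempted. A short sketch of that pattern on its own (kernel context assumed, the helper name is hypothetical):

#include <linux/smp.h>
#include <linux/printk.h>

static void trace_on_current_cpu(const char *what)
{
	int cpu;

	cpu = get_cpu();		/* disables preemption, returns the current CPU id */
	pr_debug("%s ran on CPU %d\n", what, cpu);
	put_cpu();			/* re-enables preemption */
}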
Example 6
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  We first call into the OMAP
 * clockdomain code to "enable" the corresponding clockdomain if this
 * is the first enabled user of the clockdomain.  Then program the
 * hardware to enable the clock.  Then wait for the IP block that uses
 * this clock to leave idle (if applicable).  Returns the error value
 * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
 * if @hw has a null clock enable_reg, or zero upon success.
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, ret);
			return ret;
		}
	}

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       __clk_get_name(hw->clk));
		ret = -EINVAL;
		goto err;
	}

	/* FIXME should not have INVERT_ENABLE bit here */
	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}
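
After setting the enable bit, the register is read back so that the posted write is flushed across the interconnect (the "OCP barrier") before _omap2_module_wait_ready() polls the IP block. A hedged sketch of that write-then-read-back idiom using generic MMIO accessors rather than the omap2_clk_* helpers:

#include <linux/io.h>

/* Set a bit and flush the posted write by reading the register back. */
static void set_bit_and_flush(void __iomem *reg, u32 mask)
{
	u32 v;

	v = readl_relaxed(reg);
	v |= mask;
	writel_relaxed(v, reg);		/* the write may still be posted here */
	v = readl_relaxed(reg);		/* read-back forces it to reach the module */
	(void)v;			/* value only read for its barrier effect */
}

The disable path in Example 2 deliberately skips the read-back, since nothing afterwards depends on the gate having actually closed.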
Example 7
/**
 * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being disabled
 *
 * Decrement the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 0, the clockdomain will be "disabled."
 * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
 * disable function pointer.  No return value.
 */
void omap2_clkops_disable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	clkdm_clk_disable(clk->clkdm, hw->clk);
}
Example 8
/**
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk_hw_omap * of the DPLL to program
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div;
	u32 v;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	if (dd->sink_clkdm)
		clkdm_clk_enable(dd->sink_clkdm, clk->hw.clk);

	/*
	 * Set jitter correction. Jitter correction applicable for OMAP343X
	 * only since freqsel field is no longer present on other devices.
	 */
	if (cpu_is_omap343x()) {
		v = omap2_clk_readl(clk, dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = omap2_clk_readl(clk, dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	omap2_clk_writel(v, clk, dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = omap2_clk_readl(clk, dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		omap2_clk_writel(v, clk, dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	if (dd->sink_clkdm)
		clkdm_clk_disable(dd->sink_clkdm, clk->hw.clk);

	return 0;
}
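
Every multi-bit field in this sequence (FREQSEL, multiplier, divider, DCO, SD divider) is updated with the same idiom: clear the field through its mask, then shift the new value into position with __ffs(mask). A standalone sketch of that field-insertion pattern with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* Position of the least-significant set bit, like the kernel's __ffs(); mask must be nonzero. */
static unsigned int lsb_pos(uint32_t mask)
{
	unsigned int pos = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		pos++;
	}
	return pos;
}

/* Replace the bits selected by @mask in @reg with @val shifted into the field. */
static uint32_t insert_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | ((val << lsb_pos(mask)) & mask);
}

int main(void)
{
	/* e.g. write a multiplier of 0x2d into a field that occupies bits 8..18 */
	printf("0x%08x\n", (unsigned int)insert_field(0, 0x0007ff00, 0x2d));
	return 0;
}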