/**
 * omap2_clk_disable - drop one user of a clock, powering it down when unused
 * @clk: struct clk * to release
 *
 * Takes one reference off @clk's usecount. When the count reaches
 * zero, the clkops disable hook (if any) turns the clock off in
 * hardware, the owning clockdomain (if any) is asked to release its
 * own reference, and the same operation is applied recursively to the
 * parent clock — so disabling a leaf can power down a whole branch of
 * the clock tree once nothing else holds it. Calling this with the
 * usecount already at zero only emits a warning. Caller must hold the
 * clockfw_lock spinlock. No return value.
 */
void omap2_clk_disable(struct clk *clk)
{
	if (!clk->usecount) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?",
		     clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	/* Still referenced by someone else: nothing more to do. */
	if (--clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	/* Let the clockdomain drop its reference before walking upward. */
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	/* Recurse toward the root so unused ancestors shut off too. */
	if (clk->parent)
		omap2_clk_disable(clk->parent);
}
void clk_disable(struct clk *clk) { unsigned long flags; if (IS_ERR_OR_NULL(clk)) return; spin_lock_irqsave(&clk->lock, flags); if (WARN(!clk->warned && !clk->prepare_count, "%s: Never called prepare or calling disable " "after unprepare\n", clk->dbg_name)) clk->warned = true; if (WARN(clk->count == 0, "%s is unbalanced", clk->dbg_name)) goto out; if (clk->count == 1) { struct clk *parent = clk_get_parent(clk); trace_clock_disable(clk->dbg_name, 0, smp_processor_id()); if (clk->ops->disable) clk->ops->disable(clk); unvote_rate_vdd(clk, clk->rate); clk_disable(clk->depends); clk_disable(parent); } clk->count--; out: spin_unlock_irqrestore(&clk->lock, flags); }