/*
 * of_ti_clockdomain_setup - attach the clocks listed in a DT clockdomain
 * node to that clockdomain.
 *
 * Every phandle in the node's "clocks" property is resolved; clocks that
 * cannot be fetched are reported and skipped, as are basic (non-OMAP)
 * clocks, which have no clkdm_name field to fill in.
 */
static void __init of_ti_clockdomain_setup(struct device_node *node)
{
	struct clk *clk;
	struct clk_hw *hw;
	const char *clkdm_name = node->name;
	int num_clks;
	int idx;

	num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");

	for (idx = 0; idx < num_clks; idx++) {
		clk = of_clk_get(node, idx);
		if (IS_ERR(clk)) {
			pr_err("%s: Failed get %s' clock nr %d (%ld)\n",
			       __func__, node->full_name, idx, PTR_ERR(clk));
			continue;
		}
		/* Only clk_hw_omap clocks carry a clkdm_name field */
		if (__clk_get_flags(clk) & CLK_IS_BASIC) {
			pr_warn("can't setup clkdm for basic clk %s\n",
				__clk_get_name(clk));
			continue;
		}
		hw = __clk_get_hw(clk);
		to_clk_hw_omap(hw)->clkdm_name = clkdm_name;
		omap2_init_clk_clkdm(hw);
	}
}
/*
 * div_round_rate - clk_ops .round_rate hook for a mux-div clock's divider.
 *
 * Delegates to __div_round_rate() against the current parent; whether the
 * parent rate may be changed is taken from CLK_SET_RATE_PARENT.
 */
static long div_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	struct div_clk *divclk = to_div_clk(hw);
	bool parent_may_change;

	parent_may_change = !!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT);

	return __div_round_rate(&divclk->data, rate,
				__clk_get_parent(hw->clk), NULL,
				parent_rate, parent_may_change);
}
/** * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock * @clk: struct clk * to initialize * * Add an OMAP clock @clk to the internal list of OMAP clocks. Used * temporarily for autoidle handling, until this support can be * integrated into the common clock framework code in some way. No * return value. */ void omap2_init_clk_hw_omap_clocks(struct clk *clk) { struct clk_hw_omap *c; if (__clk_get_flags(clk) & CLK_IS_BASIC) return; c = to_clk_hw_omap(__clk_get_hw(clk)); list_add(&c->node, &clk_hw_omap_clocks); }
/**
 * omap2_clk_allow_idle - enable autoidle on an OMAP clock
 * @clk: struct clk * to enable autoidle for
 *
 * Enable autoidle on an OMAP clock.  Returns -EINVAL if @clk is a
 * basic (non-OMAP) clock, 0 otherwise.
 */
int omap2_clk_allow_idle(struct clk *clk)
{
	struct clk_hw_omap *hw_omap;

	/* Basic clocks have no clk_hw_omap ops to dispatch through */
	if (__clk_get_flags(clk) & CLK_IS_BASIC)
		return -EINVAL;

	hw_omap = to_clk_hw_omap(__clk_get_hw(clk));

	if (hw_omap->ops && hw_omap->ops->allow_idle)
		hw_omap->ops->allow_idle(hw_omap);

	return 0;
}
/*
 * clk_factor_round_rate - clk_ops .round_rate for a fixed-factor clock.
 *
 * If the parent rate may be changed (CLK_SET_RATE_PARENT), first ask the
 * parent to round to the rate that would yield @rate through mult/div;
 * the achievable rate is then derived from the (possibly updated) *prate.
 */
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_fixed_factor *factor = to_clk_fixed_factor(hw);

	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
		unsigned long ideal_parent = (rate / factor->mult) * factor->div;

		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
					  ideal_parent);
	}

	return (*prate / factor->div) * factor->mult;
}
/*
 * flexgen_round_rate - clk_ops .round_rate for an ST flexgen clock.
 *
 * Picks the best integer divider for the current parent rate; when the
 * parent may be reprogrammed, instead requests rate * div upstream so the
 * wished rate can be produced exactly.
 */
static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	unsigned long best_div;

	/* Round div according to exact prate and wished rate */
	best_div = clk_best_div(*prate, rate);

	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT))
		return *prate / best_div;

	*prate = rate * best_div;
	return rate;
}
/*
 * omap3_clkoutx2_round_rate - round rate for a DPLL CLKOUTX2 output.
 * @hw: CLKOUTX2 clock
 * @rate: desired rate
 * @prate: in/out parent (DPLL) rate
 *
 * The X2 output normally runs at twice the parent DPLL rate.  Type-J
 * DPLLs have no clkoutx2, and a DPLL in bypass pins the output to the
 * bypass rate.  Returns 0 when no parent DPLL can be found or *prate
 * is zero; otherwise the rounded rate.
 */
long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	const struct dpll_data *dd;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!*prate)
		return 0;

	/* Locate the parent DPLL that feeds this clkoutx2 */
	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	/* TYPE J does not have a clkoutx2 */
	if (dd->flags & DPLL_J_TYPE) {
		/* No x2 stage: rounding is delegated to the DPLL's parent */
		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
		return *prate;
	}

	WARN_ON(!dd->enable_mask);

	/* Extract the DPLL mode field from its control register */
	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* If in bypass, the rate is fixed to the bypass rate*/
	if (v != OMAP3XXX_EN_DPLL_LOCKED)
		return *prate;

	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
		unsigned long best_parent;

		/* Ask the DPLL for half the wished rate; we double it below */
		best_parent = (rate / 2);
		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
					  best_parent);
	}

	return *prate * 2;
}
static long __mux_div_round_rate(struct clk_hw *hw, unsigned long rate, struct clk **best_parent, int *best_div, unsigned long *best_prate) { struct mux_div_clk *md = to_mux_div_clk(hw); unsigned int i; unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0; struct clk *_best_parent = 0; int num_parents = __clk_get_num_parents(hw->clk); bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT; for (i = 0; i < num_parents; i++) { int div; unsigned long prate; struct clk *p = clk_get_parent_by_index(hw->clk, i); rrate = __div_round_rate(&md->data, rate, p, &div, &prate, set_parent); if (is_better_rate(rate, best, rrate)) { best = rrate; _best_div = div; _best_prate = prate; _best_parent = p; } if (rate <= rrate) break; } if (best_div) *best_div = _best_div; if (best_prate) *best_prate = _best_prate; if (best_parent) *best_parent = _best_parent; if (best) return best; return -EINVAL; }
/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @clk: DPLL output struct clk
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 *
 * NOTE(review): the parent-walk below duplicates the logic of
 * omap3_find_clkoutx2_dpll(); consider sharing it.  Also, the DPLL
 * control register is read here with __raw_readl() while the round_rate
 * path uses omap2_clk_readl() — verify whether both accessors are
 * intentional.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;
	struct clk *parent;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		/* Skip over basic (non-OMAP) wrapper clocks */
		do {
			parent = __clk_get_parent(hw->clk);
			hw = __clk_get_hw(parent);
		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return 0;
	}

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	/* Extract the DPLL mode field from the control register */
	v = __raw_readl(dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* Type-J DPLLs have no x2 output; bypass pins rate to the parent */
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}
/* Find the parent DPLL for the given clkoutx2 clock */ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) { struct clk_hw_omap *pclk = NULL; struct clk *parent; /* Walk up the parents of clk, looking for a DPLL */ do { do { parent = __clk_get_parent(hw->clk); hw = __clk_get_hw(parent); } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); if (!hw) break; pclk = to_clk_hw_omap(hw); } while (pclk && !pclk->dpll_data); /* clk does not have a DPLL as a parent? error in the clock data */ if (!pclk) { WARN_ON(1); return NULL; } return pclk; }
/*
 * clk_composite_determine_rate - .determine_rate for a composite clock.
 * @hw: composite clock
 * @rate: desired rate
 * @min_rate/@max_rate: allowed rate bounds (forwarded to sub-ops)
 * @best_parent_rate: out - chosen parent rate
 * @best_parent_p: out - chosen parent clk_hw
 *
 * Dispatches to the component ops in priority order:
 *  1. rate component's own .determine_rate;
 *  2. rate .round_rate combined with mux .set_parent - try every parent
 *     and keep the one whose rounded rate is closest to @rate (unless
 *     CLK_SET_RATE_NO_REPARENT pins the current parent);
 *  3. mux component's .determine_rate;
 * otherwise logs an error and returns 0.
 */
static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long min_rate,
					 unsigned long max_rate,
					 unsigned long *best_parent_rate,
					 struct clk_hw **best_parent_p)
{
	struct clk_composite *composite = to_clk_composite(hw);
	const struct clk_ops *rate_ops = composite->rate_ops;
	const struct clk_ops *mux_ops = composite->mux_ops;
	struct clk_hw *rate_hw = composite->rate_hw;
	struct clk_hw *mux_hw = composite->mux_hw;
	struct clk *parent;
	unsigned long parent_rate;
	long tmp_rate, best_rate = 0;
	unsigned long rate_diff;
	unsigned long best_rate_diff = ULONG_MAX;
	int i;

	if (rate_hw && rate_ops && rate_ops->determine_rate) {
		/* Rate component decides everything itself */
		__clk_hw_set_clk(rate_hw, hw);
		return rate_ops->determine_rate(rate_hw, rate, min_rate,
						max_rate, best_parent_rate,
						best_parent_p);
	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
		   mux_hw && mux_ops && mux_ops->set_parent) {
		*best_parent_p = NULL;

		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
			/* Reparenting forbidden: round against current parent */
			parent = clk_get_parent(mux_hw->clk);
			*best_parent_p = __clk_get_hw(parent);
			*best_parent_rate = __clk_get_rate(parent);

			return rate_ops->round_rate(rate_hw, rate,
						    best_parent_rate);
		}

		/* Try each parent; keep the closest achievable rate */
		for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
			parent = clk_get_parent_by_index(mux_hw->clk, i);
			if (!parent)
				continue;

			parent_rate = __clk_get_rate(parent);

			tmp_rate = rate_ops->round_rate(rate_hw, rate,
							&parent_rate);
			if (tmp_rate < 0)
				continue;

			rate_diff = abs(rate - tmp_rate);

			/* First viable parent, or strictly better match */
			if (!rate_diff || !*best_parent_p ||
			    best_rate_diff > rate_diff) {
				*best_parent_p = __clk_get_hw(parent);
				*best_parent_rate = parent_rate;
				best_rate_diff = rate_diff;
				best_rate = tmp_rate;
			}

			/* Exact hit: no need to scan further */
			if (!rate_diff)
				return rate;
		}

		return best_rate;
	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
		/* No rate component: the mux alone determines the rate */
		__clk_hw_set_clk(mux_hw, hw);
		return mux_ops->determine_rate(mux_hw, rate, min_rate,
					       max_rate, best_parent_rate,
					       best_parent_p);
	} else {
		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
		return 0;
	}
}