/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes the global prepare lock and delegates to __clk_round_rate() to do
 * the real work.
 *
 * Returns the rounded rate; __clk_round_rate() may hand back a negative
 * error code, so the intermediate must be signed.  (The original declared
 * @ret as unsigned long, silently laundering negative errors through an
 * unsigned type before the conversion back to long.)
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
/**
 * omap3_clkoutx2_round_rate - round a rate for the DPLL CLKOUTX2 output
 * @hw: clk_hw for the clkoutx2 clock
 * @rate: target rate for the clkoutx2 output
 * @prate: pointer to the parent (DPLL) rate; may be updated
 *
 * CLKOUTX2 normally runs at twice the DPLL output.  Walks up to the
 * owning DPLL via omap3_find_clkoutx2_dpll() and rounds accordingly.
 * Returns 0 if no parent rate or no DPLL is found, the bypass/parent
 * rate when the DPLL is not locked, and otherwise twice the (possibly
 * re-rounded) parent rate.
 */
long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	const struct dpll_data *dd;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	/* Nothing to scale against without a parent rate. */
	if (!*prate)
		return 0;

	/* Locate the DPLL this clkoutx2 hangs off of. */
	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	/* TYPE J does not have a clkoutx2 */
	if (dd->flags & DPLL_J_TYPE) {
		/* No doubling: round directly against the DPLL's parent. */
		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
		return *prate;
	}

	WARN_ON(!dd->enable_mask);

	/* Extract the DPLL mode field from the control register. */
	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* If in bypass, the rate is fixed to the bypass rate*/
	if (v != OMAP3XXX_EN_DPLL_LOCKED)
		return *prate;

	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
		unsigned long best_parent;

		/* Ask the parent for half the target; we double it below. */
		best_parent = (rate / 2);
		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
					  best_parent);
	}

	return *prate * 2;
}
/*
 * Round @rate for a fixed mult/div factor clock.  If the clock is allowed
 * to propagate rate requests upward (CLK_SET_RATE_PARENT), first ask the
 * parent to round the back-computed parent rate and report it via @prate.
 * The result is always derived from the (possibly updated) parent rate.
 */
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);

	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
		/* Undo our factor to get the rate to request upstream. */
		unsigned long parent_target = (rate / fix->mult) * fix->div;

		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
					  parent_target);
	}

	return (*prate / fix->div) * fix->mult;
}
/*
 * Round @rate against @clk's parent, taking the clock's lock around the
 * call.  Returns -EINVAL when @clk or its parent is missing, otherwise
 * the parent's rounded rate.
 */
long clk_round_rate_rec(struct clk *clk, unsigned long rate)
{
	unsigned long irq_flags;
	long rrate;

	if (!clk || !clk->parent)
		return -EINVAL;

	__clk_lock(clk->parent, clk->mutex, &irq_flags);
	rrate = __clk_round_rate(clk->parent, rate);
	__clk_unlock(clk->parent, clk->mutex, irq_flags);

	return rrate;
}
/*
 * Round @rate for @clk under the clock's own lock (NO_LOCK indicates no
 * parent-level lock is taken).  Returns -EINVAL for a NULL clock,
 * otherwise the rounded rate from __clk_round_rate().
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long irq_flags;
	long rrate;

	if (!clk)
		return -EINVAL;

	__clk_lock(clk, NO_LOCK, &irq_flags);
	rrate = __clk_round_rate(clk, rate);
	__clk_unlock(clk, NO_LOCK, irq_flags);

	return rrate;
}
/*
 * Round @rate for the audio PLL PMC output.  Computes the QDPMC divider
 * for the requested rate, asks the parent to round the corresponding
 * upstream rate, and reports that back through @parent_rate.
 * Returns -EINVAL if no valid divider exists for @rate.
 */
static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long *parent_rate)
{
	struct clk *parent = __clk_get_parent(hw->clk);
	unsigned long rounded_parent;
	unsigned long qd;
	long rounded;

	pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n",
		 __func__, rate, *parent_rate);

	/* No divider can produce the requested rate -> bail out. */
	if (clk_audio_pll_compute_qdpmc(AUDIO_PLL_REFERENCE_FOUT, rate, &qd))
		return -EINVAL;

	rounded_parent = __clk_round_rate(parent, rate * qd);
	rounded = rounded_parent / qd;

	pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %lu)\n",
		 __func__, rounded, rounded_parent, qd - 1);

	*parent_rate = rounded_parent;

	return rounded;
}
/**
 * __div_round_rate - find the best divider (and parent rate) for @rate
 * @data: divider limits and half-divider flag
 * @rate: target output rate
 * @parent: parent clock to (optionally) re-round
 * @best_div: out param for the chosen divider (may be NULL)
 * @best_prate: in/out param for the parent rate (in when !@set_parent)
 * @set_parent: whether we may ask the parent to change its rate
 *
 * When @set_parent is false the parent rate in *@best_prate is fixed and
 * we simply pick the divider that gets closest from above.  Otherwise we
 * sweep the divider range, rounding the parent for each candidate, and
 * keep the best achievable rate.  Returns the achievable rate, or
 * -EINVAL if nothing worked.
 */
static long __div_round_rate(struct div_data *data, unsigned long rate,
			     struct clk *parent, unsigned int *best_div,
			     unsigned long *best_prate, bool set_parent)
{
	unsigned int div, min_div, max_div, _best_div = 1;
	unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
	unsigned int numer;

	rate = max(rate, 1UL);

	min_div = max(data->min_div, 1U);
	max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));

	/*
	 * div values are doubled for half dividers.
	 * Adjust for that by picking a numer of 2.
	 */
	numer = data->is_half_divider ? 2 : 1;

	if (!set_parent) {
		prate = *best_prate * numer;
		div = DIV_ROUND_UP(prate, rate);
		/*
		 * BUGFIX: clamp() is clamp(val, lo, hi); the original called
		 * clamp(1U, div, max_div), which clamps the constant 1
		 * between div and max_div instead of bounding div itself.
		 */
		div = clamp(div, 1U, max_div);
		if (best_div)
			*best_div = div;
		return mult_frac(*best_prate, numer, div);
	}

	for (div = min_div; div <= max_div; div++) {
		req_prate = mult_frac(rate, div, numer);
		prate = __clk_round_rate(parent, req_prate);
		if (IS_ERR_VALUE(prate))
			break;

		actual_rate = mult_frac(prate, numer, div);
		if (is_better_rate(rate, rrate, actual_rate)) {
			rrate = actual_rate;
			_best_div = div;
			_best_prate = prate;
		}

		/*
		 * Trying higher dividers is only going to ask the parent for
		 * a higher rate. If it can't even output a rate higher than
		 * the one we request for this divider, the parent is not
		 * going to be able to output an even higher rate required
		 * for a higher divider. So, stop trying higher dividers.
		 */
		if (actual_rate < rate)
			break;

		if (rrate <= rate)
			break;
	}

	if (!rrate)
		return -EINVAL;
	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;

	return rrate;
}
/** * omap3_noncore_dpll_set_rate - set non-core DPLL rate * @clk: struct clk * of DPLL to set * @rate: rounded target rate * * Set the DPLL CLKOUT to the target rate. If the DPLL can enter * low-power bypass, and the target rate is the bypass source clock * rate, then configure the DPLL for bypass. Otherwise, round the * target rate if it hasn't been done already, then program and lock * the DPLL. Returns -EINVAL upon error, or 0 upon success. */ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct clk *new_parent = NULL; unsigned long rrate; u16 freqsel = 0; struct dpll_data *dd; int ret; if (!hw || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { pr_debug("%s: %s: set rate: entering bypass.\n", __func__, __clk_get_name(hw->clk)); __clk_prepare(dd->clk_bypass); clk_enable(dd->clk_bypass); ret = _omap3_noncore_dpll_bypass(clk); if (!ret) new_parent = dd->clk_bypass; clk_disable(dd->clk_bypass); __clk_unprepare(dd->clk_bypass); } else { __clk_prepare(dd->clk_ref); clk_enable(dd->clk_ref); if (dd->last_rounded_rate != rate) { rrate = __clk_round_rate(hw->clk, rate); if (rrate != rate) { pr_warn("%s: %s: final rate %lu does not match desired rate %lu\n", __func__, __clk_get_name(hw->clk), rrate, rate); rate = rrate; } } if (dd->last_rounded_rate == 0) return -EINVAL; /* Freqsel is available only on OMAP343X devices */ if (cpu_is_omap343x()) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); WARN_ON(!freqsel); } pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, __clk_get_name(hw->clk), rate); ret = omap3_noncore_dpll_program(clk, freqsel); if (!ret) new_parent = dd->clk_ref; clk_disable(dd->clk_ref); __clk_unprepare(dd->clk_ref); } /* * FIXME - this is all wrong. common code handles reparenting and * migrating prepare/enable counts. 
dplls should be a multiplexer * clock and this should be a set_parent operation so that all of that * stuff is inherited for free */ if (!ret && clk_get_parent(hw->clk) != new_parent) __clk_reparent(hw->clk, new_parent); return 0; }
/** * omap3_noncore_dpll_set_rate - set non-core DPLL rate * @clk: struct clk * of DPLL to set * @rate: rounded target rate * * Set the DPLL CLKOUT to the target rate. If the DPLL can enter * low-power bypass, and the target rate is the bypass source clock * rate, then configure the DPLL for bypass. Otherwise, round the * target rate if it hasn't been done already, then program and lock * the DPLL. Returns -EINVAL upon error, or 0 upon success. */ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct clk *new_parent = NULL; u16 freqsel = 0; struct dpll_data *dd; int ret; if (!hw || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; __clk_prepare(dd->clk_bypass); clk_enable(dd->clk_bypass); __clk_prepare(dd->clk_ref); clk_enable(dd->clk_ref); if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { pr_debug("%s: %s: set rate: entering bypass.\n", __func__, __clk_get_name(hw->clk)); ret = _omap3_noncore_dpll_bypass(clk); if (!ret) new_parent = dd->clk_bypass; } else { if (dd->last_rounded_rate != rate) rate = __clk_round_rate(hw->clk, rate); if (dd->last_rounded_rate == 0) return -EINVAL; /* No freqsel on OMAP4 and OMAP3630 */ if (!cpu_is_omap44xx() && !cpu_is_omap3630()) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); if (!freqsel) WARN_ON(1); } pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, __clk_get_name(hw->clk), rate); ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m, dd->last_rounded_n, freqsel); if (!ret) new_parent = dd->clk_ref; } /* * FIXME - this is all wrong. common code handles reparenting and * migrating prepare/enable counts. 
dplls should be a multiplexer * clock and this should be a set_parent operation so that all of that * stuff is inherited for free */ if (!ret) __clk_reparent(hw->clk, new_parent); clk_disable(dd->clk_ref); __clk_unprepare(dd->clk_ref); clk_disable(dd->clk_bypass); __clk_unprepare(dd->clk_bypass); return 0; }