/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	/*
	 * The DPLL's clockdomain must be active before touching the DPLL
	 * mode bits; bail out (with a loud warning) if that fails.
	 */
	if (clk->clkdm) {
		r = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, r);
			return r;
		}
	}

	parent = __clk_get_parent(hw->clk);

	/*
	 * Rate equal to the bypass clock rate means bypass was requested;
	 * any other rate means the DPLL should lock.  The WARN_ONs catch
	 * a parent that is out of sync with the programmed rate.
	 */
	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
/** * omap2_clksel_set_parent() - change a clock's parent clock * @clk: struct clk * of the child clock * @new_parent: struct clk * of the new parent clock * * This function is intended to be called only by the clock framework. * Change the parent clock of clock @clk to @new_parent. This is * intended to be used while @clk is disabled. This function does not * currently check the usecount of the clock, so if multiple drivers * are using the clock, and the parent is changed, they will all be * affected without any notification. Returns -EINVAL upon error, or * 0 upon success. */ int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent) { u32 field_val = 0; u32 parent_div; if (!clk->clksel || !clk->clksel_mask) return -EINVAL; parent_div = _get_div_and_fieldval(new_parent, clk, &field_val); if (!parent_div) return -EINVAL; _write_clksel_reg(clk, field_val); clk_reparent(clk, new_parent); /* CLKSEL clocks follow their parents' rates, divided by a divisor */ clk->rate = __clk_get_rate(new_parent); if (parent_div > 0) __clk_get_rate(clk) /= parent_div; pr_debug("clock: %s: set parent to %s (new rate %ld)\n", __clk_get_name(clk), __clk_get_name(__clk_get_parent(clk)), __clk_get_rate(clk)); return 0; }
/** * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate * @clk: struct clk * of a DPLL * * DPLLs can be locked or bypassed - basically, enabled or disabled. * When locked, the DPLL output depends on the M and N values. When * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk. * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0 * if the clock @clk is not a DPLL. */ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) { long long dpll_clk; u32 dpll_mult, dpll_div, v; struct dpll_data *dd; dd = clk->dpll_data; if (!dd) return 0; /* Return bypass rate if DPLL is bypassed */ v = omap2_clk_readl(clk, dd->control_reg); v &= dd->enable_mask; v >>= __ffs(dd->enable_mask); if (_omap2_dpll_is_in_bypass(v)) return __clk_get_rate(dd->clk_bypass); v = omap2_clk_readl(clk, dd->mult_div1_reg); dpll_mult = v & dd->mult_mask; dpll_mult >>= __ffs(dd->mult_mask); dpll_div = v & dd->div1_mask; dpll_div >>= __ffs(dd->div1_mask); dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult; do_div(dpll_clk, dpll_div + 1); return dpll_clk; }
static long clk_mux_with_evendiv_determine_rate(struct clk_hw *div_hw, unsigned long rate, unsigned long *best_parent_rate, struct clk **best_parent_p) { struct clk *clk = div_hw->clk, *parent = NULL, *best_parent = NULL; int i, num_parents; unsigned long parent_rate = 0, best_prate = 0, best = 0, now = 0; parent = __clk_get_parent(clk); if(!parent){ best = __clk_get_rate(clk); goto out; } /* if NO_REPARENT flag set, pass through to current parent */ if (clk->flags & CLK_SET_RATE_NO_REPARENT) { best_prate = __clk_get_rate(parent); best = clk_div_round_rate_even(div_hw, rate, &best_prate); goto out; } /* find the parent that can provide the fastest rate <= rate */ num_parents = clk->num_parents; for (i = 0; i < num_parents; i++) { parent = clk_get_parent_by_index(clk, i); if (!parent) continue; parent_rate = __clk_get_rate(parent); now = clk_div_round_rate_even(div_hw, rate, &parent_rate); if (now <= rate && now > best) { best_parent = parent; best_prate = parent_rate; best = now; } } out: if(best_prate) *best_parent_rate = best_prate; if (best_parent) *best_parent_p = best_parent; clk_debug("clk name = %s, determine rate = %lu, best = %lu\n" "\tbest_parent name = %s, best_prate = %lu\n", clk->name, rate, best, __clk_get_name(*best_parent_p), *best_parent_rate); return best; }
/* Public rate query: NULL clocks read as 0, otherwise take the lock path. */
unsigned long clk_get_rate(struct clk *clk)
{
	return (clk == NULL) ? 0 : __clk_get_rate(clk, NO_LOCK);
}
static int clk_ddr_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk *parent = __clk_get_parent(hw->clk); struct clk *grand_p = __clk_get_parent(parent); /* Do nothing before ddr init */ if (!ddr_change_freq) return 0; if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(grand_p)) { clk_err("fail to get parent or grand_parent!\n"); return -EINVAL; } clk_debug("%s: will set rate = %lu\n", __func__, rate); /* Func provided by ddr driver */ ddr_change_freq(rate/MHZ); parent->rate = parent->ops->recalc_rate(parent->hw, __clk_get_rate(grand_p)); return 0; }
/** * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL * @hw: pointer to the clock to determine rate for * @rate: target rate for the DPLL * @best_parent_rate: pointer for returning best parent rate * @best_parent_clk: pointer for returning best parent clock * * Determines which DPLL mode to use for reaching a desired rate. * Checks whether the DPLL shall be in bypass or locked mode, and if * locked, calculates the M,N values for the DPLL via round-rate. * Returns a positive clock rate with success, negative error value * in failure. */ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk **best_parent_clk) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct dpll_data *dd; if (!hw || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { *best_parent_clk = dd->clk_bypass; } else { rate = omap4_dpll_regm4xen_round_rate(hw, rate, best_parent_rate); *best_parent_clk = dd->clk_ref; } *best_parent_rate = rate; return rate; }
/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
	/*
	 * FREQSEL bands from the TRM; min/max are inclusive.  Note the
	 * deliberate gap between 2.1 MHz and 7.5 MHz, which has no
	 * valid setting.
	 */
	static const struct {
		unsigned long min;
		unsigned long max;
		u16 val;
	} bands[] = {
		{   750000,  1000000, 0x3 },
		{  1000001,  1250000, 0x4 },
		{  1250001,  1500000, 0x5 },
		{  1500001,  1750000, 0x6 },
		{  1750001,  2100000, 0x7 },
		{  7500001, 10000000, 0xB },
		{ 10000001, 12500000, 0xC },
		{ 12500001, 15000000, 0xD },
		{ 15000001, 17500000, 0xE },
		{ 17500001, 21000000, 0xF },
	};
	unsigned long fint;
	unsigned int i;
	u16 f = 0;

	fint = __clk_get_rate(clk->dpll_data->clk_ref) / n;

	pr_debug("clock: fint is %lu\n", fint);

	for (i = 0; i < sizeof(bands) / sizeof(bands[0]); i++) {
		if (fint >= bands[i].min && fint <= bands[i].max) {
			f = bands[i].val;
			break;
		}
	}

	if (!f)
		pr_debug("clock: unknown freqsel setting for %d\n", n);

	return f;
}
/*
 * Set the core clock rate by programming the parent PLL directly,
 * then refresh the parent's cached rate from hardware.
 */
static int clk_core_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk *apll = __clk_get_parent(hw->clk);
	struct clk *apll_parent = __clk_get_parent(apll);
	int err;

	if (IS_ERR_OR_NULL(apll) || IS_ERR_OR_NULL(apll_parent)) {
		clk_err("fail to get parent or grand_parent!\n");
		return -EINVAL;
	}

	err = apll->ops->set_rate(apll->hw, rate,
				  __clk_get_rate(apll_parent));
	/* re-read the parent's rate even on failure, matching hardware */
	apll->rate = apll->ops->recalc_rate(apll->hw,
					    __clk_get_rate(apll_parent));

	return err;
}
/*
 * RK3288 I2S rate hook: program the parent fractional divider twice
 * (half rate, then target rate).  Always reports success; missing
 * parents are silently tolerated.
 */
static int clk_3288_i2s_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk *frac = __clk_get_parent(hw->clk);
	struct clk *frac_parent = __clk_get_parent(frac);

	if (IS_ERR_OR_NULL(frac) || IS_ERR_OR_NULL(frac_parent))
		return 0;

	if (frac->ops->set_rate) {
		frac->ops->set_rate(frac->hw, rate / 2,
				    __clk_get_rate(frac_parent));
		frac->ops->set_rate(frac->hw, rate,
				    __clk_get_rate(frac_parent));
	}

	return 0;
}
/**
 * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * This function is also used by the DPLL3 M2 divider code.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;
	struct clk *parent;
	unsigned long parent_rate;
	const char *clk_name;

	parent = __clk_get_parent(clk);
	parent_rate = __clk_get_rate(parent);
	clk_name = __clk_get_name(clk);

	if (!clk->clksel || !clk->clksel_mask)
		return ~0;

	pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
		 clk_name, target_rate);

	/* default divisor in case the table walk below finds nothing */
	*new_div = 1;

	clks = _get_clksel_by_parent(clk, parent);
	if (!clks)
		return ~0;

	/*
	 * Walk dividers smallest-first; the first divider whose resulting
	 * rate is <= target_rate is the best (largest) achievable rate.
	 */
	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			pr_err("clock: %s: clksel_rate table not sorted\n",
			       clk_name);

		last_div = clkr->div;

		test_rate = parent_rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	/* ran off the end of the table: no divider reaches target_rate */
	if (!clkr->div) {
		pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n",
		       clk_name, target_rate, __clk_get_name(parent));
		return ~0;
	}

	*new_div = clkr->div;

	pr_debug("clock: new_div = %d, new_rate = %ld\n",
		 *new_div, (parent_rate / clkr->div));

	return parent_rate / clkr->div;
}
/**
 * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
 * @clk: struct clk *
 *
 * This function is intended to be called only by the clock framework.
 * Each clksel clock should have its struct clk .recalc field set to this
 * function.  Returns the clock's current rate, based on its parent's rate
 * and its current divisor setting in the hardware.
 */
unsigned long omap2_clksel_recalc(struct clk *clk)
{
	struct clk *parent;
	unsigned long rate;
	u32 div;

	div = _read_divisor(clk);
	/* unreadable divisor: fall back to the cached rate */
	if (!div)
		return __clk_get_rate(clk);

	parent = __clk_get_parent(clk);
	rate = __clk_get_rate(parent) / div;

	pr_debug("clock: %s: recalc'd rate is %ld (div %d)\n",
		 __clk_get_name(clk), rate, div);

	return rate;
}
/*
 * Public rate query: __clk_get_rate() must be serialized against
 * rate changes, so take the prepare lock around it.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long r;

	mutex_lock(&prepare_lock);
	r = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return r;
}
/**
 * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
 * @clk: struct clk * of a DPLL
 *
 * DPLLs can be locked or bypassed - basically, enabled or disabled.
 * When locked, the DPLL output depends on the M and N values.  When
 * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock
 * or sys_clk.  Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
 * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
 * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
 * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
 * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
 * if the clock @clk is not a DPLL.
 */
u32 omap2_get_dpll_rate(struct clk *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return 0;

	/* Return bypass rate if DPLL is bypassed */
	v = __raw_readl(dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* the encoding of the bypass states in EN_DPLL differs per SoC */
	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (soc_is_am33xx() || cpu_is_omap44xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	}

	/* locked: CLKOUT = clk_ref * M / (N + 1) */
	v = __raw_readl(dd->mult_div1_reg);
	dpll_mult = v & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = v & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	/* 64-bit intermediate to avoid overflow of ref_rate * M */
	dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}
/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
	unsigned long fint, clkinp; /* watch out for overflow */

	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
	fint = (clkinp / n) * m;

	/* DCO band threshold is 1 GHz per the TRM */
	*dco = (fint < 1000000000) ? 2 : 4;
}
/** * omap4_dpll_lpmode_recalc - compute DPLL low-power setting * @dd: pointer to the dpll data structure * * Calculates if low-power mode can be enabled based upon the last * multiplier and divider values calculated. If low-power mode can be * enabled, then the bit to enable low-power mode is stored in the * last_rounded_lpmode variable. This implementation is based upon the * criteria for enabling low-power mode as described in the OMAP4430/60 * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power * Operation Mode". */ static void omap4_dpll_lpmode_recalc(struct dpll_data *dd) { long fint, fout; fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1); fout = fint * dd->last_rounded_m; if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX)) dd->last_rounded_lpmode = 1; else dd->last_rounded_lpmode = 0; }
/*
 * clkout_clksel_set_rate - program a clkout clock's parent, clksel and
 * divider fields to produce exactly @rate.
 *
 * Finds the best (parent, clksel divider, post divider) combination via
 * clkout_find_match(); only an exact rate match is accepted.  Reparents
 * if needed, then writes the clksel and divider fields - either as two
 * separate registers or as one combined register write, depending on
 * whether the two fields live in the same register.
 */
static int clkout_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_clkout *clkout_clk = to_clk_hw_clkout(hw);
	struct clk_hw_omap *omap_clk = to_clk_hw_omap(hw);
	struct clkout_match *m = &clkout_clk->match;
	struct clk *clk = hw->clk;
	int ret;
	u32 v;

	ret = clkout_find_match(hw, rate, m);
	if (ret != 0)
		return ret;

	/* have to be exact now */
	if (rate != m->best_rate) {
		pr_err("%s: Failed to find exact rate %lu (was %lu)\n",
		       __func__, rate, m->best_rate);
		return -EINVAL;
	}

	/* switch parent */
	if (m->best_clks->parent != __clk_get_parent(clk)) {
		__clk_set_parent(clk, m->best_clks->parent);
		__clk_reparent(clk, m->best_clks->parent);
		parent_rate = __clk_get_rate(__clk_get_parent(clk));
	}

	/* we need to write the new value */
	if (omap_clk->clksel_reg != clkout_clk->div_reg) {
		/* fields in separate registers: two read-modify-write
		 * sequences, each followed by a readback (OCP barrier) */
		v = __raw_readl(omap_clk->clksel_reg);
		v &= ~omap_clk->clksel_mask;
		v |= m->best_clkr->val << __ffs(omap_clk->clksel_mask);
		__raw_writel(v, omap_clk->clksel_reg);
		v = __raw_readl(omap_clk->clksel_reg); /* OCP barrier */

		v = __raw_readl(clkout_clk->div_reg);
		v &= ~clkout_clk->div_mask;
		v |= m->best_clkd->val << __ffs(clkout_clk->div_mask);
		__raw_writel(v, clkout_clk->div_reg);
		v = __raw_readl(clkout_clk->div_reg); /* OCP barrier */
	} else {
		/* both fields share one register: update them atomically
		 * in a single write */
		v = __raw_readl(omap_clk->clksel_reg);
		v &= ~(omap_clk->clksel_mask | clkout_clk->div_mask);
		v |= (m->best_clkr->val << __ffs(omap_clk->clksel_mask)) |
		     (m->best_clkd->val << __ffs(clkout_clk->div_mask));
		__raw_writel(v, omap_clk->clksel_reg);
		v = __raw_readl(omap_clk->clksel_reg); /* OCP barrier */
	}

	return ret;
}
static unsigned long clk_core_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	/* As parent rate could be changed in clk_core.set_rate
	 * ops, the passing_in parent_rate may not be the newest
	 * and we should use the parent->rate instead.  As a side
	 * effect, we should NOT directly set clk_core's parent
	 * (apll) rate, otherwise we will get a wrong recalc rate
	 * with clk_core_recalc_rate.
	 */
	struct clk *apll = __clk_get_parent(hw->clk);

	return clk_divider_recalc_rate(hw, __clk_get_rate(apll));
}
/** * omap2_clksel_set_rate() - program clock rate in hardware * @clk: struct clk * to program rate * @rate: target rate to program * * This function is intended to be called only by the clock framework. * Program @clk's rate to @rate in the hardware. The clock can be * either enabled or disabled when this happens, although if the clock * is enabled, some downstream devices may glitch or behave * unpredictably when the clock rate is changed - this depends on the * hardware. This function does not currently check the usecount of * the clock, so if multiple drivers are using the clock, and the rate * is changed, they will all be affected without any notification. * Returns -EINVAL upon error, or 0 upon success. */ int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) { u32 field_val, validrate, new_div = 0; if (!clk->clksel || !clk->clksel_mask) return -EINVAL; validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); if (validrate != rate) return -EINVAL; field_val = _divisor_to_clksel(clk, new_div); if (field_val == ~0) return -EINVAL; _write_clksel_reg(clk, field_val); clk->rate = __clk_get_rate(__clk_get_parent(clk)) / new_div; pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(clk), __clk_get_rate(clk)); return 0; }
static long clkout_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate, unsigned long *parent_rate) { struct clkout_match m; long ret; ret = clkout_find_match(hw, target_rate, &m); if (ret != 0) { pr_err("%s: Failed to find a best match\n", __func__); return (unsigned long)-1; } *parent_rate = __clk_get_rate(m.best_clks->parent); return m.best_rate; }
/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint.  See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 */
static int _dpll_test_fint(struct clk *clk, u8 n)
{
	struct dpll_data *dd;
	long fint, fint_min, fint_max;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = __clk_get_rate(__clk_get_parent(clk)) / n;

	/* select the Fint limits applicable to this SoC / DPLL type */
	if (cpu_is_omap24xx()) {
		/* Should not be called for OMAP2, so warn if it is called */
		WARN(1, "No fint limits available for OMAP2!\n");
		return DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430()) {
		fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
	} else if (dd->flags & DPLL_J_TYPE) {
		fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
	} else {
		fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	if (fint < fint_min) {
		/* Fint too low: no smaller N can help, so the caller's
		 * upward-counting loop can stop raising N past this point */
		pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
			 n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;
	} else if (fint > fint_max) {
		/* Fint too high: larger N (smaller Fint) may still work */
		pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
			 n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430() && fint > OMAP3430_DPLL_FINT_BAND1_MAX &&
		   fint < OMAP3430_DPLL_FINT_BAND2_MIN) {
		/* 3430 has a forbidden gap between its two Fint bands */
		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;
	}

	return ret;
}
/*
 * RK3288 dclk_lcdc1 rate hook: program the divider, then steer
 * aclk_vio1 to GPLL (for the 297 MHz video parent rate) or CPLL
 * (otherwise) and match its rate to the chosen PLL.
 *
 * NOTE(review): the three clk_get() references taken here are never
 * released with clk_put(), and their results are not checked for
 * IS_ERR/NULL before use - looks like a refcount leak and a potential
 * bad-pointer deref if a lookup fails; confirm against this platform's
 * clk_get() implementation.
 */
static int clk_3288_dclk_lcdc1_set_rate(struct clk_hw *hw, unsigned long rate,
	unsigned long parent_rate)
{
	struct clk* aclk_vio1 = clk_get(NULL, "aclk_vio1");
	struct clk* parent;

	clk_divider_ops.set_rate(hw, rate, parent_rate);

	/* set aclk_vio */
	if(parent_rate == 297*MHZ)
		parent = clk_get(NULL, "clk_gpll");
	else
		parent = clk_get(NULL, "clk_cpll");

	clk_set_parent(aclk_vio1, parent);
	clk_set_rate(aclk_vio1, __clk_get_rate(parent));

	return 0;
}
/* * _dpll_test_fint - test whether an Fint value is valid for the DPLL * @clk: DPLL struct clk to test * @n: divider value (N) to test * * Tests whether a particular divider @n will result in a valid DPLL * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate * (assuming that it is counting N upwards), or -2 if the enclosing loop * should skip to the next iteration (again assuming N is increasing). */ static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) { struct dpll_data *dd; long fint, fint_min, fint_max; int ret = 0; dd = clk->dpll_data; /* DPLL divider must result in a valid jitter correction val */ fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n; if (dd->flags & DPLL_J_TYPE) { fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN; fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX; } else { fint_min = ti_clk_features.fint_min; fint_max = ti_clk_features.fint_max; } if (!fint_min || !fint_max) { WARN(1, "No fint limits available!\n"); return DPLL_FINT_INVALID; } if (fint < ti_clk_features.fint_min) { pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n", n); dd->max_divider = n; ret = DPLL_FINT_UNDERFLOW; } else if (fint > ti_clk_features.fint_max) { pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n", n); dd->min_divider = n; ret = DPLL_FINT_INVALID; } else if (fint > ti_clk_features.fint_band1_max && fint < ti_clk_features.fint_band2_min) { pr_debug("rejecting n=%d due to Fint failure\n", n); ret = DPLL_FINT_INVALID; } return ret; }
/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd; /* watch out for overflow */
	int mod1, mod2;

	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 */
	clkinp /= 100000;		/* shift from MHz to 10*Hz for 38.4 and 19.2 */
	/* compute sd in tenths, tracking both remainders so the final
	 * result is a true ceiling of the exact quotient */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	/* any remainder at either stage means we must round up */
	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}
/*
 * __clk_get_rate - return a clock's rate, walking up the tree if needed
 * @clk: clock to query (NULL reads as rate 0)
 * @current_lock: the lock (if any) already held by the caller, passed
 *                through to __clk_lock() so re-locking the same clock
 *                is avoided when recursing
 *
 * Resolution order: the clock's own .get_rate op, then its cached
 * ->rate, then (recursively) its parent's rate.  When recursing, this
 * clock's own mutex is handed down as the parent's current_lock.
 */
unsigned long __clk_get_rate(struct clk *clk, void *current_lock)
{
	unsigned long rate;
	unsigned long flags;

	if (clk == NULL)
		return 0;

	__clk_lock(clk, current_lock, &flags);

	if ((clk->ops != NULL) && (clk->ops->get_rate != NULL))
		rate = clk->ops->get_rate(clk);
	else if (clk->rate)
		rate = clk->rate;
	else
		/* no op and no cached rate: inherit the parent's rate;
		 * pass our mutex so the parent knows who holds it */
		rate = __clk_get_rate(clk->parent, clk->mutex);

	__clk_unlock(clk, current_lock, flags);

	return rate;
}
static int clkout_find_match(struct clk_hw *hw, unsigned long target_rate, struct clkout_match *m) { struct clk_hw_clkout *clkout_clk = to_clk_hw_clkout(hw); struct clk_hw_omap *omap_clk = to_clk_hw_omap(hw); struct clk *pclk; unsigned long prate, test_rate; const struct clksel *clks; const struct clksel_rate *clkr; const struct clk_div_table *clkd; memset(m, 0, sizeof(*m)); /* iterate over all the clksel */ for (clks = omap_clk->clksel; (pclk = clks->parent) != NULL; clks++) { prate = __clk_get_rate(pclk); for (clkr = clks->rates; clkr->div; clkr++) { if (!(clkr->flags & cpu_mask)) continue; for (clkd = clkout_clk->div_table; clkd->div; clkd++) { test_rate = (prate / clkr->div) / clkd->div; if (abs(test_rate - target_rate) < abs(m->best_rate - target_rate)) { m->parent_rate = prate / clkr->div; m->best_rate = test_rate; m->best_clks = clks; m->best_clkr = clkr; m->best_clkd = clkd; } } } } if (!m->best_clks || !m->best_clkr || !m->best_clkd) { pr_err("%s: Failed to find a best match\n", __func__); return -EINVAL; } return 0; }
/** * omap3_noncore_dpll_set_rate - set non-core DPLL rate * @clk: struct clk * of DPLL to set * @rate: rounded target rate * * Set the DPLL CLKOUT to the target rate. If the DPLL can enter * low-power bypass, and the target rate is the bypass source clock * rate, then configure the DPLL for bypass. Otherwise, round the * target rate if it hasn't been done already, then program and lock * the DPLL. Returns -EINVAL upon error, or 0 upon success. */ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct clk *new_parent = NULL; unsigned long rrate; u16 freqsel = 0; struct dpll_data *dd; int ret; if (!hw || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; if (__clk_get_rate(dd->clk_bypass) == rate && (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { pr_debug("%s: %s: set rate: entering bypass.\n", __func__, __clk_get_name(hw->clk)); __clk_prepare(dd->clk_bypass); clk_enable(dd->clk_bypass); ret = _omap3_noncore_dpll_bypass(clk); if (!ret) new_parent = dd->clk_bypass; clk_disable(dd->clk_bypass); __clk_unprepare(dd->clk_bypass); } else { __clk_prepare(dd->clk_ref); clk_enable(dd->clk_ref); if (dd->last_rounded_rate != rate) { rrate = __clk_round_rate(hw->clk, rate); if (rrate != rate) { pr_warn("%s: %s: final rate %lu does not match desired rate %lu\n", __func__, __clk_get_name(hw->clk), rrate, rate); rate = rrate; } } if (dd->last_rounded_rate == 0) return -EINVAL; /* Freqsel is available only on OMAP343X devices */ if (cpu_is_omap343x()) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); WARN_ON(!freqsel); } pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, __clk_get_name(hw->clk), rate); ret = omap3_noncore_dpll_program(clk, freqsel); if (!ret) new_parent = dd->clk_ref; clk_disable(dd->clk_ref); __clk_unprepare(dd->clk_ref); } /* * FIXME - this is all wrong. common code handles reparenting and * migrating prepare/enable counts. 
dplls should be a multiplexer * clock and this should be a set_parent operation so that all of that * stuff is inherited for free */ if (!ret && clk_get_parent(hw->clk) != new_parent) __clk_reparent(hw->clk, new_parent); return 0; }
/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL and a desired target rate, round the target rate to a
 * possible, programmable rate for this DPLL.  Attempts to select the
 * minimum possible n.  Stores the computed (m, n) in the DPLL's
 * dpll_data structure so set_rate() will not need to call this
 * (expensive) function again.  Returns ~0 if the target rate cannot
 * be rounded, or the rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, scaled_max_m;
	unsigned long scaled_rt_rp;
	unsigned long new_rate = 0;
	struct dpll_data *dd;
	unsigned long ref_rate;
	const char *clk_name;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	ref_rate = __clk_get_rate(dd->clk_ref);
	clk_name = __clk_get_name(clk);
	pr_debug("clock: %s: starting DPLL round_rate, target rate %ld\n",
		 clk_name, target_rate);

	/* work in DPLL_SCALE_FACTOR units to keep precision in the
	 * integer division below */
	scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {

		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, a m overflow means we
		 * can bail out completely (since as n increases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    ref_rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		pr_debug("clock: %s: m = %d: n = %d: new_rate = %ld\n",
			 clk_name, m, n, new_rate);

		/* only an exact match is accepted; cache (m, n) for
		 * the upcoming set_rate() */
		if (target_rate == new_rate) {
			dd->last_rounded_m = m;
			dd->last_rounded_n = n;
			dd->last_rounded_rate = target_rate;
			break;
		}
	}

	if (target_rate != new_rate) {
		pr_debug("clock: %s: cannot round to rate %ld\n",
			 clk_name, target_rate);
		return ~0;
	}

	return target_rate;
}
/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL and a desired target rate, round the target rate to a
 * possible, programmable rate for this DPLL.  Attempts to select the
 * minimum possible n.  Stores the computed (m, n) in the DPLL's
 * dpll_data structure so set_rate() will not need to call this
 * (expensive) function again.  Returns ~0 if the target rate cannot
 * be rounded, or the rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
			   unsigned long *parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int m, n, r, scaled_max_m;
	int min_delta_m = INT_MAX, min_delta_n = INT_MAX;
	unsigned long scaled_rt_rp;
	unsigned long new_rate = 0;
	struct dpll_data *dd;
	unsigned long ref_rate;
	long delta;
	long prev_min_delta = LONG_MAX;
	const char *clk_name;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	/* clamp the request to the DPLL's hardware ceiling, if one is set */
	if (dd->max_rate && target_rate > dd->max_rate)
		target_rate = dd->max_rate;

	ref_rate = __clk_get_rate(dd->clk_ref);
	clk_name = __clk_get_name(hw->clk);
	pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
		 clk_name, target_rate);

	/* work in DPLL_SCALE_FACTOR units to keep precision in the
	 * integer division below */
	scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {

		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, a m overflow means we
		 * can bail out completely (since as n increases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate, ref_rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		/* skip rates above our target rate */
		delta = target_rate - new_rate;
		if (delta < 0)
			continue;

		/* track the closest rate-from-below seen so far */
		if (delta < prev_min_delta) {
			prev_min_delta = delta;
			min_delta_m = m;
			min_delta_n = n;
		}

		pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
			 clk_name, m, n, new_rate);

		/* exact match: no better candidate possible, stop early */
		if (delta == 0)
			break;
	}

	/* no candidate at or below the target was found */
	if (prev_min_delta == LONG_MAX) {
		pr_debug("clock: %s: cannot round to rate %lu\n",
			 clk_name, target_rate);
		return ~0;
	}

	/* cache the winning (m, n) for the upcoming set_rate() */
	dd->last_rounded_m = min_delta_m;
	dd->last_rounded_n = min_delta_n;
	dd->last_rounded_rate = target_rate - prev_min_delta;

	return dd->last_rounded_rate;
}
/*
 * clk_composite_determine_rate - determine_rate for composite clocks
 *
 * Dispatch order:
 *  1. the rate component's own .determine_rate, if it has one;
 *  2. otherwise, rate .round_rate combined with the mux: try every
 *     parent (unless CLK_SET_RATE_NO_REPARENT) and keep the parent
 *     whose rounded rate is closest to the request;
 *  3. otherwise, the mux component's .determine_rate;
 *  4. otherwise, log an error and return 0.
 */
static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long min_rate,
					unsigned long max_rate,
					unsigned long *best_parent_rate,
					struct clk_hw **best_parent_p)
{
	struct clk_composite *composite = to_clk_composite(hw);
	const struct clk_ops *rate_ops = composite->rate_ops;
	const struct clk_ops *mux_ops = composite->mux_ops;
	struct clk_hw *rate_hw = composite->rate_hw;
	struct clk_hw *mux_hw = composite->mux_hw;
	struct clk *parent;
	unsigned long parent_rate;
	long tmp_rate, best_rate = 0;
	unsigned long rate_diff;
	unsigned long best_rate_diff = ULONG_MAX;
	int i;

	if (rate_hw && rate_ops && rate_ops->determine_rate) {
		__clk_hw_set_clk(rate_hw, hw);
		return rate_ops->determine_rate(rate_hw, rate, min_rate,
						max_rate, best_parent_rate,
						best_parent_p);
	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
		   mux_hw && mux_ops && mux_ops->set_parent) {
		*best_parent_p = NULL;

		/* reparenting forbidden: round against the current parent */
		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
			parent = clk_get_parent(mux_hw->clk);
			*best_parent_p = __clk_get_hw(parent);
			*best_parent_rate = __clk_get_rate(parent);

			return rate_ops->round_rate(rate_hw, rate,
						    best_parent_rate);
		}

		/* scan all parents for the rounded rate nearest the request */
		for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
			parent = clk_get_parent_by_index(mux_hw->clk, i);
			if (!parent)
				continue;

			parent_rate = __clk_get_rate(parent);

			tmp_rate = rate_ops->round_rate(rate_hw, rate,
							&parent_rate);
			if (tmp_rate < 0)
				continue;

			rate_diff = abs(rate - tmp_rate);

			if (!rate_diff || !*best_parent_p ||
			    best_rate_diff > rate_diff) {
				*best_parent_p = __clk_get_hw(parent);
				*best_parent_rate = parent_rate;
				best_rate_diff = rate_diff;
				best_rate = tmp_rate;
			}

			/* exact match: stop searching */
			if (!rate_diff)
				return rate;
		}

		return best_rate;
	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
		__clk_hw_set_clk(mux_hw, hw);
		return mux_ops->determine_rate(mux_hw, rate, min_rate,
					       max_rate, best_parent_rate,
					       best_parent_p);
	} else {
		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
		return 0;
	}
}