/**
 * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
 * from HSDivider PWRDN problem Implements Errata ID: i556.
 * @hw: DPLL output struct clk_hw
 *
 * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
 * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with their reset
 * value after their respective PWRDN bits are set. Any dummy write
 * (any value different from the read value) to the corresponding
 * CM_CLKSEL register will refresh the dividers.
 */
static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
{
	struct clk_divider *parent;
	struct clk_hw *parent_hw;
	u32 dummy_v, orig_v;
	int ret;

	/* Clear PWRDN bit of HSDIVIDER */
	ret = omap2_dflt_clk_enable(hw);

	/* Parent is the x2 node, get parent of parent for the m2 div */
	parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw));
	parent = to_clk_divider(parent_hw);

	/* Restore the dividers */
	if (!ret) {
		orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
		dummy_v = orig_v;

		/* Write any other value different from the Read value */
		dummy_v ^= (1 << parent->shift);
		ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);

		/* Write the original divider */
		ti_clk_ll_ops->clk_writel(orig_v, parent->reg);
	}

	return ret;
}
/** * omap3_noncore_dpll_set_rate - set rate for a DPLL clock * @hw: pointer to the clock to set parent for * @rate: target rate for the clock * @parent_rate: rate of the parent clock * * Sets rate for a DPLL clock. First checks if the clock parent is * reference clock (in bypass mode, the rate of the clock can't be * changed) and proceeds with the rate change operation. Returns 0 * with success, negative error value otherwise. */ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); struct dpll_data *dd; u16 freqsel = 0; int ret; if (!hw || !rate) return -EINVAL; dd = clk->dpll_data; if (!dd) return -EINVAL; if (clk_hw_get_parent(hw) != dd->clk_ref) return -EINVAL; if (dd->last_rounded_rate == 0) return -EINVAL; /* Freqsel is available only on OMAP343X devices */ if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) { freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n); WARN_ON(!freqsel); } pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__, clk_hw_get_name(hw), rate); ret = omap3_noncore_dpll_program(clk, freqsel); return ret; }
/*
 * Scan every legal divider value and pick the one whose achievable
 * output rate lands closest to the requested rate. When @set_parent
 * is true the parent is also asked what it can deliver for each
 * candidate divider; otherwise the fixed *prate is used throughout.
 * On return *prate holds the winning parent rate.
 */
static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate, bool set_parent)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	int div_limit = (1 << (TIM2_PCD_LO_BITS + TIM2_PCD_HI_BITS)) - 1;
	unsigned long winner_prate = 0;
	unsigned long winner_diff = ~0ul;
	int winner = 1;
	int d;

	for (d = 1; d < div_limit; d++) {
		unsigned long candidate_prate, achieved, delta;

		candidate_prate = set_parent ?
			clk_hw_round_rate(parent, rate * d) : *prate;
		achieved = DIV_ROUND_UP_ULL(candidate_prate, d);
		delta = abs(rate - achieved);

		if (delta < winner_diff) {
			winner = d;
			winner_diff = delta;
			winner_prate = candidate_prate;
		}
	}

	*prate = winner_prate;
	return winner;
}
/** * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode * @clk: pointer to a DPLL struct clk * * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock. * The choice of modes depends on the DPLL's programmed rate: if it is * the same as the DPLL's parent clock, it will enter bypass; * otherwise, it will enter lock. This code will wait for the DPLL to * indicate readiness before returning, unless the DPLL takes too long * to enter the target state. Intended to be used as the struct clk's * enable function. If DPLL3 was passed in, or the DPLL does not * support low-power stop, or if the DPLL took too long to enter * bypass or lock, return -EINVAL; otherwise, return 0. */ int omap3_noncore_dpll_enable(struct clk_hw *hw) { struct clk_hw_omap *clk = to_clk_hw_omap(hw); int r; struct dpll_data *dd; struct clk_hw *parent; dd = clk->dpll_data; if (!dd) return -EINVAL; if (clk->clkdm) { r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk); if (r) { WARN(1, "%s: could not enable %s's clockdomain %s: %d\n", __func__, clk_hw_get_name(hw), clk->clkdm_name, r); return r; } } parent = clk_hw_get_parent(hw); if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) { WARN_ON(parent != dd->clk_bypass); r = _omap3_noncore_dpll_bypass(clk); } else { WARN_ON(parent != dd->clk_ref); r = _omap3_noncore_dpll_lock(clk); } return r; }
/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
	unsigned long ref_rate, fint;

	ref_rate = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
	/* Divide before multiplying to watch out for overflow */
	fint = (ref_rate / n) * m;

	/* Select the DCO band based on a 1 GHz internal-frequency cutoff */
	*dco = (fint < 1000000000) ? 2 : 4;
}
/*
 * Round a rate for a fixed-factor clock (out = parent * mult / div).
 * With CLK_SET_RATE_PARENT the parent is asked to round the parent
 * rate that would yield @rate; otherwise the current *prate is used.
 */
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);

	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		unsigned long ideal_parent;

		/* Invert the fixed factor to find the wanted parent rate */
		ideal_parent = (rate / fix->mult) * fix->div;
		*prate = clk_hw_round_rate(clk_hw_get_parent(hw),
					   ideal_parent);
	}

	return (*prate / fix->div) * fix->mult;
}
static int lpc18xx_ccu_gate_is_enabled(struct clk_hw *hw)
{
	const struct clk_hw *parent = clk_hw_get_parent(hw);

	/*
	 * The branch clock registers are only accessible
	 * if the base (parent) clock is enabled. Register
	 * access with a disabled base clock will hang the
	 * system, so report "disabled" without touching the
	 * hardware in that case.
	 */
	if (!parent || !clk_hw_is_enabled(parent))
		return 0;

	return clk_gate_ops.is_enabled(hw);
}
/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long ref, quot;	/* watch out for overflow */
	int rem_coarse, rem_fine;

	ref = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 *
	 * Work in units of 10*Hz so 38.4 and 19.2 MHz inputs stay exact,
	 * and take the ceiling by bumping the quotient when either
	 * division below left a remainder.
	 */
	ref /= 100000;		/* shift from MHz to 10*Hz for 38.4 and 19.2 */
	rem_coarse = (ref * m) % (250 * n);
	quot = (ref * m) / (250 * n);
	rem_fine = quot % 10;
	quot /= 10;
	if (rem_coarse || rem_fine)
		quot++;

	*sd_div = quot;
}
/* Find the parent DPLL for the given clkoutx2 clock */ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) { struct clk_hw_omap *pclk = NULL; /* Walk up the parents of clk, looking for a DPLL */ do { do { hw = clk_hw_get_parent(hw); } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC)); if (!hw) break; pclk = to_clk_hw_omap(hw); } while (pclk && !pclk->dpll_data); /* clk does not have a DPLL as a parent? error in the clock data */ if (!pclk) { WARN_ON(1); return NULL; } return pclk; }
/*
 * Determine the best achievable rate for a composite clock.
 *
 * Strategy, in priority order:
 *  1. If the rate component implements determine_rate, delegate to it.
 *  2. Else, if the rate component can round_rate and the mux can
 *     set_parent, try every mux parent (or only the current one when
 *     CLK_SET_RATE_NO_REPARENT is set) and pick the parent/rate pair
 *     closest to the request.
 *  3. Else, if only the mux implements determine_rate, delegate to it.
 *  4. Otherwise the composite is misconfigured: log and return -EINVAL.
 */
static int clk_composite_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_composite *composite = to_clk_composite(hw);
	const struct clk_ops *rate_ops = composite->rate_ops;
	const struct clk_ops *mux_ops = composite->mux_ops;
	struct clk_hw *rate_hw = composite->rate_hw;
	struct clk_hw *mux_hw = composite->mux_hw;
	struct clk_hw *parent;
	unsigned long parent_rate;
	long tmp_rate, best_rate = 0;
	unsigned long rate_diff;
	unsigned long best_rate_diff = ULONG_MAX;
	long rate;
	int i;

	if (rate_hw && rate_ops && rate_ops->determine_rate) {
		/* Case 1: rate component decides everything itself */
		__clk_hw_set_clk(rate_hw, hw);
		return rate_ops->determine_rate(rate_hw, req);
	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
		   mux_hw && mux_ops && mux_ops->set_parent) {
		req->best_parent_hw = NULL;

		if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
			/* Reparenting forbidden: round against the current parent only */
			parent = clk_hw_get_parent(mux_hw);
			req->best_parent_hw = parent;
			req->best_parent_rate = clk_hw_get_rate(parent);

			rate = rate_ops->round_rate(rate_hw, req->rate,
						    &req->best_parent_rate);
			if (rate < 0)
				return rate;

			req->rate = rate;
			return 0;
		}

		/* Case 2: scan every candidate parent for the closest match */
		for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
			parent = clk_hw_get_parent_by_index(mux_hw, i);
			if (!parent)
				continue;

			parent_rate = clk_hw_get_rate(parent);

			tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
							&parent_rate);
			if (tmp_rate < 0)
				continue;

			rate_diff = abs(req->rate - tmp_rate);

			/* Keep the first candidate, or any strictly closer one */
			if (!rate_diff || !req->best_parent_hw ||
			    best_rate_diff > rate_diff) {
				req->best_parent_hw = parent;
				req->best_parent_rate = parent_rate;
				best_rate_diff = rate_diff;
				best_rate = tmp_rate;
			}

			/* Exact match: no point scanning further */
			if (!rate_diff)
				return 0;
		}

		req->rate = best_rate;
		return 0;
	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
		/* Case 3: only the mux can influence the rate */
		__clk_hw_set_clk(mux_hw, hw);
		return mux_ops->determine_rate(mux_hw, req);
	} else {
		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
		return -EINVAL;
	}
}
/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	/* Ask the parent for the closest rate to twice the target ... */
	*parent_rate = clk_hw_round_rate(parent, rate * 2);
	/* ... then halve it (rounding up) to get our output rate */
	return DIV_ROUND_UP(*parent_rate, 2);
}
static int meson8b_clkc_probe(struct platform_device *pdev) { void __iomem *clk_base; int ret, clkid, i; struct clk_hw *parent_hw; struct clk *parent_clk; struct device *dev = &pdev->dev; /* Generic clocks and PLLs */ clk_base = of_iomap(dev->of_node, 1); if (!clk_base) { pr_err("%s: Unable to map clk base\n", __func__); return -ENXIO; } /* Populate base address for PLLs */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++) meson8b_clk_plls[i]->base = clk_base; /* Populate base address for MPLLs */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_mplls); i++) meson8b_clk_mplls[i]->base = clk_base; /* Populate the base address for CPU clk */ meson8b_cpu_clk.base = clk_base; /* Populate base address for gates */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_gates); i++) meson8b_clk_gates[i]->reg = clk_base + (u32)meson8b_clk_gates[i]->reg; /* Populate base address for muxes */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_muxes); i++) meson8b_clk_muxes[i]->reg = clk_base + (u32)meson8b_clk_muxes[i]->reg; /* Populate base address for dividers */ for (i = 0; i < ARRAY_SIZE(meson8b_clk_dividers); i++) meson8b_clk_dividers[i]->reg = clk_base + (u32)meson8b_clk_dividers[i]->reg; /* * register all clks * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1 */ for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) { /* array might be sparse */ if (!meson8b_hw_onecell_data.hws[clkid]) continue; /* FIXME convert to devm_clk_register */ ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]); if (ret) goto iounmap; } /* * Register CPU clk notifier * * FIXME this is wrong for a lot of reasons. First, the muxes should be * struct clk_hw objects. Second, we shouldn't program the muxes in * notifier handlers. The tricky programming sequence will be handled * by the forthcoming coordinated clock rates mechanism once that * feature is released. * * Furthermore, looking up the parent this way is terrible. 
At some * point we will stop allocating a default struct clk when registering * a new clk_hw, and this hack will no longer work. Releasing the ccr * feature before that time solves the problem :-) */ parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw); parent_clk = parent_hw->clk; ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb); if (ret) { pr_err("%s: failed to register clock notifier for cpu_clk\n", __func__); goto iounmap; } return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, &meson8b_hw_onecell_data); iounmap: iounmap(clk_base); return ret; }