static int clk_prcmu_opp_prepare(struct clk_hw *hw)
{
	int err;
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (!clk->opp_requested) {
		err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
						(char *)clk_hw_get_name(hw),
						100);
		if (err) {
			pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
				__func__, clk_hw_get_name(hw));
			return err;
		}
		clk->opp_requested = 1;
	}

	err = prcmu_request_clock(clk->cg_sel, true);
	if (err) {
		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
					(char *)clk_hw_get_name(hw));
		clk->opp_requested = 0;
		return err;
	}

	clk->is_prepared = 1;
	return 0;
}
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  We first call into the OMAP
 * clockdomain code to "enable" the corresponding clockdomain if this
 * is the first enabled user of the clockdomain.  Then program the
 * hardware to enable the clock.  Then wait for the IP block that uses
 * this clock to leave idle (if applicable).  Returns the error value
 * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
 * if @hw has a null clock enable_reg, or zero upon success.
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;
	bool clkdm_control;

	if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL)
		clkdm_control = false;
	else
		clkdm_control = true;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	if (IS_ERR(clk->enable_reg)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       clk_hw_get_name(hw));
		ret = -EINVAL;
		goto err;
	}

	/* FIXME should not have INVERT_ENABLE bit here */
	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
	v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	if (clkdm_control && clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}
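/*
 * For orientation, a hedged sketch of how this enable/disable pair would
 * typically be plugged into a struct clk_ops table for an OMAP gate clock.
 * The ops name and the is_enabled callback below are illustrative
 * assumptions, not quoted from the driver; the real table may carry
 * additional hooks (init, set_rate, etc.).
 */
static const struct clk_ops example_omap_gate_clk_ops = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.is_enabled	= omap2_dflt_clk_is_enabled,	/* assumed helper */
};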
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return 0;
}
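/*
 * For context, a minimal sketch (not taken verbatim from this driver) of how
 * a caller would pair a CFG write with update_config(): program the new
 * source and divider fields in CFG_REG first, then latch them through the
 * CMD_UPDATE handshake above.  CFG_SRC_DIV_SHIFT and rcg->hid_width are
 * assumed to exist as in typical RCG2 drivers; treat this as a sketch, not
 * the driver's actual set_rate path.
 */
static int example_rcg2_configure(struct clk_rcg2 *rcg, u8 src_index,
				  u32 pre_div)
{
	u32 cfg, mask;
	int ret;

	mask = (BIT(rcg->hid_width) - 1) | CFG_SRC_SEL_MASK;
	cfg = pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[src_index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				 mask, cfg);
	if (ret)
		return ret;

	/* Nothing takes effect until the RCG acknowledges CMD_UPDATE. */
	return update_config(rcg);
}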
/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(dd->idlest_mask);

	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
static int clk_branch_wait(const struct clk_branch *br, bool enabling,
			bool (check_halt)(const struct clk_branch *, bool))
{
	bool voted = br->halt_check & BRANCH_VOTED;
	const char *name = clk_hw_get_name(&br->clkr.hw);

	/*
	 * Skip checking halt bit if we're explicitly ignoring the bit or the
	 * clock is in hardware gated mode
	 */
	if (br->halt_check == BRANCH_HALT_SKIP || clk_branch_in_hwcg_mode(br))
		return 0;

	if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) {
		udelay(10);
	} else if (br->halt_check == BRANCH_HALT_ENABLE ||
		   br->halt_check == BRANCH_HALT ||
		   (enabling && voted)) {
		int count = 200;

		while (count-- > 0) {
			if (check_halt(br, enabling))
				return 0;
			udelay(1);
		}
		WARN(1, "%s status stuck at 'o%s'", name,
				enabling ? "ff" : "n");
		return -EBUSY;
	}
	return 0;
}
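/*
 * Illustrative shape of a check_halt callback passed to clk_branch_wait():
 * read the branch's halt register and report whether it has reached the
 * desired state.  The halt_reg/halt_bit fields and the BRANCH_HALT_ENABLE
 * inversion are assumptions about the surrounding driver; this is a sketch,
 * not the driver's exact callback.
 */
static bool example_check_halt(const struct clk_branch *br, bool enabling)
{
	bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
	u32 val;

	regmap_read(br->clkr.regmap, br->halt_reg, &val);
	val &= BIT(br->halt_bit);
	if (invert)
		val = !val;

	/* When enabling, wait for the halt bit to clear; when disabling, to set. */
	return !!val == !enabling;
}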
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware, and call into the OMAP
 * clockdomain code to "disable" the corresponding clockdomain if all
 * clocks/hwmods in that clockdomain are now disabled.  No return
 * value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (IS_ERR(clk->enable_reg)) {
		/*
		 * 'independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, clk_hw_get_name(hw));
		return;
	}

	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */

	if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) &&
	    clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}
/**
 * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
 * @clk: struct clk * belonging to the module
 *
 * If the necessary clocks for the OMAP hardware IP block that
 * corresponds to clock @clk are enabled, then wait for the module to
 * indicate readiness (i.e., to leave IDLE).  This code does not
 * belong in the clock code and will be moved in the medium term to
 * module-dependent code.  No return value.
 */
static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
{
	void __iomem *companion_reg, *idlest_reg;
	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
	s16 prcm_mod;
	int r;

	/* Not all modules have multiple clocks that their IDLEST depends on */
	if (clk->ops->find_companion) {
		clk->ops->find_companion(clk, &companion_reg, &other_bit);
		if (!(ti_clk_ll_ops->clk_readl(companion_reg) &
		      (1 << other_bit)))
			return;
	}

	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);

	r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod,
					       &idlest_reg_id);
	if (r) {
		/* IDLEST register not in the CM module */
		_wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
				     idlest_val, clk_hw_get_name(&clk->hw));
	} else {
		ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
						    idlest_bit);
	}
}
static void clk_sysctrl_unprepare(struct clk_hw *hw)
{
	struct clk_sysctrl *clk = to_clk_sysctrl(hw);

	if (ab8500_sysctrl_clear(clk->reg_sel[0], clk->reg_mask[0]))
		dev_err(clk->dev, "clk_sysctrl: %s fail to clear %s.\n",
			__func__, clk_hw_get_name(hw));
}
static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
{
	int err;
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (!clk->opp_requested) {
		err = prcmu_request_ape_opp_100_voltage(true);
		if (err) {
			pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
				__func__, clk_hw_get_name(hw));
			return err;
		}
		clk->opp_requested = 1;
	}

	err = prcmu_request_clock(clk->cg_sel, true);
	if (err) {
		prcmu_request_ape_opp_100_voltage(false);
		clk->opp_requested = 0;
		return err;
	}

	clk->is_prepared = 1;
	return 0;
}
static void xgene_clk_disable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
		/* First put the CSR in reset */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data |= pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);

		/* Second disable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data &= ~pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);
}
/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock.  First checks if the clock parent is the
 * reference clock (in bypass mode, the rate of the clock can't be
 * changed) and proceeds with the rate change operation.  Returns 0
 * with success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 clk_hw_get_name(hw), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}
/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: pointer to a DPLL struct clk_hw
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
{
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (prcmu_request_clock(clk->cg_sel, false))
		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
			clk_hw_get_name(hw));
	else
		clk->is_prepared = 0;
}
static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
{
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (prcmu_request_clock(clk->cg_sel, false)) {
		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
			clk_hw_get_name(hw));
		return;
	}

	if (clk->opp_requested) {
		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
					(char *)clk_hw_get_name(hw));
		clk->opp_requested = 0;
	}

	clk->is_prepared = 0;
}
static int xgene_clk_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data = 0;

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock checking\n", clk_hw_get_name(hw));
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
			data & pclk->param.reg_clk_mask ? "enabled" :
							"disabled");
	}

	if (pclk->param.csr_reg == NULL)
		return 1;

	return data & pclk->param.reg_clk_mask ? 1 : 0;
}
static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	u32 data;

	data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
	pr_debug("%s pll %s\n", clk_hw_get_name(hw),
		data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");

	return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
					struct _ccu_frac *cf)
{
	u32 reg;

	printk("%s: Read fractional\n", clk_hw_get_name(&common->hw));

	if (!(common->features & CCU_FEATURE_FRACTIONAL))
		return 0;

	printk("%s: clock is fractional (rates %lu and %lu)\n",
	       clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]);

	reg = readl(common->base + common->reg);

	printk("%s: clock reg is 0x%x (select is 0x%x)\n",
	       clk_hw_get_name(&common->hw), reg, cf->select);

	return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
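/*
 * Illustrative counterpart (not quoted from the driver): writing the
 * fractional select bit picks between the two fixed rates read back above.
 * Only fields already used in ccu_frac_helper_read_rate() are touched;
 * locking and PLL lock polling are deliberately omitted, so treat this as a
 * sketch rather than the real set_rate helper.
 */
static int example_frac_select_rate(struct ccu_common *common,
				    struct _ccu_frac *cf, unsigned long rate)
{
	u32 reg;

	if (!(common->features & CCU_FEATURE_FRACTIONAL))
		return -EINVAL;

	if (rate != cf->rates[0] && rate != cf->rates[1])
		return -EINVAL;

	reg = readl(common->base + common->reg);
	reg &= ~cf->select;
	if (rate == cf->rates[1])
		reg |= cf->select;	/* select bit set => rates[1] */
	writel(reg, common->base + common->reg);

	return 0;
}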
static int xgene_clk_enable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;
	phys_addr_t reg;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
		reg = __pa(pclk->param.csr_reg);
		/* First enable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data |= pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
			clk_hw_get_name(hw), &reg,
			pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
			data);

		/* Second enable the CSR */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data &= ~pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
			clk_hw_get_name(hw), &reg,
			pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
			data);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return 0;
}
static void flexgen_disable(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *fgate_hw = &flexgen->fgate.hw;

	/* disable only the final gate */
	__clk_hw_set_clk(fgate_hw, hw);

	clk_gate_ops.disable(fgate_hw);

	pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw));
}
static int flexgen_enable(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *pgate_hw = &flexgen->pgate.hw;
	struct clk_hw *fgate_hw = &flexgen->fgate.hw;

	__clk_hw_set_clk(pgate_hw, hw);
	__clk_hw_set_clk(fgate_hw, hw);

	clk_gate_ops.enable(pgate_hw);

	clk_gate_ops.enable(fgate_hw);

	pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw));
	return 0;
}
static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
	if ((s8)genamux->muxsel < 0) {
		pr_debug("%s: %s: Invalid parent, setting to default.\n",
			 __func__, clk_hw_get_name(hw));
		genamux->muxsel = 0;
	}

	return genamux->muxsel;
}
static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
	unsigned long fref;
	unsigned long fvco;
	u32 pll;
	u32 nref;
	u32 nout;
	u32 nfb;

	pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

	if (pllclk->version <= 1) {
		if (pllclk->type == PLL_TYPE_PCP) {
			/*
			 * PLL VCO = Reference clock * NF
			 * PCP PLL = PLL_VCO / 2
			 */
			nout = 2;
			fvco = parent_rate * (N_DIV_RD(pll) + 4);
		} else {
			/*
			 * Fref = Reference Clock / NREF;
			 * Fvco = Fref * NFB;
			 * Fout = Fvco / NOUT;
			 */
			nref = CLKR_RD(pll) + 1;
			nout = CLKOD_RD(pll) + 1;
			nfb = CLKF_RD(pll);
			fref = parent_rate / nref;
			fvco = fref * nfb;
		}
	} else {
		/*
		 * fvco = Reference clock * FBDIVC
		 * PLL freq = fvco / NOUT
		 */
		nout = SC_OUTDIV2(pll) ? 2 : 3;
		fvco = parent_rate * SC_N_DIV_RD(pll);
	}
	pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
		 clk_hw_get_name(hw), fvco / nout, parent_rate,
		 pllclk->version);

	return fvco / nout;
}
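/*
 * Worked example (illustrative field values only): for a version-1 SOC PLL
 * with parent_rate = 100 MHz, CLKR = 0 (NREF = 1), CLKF = 40 (NFB = 40) and
 * CLKOD = 1 (NOUT = 2), the arithmetic above gives Fref = 100 MHz,
 * Fvco = 100 MHz * 40 = 4000 MHz, and a reported rate of
 * Fvco / NOUT = 2000 MHz.
 */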
static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
{
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (prcmu_request_clock(clk->cg_sel, false)) {
		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
			clk_hw_get_name(hw));
		return;
	}

	if (clk->opp_requested) {
		prcmu_request_ape_opp_100_voltage(false);
		clk->opp_requested = 0;
	}

	clk->is_prepared = 0;
}
/*
 * zynqmp_clk_gate_disable - Disable clock
 * @hw: handle between common and hardware-specific interfaces
 */
static void zynqmp_clk_gate_disable(struct clk_hw *hw)
{
	struct zynqmp_clk_gate *gate = to_zynqmp_clk_gate(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = gate->clk_id;
	int ret = 0;
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	if (!eemi_ops || !eemi_ops->clock_disable)
		return;

	ret = eemi_ops->clock_disable(clk_id);

	if (ret)
		pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
			     __func__, clk_name, ret);
}
static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data;

	if (pclk->param.divider_reg) {
		data = xgene_clk_read(pclk->param.divider_reg +
					pclk->param.reg_divider_offset);
		data >>= pclk->param.reg_divider_shift;
		data &= (1 << pclk->param.reg_divider_width) - 1;

		pr_debug("%s clock recalc rate %ld parent %ld\n",
			clk_hw_get_name(hw),
			parent_rate / data, parent_rate);

		return parent_rate / data;
	} else {
		/* No divider register: the clock runs at its parent's rate. */
		pr_debug("%s clock recalc rate %ld parent %ld\n",
			clk_hw_get_name(hw), parent_rate, parent_rate);
		return parent_rate;
	}
}
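/*
 * Worked example (illustrative numbers only): with parent_rate = 500000000
 * and a divider field value of 4, xgene_clk_recalc_rate() reports
 * 500000000 / 4 = 125000000 Hz; when no divider register is configured the
 * clock simply inherits its parent's rate.
 */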
/**
 * zynqmp_clk_gate_is_enabled - Check clock state
 * @hw: handle between common and hardware-specific interfaces
 *
 * Return: 1 if enabled, 0 if disabled
 */
static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct zynqmp_clk_gate *gate = to_zynqmp_clk_gate(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = gate->clk_id;
	int state, ret;
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	if (!eemi_ops || !eemi_ops->clock_getstate)
		return 0;

	ret = eemi_ops->clock_getstate(clk_id, &state);
	if (ret)
		pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
			     __func__, clk_name, ret);

	return state ? 1 : 0;
}
/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}
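/*
 * A minimal sketch of the inverse operation: write the source select field
 * and latch it through update_config(), assuming the same parent_map and
 * CFG field layout used by clk_rcg2_get_parent() above.  The driver's real
 * set_parent implementation may differ in detail.
 */
static int example_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	int ret;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	/* Kick CMD_UPDATE and poll until the RCG latches the new source. */
	return update_config(rcg);
}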