static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0;
	unsigned long flags;

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
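/*
 * Context for the reparent sequence above: __clk_pre_reparent() and
 * __clk_post_reparent() are framework helpers that keep the child clock fed
 * while the select field is reprogrammed. The bodies below are only a
 * minimal sketch of that contract, assuming the clock exposes the count and
 * lock fields used elsewhere in this listing plus a prepare_count; the real
 * helpers may differ in detail.
 */
static int __clk_pre_reparent(struct clk *c, struct clk *new,
			      unsigned long *flags)
{
	int rc = 0;

	/* Vote on the new parent before the switch if the child is in use. */
	if (c->prepare_count) {
		rc = clk_prepare(new);
		if (rc)
			return rc;
	}

	/* Hold the enable lock across the actual mux reprogramming. */
	spin_lock_irqsave(&c->lock, *flags);
	if (c->count) {
		rc = clk_enable(new);
		if (rc) {
			spin_unlock_irqrestore(&c->lock, *flags);
			clk_unprepare(new);
		}
	}
	return rc;
}

static void __clk_post_reparent(struct clk *c, struct clk *old,
				unsigned long *flags)
{
	/* Drop the votes on the parent that is no longer selected. */
	if (c->count)
		clk_disable(old);
	spin_unlock_irqrestore(&c->lock, *flags);
	if (c->prepare_count)
		clk_unprepare(old);
}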
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates.
	 */
	if (mux->safe_sel >= 0) {
		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
	}
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		parent_to_src_sel(mux->parents, mux->num_parents, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
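/*
 * Illustration only: set_mux_sel(), called above under the enable lock, is
 * supplied by each mux implementation. Below is a minimal sketch of a
 * register-backed variant; the field names (base, offset, mask, shift) are
 * assumptions about the hardware description, not fields shown in the code
 * above.
 */
static int example_set_mux_sel(struct mux_clk *mux, int sel)
{
	u32 regval;

	/* Read-modify-write the source-select field of the mux register. */
	regval = readl_relaxed(mux->base + mux->offset);
	regval &= ~(mux->mask << mux->shift);
	regval |= (sel & mux->mask) << mux->shift;
	writel_relaxed(regval, mux->base + mux->offset);

	/* Make sure the switch has been issued before returning. */
	mb();
	return 0;
}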
static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	u32 rc = 0, src_sel;

	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
	if (md->c.count)
		rc = md->ops->set_src_div(md, src_sel, div);
	if (!rc) {
		md->data.div = div;
		md->src_sel = src_sel;
	}

	return rc;
}
static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux->parents, mux->num_parents,
					  mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux) ?
			HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}
static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux->parents, mux->num_parents, p);
	struct clk *old_parent;
	int rc = 0, i;
	unsigned long flags;

	if (sel < 0 && mux->rec_set_par) {
		for (i = 0; i < mux->num_parents; i++) {
			rc = clk_set_parent(mux->parents[i].src, p);
			if (!rc) {
				sel = mux->parents[i].sel;
				/*
				 * This is necessary to ensure prepare/enable
				 * counts get propagated correctly.
				 */
				p = mux->parents[i].src;
				break;
			}
		}
	}

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	c->rate = clk_get_rate(p);
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
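/*
 * For reference: the parents[] table walked above pairs each candidate
 * parent clock with the hardware select value that routes it through the
 * mux. Below is a minimal sketch of that pairing; the struct name, field
 * layout and the example table entries are assumptions inferred from how
 * .src and .sel are used here, not definitions copied from the driver.
 */
struct clk_src {
	struct clk *src;	/* candidate parent clock */
	int sel;		/* mux select value that picks it */
};

extern struct clk example_pll_clk, example_xo_clk;	/* hypothetical parents */

static struct clk_src example_mux_parents[] = {
	{ .src = &example_pll_clk, .sel = 1 },
	{ .src = &example_xo_clk,  .sel = 0 },
};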
static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	u32 rc = 0, src_sel;

	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
	WARN(!md->c.count,
		"ref count is zero! parent will not be switched to gpll0\n");
	if (md->c.count)
		rc = md->ops->set_src_div(md, src_sel, div);
	if (!rc) {
		md->data.div = div;
		md->src_sel = src_sel;
	}

	return rc;
}
/* requires enable lock to be held */
static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
{
	u32 rc = 0, src_sel;

	src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
	/*
	 * If the clock is disabled, don't change to the new settings until
	 * the clock is reenabled
	 */
	if (md->c.count)
		rc = md->ops->set_src_div(md, src_sel, div);
	if (!rc) {
		md->data.div = div;
		md->src_sel = src_sel;
	}

	return rc;
}
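/*
 * The comment on __set_src_div() above says the enable lock must be held by
 * the caller. A minimal sketch of a caller honouring that contract is shown
 * below, using the same per-clock lock taken elsewhere in this listing; the
 * wrapper name is an assumption for illustration, not necessarily how the
 * driver wraps it.
 */
static int set_src_div_locked(struct mux_div_clk *md, struct clk *parent,
			      u32 div)
{
	unsigned long flags;
	int rc;

	/* Hold the clock's enable lock across the source/divider update. */
	spin_lock_irqsave(&md->c.lock, flags);
	rc = __set_src_div(md, parent, div);
	spin_unlock_irqrestore(&md->c.lock, flags);

	return rc;
}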
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	if (mux->safe_sel >= 0) {
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
	}
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		parent_to_src_sel(mux->parents, mux->num_parents, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux) ?
			HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}
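/*
 * The en_mask check above only matters for muxes that have a real hardware
 * gate and an is_enabled op to read it back. Below is a minimal sketch of
 * such an op; en_mask comes from the code above, but the register fields
 * (base, en_offset) are illustrative assumptions.
 */
static int example_mux_is_enabled(struct mux_clk *mux)
{
	u32 regval;

	/* Report enabled only if every gate bit in en_mask is set. */
	regval = readl_relaxed(mux->base + mux->en_offset);
	return (regval & mux->en_mask) == mux->en_mask;
}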
static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
{
	return parent_to_src_sel(mux->parents, mux->num_parents, p);
}
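/*
 * parent_to_src_sel() is used throughout this listing but never shown. A
 * minimal sketch of the lookup it is expected to perform is below: scan the
 * parent table for the requested clock and return its select value, or a
 * negative errno if the clock is not a parent of this mux. The parent table
 * type follows the struct clk_src sketch given earlier; treat this as an
 * assumption about the helper's behaviour rather than the exact driver code.
 */
static int parent_to_src_sel(struct clk_src *parents, int num_parents,
			     struct clk *p)
{
	int i;

	for (i = 0; i < num_parents; i++)
		if (parents[i].src == p)
			return parents[i].sel;

	return -EINVAL;
}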