int dsi_pll_mux_prepare(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i, rc, sel = 0;
	struct mdss_pll_resources *dsi_pll_res = mux->priv;

	rc = mdss_pll_resource_enable(dsi_pll_res, true);
	if (rc) {
		pr_err("Failed to enable mdss dsi pll resources\n");
		return rc;
	}

	for (i = 0; i < mux->num_parents; i++)
		if (mux->parents[i].src == c->parent) {
			sel = mux->parents[i].sel;
			break;
		}

	if (i == mux->num_parents) {
		pr_err("Failed to select the parent clock\n");
		rc = -EINVAL;
		goto error;
	}

	/* Restore the mux source select value */
	rc = mux->ops->set_mux_sel(mux, sel);

error:
	mdss_pll_resource_enable(dsi_pll_res, false);
	return rc;
}
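
/* Propagate enable to the hardware mux, when the implementation provides one. */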
static int mux_enable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->enable)
		return mux->ops->enable(mux);
	return 0;
}
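
/*
 * Look up the select value for the requested parent and program it between
 * the framework's pre/post reparent notifications.
 */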
static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0;
	unsigned long flags;

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
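
/*
 * Common-clock-framework variant: the framework hands over a parent index,
 * which is translated through parent_map (when present) into the hardware
 * select value.
 */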
static int mux_set_parent(struct clk_hw *hw, u8 sel)
{
	struct mux_clk *mux = to_mux_clk(hw);

	if (mux->parent_map)
		sel = mux->parent_map[sel];

	return mux->ops->set_mux_sel(mux, sel);
}
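
/* Hand back the mux's register ranges for debug dumps, when supported. */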
static void __iomem *mux_clk_list_registers(struct clk *c, int n,
			struct clk_register_data **regs, u32 *size)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops && mux->ops->list_registers)
		return mux->ops->list_registers(mux, n, regs, size);

	return ERR_PTR(-EINVAL);
}
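
All of these callbacks operate on a mux_clk wrapper whose definition is not part of this listing. Below is a minimal sketch of the shapes they assume, reconstructed purely from the field accesses in the examples; the real definitions live in the downstream MSM clock headers (struct clk and struct clk_register_data also come from there) and carry more fields than shown.

/*
 * Reconstructed for reference only; not the actual header definitions.
 */
struct mux_clk;

struct clk_src {
	struct clk *src;	/* candidate parent clock */
	int sel;		/* hardware select value for that parent */
};

struct clk_mux_ops {
	int (*set_mux_sel)(struct mux_clk *clk, int sel);
	int (*get_mux_sel)(struct mux_clk *clk);
	/* optional hardware gate hooks */
	int (*enable)(struct mux_clk *clk);
	void (*disable)(struct mux_clk *clk);
	int (*is_enabled)(struct mux_clk *clk);
	void __iomem *(*list_registers)(struct mux_clk *clk, int n,
				struct clk_register_data **regs, u32 *size);
};

struct mux_clk {
	struct clk_src	*parents;	/* parent/select table */
	int		num_parents;
	struct clk	*safe_parent;	/* parked here across rate switches */
	int		safe_sel;
	unsigned long	safe_freq;
	/* flags and alternates used by some of the variants below */
	bool		rec_set_par;
	struct clk	**rec_parents;
	int		num_rec_parents;
	bool		try_get_rate;
	bool		try_new_parent;
	bool		has_safe_parent;
	u8		*parent_map;	/* framework parent index -> select value */
	u32		en_mask;
	struct clk_mux_ops *ops;
	void		*priv;		/* implementation data, e.g. PLL resources */
	struct clk	c;		/* embedded clock (defined in the MSM headers) */
};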
Example #6
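
/*
 * Rate change: pick the first parent that can deliver the requested rate,
 * park the mux on the safe parent while that parent retunes, then switch
 * over to it.
 */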
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates.
	 */
	if (mux->safe_sel >= 0) {
		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
	}
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		parent_to_src_sel(mux->parents, mux->num_parents, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
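
/*
 * Boot-time handoff: inherit the rate of the current parent and cache the
 * safe-parent select value; report enabled only when a hardware gate exists
 * and says so.
 */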
static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}
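
/* Read the hardware select value back and map it to the matching parent. */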
static struct clk *mux_get_parent(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux->ops->get_mux_sel(mux);
	int i;

	for (i = 0; i < mux->num_parents; i++) {
		if (mux->parents[i].sel == sel)
			return mux->parents[i].src;
	}

	/* No matching parent for this select value. */
	return NULL;
}
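
/*
 * Ask every parent to round the requested rate and keep whichever result
 * is_better_rate() prefers.
 */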
static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	unsigned long prate, rrate = 0;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		if (is_better_rate(rate, rrate, prate))
			rrate = prate;
	}
	if (!rrate)
		return -EINVAL;

	return rrate;
}
Example #10
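
/*
 * Like the basic set_parent, but when no source matches directly and
 * rec_set_par is set, try pushing the new parent down into each source
 * clock before giving up.
 */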
static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = parent_to_src_sel(mux->parents, mux->num_parents, p);
	struct clk *old_parent;
	int rc = 0, i;
	unsigned long flags;

	if (sel < 0 && mux->rec_set_par) {
		for (i = 0; i < mux->num_parents; i++) {
			rc = clk_set_parent(mux->parents[i].src, p);
			if (!rc) {
				sel = mux->parents[i].sel;
				/*
				 * This is necessary to ensure prepare/enable
				 * counts get propagated correctly.
				 */
				p = mux->parents[i].src;
				break;
			}
		}
	}

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	c->rate = clk_get_rate(p);
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
Example #11
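
/*
 * Map the safe select value back to a parent index (through parent_map when
 * present) and return the corresponding clock.
 */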
static struct clk *mux_get_safe_parent(struct clk_hw *hw)
{
	int i;
	struct mux_clk *mux = to_mux_clk(hw);
	int num_parents = __clk_get_num_parents(hw->clk);

	if (!mux->has_safe_parent)
		return NULL;

	i = mux->safe_sel;
	if (mux->parent_map)
		for (i = 0; i < num_parents; i++)
			if (mux->safe_sel == mux->parent_map[i])
				break;

	return clk_get_parent_by_index(hw->clk, i);
}
Example #12
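
/*
 * Common-clock-framework variant: translate the hardware select value back
 * into a parent index via parent_map, warning if no known parent matches.
 */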
static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_clk *mux = to_mux_clk(hw);
	int num_parents = __clk_get_num_parents(hw->clk);
	int i;
	u8 sel;

	sel = mux->ops->get_mux_sel(mux);
	if (mux->parent_map) {
		for (i = 0; i < num_parents; i++)
			if (sel == mux->parent_map[i])
				return i;
		WARN(1, "Can't find parent\n");
		return -EINVAL;
	}

	return sel;
}
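
/*
 * Same rate-change sequence as the earlier mux_set_rate: park on the safe
 * parent, retune the chosen parent, then reparent.
 */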
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	for (i = 0; i < mux->num_parents; i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}
	if (new_parent == NULL)
		return -EINVAL;

	if (mux->safe_sel >= 0) {
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
	}
	if (rc)
		return rc;

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		parent_to_src_sel(mux->parents, mux->num_parents, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
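
/*
 * Reparent with a fallback: when no source matches directly, try handing the
 * request to one of the rec_parents clocks before giving up.
 */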
static int mux_set_parent(struct clk *c, struct clk *p)
{
	struct mux_clk *mux = to_mux_clk(c);
	int sel = mux_parent_to_src_sel(mux, p);
	struct clk *old_parent;
	int rc = 0, i;
	unsigned long flags;

	if (sel < 0 && mux->rec_parents) {
		for (i = 0; i < mux->num_rec_parents; i++) {
			rc = clk_set_parent(mux->rec_parents[i], p);
			if (!rc) {
				p = mux->rec_parents[i];
				sel = mux_parent_to_src_sel(mux, p);
				break;
			}
		}
	}

	if (sel < 0)
		return sel;

	rc = __clk_pre_reparent(c, p, &flags);
	if (rc)
		goto out;

	rc = mux->ops->set_mux_sel(mux, sel);
	if (rc)
		goto set_fail;

	old_parent = c->parent;
	c->parent = p;
	c->rate = clk_get_rate(p);
	__clk_post_reparent(c, old_parent, &flags);

	return 0;

set_fail:
	__clk_post_reparent(c, p, &flags);
out:
	return rc;
}
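
/*
 * Prefer the smallest parent rate at or above the request; if no parent can
 * reach it, fall back to the largest rate below it.
 */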
static long mux_round_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i;
	unsigned long prate, max_prate = 0, rrate = ULONG_MAX;

	for (i = 0; i < mux->num_parents; i++) {
		prate = clk_round_rate(mux->parents[i].src, rate);
		if (IS_ERR_VALUE(prate))
			continue;
		if (prate < rate) {
			max_prate = max(prate, max_prate);
			continue;
		}

		rrate = min(rrate, prate);
	}
	if (rrate == ULONG_MAX)
		rrate = max_prate;

	return rrate ? rrate : -EINVAL;
}
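
/*
 * Restore the select value for the current parent with the DSI AHB clock
 * held enabled, so the register write can reach the hardware.
 */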
int mux_prepare(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i, rc, sel = 0;

	clk_prepare_enable(mdss_dsi_ahb_clk);
	for (i = 0; i < mux->num_parents; i++)
		if (mux->parents[i].src == c->parent) {
			sel = mux->parents[i].sel;
			break;
		}

	if (i == mux->num_parents) {
		clk_disable_unprepare(mdss_dsi_ahb_clk);
		return -EINVAL;
	}

	/* Restore the mux source select value */
	rc = mux->ops->set_mux_sel(mux, sel);
	clk_disable_unprepare(mdss_dsi_ahb_clk);

	return rc;
}
Example #17
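
/*
 * Restore the select value for the current parent; unlike the first variant,
 * no PLL resources are toggled around the register write.
 */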
int dsi_pll_mux_prepare(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);
	int i, rc, sel = 0;

	for (i = 0; i < mux->num_parents; i++)
		if (mux->parents[i].src == c->parent) {
			sel = mux->parents[i].sel;
			break;
		}

	if (i == mux->num_parents) {
		pr_err("Failed to select the parent clock\n");
		rc = -EINVAL;
		goto error;
	}

	/* Restore the mux source select value */
	rc = mux->ops->set_mux_sel(mux, sel);

error:
	return rc;
}
static enum handoff mux_handoff(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	c->rate = clk_get_rate(c->parent);
	mux->safe_sel = parent_to_src_sel(mux, mux->safe_parent);

	if (mux->en_mask && mux->ops && mux->ops->is_enabled)
		return mux->ops->is_enabled(mux)
			? HANDOFF_ENABLED_CLK
			: HANDOFF_DISABLED_CLK;

	/*
	 * If this function returns 'enabled' even when the clock downstream
	 * of this clock is disabled, then handoff code will unnecessarily
	 * enable the current parent of this clock. If this function always
	 * returns 'disabled' and a clock downstream is on, the clock handoff
	 * code will bump up the ref count for this clock and its current
	 * parent as necessary. So, clocks without an actual HW gate can
	 * always return disabled.
	 */
	return HANDOFF_DISABLED_CLK;
}
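
/* Propagate disable to the hardware mux, when the implementation provides one. */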
static void mux_disable(struct clk *c)
{
	struct mux_clk *mux = to_mux_clk(c);

	if (mux->ops->disable)
		return mux->ops->disable(mux);
}
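
/*
 * Extended rate-change path: optionally prefer a parent already running at
 * the target rate (try_get_rate) or a parent other than the current one
 * (try_new_parent), and honour safe_freq when parking on the safe parent.
 */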
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	/*
	 * Check if one of the possible parents is already at the requested
	 * rate.
	 */
	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
		struct clk *p = mux->parents[i].src;
		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}

	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			if (!mux->try_new_parent)
				break;
			if (mux->try_new_parent && new_parent != c->parent)
				break;
		}
	}

	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates. If the mux can switch between distinct sources safely
	 * (indicated by try_new_parent), and the new source is not the current
	 * parent, do not switch to the safe parent.
	 */
	if (mux->safe_sel >= 0 &&
		!(mux->try_new_parent && (new_parent != c->parent))) {
		/*
		 * The safe parent might be a clock with multiple sources;
		 * to select the "safe" source, set a safe frequency.
		 */
		if (mux->safe_freq) {
			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
			if (rc) {
				pr_err("Failed to set safe rate on %s\n",
					clk_name(mux->safe_parent));
				return rc;
			}
		}

		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
		if (rc)
			return rc;

	}

	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	WARN(mux->ops->set_mux_sel(mux,
		mux_parent_to_src_sel(mux, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
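
/* Same flow as above, instrumented with HTC debug footprints at each step. */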
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SAFE_PARENT_INIT);
#endif

	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
		struct clk *p = mux->parents[i].src;
		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}

	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			if (!mux->try_new_parent)
				break;
			if (mux->try_new_parent && new_parent != c->parent)
				break;
		}
	}

	if (new_parent == NULL)
		return -EINVAL;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SET_SAFE_RATE);
#endif

	if (mux->safe_sel >= 0 &&
		!(mux->try_new_parent && (new_parent != c->parent))) {
		if (mux->safe_freq) {
			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
			if (rc) {
				pr_err("Failed to set safe rate on %s\n",
					clk_name(mux->safe_parent));
				return rc;
			}
		}

		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
		if (rc)
			return rc;

	}

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SET_PARENT_RATE);
#endif
	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_CLK_UNPREPARE);
#endif

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_cpu_freq_footprint_by_clk(FT_CUR_RATE, c, rate);
	set_acpuclk_l2_freq_footprint_by_clk(FT_CUR_RATE, c, rate);
#endif

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_RETURN);
#endif
	return 0;

set_par_fail:
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_CLK_UNPREPARE);
#endif
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_SET_PARENT_RATE);
#endif
	WARN(mux->ops->set_mux_sel(mux,
		mux_parent_to_src_sel(mux, c->parent)),
		"Set rate failed for %s. Also in bad state!\n", c->dbg_name);
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_RETURN);
#endif
	return rc;
}
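
None of these callbacks is called directly; they are installed in a clk_ops table that the downstream MSM clock framework invokes. The sketch below shows that wiring under the assumption that the clk_ops_gen_mux naming of the downstream clock-generic code applies here; the handoff and list_registers fields are specific to that framework and are not part of the upstream common clock framework's clk_ops.

/*
 * Sketch only: field names follow the downstream MSM struct clk_ops; pick
 * the mux_set_parent/mux_get_parent/mux_set_rate variants above that match
 * the clock type in use (struct clk vs. struct clk_hw).
 */
static struct clk_ops clk_ops_gen_mux = {
	.enable		= mux_enable,
	.disable	= mux_disable,
	.set_rate	= mux_set_rate,
	.round_rate	= mux_round_rate,
	.set_parent	= mux_set_parent,
	.get_parent	= mux_get_parent,
	.handoff	= mux_handoff,
	.list_registers	= mux_clk_list_registers,
};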