示例#1
0
static enum handoff mux_div_clk_handoff(struct clk *c)
{
	/*
	 * Handoff: recover the rate this clock was left at by the
	 * bootloader and report whether the hardware is running.
	 */
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long prate = clk_get_rate(c->parent);
	unsigned int numerator;

	if (!prate)
		return HANDOFF_DISABLED_CLK;

	/* Half dividers store doubled div values; a numerator of 2 undoes it. */
	numerator = md->data.is_half_divider ? 2 : 1;

	if (!md->data.div) {
		/* No divider programmed: nothing meaningful is running. */
		c->rate = 0;
		return HANDOFF_DISABLED_CLK;
	}
	c->rate = mult_frac(prate, numerator, md->data.div);

	if (md->ops->is_enabled && md->ops->is_enabled(md))
		return HANDOFF_ENABLED_CLK;

	return HANDOFF_DISABLED_CLK;
}
static void mux_div_clk_disable(struct clk_hw *hw)
{
	struct mux_div_clk *md = to_mux_div_clk(hw);

	/*
	 * The disable hook is optional. Fixed: `return expr;` inside a
	 * void function is a C constraint violation (C11 6.8.6.4); call
	 * the hook as a plain statement instead.
	 */
	if (md->ops->disable)
		md->ops->disable(md);
}
static int mux_div_clk_set_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate)
{
	struct mux_div_clk *md = to_mux_div_clk(hw);
	return __mux_div_clk_set_rate_and_parent(hw, md->src_sel,
			parent_rate / rate);
}
static int mux_div_clk_enable(struct clk_hw *hw)
{
	/* The enable hook is optional; its absence counts as success. */
	struct mux_div_clk *md = to_mux_div_clk(hw);

	return md->ops->enable ? md->ops->enable(md) : 0;
}
static void mux_div_clk_disable(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);

	/*
	 * NOTE(review): 300000000 looks like a placeholder rate recorded
	 * only for the trace entry — confirm against the log consumers.
	 */
	log_clk_call(c, FN_DISABLE, 300000000);

	/*
	 * Fixed: `return expr;` inside a void function is a C constraint
	 * violation (C11 6.8.6.4); call the optional hook as a statement.
	 */
	if (md->ops->disable)
		md->ops->disable(md);
}
static int mux_div_clk_enable(struct clk *c)
{
	/* Trace the call with the clock's current rate, then forward. */
	struct mux_div_clk *md = to_mux_div_clk(c);

	log_clk_call(c, FN_ENABLE, c->rate);

	/* The enable hook is optional; its absence counts as success. */
	return md->ops->enable ? md->ops->enable(md) : 0;
}
static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
				struct clk_register_data **regs, u32 *size)
{
	/* Register listing is delegated to the backend when implemented. */
	struct mux_div_clk *md = to_mux_div_clk(c);

	if (!md->ops || !md->ops->list_registers)
		return ERR_PTR(-EINVAL);

	return md->ops->list_registers(md, n, regs, size);
}
static unsigned long
mux_div_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct mux_div_clk *md = to_mux_div_clk(hw);
	u32 div, sel;

	md->ops->get_src_div(md, &sel, &div);

	/* A zero divider read back from hardware would fault; report 0. */
	if (!div)
		return 0;

	return prate / div;
}
static int
__mux_div_clk_set_rate_and_parent(struct clk_hw *hw, u8 index, u32 div)
{
	/* Latch the safe parent once, then program mux source and divider. */
	struct mux_div_clk *md = to_mux_div_clk(hw);
	int ret = safe_parent_init_once(hw);

	if (ret)
		return ret;

	return __set_src_div(md, index, div);
}
static enum handoff mux_div_clk_handoff(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long parent_rate;

	parent_rate = clk_get_rate(c->parent);
	/*
	 * Fixed: a zero divider would make the division below undefined
	 * behavior, and a zero parent rate means nothing useful is running.
	 * Mirrors the checks done by the half-divider handoff variant.
	 */
	if (!parent_rate || !md->data.div) {
		c->rate = 0;
		return HANDOFF_DISABLED_CLK;
	}
	c->rate = parent_rate / md->data.div;

	if (!md->ops->is_enabled)
		return HANDOFF_DISABLED_CLK;
	if (md->ops->is_enabled(md))
		return HANDOFF_ENABLED_CLK;
	return HANDOFF_DISABLED_CLK;
}
示例#11
0
/*
 * Read back the currently selected mux source from hardware, cache the
 * raw selector in md->src_sel, and translate it to a parent index via
 * parent_map.
 */
static u8 mux_div_clk_get_parent(struct clk_hw *hw)
{
	struct mux_div_clk *md = to_mux_div_clk(hw);
	int num_parents = __clk_get_num_parents(hw->clk);
	u32 i, div, sel;

	md->ops->get_src_div(md, &sel, &div);
	md->src_sel = sel;

	for (i = 0; i < num_parents; i++)
		if (sel == md->parent_map[i])
			return i;
	WARN(1, "Can't find parent\n");
	/*
	 * NOTE(review): -EINVAL is truncated by the u8 return type, so
	 * callers actually see 0xEA; the WARN above is the real failure
	 * signal. Confirm what the clk framework does with this value.
	 */
	return -EINVAL;
}
static struct clk *mux_div_clk_get_parent(struct clk *c)
{
	/*
	 * Query the hardware for the active source/divider pair, cache both
	 * in the software state, then map the selector to a parent clk.
	 */
	struct mux_div_clk *md = to_mux_div_clk(c);
	u32 idx, cur_div, cur_sel;

	md->ops->get_src_div(md, &cur_sel, &cur_div);

	md->data.div = cur_div;
	md->src_sel = cur_sel;

	for (idx = 0; idx < md->num_parents; idx++)
		if (md->parents[idx].sel == cur_sel)
			return md->parents[idx].src;

	/* Selector does not match any registered parent. */
	return NULL;
}
static void log_clk_call(struct clk *c, u32 fn, u32 rate)
{
	/*
	 * Append one entry to the circular clock-call trace buffer under
	 * log_lock: timestamp, operation id, the two mux/div registers and
	 * the rate the caller reports.
	 */
	struct mux_div_clk *md = to_mux_div_clk(c);
	struct clk_logger *entry;
	unsigned long flags;

	spin_lock_irqsave(&log_lock, flags);

	entry = &clk_log[buf_index];
	entry->timestamp = sched_clock();
	entry->fn = fn;
	/* The CMD register sits one word below the CFG (divider) register. */
	entry->cmd_reg = readl_relaxed(md->base + md->div_offset - 0x4);
	entry->cfg_reg = readl_relaxed(md->base + md->div_offset);
	entry->rate = rate;

	if (++buf_index >= BUF_SIZE)
		buf_index = 0;

	spin_unlock_irqrestore(&log_lock, flags);
}
示例#14
0
static long __mux_div_round_rate(struct clk_hw *hw, unsigned long rate,
	struct clk **best_parent, int *best_div, unsigned long *best_prate)
{
	struct mux_div_clk *md = to_mux_div_clk(hw);
	unsigned int i;
	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
	struct clk *_best_parent = 0;
	int num_parents = __clk_get_num_parents(hw->clk);
	bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;

	for (i = 0; i < num_parents; i++) {
		int div;
		unsigned long prate;
		struct clk *p = clk_get_parent_by_index(hw->clk, i);

		rrate = __div_round_rate(&md->data, rate, p, &div, &prate,
				set_parent);

		if (is_better_rate(rate, best, rrate)) {
			best = rrate;
			_best_div = div;
			_best_prate = prate;
			_best_parent = p;
		}

		if (rate <= rrate)
			break;
	}

	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;
	if (best_parent)
		*best_parent = _best_parent;

	if (best)
		return best;
	return -EINVAL;
}
static long __mux_div_round_rate(struct clk *c, unsigned long rate,
	struct clk **best_parent, int *best_div, unsigned long *best_prate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned int i;
	unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
	struct clk *_best_parent = 0;

	for (i = 0; i < md->num_parents; i++) {
		int div;
		unsigned long prate;

		rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
				&div, &prate);

		if (is_better_rate(rate, best, rrate)) {
			best = rrate;
			_best_div = div;
			_best_prate = prate;
			_best_parent = md->parents[i].src;
		}

		if (rate <= rrate && rrate <= rate + md->data.rate_margin)
			break;
	}

	if (best_div)
		*best_div = _best_div;
	if (best_prate)
		*best_prate = _best_prate;
	if (best_parent)
		*best_parent = _best_parent;

	if (best)
		return best;
	return -EINVAL;
}
static int safe_parent_init_once(struct clk *c)
{
	/*
	 * Lazily resolve the (parent, div) pair that produces safe_freq the
	 * first time it is needed. A failed lookup is cached as ERR_PTR in
	 * md->safe_parent so the search is never retried.
	 */
	struct mux_div_clk *md = to_mux_div_clk(c);
	struct clk *parent;
	unsigned long rounded;
	u32 div;

	if (IS_ERR(md->safe_parent))
		return -EINVAL;
	/* Nothing to do without a safe_freq, or if already resolved. */
	if (!md->safe_freq || md->safe_parent)
		return 0;

	rounded = __mux_div_round_rate(c, md->safe_freq, &parent, &div, NULL);
	if (rounded != md->safe_freq) {
		md->safe_parent = ERR_PTR(-EINVAL);
		return -EINVAL;
	}

	md->safe_div = div;
	md->safe_parent = parent;
	return 0;
}
/*
 * Handoff: recover the rate this clock was left at by the bootloader
 * and report whether the hardware is currently enabled.
 */
static enum handoff mux_div_clk_handoff(struct clk *c)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long parent_rate;
	unsigned int numer;

	parent_rate = clk_get_rate(c->parent);
	if (!parent_rate)
		return HANDOFF_DISABLED_CLK;
	/* Half dividers store doubled div values; a numerator of 2 undoes it. */
	numer = md->data.is_half_divider ? 2 : 1;

	if (md->data.div) {
		c->rate = mult_frac(parent_rate, numer, md->data.div);
	} else {
		/* No divider programmed: nothing meaningful is running. */
		c->rate = 0;
		return HANDOFF_DISABLED_CLK;
	}

	if (!md->ops->is_enabled)
		return HANDOFF_DISABLED_CLK;
	if (md->ops->is_enabled(md))
		return HANDOFF_ENABLED_CLK;
	return HANDOFF_DISABLED_CLK;
}
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_SAFE_PARENT_INIT);
#endif

	log_clk_call(c, FN_SET_RATE, rate);

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
							&new_prate);
	if (rrate != rate)
		return -EINVAL;

	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_SET_SAFE_RATE);
#endif

	
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);

	else if (new_parent == old_parent && new_div >= old_div) {
		rc = set_src_div(md, old_parent, new_div);
	}
	if (rc) {
		WARN(rc, "error switching to safe_parent freq=%ld\n", md->safe_freq);
		return rc;
	}
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_SET_PARENT_RATE);
#endif

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			new_parent->dbg_name, new_prate);
		goto err_set_rate;
	}

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_CLK_PREPARE);
#endif

	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_SET_RATE);
#endif

	
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	
	set_acpuclk_cpu_freq_footprint(FT_CUR_RATE, 0, rrate);
	set_acpuclk_footprint(0, ACPU_BEFORE_CLK_UNPREPARE);
#endif

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_RETURN);
#endif

	return 0;

err_set_src_div:
	
	WARN(rc, "disabling %s\n", new_parent->dbg_name);
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_ERR_CLK_UNPREPARE);
#endif

	
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_ERR_SET_PARENT_RATE);
#endif

	WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
		c->dbg_name, old_parent->dbg_name, old_prate);
	rc = clk_set_rate(old_parent, old_prate);
err_set_rate:

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_ERR_SET_RATE);
#endif

	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		c->dbg_name, old_div, old_parent->dbg_name);
	rc = set_src_div(md, old_parent, old_div);
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT) && defined(CONFIG_MSM_CORTEX_A7)
	set_acpuclk_footprint(0, ACPU_BEFORE_ERR_RETURN);
#endif

	return rc;
}
示例#19
0
/*
 * Change this clock's rate, possibly switching parents. Sequence:
 * resolve the best (parent, div) pair, park on the safe source (or
 * raise the divider first when staying on the same parent), set the
 * new parent's rate, then atomically program mux source and divider.
 * On failure the original parent rate and divider are restored.
 */
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
							&new_prate);
	/* Only an exact rate match is accepted. */
	if (rrate != rate)
		return -EINVAL;

	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

	/* Refer to the description of safe_freq in clock-generic.h */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);

	else if (new_parent == old_parent && new_div >= old_div) {
		/*
		 * If both the parent_rate and divider changes, there may be an
		 * intermediate frequency generated. Ensure this intermediate
		 * frequency is less than both the new rate and previous rate.
		 */
		rc = set_src_div(md, old_parent, new_div);
	}
	/* rc is still 0 here when neither branch above was taken. */
	if (rc)
		return rc;

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			new_parent->dbg_name, new_prate);
		goto err_set_rate;
	}

	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Set divider and mux src atomically */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Not switching to new_parent, so disable it */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	/* Best-effort restore of the original parent rate and divider. */
	rc = clk_set_rate(old_parent, old_prate);
	WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
		c->dbg_name, old_parent->dbg_name, old_prate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		c->dbg_name, old_div, old_parent->dbg_name);

	return rc;
}
示例#20
0
static int mux_div_clk_set_parent(struct clk_hw *hw, u8 index)
{
	/* Switch the mux source via parent_map, keeping the current divider. */
	struct mux_div_clk *md = to_mux_div_clk(hw);

	return __mux_div_clk_set_rate_and_parent(hw, md->parent_map[index],
			md->data.div);
}
/*
 * Change this clock's rate, possibly switching parents. Sequence:
 * resolve the best (parent, div) pair, park on the safe source (or
 * raise the divider first when staying on the same parent), set the
 * new parent's rate, then atomically program mux source and divider.
 * On failure the original parent rate and divider are restored.
 */
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
							&new_prate);
	/* Only an exact rate match is accepted. */
	if (rrate != rate)
		return -EINVAL;

	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

	/* Refer to the description of safe_freq in clock-generic.h */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);

	else if (new_parent == old_parent && new_div >= old_div) {
		/*
		 * Raise the divider first so any intermediate frequency
		 * stays below both the old and the new rate.
		 */
		rc = set_src_div(md, old_parent, new_div);
	}
	if (rc)
		return rc;

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			clk_name(new_parent), new_prate);
		goto err_set_rate;
	}

	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Set divider and mux src atomically */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Not switching to new_parent, so disable it */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	/* Best-effort restore of the original parent rate and divider. */
	rc = clk_set_rate(old_parent, old_prate);
	WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
		clk_name(c), clk_name(old_parent), old_prate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		clk_name(c), old_div, clk_name(old_parent));

	return rc;
}