/*
 * Remove this clock's contribution to the RPM rate votes, leaving only
 * the peer clock's votes (if the peer is enabled) in place.
 */
static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* If no rate was ever voted for, there is nothing to undo. */
	if (r->last_set_khz) {
		struct msm_rpm_iv_pair iv;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		iv.id = r->rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		/* Re-vote the active set with only the peer's rate. */
		iv.value = peer_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;	/* On failure, the clock stays marked enabled. */

		iv.value = peer_sleep_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}
Пример #2
0
static void rpm_clk_disable(unsigned id)
{
	unsigned long flags;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (rpm_clk[id].count > 0)
		rpm_clk[id].count--;
	else {
		pr_warning("%s: Reference counts are incorrect for clock %d!\n",
			   __func__, id);
		goto out;
	}

	if (!rpm_clk[id].count) {
                unsigned peer_id = rpm_clk[id].peer_clk_id;

                if (rpm_clk[id].last_set_khz) {
                        struct msm_rpm_iv_pair iv;
                        unsigned peer_khz = 0, peer_sleep_khz = 0;
                        int rc;

                        iv.id = rpm_clk[id].rpm_clk_id;

                        /* Take peer clock rate into account only if enabled. */
                        if (rpm_clk[peer_id].count) {
                                peer_khz = rpm_clk[peer_id].last_set_khz;
                                peer_sleep_khz =
                                        rpm_clk[peer_id].last_set_sleep_khz;
                        }

                        iv.value = peer_khz;
                        rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
                        if (rc)
                                goto out;

                        iv.value = peer_sleep_khz;
                        rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
                }

                /* Turn off local smi_clk after disabling remote clock. */
                if ((id == R_SMI_CLK || id == R_SMI_A_CLK)
                    && !rpm_clk[peer_id].count) {
                        uint32_t regval;
                        spin_lock(&local_clock_reg_lock);
                        regval = secure_readl(MMSS_MAXI_EN2);
                        regval &= ~SMI_2X_AXI_CLK_EN;
                        secure_writel(regval, MMSS_MAXI_EN2);
                        spin_unlock(&local_clock_reg_lock);
                }
        }

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}
/*
 * Set the rate vote for an RPM-managed clock.  @rate is in Hz and is
 * rounded up to KHz.  The vote is cached in last_set_khz /
 * last_set_sleep_khz and only forwarded to the RPM while the clock is
 * enabled; otherwise rpm_clk_enable() applies the cached vote later.
 */
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	/* Round up so the granted rate is never below the requested rate. */
	this_khz = DIV_ROUND_UP(rate, 1000);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Ignore duplicate requests. */
	if (r->last_set_khz == this_khz)
		goto out;

	/* Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero. */
	if (r->active_only)
		this_sleep_khz = 0;
	else
		this_sleep_khz = this_khz;

	if (r->enabled) {
		struct msm_rpm_iv_pair iv;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		iv.id = r->rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		/* The RPM resource is shared with the peer, so vote for the
		 * max of the two clocks' rates. */
		iv.value = max(this_khz, peer_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = max(this_sleep_khz, peer_sleep_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
	/* Cache the new vote only if the RPM accepted it (or no request
	 * needed to be sent because the clock is disabled). */
	if (!rc) {
		r->last_set_khz = this_khz;
		r->last_set_sleep_khz = this_sleep_khz;
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
/*
 * Recompute and send the aggregate vote for an XO source.  The strongest
 * requested mode wins (ON > PIN_CTRL > OFF).  xo->mode is updated
 * optimistically and rolled back if the RPM request fails.  Returns 0 on
 * success or the RPM error code.
 */
static int msm_xo_update_vote(struct msm_xo *xo)
{
	int ret;
	unsigned vote, prev_vote = xo->mode;
	struct msm_rpm_iv_pair cmd;

	/* Pick the strongest outstanding vote for this source. */
	if (xo->votes[MSM_XO_MODE_ON])
		vote = MSM_XO_MODE_ON;
	else if (xo->votes[MSM_XO_MODE_PIN_CTRL])
		vote = MSM_XO_MODE_PIN_CTRL;
	else
		vote = MSM_XO_MODE_OFF;

	if (vote == prev_vote)
		return 0;

	/*
	 * Change the vote here to simplify the TCXO logic. If the RPM
	 * command fails we'll rollback.
	 */
	xo->mode = vote;

	if (xo == &msm_xo_sources[MSM_XO_PXO]) {
		/* Only the sleep set is updated for PXO here. */
		cmd.id = MSM_RPM_ID_PXO_CLK;
		cmd.value = msm_xo_sources[MSM_XO_PXO].mode ? 1 : 0;
		ret = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &cmd, 1);
	} else if (xo == &msm_xo_sources[MSM_XO_CXO]) {
		cmd.id = MSM_RPM_ID_CXO_CLK;
		cmd.value = msm_xo_sources[MSM_XO_CXO].mode ? 1 : 0;
		ret = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &cmd, 1);
	} else {
		/* All TCXO buffers share one packed CXO_BUFFERS request. */
		cmd.id = MSM_RPM_ID_CXO_BUFFERS;
		cmd.value = (msm_xo_sources[MSM_XO_TCXO_D0].mode << 0)  |
			    (msm_xo_sources[MSM_XO_TCXO_D1].mode << 8)  |
			    (msm_xo_sources[MSM_XO_TCXO_A0].mode << 16) |
			    (msm_xo_sources[MSM_XO_TCXO_A1].mode << 24) |
			    (msm_xo_sources[MSM_XO_TCXO_A2].mode << 28) |
			    /*
			     * 8660 RPM has XO_CORE at bit 18 and 8960 RPM has
			     * XO_CORE at bit 20. Since the opposite bit is
			     * reserved in both cases, just set both and be
			     * done with it.
			     */
			    ((msm_xo_sources[MSM_XO_CORE].mode ? 1 : 0) << 20) |
			    ((msm_xo_sources[MSM_XO_CORE].mode ? 1 : 0) << 18);
		ret = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &cmd, 1);
	}

	/* Roll the cached mode back if the RPM rejected the request. */
	if (ret)
		xo->mode = prev_vote;

	return ret;
}
Пример #5
0
/*
 * Enable an RPM clock by index.  On the 0 -> 1 reference-count
 * transition, the cached rate votes for this clock and its (enabled)
 * peer are sent to the RPM; SMI clocks additionally ungate the local
 * smi_clk first.  The reference count is only incremented when no RPM
 * request failed.
 */
static int rpm_clk_enable(unsigned id)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (rpm_clk[id].last_set_khz == 0)
		goto out;

	if (!rpm_clk[id].count) {
		struct msm_rpm_iv_pair iv;
		unsigned this_khz = rpm_clk[id].last_set_khz;
		unsigned this_sleep_khz = rpm_clk[id].last_set_sleep_khz;
		unsigned peer_id = rpm_clk[id].peer_clk_id;
		unsigned peer_khz = 0, peer_sleep_khz = 0;

		/* Turn on local smi_clk before enabling remote clock. */
		if (id == R_SMI_CLK || id == R_SMI_A_CLK) {
			uint32_t regval;
			spin_lock(&local_clock_reg_lock);
			regval = secure_readl(MMSS_MAXI_EN2);
			regval |= SMI_2X_AXI_CLK_EN;
			secure_writel(regval, MMSS_MAXI_EN2);
			spin_unlock(&local_clock_reg_lock);
		}

		iv.id = rpm_clk[id].rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (rpm_clk[peer_id].count) {
			peer_khz = rpm_clk[peer_id].last_set_khz;
			peer_sleep_khz = rpm_clk[peer_id].last_set_sleep_khz;
		}

		/* Shared resource: vote for the max of both clocks' rates. */
		iv.value = max(this_khz, peer_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = max(this_sleep_khz, peer_sleep_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
out:
	/* The reference is counted even when no request was needed (rate
	 * unset or clock already on), but not when an RPM request failed. */
	if (!rc)
		rpm_clk[id].count++;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
/*
 * Enable an RPM clock.  Sends the cached active- and sleep-set rate
 * votes (max of this clock's and its enabled peer's) to the RPM.  If the
 * sleep-set vote fails, the active-set vote is restored to the peer's
 * rate so the two sets stay consistent.
 */
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct msm_rpm_iv_pair iv;
	int rc = 0;
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	iv.id = r->rpm_clk_id;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	iv.value = max(this_khz, peer_khz);
	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	if (rc)
		goto out;

	iv.value = max(this_sleep_khz, peer_sleep_khz);
	rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	if (rc) {
		/* Undo the active-set vote; leave only the peer's rate. */
		iv.value = peer_khz;
		msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
	}

out:
	/* An unset rate counts as success: the clock is marked enabled so
	 * a later rpm_clk_set_rate() will send the votes. */
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
Пример #7
0
/*
 * Enable an RPM clock by index.  On the 0 -> 1 reference-count
 * transition, the cached rate votes (max of this clock's and its enabled
 * peer's) are sent to the RPM for both the active and sleep sets.
 * NOTE(review): unlike the struct-clk variant, a failed sleep-set
 * request does not roll back the already-applied active-set vote here.
 */
static int rpm_clk_enable(unsigned id)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (rpm_clk[id].last_set_khz == 0)
		goto out;

	if (!rpm_clk[id].count) {
		struct msm_rpm_iv_pair iv;
		unsigned this_khz = rpm_clk[id].last_set_khz;
		unsigned this_sleep_khz = rpm_clk[id].last_set_sleep_khz;
		unsigned peer_id = rpm_clk[id].peer_clk_id;
		unsigned peer_khz = 0, peer_sleep_khz = 0;

		iv.id = rpm_clk[id].rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (rpm_clk[peer_id].count) {
			peer_khz = rpm_clk[peer_id].last_set_khz;
			peer_sleep_khz = rpm_clk[peer_id].last_set_sleep_khz;
		}

		/* Shared resource: vote for the max of both clocks' rates. */
		iv.value = max(this_khz, peer_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = max(this_sleep_khz, peer_sleep_khz);
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}
out:
	/* Count the reference unless an RPM request failed. */
	if (!rc)
		rpm_clk[id].count++;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}
Пример #8
0
/*
 * Disable an RPM clock by index.  Drops one reference; on the final
 * release, re-votes both RPM sets with only the (enabled) peer clock's
 * rates.  Warns and does nothing on a reference-count underflow.
 */
static void rpm_clk_disable(unsigned id)
{
	unsigned long flags;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	if (rpm_clk[id].count > 0)
		rpm_clk[id].count--;
	else {
		pr_warning("%s: Reference counts are incorrect for clock %d!\n",
			   __func__, id);
		goto out;
	}

	/* Only the last release removes this clock's vote at the RPM, and
	 * only if a rate was ever voted for. */
	if (!rpm_clk[id].count && rpm_clk[id].last_set_khz) {
		struct msm_rpm_iv_pair iv;
		unsigned peer_id = rpm_clk[id].peer_clk_id;
		unsigned peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		iv.id = rpm_clk[id].rpm_clk_id;

		/* Take peer clock's rate into account only if it's enabled. */
		if (rpm_clk[peer_id].count) {
			peer_khz = rpm_clk[peer_id].last_set_khz;
			peer_sleep_khz = rpm_clk[peer_id].last_set_sleep_khz;
		}

		iv.value = peer_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, &iv, 1);
		if (rc)
			goto out;

		iv.value = peer_sleep_khz;
		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &iv, 1);
	}

out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}
/*
 * Recompute and send the aggregate vote for an XO source.  The strongest
 * requested mode wins (ON > PIN_CTRL > OFF).  xo->mode is updated
 * optimistically and rolled back if the RPM request fails.  On
 * CONFIG_MACH_VIGOR builds, XO_CORE is additionally voted at bit 2 of
 * the packed CXO_BUFFERS request.
 */
static int msm_xo_update_vote(struct msm_xo *xo)
{
	int ret;
	unsigned vote, prev_vote = xo->mode;
	struct msm_rpm_iv_pair cmd;

	/* Pick the strongest outstanding vote for this source. */
	if (xo->votes[MSM_XO_MODE_ON])
		vote = MSM_XO_MODE_ON;
	else if (xo->votes[MSM_XO_MODE_PIN_CTRL])
		vote = MSM_XO_MODE_PIN_CTRL;
	else
		vote = MSM_XO_MODE_OFF;

	if (vote == prev_vote)
		return 0;

	/*
	 * Change the vote here to simplify the TCXO logic. If the RPM
	 * command fails we'll rollback.
	 */
	xo->mode = vote;

	if (xo == &msm_xo_sources[MSM_XO_PXO]) {
		/* Only the sleep set is updated for PXO here. */
		cmd.id = MSM_RPM_ID_PXO_CLK;
		cmd.value = msm_xo_sources[MSM_XO_PXO].mode ? 1 : 0;
		ret = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_SLEEP, &cmd, 1);
	} else {
		/* All TCXO buffers share one packed CXO_BUFFERS request. */
		cmd.id = MSM_RPM_ID_CXO_BUFFERS;
		cmd.value = (msm_xo_sources[MSM_XO_TCXO_D0].mode << 0)  |
			    (msm_xo_sources[MSM_XO_TCXO_D1].mode << 8)  |
			    (msm_xo_sources[MSM_XO_TCXO_A0].mode << 16) |
#if defined(CONFIG_MACH_VIGOR)
			    (msm_xo_sources[MSM_XO_TCXO_A1].mode << 24) |
			    ((msm_xo_sources[MSM_XO_CORE].mode ? 1 : 0) << 2);
#else
			    (msm_xo_sources[MSM_XO_TCXO_A1].mode << 24);
#endif
		ret = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &cmd, 1);
	}

	/* Roll the cached mode back if the RPM rejected the request. */
	if (ret)
		xo->mode = prev_vote;

	return ret;
}
/*
 * Aggregate one voter's regulator request with all other voters' and,
 * when the aggregate changed, send it to the RPM.
 *
 * @set selects the active (MSM_RPM_CTX_SET_0) or sleep request copy;
 * @mask0/@val0 and @mask1/@val1 are bitfield updates applied to req[0]
 * and req[1]; @cnt is the number of iv pairs forwarded to the RPM.
 * When @update_voltage is set, this voter's minimum-voltage vote is
 * re-derived from the updated request words.  The request actually sent
 * carries the highest voltage any voter asked for.  On RPM failure the
 * request words are rolled back and the error code is returned;
 * duplicate requests are skipped (optionally logged).
 */
static int vreg_send_request(struct vreg *vreg, enum rpm_vreg_voter voter,
			  int set, unsigned mask0, unsigned val0,
			  unsigned mask1, unsigned val1, unsigned cnt,
			  int update_voltage)
{
	struct msm_rpm_iv_pair *prev_req;
	int rc = 0, max_uV_vote = 0;
	unsigned prev0, prev1;
	int *min_uV_vote;
	int i;

	/* Pick the per-set vote table and last-sent request copy. */
	if (set == MSM_RPM_CTX_SET_0) {
		min_uV_vote = vreg->active_min_uV_vote;
		prev_req = vreg->prev_active_req;
	} else {
		min_uV_vote = vreg->sleep_min_uV_vote;
		prev_req = vreg->prev_sleep_req;
	}

	/* Apply the masked updates, keeping the old words for rollback. */
	prev0 = vreg->req[0].value;
	vreg->req[0].value &= ~mask0;
	vreg->req[0].value |= val0 & mask0;

	prev1 = vreg->req[1].value;
	vreg->req[1].value &= ~mask1;
	vreg->req[1].value |= val1 & mask1;

	if (update_voltage)
		min_uV_vote[voter] = voltage_from_req(vreg);

	/* Find the highest voltage voted for and use it. */
	for (i = 0; i < RPM_VREG_VOTER_COUNT; i++)
		max_uV_vote = max(max_uV_vote, min_uV_vote[i]);
	voltage_to_req(max_uV_vote, vreg);

	if (msm_rpm_vreg_debug_mask & MSM_RPM_VREG_DEBUG_VOTE)
		rpm_regulator_vote(vreg, voter, set, min_uV_vote[voter],
				max_uV_vote);

	/* Ignore duplicate requests */
	if (vreg->req[0].value != prev_req[0].value ||
	    vreg->req[1].value != prev_req[1].value) {
		rc = msm_rpmrs_set_noirq(set, vreg->req, cnt);
		if (rc) {
			/* RPM rejected the request; restore the old words. */
			vreg->req[0].value = prev0;
			vreg->req[1].value = prev1;

			vreg_err(vreg, "msm_rpmrs_set_noirq failed - "
				"set=%s, id=%d, rc=%d\n",
				(set == MSM_RPM_CTX_SET_0 ? "active" : "sleep"),
				vreg->req[0].id, rc);
		} else {
			/* Only save if nonzero and active set. */
			if (max_uV_vote && (set == MSM_RPM_CTX_SET_0))
				vreg->save_uV = max_uV_vote;
			if (msm_rpm_vreg_debug_mask
			    & MSM_RPM_VREG_DEBUG_REQUEST)
				rpm_regulator_req(vreg, set);
			prev_req[0].value = vreg->req[0].value;
			prev_req[1].value = vreg->req[1].value;
		}
	} else if (msm_rpm_vreg_debug_mask & MSM_RPM_VREG_DEBUG_DUPLICATE) {
		rpm_regulator_duplicate(vreg, set, cnt);
	}

	return rc;
}
Пример #11
0
/* Send a single iv-pair rate request for @r in the given RPM context. */
static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
			   uint32_t context, int noirq)
{
	struct msm_rpm_iv_pair req;

	req.id = r->rpm_clk_id;
	req.value = value;

	return noirq ? msm_rpmrs_set_noirq(context, &req, 1)
		     : msm_rpmrs_set(context, &req, 1);
}

static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
	int rc;
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	rc = msm_rpm_get_status(&iv, 1);
	return (rc < 0) ? rc : iv.value * r->factor;
}

/* Forward a rate request over SMD using the clock's configured key. */
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq)
{
	struct msm_rpm_kvp payload = {
		.key = r->rpm_key,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	if (!noirq)
		return msm_rpm_send_message(context, r->rpm_res_type,
						r->rpm_clk_id, &payload, 1);

	return msm_rpm_send_message_noirq(context, r->rpm_res_type,
					  r->rpm_clk_id, &payload, 1);
}

/*
 * Per-target RPM interface: how to set/get a clock's rate and which
 * context IDs name the active and sleep sets.
 */
struct clk_rpmrs_data {
	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq);
	int (*get_rate_fn)(struct rpm_clk *r);
	int ctx_active_id;
	int ctx_sleep_id;
};

/* Targets using the iv-pair rpmrs interface. */
struct clk_rpmrs_data clk_rpmrs_data = {
	.set_rate_fn = clk_rpmrs_set_rate,
	.get_rate_fn = clk_rpmrs_get_rate,
	.ctx_active_id = MSM_RPM_CTX_SET_0,
	.ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

/* Targets using the SMD message interface; no rate read-back is set. */
struct clk_rpmrs_data clk_rpmrs_data_smd = {
	.set_rate_fn = clk_rpmrs_set_rate_smd,
	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

/* Protects the enabled/last_set_* vote state of all RPM clocks. */
static DEFINE_SPINLOCK(rpm_clock_lock);

/*
 * Enable an RPM clock.  Votes the max of this clock's and its enabled
 * peer's cached rates in the active set, then the sleep set.  Branch
 * clocks vote a 0/1 enable flag instead of a rate.  If the sleep-set
 * vote fails, the active-set vote is restored to the peer's rate.
 */
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	value = max(this_khz, peer_khz);
	/* Branch clocks are on/off only; collapse the rate to a flag. */
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active_noirq(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}

/*
 * Remove this clock's contribution from both RPM sets, leaving only the
 * peer clock's votes (collapsed to 0/1 for branch clocks).  On RPM
 * failure the clock stays marked enabled.
 */
static void rpm_clk_disable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);

	spin_lock_irqsave(&rpm_clock_lock, flags);

	/* If no rate was ever voted for, there is nothing to undo. */
	if (r->last_set_khz) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled) {
			peer_khz = peer->last_set_khz;
			peer_sleep_khz = peer->last_set_sleep_khz;
		}

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	}
	r->enabled = false;
out:
	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return;
}
Пример #12
0
/* Send a single iv-pair rate request for @r in the given RPM context. */
static int clk_rpmrs_set_rate(struct rpm_clk *r, uint32_t value,
			   uint32_t context, int noirq)
{
	struct msm_rpm_iv_pair req = {
		.id = r->rpm_clk_id,
		.value = value,
	};

	if (!noirq)
		return msm_rpmrs_set(context, &req, 1);

	return msm_rpmrs_set_noirq(context, &req, 1);
}

static int clk_rpmrs_get_rate(struct rpm_clk *r)
{
	int rc;
	struct msm_rpm_iv_pair iv = { .id = r->rpm_status_id, };
	rc = msm_rpm_get_status(&iv, 1);
	return (rc < 0) ? rc : iv.value * r->factor;
}

#define RPM_SMD_KEY_RATE	0x007A484B
#define RPM_SMD_KEY_ENABLE	0x62616E45

/* Forward a rate request over SMD, keyed by the clock's flavor. */
static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq)
{
	/* Branch clocks carry an enable flag; rate clocks carry KHz. */
	u32 key = r->branch ? RPM_SMD_KEY_ENABLE : RPM_SMD_KEY_RATE;
	struct msm_rpm_kvp payload = {
		.key = key,
		.data = (void *)&value,
		.length = sizeof(value),
	};

	if (!noirq)
		return msm_rpm_send_message(context, r->rpm_res_type,
						r->rpm_clk_id, &payload, 1);

	return msm_rpm_send_message_noirq(context, r->rpm_res_type,
					  r->rpm_clk_id, &payload, 1);
}

/*
 * Per-target RPM interface: how to set/get a clock's rate and which
 * context IDs name the active and sleep sets.
 */
struct clk_rpmrs_data {
	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value,
				uint32_t context, int noirq);
	int (*get_rate_fn)(struct rpm_clk *r);
	int ctx_active_id;
	int ctx_sleep_id;
};

/* Targets using the iv-pair rpmrs interface. */
struct clk_rpmrs_data clk_rpmrs_data = {
	.set_rate_fn = clk_rpmrs_set_rate,
	.get_rate_fn = clk_rpmrs_get_rate,
	.ctx_active_id = MSM_RPM_CTX_SET_0,
	.ctx_sleep_id = MSM_RPM_CTX_SET_SLEEP,
};

/* Targets using the SMD message interface; no rate read-back is set. */
struct clk_rpmrs_data clk_rpmrs_data_smd = {
	.set_rate_fn = clk_rpmrs_set_rate_smd,
	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
};

/* Protects the enabled/last_set_* vote state of all RPM clocks. */
static DEFINE_SPINLOCK(rpm_clock_lock);

/*
 * Enable an RPM clock.  Votes the max of this clock's and its enabled
 * peer's cached rates in the active set, then the sleep set.  Branch
 * clocks vote a 0/1 enable flag instead of a rate.  If the sleep-set
 * vote fails, the active-set vote is restored to the peer's rate.
 */
static int rpm_clk_enable(struct clk *clk)
{
	unsigned long flags;
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	spin_lock_irqsave(&rpm_clock_lock, flags);

	this_khz = r->last_set_khz;
	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	this_sleep_khz = r->last_set_sleep_khz;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled) {
		peer_khz = peer->last_set_khz;
		peer_sleep_khz = peer->last_set_sleep_khz;
	}

	value = max(this_khz, peer_khz);
	/* Branch clocks are on/off only; collapse the rate to a flag. */
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active_noirq(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep_noirq(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz. */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active_noirq(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	spin_unlock_irqrestore(&rpm_clock_lock, flags);

	return rc;
}