/*
 * OSAL_CCMU_SetMclkSrc() - select the source (parent) clock of a module clock.
 * @hMclk:   opaque handle, actually a struct clk * for the module clock.
 * @nSclkNo: index into _sysClkName[] naming the desired system/source clock.
 *           NOTE(review): no bounds check is possible here because the size
 *           of _sysClkName[] is not visible in this file — callers must pass
 *           a valid index.
 *
 * Returns 0 on success (or if the parent is already set), -1 on failure.
 */
__s32 OSAL_CCMU_SetMclkSrc(__hdle hMclk, __u32 nSclkNo)
{
	struct clk *hSysClk = NULL;
	struct clk *hModClk = (struct clk *)hMclk;
	s32 retCode = -1;

	hSysClk = clk_get(NULL, _sysClkName[nSclkNo]);
	if (NULL == hSysClk) {
		__wrn("Fail to get handle for system clock [%d].\n", nSclkNo);
		return -1;
	}
	__inf("OSAL_CCMU_SetMclkSrc<%s,%s>\n", clk_name(hModClk), clk_name(hSysClk));

	/* Already on the requested parent — nothing to do. */
	if (clk_get_parent(hModClk) == hSysClk) {
		__inf("Parent is already %d, not need to set.\n", nSclkNo);
		clk_put(hSysClk);
		return 0;
	}

	retCode = clk_set_parent(hModClk, hSysClk);
	/*
	 * Fix: the original only tested retCode == -1, silently treating any
	 * other negative error code from clk_set_parent() as success.
	 */
	if (retCode < 0) {
		__wrn("Fail to set parent for clk.\n");
		clk_put(hSysClk);
		return -1;
	}

	clk_put(hSysClk);
	return retCode;
}
/*
 * OSAL_CCMU_SetMclkDiv() - set a module clock's rate as parent_rate / nDiv.
 * @hMclk: opaque handle, actually a struct clk * for the module clock.
 * @nDiv:  divider; must be > 0.
 *
 * Returns the result of clk_set_rate() on success, -1 on invalid input.
 */
__s32 OSAL_CCMU_SetMclkDiv(__hdle hMclk, __s32 nDiv)
{
	struct clk *hModClk = (struct clk *)hMclk;
	struct clk *hParentClk = clk_get_parent(hModClk);
	u32 srcRate;

	/*
	 * Fix: the original dereferenced the parent via clk_get_rate() without
	 * checking for NULL, crashing on an orphan clock.
	 */
	if (NULL == hParentClk) {
		__wrn("Fail to get parent for clk.\n");
		return -1;
	}

	srcRate = clk_get_rate(hParentClk);

	__inf("OSAL_CCMU_SetMclkDiv<p:%s,m:%s,%d>\n",
	      clk_name(hParentClk), clk_name(hModClk), nDiv);

	/*
	 * Fix: reject negative dividers as well as zero — the original only
	 * guarded nDiv == 0, letting a negative value through to the division.
	 */
	if (nDiv <= 0)
		return -1;

	return clk_set_rate(hModClk, srcRate / nDiv);
}
/*
 * OSAL_CCMU_SetSrcFreq() - set the frequency of a system/source clock.
 * @nSclkNo: index into _sysClkName[] naming the system clock (not bounds
 *           checked here — array size is not visible in this file).
 * @nFreq:   requested rate in Hz.
 *
 * Returns 0 on success (or if the clock already runs at nFreq), a negative
 * error code on failure.
 */
__s32 OSAL_CCMU_SetSrcFreq(__u32 nSclkNo, __u32 nFreq)
{
	struct clk *hSysClk = NULL;
	s32 retCode = -1;

	hSysClk = clk_get(NULL, _sysClkName[nSclkNo]);
	if (NULL == hSysClk) {
		__wrn("Fail to get handle for system clock [%d].\n", nSclkNo);
		return -1;
	}
	__inf("OSAL_CCMU_SetSrcFreq<%s,%d>\n", clk_name(hSysClk), nFreq);

	/* Already at the requested rate — nothing to do. */
	if (nFreq == clk_get_rate(hSysClk)) {
#if 0
		__inf("Sys clk[%d] freq is alreay %d, not need to set.\n",
		      nSclkNo, nFreq);
#endif
		clk_put(hSysClk);
		return 0;
	}

	retCode = clk_set_rate(hSysClk, nFreq);
	/*
	 * Fix: the original only tested retCode == -1, silently treating any
	 * other negative error code from clk_set_rate() as success.
	 */
	if (retCode < 0) {
		__wrn("Fail to set nFreq[%d] for sys clk[%d].\n", nFreq, nSclkNo);
		clk_put(hSysClk);
		return retCode;
	}

	clk_put(hSysClk);
	hSysClk = NULL;
	return retCode;
}
/*
 * OSAL_CCMU_MclkReset() - assert or de-assert the reset of a module clock.
 * @hMclk:  opaque handle, actually a struct clk * for the module clock.
 * @bReset: reset control value, forwarded verbatim to clk_reset().
 *
 * Returns whatever clk_reset() returns.
 */
__s32 OSAL_CCMU_MclkReset(__hdle hMclk, __s32 bReset)
{
	struct clk *clk = (struct clk *)hMclk;

	__inf("OSAL_CCMU_MclkReset<%s,%d>\n", clk_name(clk), bReset);

	return clk_reset(clk, bReset);
}
/*
 * OSAL_CCMU_MclkOnOff() - gate or ungate a module clock.
 * @hMclk:  opaque handle, actually a struct clk * for the module clock.
 * @bOnOff: non-zero to enable, zero to disable.
 *
 * Enabling is done at most once (skipped when the enable count is already
 * non-zero).  Disabling deliberately drains the whole enable count so the
 * clock really ends up gated.
 *
 * Returns 0, or the result of clk_enable() when an enable was performed.
 */
__s32 OSAL_CCMU_MclkOnOff(__hdle hMclk, __s32 bOnOff)
{
	struct clk *clk = (struct clk *)hMclk;
	__s32 result = 0;

	__inf("OSAL_CCMU_MclkOnOff<%s,%d>\n", clk_name(clk), bOnOff);

	if (!bOnOff) {
		/* Drain every outstanding enable reference. */
		while (clk->enable)
			clk_disable(clk);
		return result;
	}

	if (!clk->enable)
		result = clk_enable(clk);

	return result;
}
/*
 * mux_set_rate() - set a mux clock's rate by picking and programming a
 * suitable parent (HTC debug-footprint instrumented variant).
 *
 * Sequence: choose a parent that can produce @rate, optionally park the mux
 * on a safe parent, set the new parent's rate, then switch the mux over.
 * On failure the parent rate and mux selection are rolled back.
 *
 * Returns 0 on success, -EINVAL if no parent can produce @rate, or the
 * error code of the failing step.
 */
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SAFE_PARENT_INIT);
#endif
	/*
	 * First pass (only when try_get_rate is set): prefer a parent that is
	 * already running at @rate, avoiding a parent rate change entirely.
	 */
	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
		struct clk *p = mux->parents[i].src;
		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}

	/*
	 * Second pass: any parent whose rate can be rounded to exactly @rate.
	 * The loop condition skips this pass entirely if pass one already
	 * found a parent.  With try_new_parent, keep scanning until a parent
	 * different from the current one is found.
	 */
	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			if (!mux->try_new_parent)
				break;
			if (mux->try_new_parent && new_parent != c->parent)
				break;
		}
	}

	if (new_parent == NULL)
		return -EINVAL;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SET_SAFE_RATE);
#endif
	/*
	 * Park the mux on the safe parent while rates change, unless the mux
	 * can switch directly to a distinct new parent (try_new_parent).
	 */
	if (mux->safe_sel >= 0 &&
	    !(mux->try_new_parent && (new_parent != c->parent))) {
		/* Select the "safe" source by setting a safe frequency first. */
		if (mux->safe_freq) {
			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
			if (rc) {
				pr_err("Failed to set safe rate on %s\n",
				       clk_name(mux->safe_parent));
				return rc;
			}
		}

		/* Lock out concurrent enable/disable paths touching the mux. */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
		if (rc)
			return rc;
	}

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_SET_PARENT_RATE);
#endif
	/* Remember the parent's current rate so it can be restored on error. */
	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_CLK_UNPREPARE);
#endif
	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_cpu_freq_footprint_by_clk(FT_CUR_RATE, c, rate);
	set_acpuclk_l2_freq_footprint_by_clk(FT_CUR_RATE, c, rate);
#endif

#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_RETURN);
#endif
	return 0;

set_par_fail:
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_CLK_UNPREPARE);
#endif
	/* Undo the parent rate change; result intentionally ignored here. */
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_SET_PARENT_RATE);
#endif
	/* Switch the mux back to the (unchanged) current parent. */
	WARN(mux->ops->set_mux_sel(mux, mux_parent_to_src_sel(mux, c->parent)),
	     "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
#if defined(CONFIG_HTC_DEBUG_FOOTPRINT)
	set_acpuclk_footprint_by_clk(c, ACPU_BEFORE_ERR_RETURN);
#endif
	return rc;
}
/*
 * mux_div_clk_set_rate() - set the rate of a combined mux+divider clock.
 *
 * Rounds @rate to a (parent, divider, parent-rate) tuple, optionally parks
 * on a safe source, changes the parent's rate, then atomically programs the
 * new divider and mux selection.  Every failure path rolls back to the
 * original parent, parent rate and divider.
 *
 * Returns 0 on success, -EINVAL if @rate cannot be produced exactly, or the
 * error code of the failing step.
 */
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	/* Find the parent/divider pair that produces exactly @rate. */
	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
				     &new_prate);
	if (rrate != rate)
		return -EINVAL;

	/* Snapshot current state for rollback on any later failure. */
	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

	/* Park on the safe source, or pre-widen the divider on the same
	 * parent so no intermediate frequency overshoots. (rc is 0 here if
	 * neither branch runs.) */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);
	else if (new_parent == old_parent && new_div >= old_div) {
		rc = set_src_div(md, old_parent, new_div);
	}
	if (rc)
		return rc;

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			clk_name(new_parent), new_prate);
		goto err_set_rate;
	}

	/* Prepare/enable the new parent and take the lock for the switch. */
	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Program divider and mux source together. */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Switch did not happen; release the new parent instead of the old. */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	rc = clk_set_rate(old_parent, old_prate);
	WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
		clk_name(c), clk_name(old_parent), old_prate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		clk_name(c), old_div, clk_name(old_parent));

	return rc;
}
/*
 * mux_set_rate() - set a mux clock's rate by picking and programming a
 * suitable parent.
 *
 * Returns 0 on success, -EINVAL if no parent can produce @rate, or the
 * error code of the failing step; failures roll back the parent rate and
 * mux selection.
 */
static int mux_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_clk *mux = to_mux_clk(c);
	struct clk *new_parent = NULL;
	int rc = 0, i;
	unsigned long new_par_curr_rate;
	unsigned long flags;

	/*
	 * Check if one of the possible parents is already at the requested
	 * rate.
	 */
	for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
		struct clk *p = mux->parents[i].src;
		if (p->rate == rate && clk_round_rate(p, rate) == rate) {
			new_parent = mux->parents[i].src;
			break;
		}
	}

	/*
	 * Second pass: any parent that can be rounded to exactly @rate; the
	 * loop condition skips the scan when pass one already chose one.
	 * With try_new_parent, keep looking for a parent distinct from the
	 * current one.
	 */
	for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
		if (clk_round_rate(mux->parents[i].src, rate) == rate) {
			new_parent = mux->parents[i].src;
			if (!mux->try_new_parent)
				break;
			if (mux->try_new_parent && new_parent != c->parent)
				break;
		}
	}

	if (new_parent == NULL)
		return -EINVAL;

	/*
	 * Switch to safe parent since the old and new parent might be the
	 * same and the parent might temporarily turn off while switching
	 * rates. If the mux can switch between distinct sources safely
	 * (indicated by try_new_parent), and the new source is not the current
	 * parent, do not switch to the safe parent.
	 */
	if (mux->safe_sel >= 0 &&
	    !(mux->try_new_parent && (new_parent != c->parent))) {
		/*
		 * The safe parent might be a clock with multiple sources;
		 * to select the "safe" source, set a safe frequency.
		 */
		if (mux->safe_freq) {
			rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
			if (rc) {
				pr_err("Failed to set safe rate on %s\n",
				       clk_name(mux->safe_parent));
				return rc;
			}
		}

		/*
		 * Some mux implementations might switch to/from a low power
		 * parent as part of their disable/enable ops. Grab the
		 * enable lock to avoid racing with these implementations.
		 */
		spin_lock_irqsave(&c->lock, flags);
		rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
		spin_unlock_irqrestore(&c->lock, flags);
		if (rc)
			return rc;
	}

	/* Remember the parent's current rate so it can be restored on error. */
	new_par_curr_rate = clk_get_rate(new_parent);
	rc = clk_set_rate(new_parent, rate);
	if (rc)
		goto set_rate_fail;

	rc = mux_set_parent(c, new_parent);
	if (rc)
		goto set_par_fail;

	return 0;

set_par_fail:
	/* Undo the parent rate change; result intentionally ignored here. */
	clk_set_rate(new_parent, new_par_curr_rate);
set_rate_fail:
	/* Switch the mux back to the (unchanged) current parent. */
	WARN(mux->ops->set_mux_sel(mux, mux_parent_to_src_sel(mux, c->parent)),
	     "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
	return rc;
}
/*
 * mux_div_clk_set_rate() - set the rate of a combined mux+divider clock.
 *
 * Rounds @rate to a (parent, divider, parent-rate) tuple, optionally parks
 * on a safe source, changes the parent's rate, then atomically programs the
 * new divider and mux selection.  Every failure path rolls back to the
 * original parent, parent rate and divider.
 *
 * Returns 0 on success, -EINVAL if @rate cannot be produced exactly, or the
 * error code of the failing step.
 */
static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
{
	struct mux_div_clk *md = to_mux_div_clk(c);
	unsigned long flags, rrate;
	unsigned long new_prate, old_prate;
	struct clk *old_parent, *new_parent;
	u32 new_div, old_div;
	int rc;

	rc = safe_parent_init_once(c);
	if (rc)
		return rc;

	/* Find the parent/divider pair that produces exactly @rate. */
	rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
				     &new_prate);
	if (rrate != rate)
		return -EINVAL;

	/* Snapshot current state for rollback on any later failure. */
	old_parent = c->parent;
	old_div = md->data.div;
	old_prate = clk_get_rate(c->parent);

	/* Refer to the description of safe_freq in clock-generic.h */
	if (md->safe_freq)
		rc = set_src_div(md, md->safe_parent, md->safe_div);
	else if (new_parent == old_parent && new_div >= old_div) {
		/*
		 * If both the parent_rate and divider changes, there may be an
		 * intermediate frequency generated. Ensure this intermediate
		 * frequency is less than both the new rate and previous rate.
		 */
		rc = set_src_div(md, old_parent, new_div);
	}
	/* rc is still 0 here when neither branch above ran. */
	if (rc)
		return rc;

	rc = clk_set_rate(new_parent, new_prate);
	if (rc) {
		pr_err("failed to set %s to %ld\n",
			clk_name(new_parent), new_prate);
		goto err_set_rate;
	}

	/* Prepare/enable the new parent and take the lock for the switch. */
	rc = __clk_pre_reparent(c, new_parent, &flags);
	if (rc)
		goto err_pre_reparent;

	/* Set divider and mux src atomically */
	rc = __set_src_div(md, new_parent, new_div);
	if (rc)
		goto err_set_src_div;

	c->parent = new_parent;

	__clk_post_reparent(c, old_parent, &flags);
	return 0;

err_set_src_div:
	/* Not switching to new_parent, so disable it */
	__clk_post_reparent(c, new_parent, &flags);
err_pre_reparent:
	rc = clk_set_rate(old_parent, old_prate);
	WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
		clk_name(c), clk_name(old_parent), old_prate);
err_set_rate:
	rc = set_src_div(md, old_parent, old_div);
	WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
		clk_name(c), old_div, clk_name(old_parent));

	return rc;
}