/* Enable a clock with no locking, enabling parent clocks as needed. */
static int local_clk_enable_nolock(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	int rc = 0;

	if (clk->type == RESET)
		return -EPERM;

	if (!clk->count) {
		rc = local_vote_sys_vdd(clk->current_freq->sys_vdd);
		if (rc)
			goto err_vdd;
		if (clk->parent != C(NONE)) {
			rc = local_clk_enable_nolock(clk->parent);
			if (rc)
				goto err_par;
		}
		rc = local_src_enable(clk->current_freq->src);
		if (rc)
			goto err_src;
		local_clk_enable_reg(id);
	}
	clk->count++;

	return rc;

err_src:
	if (clk->parent != C(NONE))
		rc = local_clk_disable_nolock(clk->parent);
err_par:
	local_unvote_sys_vdd(clk->current_freq->sys_vdd);
err_vdd:
	return rc;
}
/* Enable a clock with no locking, enabling parent clocks as needed. */
static int local_clk_enable_nolock(unsigned id)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	int rc = 0;

	if (clk->type == RESET)
		return -EPERM;

	if (!clk->count) {
		rc = local_vote_sys_vdd(clk->current_freq->sys_vdd);
		if (rc)
			goto err_vdd;
		if (clk->parent != C(NONE)) {
			rc = local_clk_enable_nolock(clk->parent);
			if (rc)
				goto err_par;
		}
		rc = local_src_enable(clk->current_freq->src);
		if (rc)
			goto err_src;
		local_clk_enable_reg(id);
		/*
		 * With remote rail control, the remote processor might modify
		 * the clock control register when the rail is enabled/disabled.
		 * Enable the rail inside the lock to protect against this.
		 */
		rc = soc_set_pwr_rail(id, 1);
		if (rc)
			goto err_pwr;
	}
	clk->count++;

	return rc;

err_pwr:
	local_clk_disable_reg(id);
	/* Also drop the source enable taken above before unwinding further. */
	local_src_disable(clk->current_freq->src);
err_src:
	if (clk->parent != C(NONE))
		rc = local_clk_disable_nolock(clk->parent);
err_par:
	local_unvote_sys_vdd(clk->current_freq->sys_vdd);
err_vdd:
	return rc;
}
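/*
 * Illustrative only: a minimal sketch of a locked wrapper around
 * local_clk_enable_nolock(), assuming callers serialize register access on
 * local_clock_reg_lock (the spinlock _local_clk_set_rate() below also takes).
 * The name local_clk_enable() is chosen for this sketch and is not confirmed
 * by the code above.
 */
static int local_clk_enable(unsigned id)
{
	int rc;
	unsigned long flags;

	/* Hold the register lock across the refcount and register updates. */
	spin_lock_irqsave(&local_clock_reg_lock, flags);
	rc = local_clk_enable_nolock(id);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return rc;
}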
/* Set a clock's frequency. */
static int _local_clk_set_rate(unsigned id, struct clk_freq_tbl *nf)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *cf;
	const int32_t *chld = clk->children;
	int i, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);

	/* Check if frequency is actually changed. */
	cf = clk->current_freq;
	if (nf == cf)
		goto release_lock;

	/* Disable branch if clock isn't dual-banked with a glitch-free MUX. */
	if (clk->banked_mnd_masks == NULL) {
		/* Disable all branches to prevent glitches. */
		for (i = 0; chld && chld[i] != C(NONE); i++) {
			struct clk_local *ch = &soc_clk_local_tbl[chld[i]];
			/*
			 * Don't bother turning off if it is already off.
			 * Checking ch->count is cheaper (cache) than reading
			 * and writing to a register (uncached/unbuffered).
			 */
			if (ch->count)
				local_clk_disable_reg(chld[i]);
		}
		if (clk->count)
			local_clk_disable_reg(id);
	}

	if (clk->count) {
		/* Vote for voltage and source for new freq. */
		rc = local_vote_sys_vdd(nf->sys_vdd);
		if (rc)
			goto sys_vdd_vote_failed;
		rc = local_src_enable(nf->src);
		if (rc) {
			local_unvote_sys_vdd(nf->sys_vdd);
			goto src_enable_failed;
		}
	}

	/* Perform clock-specific frequency switch operations. */
	BUG_ON(!clk->set_rate);
	clk->set_rate(clk, nf);

	/* Release requirements of the old freq. */
	if (clk->count) {
		local_src_disable(cf->src);
		local_unvote_sys_vdd(cf->sys_vdd);
	}

	/*
	 * Current freq must be updated before local_clk_enable_reg()
	 * is called to make sure the MNCNTR_EN bit is set correctly.
	 */
	clk->current_freq = nf;

src_enable_failed:
sys_vdd_vote_failed:
	/* Enable any clocks that were disabled. */
	if (clk->banked_mnd_masks == NULL) {
		if (clk->count)
			local_clk_enable_reg(id);
		/* Enable only branches that were ON before. */
		for (i = 0; chld && chld[i] != C(NONE); i++) {
			struct clk_local *ch = &soc_clk_local_tbl[chld[i]];
			if (ch->count)
				local_clk_enable_reg(chld[i]);
		}
	}

release_lock:
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return rc;
}
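/*
 * Illustrative only: one way a public rate-setting entry point could drive
 * _local_clk_set_rate(). The freq_tbl field, freq_hz member, and FREQ_END
 * sentinel are assumptions made for this sketch and may not match the
 * driver's actual table definitions. No extra locking is needed here because
 * _local_clk_set_rate() takes local_clock_reg_lock internally.
 */
static int local_clk_set_rate(unsigned id, unsigned rate)
{
	struct clk_local *clk = &soc_clk_local_tbl[id];
	struct clk_freq_tbl *nf;

	/* Scan the (assumed) frequency table for an exact match. */
	for (nf = clk->freq_tbl; nf->freq_hz != FREQ_END; nf++)
		if (nf->freq_hz == rate)
			break;

	if (nf->freq_hz == FREQ_END)
		return -EINVAL;

	return _local_clk_set_rate(id, nf);
}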