/* Recursively power on a clock and its cluster/peripheral parents.
 * Hardware is only touched on the 0 -> 1 refcount transition. */
static void __clk_enable(struct clk *clk)
{
	if (clk->enabled++ != 0)
		return;

	if (clk->parent_cluster)
		__clk_enable(clk->parent_cluster);
	if (clk->parent_periph)
		__clk_enable(clk->parent_periph);

	if (clk->ops && clk->ops->enable)
		clk->ops->enable(clk);
}
/*
 * Bump the use count and, on first use, bring up the parent chain and
 * then the clock itself.  NULL or error pointers yield -EINVAL.
 */
static int __clk_enable(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (clk->usecount++ != 0)
		return 0;

	/* Parent return codes are deliberately ignored: a missing
	 * (NULL) parent is the common, harmless case. */
	__clk_enable(clk->parent);
	__clk_enable(clk->secondary);

	if (clk->enable)
		clk->enable(clk);

	return 0;
}
/*
 * clk_set_parent() - reparent @clk onto @parent.
 *
 * @parent must appear in the NULL-terminated clk->parents candidate
 * array, otherwise -EINVAL.  If the clock provides a set_parent op it
 * is used; otherwise, when the clock is currently enabled, the new
 * parent is enabled before the old one is released so the clock never
 * loses its source mid-switch.  Returns 0 on success or a negative
 * errno from the op / parent enable.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int err = 0;
	unsigned long flags;
	struct clk **p;

	if ((clk == NULL) || (clk->parents == NULL))
		return -EINVAL;

	/* Reject parents that are not listed as candidates. */
	for (p = clk->parents; *p != parent; p++) {
		if (*p == NULL) /* invalid parent */
			return -EINVAL;
	}

	__clk_lock(clk, NO_LOCK, &flags);

	if ((clk->ops != NULL) && (clk->ops->set_parent != NULL)) {
		err = clk->ops->set_parent(clk, parent);
		if (err)
			goto unlock_and_return;
	} else if (clk->enabled) {
		/* Enable-before-disable keeps the clock fed while the
		 * reference moves from the old parent to the new one. */
		err = __clk_enable(parent, clk->mutex);
		if (err)
			goto unlock_and_return;
		__clk_disable(clk->parent, clk->mutex);
	}

	clk->parent = parent;

unlock_and_return:
	__clk_unlock(clk, NO_LOCK, flags);
	return err;
}
/* Public enable entry point: reject NULL, then take the no-lock-held
 * path into the core enable helper. */
int clk_enable(struct clk *clk)
{
	return clk ? __clk_enable(clk, NO_LOCK) : -EINVAL;
}
/* Walk up to the root enabling every ancestor, then gate this clock
 * on via its mode callback when the first user appears. */
static void __clk_enable(struct clk *clk)
{
	struct clk *parent = clk->parent;

	if (parent)
		__clk_enable(parent);

	/* 0 -> 1 transition: switch the clock on. */
	if (clk->users++ == 0) {
		if (clk->mode)
			clk->mode(clk, 1);
	}
}
/*
 * ab_intclk_set_parent() - switch the AB8500 internal clock between
 * its candidate parents via the SYSULPCLKINTSEL mux.
 *
 * Nothing is written to hardware unless the clock is currently
 * enabled.  The new parent is enabled first, the mux bit is then set
 * (ULPCLK parent) or cleared (other parent) through the AB8500
 * sysctrl interface, and only then is the old parent's reference
 * dropped, so the internal clock is never left without a source.
 * On a sysctrl failure the new parent's reference is released again.
 */
static int ab_intclk_set_parent(struct clk *clk, struct clk *parent)
{
	int err;

	if (!clk->enabled)
		return 0;

	err = __clk_enable(parent, clk->mutex);
	if (unlikely(err))
		goto parent_enable_error;

	if (parent == clk->parents[AB_INTCLK_PARENT_ULPCLK]) {
		/* Select ULP clock as the internal clock source. */
		err = ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
			AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
			(1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
	} else {
		err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
			AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK);
	}
	if (unlikely(err))
		goto config_error;

	/* Hand-over complete: release the old parent. */
	__clk_disable(clk->parent, clk->mutex);

	return 0;

config_error:
	__clk_disable(parent, clk->mutex);
parent_enable_error:
	return err;
}
/*
 * Gate a PRCC kernel clock off.  The bus clock must be running while
 * KCKDIS is written, so it is enabled around the register access; the
 * enable result is intentionally discarded on this best-effort
 * disable path.
 */
static void prcc_kclk_disable(struct clk *clk)
{
	void __iomem *io_base = __io_address(clk->io_base);

	(void)__clk_enable(clk->clock, clk->mutex);

	writel(clk->cg_sel, io_base + PRCC_KCKDIS);

	__clk_disable(clk->clock, clk->mutex);
}
int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clk_lock, flags); __clk_enable(clk); spin_unlock_irqrestore(&clk_lock, flags); return 0; }
/* Thin locked wrapper: take the enable spinlock, delegate to
 * __clk_enable(), and propagate its status to the caller. */
int clk_enable(struct clk *clk)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
/*
 * __clk_enable() - core enable with per-clock locking and full error
 * unwinding.
 *
 * @clk:          clock to enable; NULL is a no-op returning 0
 * @current_lock: lock already held by the caller, forwarded to
 *                __clk_lock() so the recursion does not self-deadlock
 *
 * On the first enable the bus parent, then the parent, then the
 * clock's own ops->enable are brought up in that order; any failure
 * unwinds in exact reverse order before the error is returned.  The
 * reference count is only incremented after everything succeeded.
 */
int __clk_enable(struct clk *clk, void *current_lock)
{
	int err;
	unsigned long flags;

	if (clk == NULL)
		return 0;

	__clk_lock(clk, current_lock, &flags);

	if (!clk->enabled) {
		err = __clk_enable(clk->bus_parent, clk->mutex);
		if (unlikely(err))
			goto bus_parent_error;

		err = __clk_enable(clk->parent, clk->mutex);
		if (unlikely(err))
			goto parent_error;

		if ((clk->ops != NULL) && (clk->ops->enable != NULL)) {
			err = clk->ops->enable(clk);
			if (unlikely(err))
				goto enable_error;
		}
	}

	clk->enabled++;

	__clk_unlock(clk, current_lock, flags);

	return 0;

enable_error:
	__clk_disable(clk->parent, clk->mutex);
parent_error:
	__clk_disable(clk->bus_parent, clk->mutex);
bus_parent_error:
	__clk_unlock(clk, current_lock, flags);

	return err;
}
/*
 * clk_enable() - take a reference on @clk, powering it (and,
 * recursively, its parents) up on the first reference.  Serialized by
 * clocks_mutex.  Returns -EINVAL for a NULL or error pointer,
 * otherwise the status of __clk_enable().
 */
int clk_enable(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	ret = __clk_enable(clk);
	mutex_unlock(&clocks_mutex);

	return ret;
}
/**
 * clk_enable - take a reference on a clock, enabling it when the
 * count rises from zero; the parent tree is brought up recursively by
 * __clk_enable().
 * @clk: clock to enable
 *
 * Serialized by the IRQ-safe clocks_lock spinlock.  Returns -EINVAL
 * for a NULL or IS_ERR() clock, else __clk_enable()'s status.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clocks_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clocks_lock, flags);

	return ret;
}
static int __clk_enable(struct clk *clk) { int ret = 0; volatile u32 regVal; if (clk == NULL || IS_ERR(clk)) return -EINVAL; if (clk->flags & BCM_CLK_ALWAYS_ENABLED) { pr_info("Clock %u cannot be enabled. it is always enabled\n", clk->id); } else { /* Enable the clock, only if the cnt is 0, which means no one is using it, so mostly it is disabled. */ if (clk->cnt++ == 0) { /* enable parent, if any */ if (clk->parent && ((clk->parent->flags & BCM_CLK_ALWAYS_ENABLED) == 0)) __clk_enable(clk->parent); if (clk->enable) ret = clk->enable(clk); else { /* Generic implementation */ if (clk->enable_reg == NULL || clk->enable_bit_mask == 0) { pr_info ("Invalid enable_reg / enable_bit_mask values for clk -> %u\n", clk->id); return -EINVAL; } regVal = readl(clk->enable_reg); if (clk->flags & BCM_CLK_INVERT_ENABLE) { regVal &= ~(clk->enable_bit_mask); writel(regVal, clk->enable_reg); } else { regVal |= clk->enable_bit_mask; writel(regVal, clk->enable_reg); } ret = 0; } } } return ret; }
/*
 * Enable a PRCC kernel clock.  The bus clock must be running while
 * KCKEN is programmed, so it is held enabled for the duration of the
 * access and released afterwards.
 *
 * NOTE(review): the KCKSR poll below has no timeout — if the hardware
 * never reports the clock running this spins forever.
 */
static int prcc_kclk_enable(struct clk *clk)
{
	int err;
	void __iomem *io_base = __io_address(clk->io_base);

	err = __clk_enable(clk->clock, clk->mutex);
	if (err)
		return err;

	writel(clk->cg_sel, io_base + PRCC_KCKEN);

	/* Wait for the hardware to confirm the kernel clock is on. */
	while (!(readl(io_base + PRCC_KCKSR) & clk->cg_sel))
		cpu_relax();

	__clk_disable(clk->clock, clk->mutex);

	return 0;
}
/*
 * mm_pi_enable() - enable/disable the multimedia power island,
 * wrapping the generic pi enable with the PLL1-8-phase-off and
 * MM-freeze-VAR500M erratum workarounds.
 *
 * @pi:     power island being changed
 * @enable: nonzero to enable, zero to disable
 *
 * Which workaround branches are compiled in depends on the CONFIG_*
 * combination; at runtime they additionally gate on is_pm_erratum().
 * Returns the status of gen_pi_ops.enable().
 */
static int mm_pi_enable(struct pi *pi, int enable)
{
	int ret;

	pi_dbg(pi->id, PI_LOG_EN_DIS, "%s\n", __func__);

#ifdef CONFIG_PLL1_8PHASE_OFF_ERRATUM
	/* Hold the 8-phase reference clock across the enable. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (enable && ref_8ph_en_pll1_clk)
			__clk_enable(ref_8ph_en_pll1_clk);
	}
#endif
#ifdef CONFIG_MM_FREEZE_VAR500M_ERRATUM
	/* Force the varvdd clock on across the island transition. */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && enable)
		mm_varvdd_clk_en_override(true);
#endif

	ret = gen_pi_ops.enable(pi, enable);

#ifdef CONFIG_MM_FREEZE_VAR500M_ERRATUM
#ifdef CONFIG_MM_312M_SOURCE_CLK
	/* With the 312M source clock the override is released right
	 * after a successful enable... */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && enable)
		mm_varvdd_clk_en_override(false);
#else
	/* ...otherwise it is released on the disable path instead. */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && !enable)
		mm_varvdd_clk_en_override(false);
#endif
#endif
#ifdef CONFIG_PLL1_8PHASE_OFF_ERRATUM
#ifdef CONFIG_MOVE_MM_CLK_TO_PLL0
	/* MM clocks on PLL0: drop the reference right after enabling. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (enable && ref_8ph_en_pll1_clk)
			__clk_disable(ref_8ph_en_pll1_clk);
	}
#else
	/* Otherwise keep it held until the island is disabled. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (!enable && ref_8ph_en_pll1_clk)
			__clk_disable(ref_8ph_en_pll1_clk);
	}
#endif
#endif
	return ret;
}
static void __clk_enable(struct clk *clk) { if (!clk) return; if (clk->parent) __clk_enable(clk->parent); if (clk->use_cnt++ == 0) { if (clk_is_pll1(clk)) { chipcHw_pll1Enable(clk->rate_hz, 0); } else if (clk_is_pll2(clk)) { chipcHw_pll2Enable(clk->rate_hz); } else if (clk_is_using_xtal(clk)) { if (!clk_is_primary(clk)) chipcHw_bypassClockEnable(clk->csp_id); } else { chipcHw_setClockEnable(clk->csp_id); } } }
/* Bring a clock online, propagating up the parent chain first.
 * Hardware is touched only when the use count rises from zero; the
 * action taken depends on the clock's source type. */
static void __clk_enable(struct clk *clk)
{
	if (!clk)
		return;

	/* Parents must be running before this clock is gated on. */
	if (clk->parent)
		__clk_enable(clk->parent);

	if (clk->use_cnt++ > 0)
		return;

	if (clk_is_pll1(clk)) {
		/* PLL1 */
		chipcHw_pll1Enable(clk->rate_hz, 0);
	} else if (clk_is_pll2(clk)) {
		/* PLL2 */
		chipcHw_pll2Enable(clk->rate_hz);
	} else if (clk_is_using_xtal(clk)) {
		/* Crystal-sourced: primary clocks need no bypass gate. */
		if (!clk_is_primary(clk))
			chipcHw_bypassClockEnable(clk->csp_id);
	} else {
		/* Source is a PLL. */
		chipcHw_setClockEnable(clk->csp_id);
	}
}
int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; struct clk *old_parent; if (!clk || !parent) return -EINVAL; if (!clk_is_primary(parent) || !clk_is_bypassable(clk)) return -EINVAL; if (clk->use_cnt > 1) return -EBUSY; if (clk->parent == parent) return 0; spin_lock_irqsave(&clk_lock, flags); old_parent = clk->parent; clk->parent = parent; if (clk_is_using_xtal(parent)) clk->mode |= CLK_MODE_XTAL; else clk->mode &= (~CLK_MODE_XTAL); if (clk->use_cnt != 0) { clk->use_cnt--; __clk_enable(clk); __clk_disable(old_parent); } spin_unlock_irqrestore(&clk_lock, flags); return 0; }
int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; struct clk *old_parent; if (!clk || !parent) return -EINVAL; if (!clk_is_primary(parent) || !clk_is_bypassable(clk)) return -EINVAL; /* if more than one user, parent is not allowed */ if (clk->use_cnt > 1) return -EBUSY; if (clk->parent == parent) return 0; spin_lock_irqsave(&clk_lock, flags); old_parent = clk->parent; clk->parent = parent; if (clk_is_using_xtal(parent)) clk->mode |= CLK_MODE_XTAL; else clk->mode &= (~CLK_MODE_XTAL); /* if clock is active */ if (clk->use_cnt != 0) { clk->use_cnt--; /* enable clock with the new parent */ __clk_enable(clk); /* disable the old parent */ __clk_disable(old_parent); } spin_unlock_irqrestore(&clk_lock, flags); return 0; }