int omap2_clk_enable(struct clk *clk)
{
	int ret = 0;

	/*
	 * Only touch the hardware on the 0 -> 1 usecount transition:
	 * wake the clockdomain, then enable the parent, then this clock.
	 */
	if (clk->usecount++ == 0) {
		if (clk->clkdm)
			omap2_clkdm_clk_enable(clk->clkdm, clk);

		if (clk->parent) {
			ret = omap2_clk_enable(clk->parent);
			if (ret)
				goto err;
		}

		ret = _omap2_clk_enable(clk);
		if (ret) {
			if (clk->parent)
				omap2_clk_disable(clk->parent);

			goto err;
		}
	}
	return ret;

err:
	if (clk->clkdm)
		omap2_clkdm_clk_disable(clk->clkdm, clk);
	clk->usecount--;
	return ret;
}
int omap2_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
		if (clk->clkdm)
			omap2_clkdm_clk_enable(clk->clkdm, clk);
#endif

		if (clk->parent) {
			ret = omap2_clk_enable(clk->parent);
			if (ret)
				goto err;
		}

		ret = _omap2_clk_enable(clk);
		if (ret) {
			if (clk->parent)
				omap2_clk_disable(clk->parent);

			goto err;
		}
	}
	return ret;

err:
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
	if (clk->clkdm)
		omap2_clkdm_clk_disable(clk->clkdm, clk);
#endif
	clk->usecount--;
	return ret;
}
static int hsi_clk_enable(struct clk *clk)
{
#ifdef OMAP_HSI_EXAMPLE_PWR_CODE
	struct hsi_internal_clk *hsi_clk =
			container_of(clk, struct hsi_internal_clk, clk);
	int err;
	int i;

	for (i = 0; i < hsi_clk->n_childs; i++) {
		err = omap2_clk_enable(hsi_clk->childs[i]);
		if (unlikely(err < 0))
			goto rollback;
	}
#ifdef __HSI_CLK_FIX__
	/*
	 * FIXME: To be removed
	 * Wait until the HSI controller has the clocks stable
	 */
	check_hsi_active();
#endif
	hsi_restore_ctx(hsi_clk->pdev);
	if (!hsi_clk->rate_change)
		hsi_restore_mode(hsi_clk->pdev);
#endif
	return 0;

#ifdef OMAP_HSI_EXAMPLE_PWR_CODE
rollback:
	pr_err("Error on HSI clk child %d\n", i);
	for (i = i - 1; i >= 0; i--)
		omap2_clk_disable(hsi_clk->childs[i]);

	return err;
#endif
}
static int ssi_clk_enable(struct clk *clk)
{
	struct ssi_internal_clk *ssi_clk =
			container_of(clk, struct ssi_internal_clk, clk);
	int err;
	int i;

	for (i = 0; i < ssi_clk->n_childs; i++) {
		err = omap2_clk_enable(ssi_clk->childs[i]);
		if (unlikely(err < 0))
			goto rollback;
	}
#ifdef __HSI_CLK_FIX__
	/*
	 * FIXME: To be removed
	 * Wait until the SSI controller has the clocks stable
	 */
	check_ssi_active();
#endif
	ssi_restore_ctx(ssi_clk->pdev);
	if (!ssi_clk->rate_change)
		ssi_restore_mode(ssi_clk->pdev);

	return 0;

rollback:
	pr_err("Error on SSI clk child %d\n", i);
	for (i = i - 1; i >= 0; i--)
		omap2_clk_disable(ssi_clk->childs[i]);

	return err;
}
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk. If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself. Intended to be
 * called with the clockfw_lock spinlock held. Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = omap2_clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	ret = clk->ops->enable(clk);
	if (ret) {
		WARN(1, "clock: %s: could not enable: %d\n", clk->name, ret);
		goto oce_err3;
	}

	if (clk->clkdm) {
		ret = omap2_clkdm_clk_enable_post(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	return 0;

oce_err3:
	if (clk->clkdm)
		omap2_clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk. If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself. Intended to be
 * called with the clockfw_lock spinlock held. Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	/* If clockdomain supports hardware control, enable it */
	if (clk->clkdm)
		clkdm_allow_idle(clk->clkdm);

	return 0;

oce_err3:
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
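The kerneldoc above notes that omap2_clk_enable() is meant to be called with the clockfw_lock spinlock held. For orientation, a minimal sketch of the driver-facing wrapper that would take that lock before dispatching here is shown below; the arch_clock ops table and its .clk_enable hook are assumptions drawn from that note, not code quoted from the variants above.

/*
 * Sketch only: serialize clock-framework calls with clockfw_lock and
 * dispatch to the SoC-specific implementation (e.g. omap2_clk_enable()).
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = arch_clock->clk_enable(clk);	/* assumed indirection */
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}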
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	printk(KERN_DEBUG "Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else
		_omap2_clk_disable(clk);
	if (clk->clkdm != NULL)
		pwrdm_clkdm_state_switch(clk->clkdm);
}
int omap2_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		/* First user: enable the parent before this clock */
		if (likely((u32)clk->parent))
			ret = omap2_clk_enable(clk->parent);

		if (unlikely(ret != 0)) {
			clk->usecount--;
			return ret;
		}

		ret = _omap2_clk_enable(clk);

		/* Roll back the parent enable if this clock failed */
		if (unlikely(ret != 0) && clk->parent) {
			omap2_clk_disable(clk->parent);
			clk->usecount--;
		}
	}

	return ret;
}
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	if (!strcmp(clk->name, "bandgap_fclk"))
		return;

	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else {
		clk->ops->disable(clk);
	}
	if (clk->clkdm != NULL)
		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
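omap2_clk_disable_unused() only makes sense when invoked for clocks that still read as enabled in hardware but have no remaining users. A hedged sketch of the kind of late_initcall walker that would call it is shown below; the global clocks list, its node field, and the clockfw_lock usage are assumptions for illustration, not taken from the snippets above.

/*
 * Sketch only: at late_initcall time, walk every registered clock and
 * gate the ones nobody claimed.  List and field names are assumptions.
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	list_for_each_entry(ck, &clocks, node) {
		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		spin_lock_irqsave(&clockfw_lock, flags);
		omap2_clk_disable_unused(ck);
		spin_unlock_irqrestore(&clockfw_lock, flags);
	}

	return 0;
}
late_initcall(clk_disable_unused);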
/**
 * omap4_core_dpll_set_rate - set the rate for the CORE DPLL
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Program the CORE DPLL, including handling of EMIF frequency changes on M2
 * divider. Returns 0 on success, otherwise a negative error code.
 */
int omap4_core_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0, m2_div, m5_div;
	u32 mask, reg;
	u32 shadow_freq_cfg1 = 0, shadow_freq_cfg2 = 0;
	struct clk *new_parent;
	struct dpll_data *dd;

	if (!clk || !rate)
		return -EINVAL;

	if (!clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (rate == clk->rate)
		return 0;

	/* enable reference and bypass clocks */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	if (!dpll_core_m2_ck)
		dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	if (!dpll_core_m5x2_ck)
		dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	if (!gpmc_ick)
		gpmc_ick = clk_get(NULL, "gpmc_ick");

	/* Make sure MEMIF clkdm is in SW_WKUP & GPMC clocks are active */
	omap2_clkdm_wakeup(l3_emif_clkdm);
	omap2_clk_enable(gpmc_ick);

	/* FIXME set m3, m6 & m7 rates here? */

	/* check for bypass rate */
	if (rate == dd->clk_bypass->rate &&
	    clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)) {
		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2. Program EMIF timing
		 * parameters in EMIF shadow registers for bypass clock rate
		 * divided by 2
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 2 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
						dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program CM_DIV_M2_DPLL_CORE.DPLL_CLKOUT_DIV for divide by
		 * two and put DPLL_CORE into LP Bypass
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
						dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOW_POWER_BYPASS <<
			 OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2. Program EMIF timing
		 * parameters in EMIF shadow registers for rate divided
		 * by 2.
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * FIXME skipping bypass part of omap3_noncore_dpll_program.
		 * also x-loader's configure_core_dpll_no_lock bypasses
		 * DPLL_CORE directly through CM_CLKMODE_DPLL_CORE via MN
		 * bypass; no shadow register necessary!
		 */

		mask = (dd->mult_mask | dd->div1_mask);
		reg  = (dd->last_rounded_m << __ffs(dd->mult_mask)) |
		       ((dd->last_rounded_n - 1) << __ffs(dd->div1_mask));

		/* program mn divider values */
		omap4_prm_rmw_reg_bits(mask, reg, dd->mult_div1_reg);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 1 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(dpll_core_m5x2_ck->clksel_reg,
						dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program DPLL_CORE_M2_DIV with same value as the one already
		 * in direct register and lock DPLL_CORE
		 */
		m2_div = omap4_prm_read_bits_shift(dpll_core_m2_ck->clksel_reg,
						dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_ref;
	}

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			    & OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* clear GPMC_FREQ_UPDATE bit */
	shadow_freq_cfg2 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG2);
	shadow_freq_cfg2 &= ~1;
	__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

	/*
	 * Switch the parent clock in the hierarchy, and make sure that the
	 * new parent's usecount is correct. Note: we enable the new parent
	 * before disabling the old to avoid any unnecessary hardware
	 * disable->enable transitions.
	 */
	if (clk->usecount) {
		omap2_clk_enable(new_parent);
		omap2_clk_disable(clk->parent);
	}

	clk_reparent(clk, new_parent);
	clk->rate = rate;

	/* disable reference and bypass clocks */
	omap2_clk_disable(dd->clk_bypass);
	omap2_clk_disable(dd->clk_ref);

	/* Configure MEMIF domain back to HW_WKUP & let GPMC clocks idle */
	omap2_clkdm_allow_idle(l3_emif_clkdm);
	omap2_clk_disable(gpmc_ick);

	/*
	 * FIXME PRCM functional spec says we should set GPMC_FREQ_UPDATE bit
	 * here, but we're not even handling CM_SHADOW_FREQ_CONFIG2 at all.
	 */
	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
		       __func__);
		return -1;
	}

	return 0;
}
/**
 * omap3_noncore_dpll_set_rate - set non-core DPLL rate
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Set the DPLL CLKOUT to the target rate. If the DPLL can enter
 * low-power bypass, and the target rate is the bypass source clock
 * rate, then configure the DPLL for bypass. Otherwise, round the
 * target rate if it hasn't been done already, then program and lock
 * the DPLL. Returns -EINVAL upon error, or 0 upon success.
 */
int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *new_parent = NULL;
	u16 freqsel = 0;
	struct dpll_data *dd;
	int ret;
	unsigned long orig_rate = 0;

	if (!clk || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (rate == omap2_get_dpll_rate(clk))
		return 0;

	/*
	 * Ensure both the bypass and ref clocks are enabled prior to
	 * doing anything; we need the bypass clock running to reprogram
	 * the DPLL.
	 */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	if (dd->clk_bypass->rate == rate &&
	    (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);

		ret = _omap3_noncore_dpll_bypass(clk);
		if (!ret)
			new_parent = dd->clk_bypass;
	} else {
		/*
		 * On 4460, the MPU clk for frequencies higher than 1 GHz
		 * is sourced from CLKOUTX2_M3, instead of CLKOUT_M2, while
		 * the value of M3 is fixed to 1. Hence for frequencies
		 * higher than 1 GHz, lock the DPLL at half the rate so the
		 * CLKOUTX2_M3 then matches the requested rate.
		 */
		if (cpu_is_omap4460() && !strcmp(clk->name, "dpll_mpu_ck") &&
		    (rate > 1000000000)) {
			orig_rate = rate;
			rate = rate / 2;
		}

		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/* No freqsel on OMAP4 and OMAP3630 */
		if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
			freqsel = _omap3_dpll_compute_freqsel(clk,
						dd->last_rounded_n);
			if (!freqsel)
				WARN_ON(1);
		}

		/* Set the rate back to original for bookkeeping */
		if (orig_rate)
			rate = orig_rate;

		pr_debug("clock: %s: set rate: locking rate to %lu.\n",
			 clk->name, rate);

		ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
						 dd->last_rounded_n, freqsel,
						 orig_rate);
		if (!ret)
			new_parent = dd->clk_ref;
	}
	if (!ret) {
		/*
		 * Switch the parent clock in the hierarchy, and make sure
		 * that the new parent's usecount is correct. Note: we
		 * enable the new parent before disabling the old to avoid
		 * any unnecessary hardware disable->enable transitions.
		 */
		if (clk->usecount) {
			omap2_clk_enable(new_parent);
			omap2_clk_disable(clk->parent);
		}
		clk_reparent(clk, new_parent);
		clk->rate = rate;
	}
	omap2_clk_disable(dd->clk_ref);
	omap2_clk_disable(dd->clk_bypass);

	return 0;
}
/**
 * omap3_noncore_dpll_set_rate - set non-core DPLL rate
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Set the DPLL CLKOUT to the target rate. If the DPLL can enter
 * low-power bypass, and the target rate is the bypass source clock
 * rate, then configure the DPLL for bypass. Otherwise, round the
 * target rate if it hasn't been done already, then program and lock
 * the DPLL. Returns -EINVAL upon error, or 0 upon success.
 */
int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *new_parent = NULL;
	u16 freqsel = 0;
	struct dpll_data *dd;
	int ret;

	if (!clk || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (rate == omap2_get_dpll_rate(clk))
		return 0;

	/*
	 * Ensure both the bypass and ref clocks are enabled prior to
	 * doing anything; we need the bypass clock running to reprogram
	 * the DPLL.
	 */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	if (dd->clk_bypass->rate == rate &&
	    (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);

		ret = _omap3_noncore_dpll_bypass(clk);
		if (!ret)
			new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/* No freqsel on OMAP4 and OMAP3630 */
		if (!cpu_is_omap44xx() && !cpu_is_omap3630()) {
			freqsel = _omap3_dpll_compute_freqsel(clk,
						dd->last_rounded_n);
			if (!freqsel)
				WARN_ON(1);
		}

		pr_debug("clock: %s: set rate: locking rate to %lu.\n",
			 clk->name, rate);

		ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
						 dd->last_rounded_n, freqsel);
		if (!ret)
			new_parent = dd->clk_ref;
	}
	if (!ret) {
		/*
		 * Switch the parent clock in the hierarchy, and make sure
		 * that the new parent's usecount is correct. Note: we
		 * enable the new parent before disabling the old to avoid
		 * any unnecessary hardware disable->enable transitions.
		 */
		if (clk->usecount) {
			omap2_clk_enable(new_parent);
			omap2_clk_disable(clk->parent);
		}
		clk_reparent(clk, new_parent);
		clk->rate = rate;
	}
	omap2_clk_disable(dd->clk_ref);
	omap2_clk_disable(dd->clk_bypass);

	return 0;
}
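Callers do not normally invoke omap3_noncore_dpll_set_rate() directly; it is wired up as a DPLL clock's set_rate hook and reached through the generic clk API. A hedged usage sketch follows; the helper name and target rate are made up for illustration, and the "dpll_mpu_ck" clock name is borrowed from the 4460 special case in the earlier variant.

/*
 * Sketch only: request a new MPU DPLL rate through the generic clk API.
 * example_scale_mpu() is a hypothetical caller, not part of the code above.
 */
static int example_scale_mpu(unsigned long target_rate)
{
	struct clk *dpll_mpu_ck;
	long rounded;
	int ret;

	dpll_mpu_ck = clk_get(NULL, "dpll_mpu_ck");
	if (IS_ERR(dpll_mpu_ck))
		return PTR_ERR(dpll_mpu_ck);

	rounded = clk_round_rate(dpll_mpu_ck, target_rate);
	ret = clk_set_rate(dpll_mpu_ck, rounded);

	clk_put(dpll_mpu_ck);
	return ret;
}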
static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *new_parent = NULL;
	u16 freqsel;
	struct dpll_data *dd;
	int ret;

	if (!clk || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (rate == omap2_get_dpll_rate(clk))
		return 0;

	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	if (dd->clk_bypass->rate == rate &&
	    (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);

		ret = _omap3_noncore_dpll_bypass(clk);
		if (!ret)
			new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			omap2_dpll_round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		if (!freqsel)
			WARN_ON(1);

		pr_debug("clock: %s: set rate: locking rate to %lu.\n",
			 clk->name, rate);

		ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
						 dd->last_rounded_n, freqsel);
		if (!ret)
			new_parent = dd->clk_ref;
	}
	if (!ret) {
		if (clk->usecount) {
			omap2_clk_enable(new_parent);
			omap2_clk_disable(clk->parent);
		}
		clk_reparent(clk, new_parent);
		clk->rate = rate;
	}
	omap2_clk_disable(dd->clk_ref);
	omap2_clk_disable(dd->clk_bypass);

	return 0;
}
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk. If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself. Intended to be
 * called with the clockfw_lock spinlock held. Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	if (clk->usecount == 127) {
		/* 20110626 [email protected] usecount range check for + value also */
		WARN(1, "clock: %s: omap2_clk_enable() called, but usecount "
		     "already 127?", clk->name);
		return -EINVAL;
	}

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = omap2_clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	ret = clk->ops->enable(clk);
	if (ret) {
		WARN(1, "clock: %s: could not enable: %d\n", clk->name, ret);
		goto oce_err3;
	}

	if (clk->clkdm) {
		ret = omap2_clkdm_clk_enable_post(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	return 0;

oce_err3:
	if (clk->clkdm)
		omap2_clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}