/* Switch the AB8500 internal clock between its possible parents while live. */
static int ab_intclk_set_parent(struct clk *clk, struct clk *parent)
{
	int err;

	/* Nothing to reprogram while the clock is off; the generic
	 * clk_set_parent() records the new parent for the next enable. */
	if (!clk->enabled)
		return 0;

	/* Keep the new parent running before switching over to it. */
	err = __clk_enable(parent, clk->mutex);
	if (unlikely(err))
		return err;

	/* Route intclk: set the mux bit for ULPCLK, clear it otherwise. */
	if (parent == clk->parents[AB_INTCLK_PARENT_ULPCLK])
		err = ab8500_sysctrl_write(AB8500_SYSULPCLKCTRL1,
			AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK,
			(1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT));
	else
		err = ab8500_sysctrl_clear(AB8500_SYSULPCLKCTRL1,
			AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK);

	if (unlikely(err)) {
		/* Undo the reference taken on the new parent. */
		__clk_disable(parent, clk->mutex);
		return err;
	}

	/* Switch succeeded; drop our reference on the old parent. */
	__clk_disable(clk->parent, clk->mutex);

	return 0;
}
Example #2
0
/* Drop one usecount on @clk; gate it and walk up the tree on the last one. */
static void __clk_disable(struct clk *clk)
{
	/* Silently ignore NULL and error-pointer clocks. */
	if (clk == NULL || IS_ERR(clk))
		return;

	/* An unbalanced disable indicates a refcounting bug in a caller. */
	WARN_ON(!clk->usecount);

	if (--clk->usecount != 0)
		return;

	/* Last user gone: gate this clock, then release its parents. */
	if (clk->disable)
		clk->disable(clk);
	__clk_disable(clk->parent);
	__clk_disable(clk->secondary);
}
Example #3
0
static void __clk_disable(struct clk *clk)
{
    if (--clk->enabled == 0) {
        if (clk->ops && clk->ops->disable)
            clk->ops->disable(clk);

        if (clk->parent_periph)
            __clk_disable(clk->parent_periph);

        if (clk->parent_cluster)
            __clk_disable(clk->parent_cluster);
    }
}
/*
 * clk_set_parent - reparent @clk onto @parent.
 * The parent must appear in the clock's NULL-terminated parents list.
 * Returns 0 on success or a negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk **candidate;
	int err = 0;

	if (clk == NULL || clk->parents == NULL)
		return -EINVAL;

	/* Reject parents not listed as valid for this clock. */
	candidate = clk->parents;
	while (*candidate != parent) {
		if (*candidate == NULL)
			return -EINVAL;
		candidate++;
	}

	__clk_lock(clk, NO_LOCK, &flags);

	if (clk->ops != NULL && clk->ops->set_parent != NULL) {
		/* Hardware-specific reparenting hook. */
		err = clk->ops->set_parent(clk, parent);
	} else if (clk->enabled) {
		/* Generic path: keep the clock fed during the switch. */
		err = __clk_enable(parent, clk->mutex);
		if (!err)
			__clk_disable(clk->parent, clk->mutex);
	}

	if (!err)
		clk->parent = parent;

	__clk_unlock(clk, NO_LOCK, flags);

	return err;
}
static void prcc_kclk_disable(struct clk *clk)
{
	void __iomem *io_base = __io_address(clk->io_base);

	(void)__clk_enable(clk->clock, clk->mutex);
	writel(clk->cg_sel, (io_base + PRCC_KCKDIS));
	__clk_disable(clk->clock, clk->mutex);
}
Example #6
0
/* Public entry point: take the global clock lock and drop one reference. */
void clk_disable(struct clk *clk)
{
	unsigned long irq_state;

	spin_lock_irqsave(&clk_lock, irq_state);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clk_lock, irq_state);
}
/*
 * clk_disable - drop one enable reference on @clk.
 * NULL clocks are ignored; a warning fires when the clock is already
 * disabled, which indicates an unbalanced enable/disable in a caller.
 */
void clk_disable(struct clk *clk)
{
	if (clk == NULL)
		return;

	WARN_ON(!clk->enabled);
	__clk_disable(clk, NO_LOCK);
}
Example #8
0
/*
 * clk_disable - release one reference on @clk under the clocks mutex.
 * The clock (and, recursively, its parent tree) is gated once its
 * reference count reaches zero.  NULL/error pointers are ignored.
 */
void clk_disable(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	__clk_disable(clk);
	mutex_unlock(&clocks_mutex);
}
Example #9
0
void clk_disable(struct clk *clk)
{
    unsigned long flags;

    WARN_ON(!clk->enabled);

    spin_lock_irqsave(&clocks_lock, flags);
    __clk_disable(clk);
    spin_unlock_irqrestore(&clocks_lock, flags);
}
/*
 * mm_pi_enable - enable or disable the multimedia power island.
 * @pi:     power island descriptor
 * @enable: non-zero to enable, zero to disable
 *
 * Wraps gen_pi_ops.enable() with workarounds for two hardware errata;
 * each workaround is compiled in only when its CONFIG_* option is set.
 * Returns the value of gen_pi_ops.enable().
 */
static int mm_pi_enable(struct pi *pi, int enable)
{
	int ret;
	pi_dbg(pi->id, PI_LOG_EN_DIS, "%s\n", __func__);

#ifdef CONFIG_PLL1_8PHASE_OFF_ERRATUM
	/* Erratum workaround: hold a reference on the 8-phase PLL1
	 * reference clock across the island enable. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (enable && ref_8ph_en_pll1_clk)
			__clk_enable(ref_8ph_en_pll1_clk);
	}
#endif

#ifdef CONFIG_MM_FREEZE_VAR500M_ERRATUM
	/* Erratum workaround: force the VARVDD clock on before enabling. */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && enable)
		mm_varvdd_clk_en_override(true);
#endif
	ret = gen_pi_ops.enable(pi, enable);

#ifdef CONFIG_MM_FREEZE_VAR500M_ERRATUM
	#ifdef CONFIG_MM_312M_SOURCE_CLK
	/* 312M source clock: drop the override right after the enable. */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && enable)
		mm_varvdd_clk_en_override(false);
	#else
	/* Otherwise the override is released on the disable path. */
	if (is_pm_erratum(ERRATUM_MM_FREEZE_VAR500M) && !enable)
		mm_varvdd_clk_en_override(false);
	#endif
#endif

#ifdef CONFIG_PLL1_8PHASE_OFF_ERRATUM
	#ifdef CONFIG_MOVE_MM_CLK_TO_PLL0
	/* MM clocked from PLL0: release the reference after the enable. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (enable && ref_8ph_en_pll1_clk)
			__clk_disable(ref_8ph_en_pll1_clk);
	}
	#else
	/* MM still on PLL1: release the reference on the disable path. */
	if (is_pm_erratum(ERRATUM_PLL1_8PHASE_OFF)) {
		if (!enable && ref_8ph_en_pll1_clk)
			__clk_disable(ref_8ph_en_pll1_clk);
	}
	#endif
#endif
	return ret;
}
Example #11
0
/**
 * clk_disable - release one reference on @clk.
 * @clk: clock to disable; NULL and error pointers are ignored.
 *
 * Decrements the reference count under the clock spinlock and gates the
 * clock (recursively walking up the parent tree) once it drops to zero.
 */
void clk_disable(struct clk *clk)
{
	unsigned long irq_state;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clocks_lock, irq_state);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clocks_lock, irq_state);
}
/*
 * Drop one reference on @clk.  @current_lock is the lock the caller
 * already holds, so recursive calls into parents do not deadlock.
 */
void __clk_disable(struct clk *clk, void *current_lock)
{
	unsigned long flags;

	if (clk == NULL)
		return;

	__clk_lock(clk, current_lock, &flags);

	/* Gate the hardware only on the last-user transition 1 -> 0. */
	if (clk->enabled != 0 && --clk->enabled == 0) {
		if (clk->ops != NULL && clk->ops->disable != NULL)
			clk->ops->disable(clk);
		/* Release the references taken on the parents at enable. */
		__clk_disable(clk->parent, clk->mutex);
		__clk_disable(clk->bus_parent, clk->mutex);
	}

	__clk_unlock(clk, current_lock, flags);
}
/*
 * __clk_enable - take one reference on @clk, enabling it on 0 -> 1.
 * @clk:          clock to enable; NULL is a no-op that returns 0
 * @current_lock: lock already held by the caller, so recursive calls
 *                into parents do not deadlock
 *
 * Enables bus_parent, then parent, then the clock itself; on failure
 * the already-enabled parents are released in reverse order.
 * Returns 0 on success or a negative errno from a parent or ops enable.
 */
int __clk_enable(struct clk *clk, void *current_lock)
{
	int err;
	unsigned long flags;

	if (clk == NULL)
		return 0;

	__clk_lock(clk, current_lock, &flags);

	if (!clk->enabled) {
		/* First user: bring up the parent chain before this clock. */
		err = __clk_enable(clk->bus_parent, clk->mutex);
		if (unlikely(err))
			goto bus_parent_error;

		err = __clk_enable(clk->parent, clk->mutex);
		if (unlikely(err))
			goto parent_error;

		if ((clk->ops != NULL) && (clk->ops->enable != NULL)) {
			err = clk->ops->enable(clk);
			if (unlikely(err))
				goto enable_error;
		}
	}
	/* A reference is taken even when the clock was already running. */
	clk->enabled++;

	__clk_unlock(clk, current_lock, flags);

	return 0;

	/* Error unwinding: release parents in reverse acquisition order. */
enable_error:
	__clk_disable(clk->parent, clk->mutex);
parent_error:
	__clk_disable(clk->bus_parent, clk->mutex);
bus_parent_error:

	__clk_unlock(clk, current_lock, flags);

	return err;
}
Example #14
0
/* Release one usage reference on @clk and gate it once nobody uses it. */
static void __clk_disable(struct clk *clk)
{
	volatile u32 regVal;

	if (clk == NULL || IS_ERR(clk))
		return;

	/* Always-on clocks are never gated. */
	if (clk->flags & BCM_CLK_ALWAYS_ENABLED) {
		pr_info("Clock:%u cannot be disabled\n", clk->id);
		return;
	}

	/* Ignore disables when there is no outstanding reference. */
	if (clk->cnt == 0)
		return;

	/* Decrement usage count of this clock. */
	clk->cnt = clk->cnt - 1;

	/*
	 * clk->cnt == 0 means every module using this clock has stopped.
	 * Ideally clk_put() would fully free/disable it, but gate here to
	 * save power in case modules never call clk_put().
	 */
	if (clk->cnt != 0)
		return;

	/* Disable the parent, if any, unless it is always-on. */
	if (clk->parent &&
	    (clk->parent->flags & BCM_CLK_ALWAYS_ENABLED) == 0)
		__clk_disable(clk->parent);

	/* Clock-specific disable hook takes precedence. */
	if (clk->disable) {
		clk->disable(clk);
		return;
	}

	/* Generic implementation via the clock's enable register. */
	if (clk->enable_reg == NULL || clk->enable_bit_mask == 0) {
		pr_info("Invalid enable_reg / enable_bit_mask values for clk -> %u\n",
			clk->id);
		return;
	}

	regVal = readl(clk->enable_reg);
	if (clk->flags & BCM_CLK_INVERT_ENABLE)
		regVal |= clk->enable_bit_mask;
	else
		regVal &= ~(clk->enable_bit_mask);
	writel(regVal, clk->enable_reg);
}
Example #15
0
/* Drop one user from @clk; switch it off (and its chain) on the last one. */
static void __clk_disable(struct clk *clk)
{
	if (clk->users == 0) {
		/* More disables than enables: refcounting bug in a caller. */
		printk(KERN_ERR "%s: mismatched disable\n", clk->name);
		WARN_ON(1);
		return;
	}

	clk->users--;
	if (clk->users == 0 && clk->mode)
		clk->mode(clk, 0);

	/* Each enable also referenced the parent, so always release it. */
	if (clk->parent)
		__clk_disable(clk->parent);
}
static int prcc_kclk_enable(struct clk *clk)
{
	int err;
	void __iomem *io_base = __io_address(clk->io_base);

	err = __clk_enable(clk->clock, clk->mutex);
	if (err)
		return err;

	writel(clk->cg_sel, (io_base + PRCC_KCKEN));
	while (!(readl(io_base + PRCC_KCKSR) & clk->cg_sel))
		cpu_relax();

	__clk_disable(clk->clock, clk->mutex);

	return 0;
}
Example #17
0
/*
 * clk_set_parent - switch @clk onto a new primary @parent.
 * Returns 0 on success, -EINVAL for invalid arguments or a clock that
 * cannot be bypassed, -EBUSY when the clock has more than one user.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent;

	if (!clk || !parent)
		return -EINVAL;

	/* Only bypassable clocks may switch, and only onto a primary. */
	if (!clk_is_primary(parent) || !clk_is_bypassable(clk))
		return -EINVAL;

	/* if more than one user, reparenting is not allowed */
	if (clk->use_cnt > 1)
		return -EBUSY;

	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clk_lock, flags);
	old_parent = clk->parent;
	clk->parent = parent;
	/* Record whether the clock is now fed from the crystal. */
	if (clk_is_using_xtal(parent))
		clk->mode |= CLK_MODE_XTAL;
	else
		clk->mode &= (~CLK_MODE_XTAL);

	/* if the clock is active, migrate its enable reference */
	if (clk->use_cnt != 0) {
		clk->use_cnt--;
		/* enable the clock through the new parent */
		__clk_enable(clk);
		/* release the old parent */
		__clk_disable(old_parent);
	}
	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
Example #18
0
/*
 * Reparent @clk onto @parent.  Only bypassable clocks may switch, and
 * only onto a primary parent.  Returns 0 on success, -EINVAL for bad
 * arguments, -EBUSY when the clock has more than one user.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *prev_parent;
	unsigned long flags;

	if (!clk || !parent)
		return -EINVAL;

	if (!clk_is_primary(parent) || !clk_is_bypassable(clk))
		return -EINVAL;

	/* With more than one user a live switch is not allowed. */
	if (clk->use_cnt > 1)
		return -EBUSY;

	/* Already on the requested parent: nothing to do. */
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clk_lock, flags);

	prev_parent = clk->parent;
	clk->parent = parent;

	/* Track whether the clock is now fed from the crystal. */
	if (clk_is_using_xtal(parent))
		clk->mode |= CLK_MODE_XTAL;
	else
		clk->mode &= (~CLK_MODE_XTAL);

	if (clk->use_cnt != 0) {
		/* Re-enable through the new parent, then drop the old one. */
		clk->use_cnt--;
		__clk_enable(clk);
		__clk_disable(prev_parent);
	}

	spin_unlock_irqrestore(&clk_lock, flags);

	return 0;
}
Example #19
0
/* Drop one use of @clk, gating the appropriate hardware source at zero. */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	/* An unbalanced disable is a fatal refcounting bug. */
	BUG_ON(clk->use_cnt == 0);

	clk->use_cnt--;
	if (clk->use_cnt == 0) {
		if (clk_is_pll1(clk)) {
			/* PLL1 */
			chipcHw_pll1Disable();
		} else if (clk_is_pll2(clk)) {
			/* PLL2 */
			chipcHw_pll2Disable();
		} else if (clk_is_using_xtal(clk)) {
			/* Crystal-fed: only non-primary clocks have a bypass gate. */
			if (!clk_is_primary(clk))
				chipcHw_bypassClockDisable(clk->csp_id);
		} else {
			/* PLL-fed clock gate. */
			chipcHw_setClockDisable(clk->csp_id);
		}
	}

	/* Every enable also referenced the parent, so always release it. */
	if (clk->parent)
		__clk_disable(clk->parent);
}