Example #1
static int ti_adpll_init_clkout(struct ti_adpll_data *d,
				enum ti_adpll_clocks index,
				int output_index, int gate_bit,
				char *name, struct clk *clk0,
				struct clk *clk1)
{
	struct ti_adpll_clkout_data *co;
	struct clk_init_data init;
	struct clk_ops *ops;
	const char *parent_names[2];
	const char *child_name;
	struct clk *clock;
	int err;

	co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
	if (!co)
		return -ENOMEM;
	co->adpll = d;

	err = of_property_read_string_index(d->np,
					    "clock-output-names",
					    output_index,
					    &child_name);
	if (err)
		return err;

	ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

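	/*
	 * The clock framework consumes clk_init_data only during
	 * registration, so the stack-allocated init below is safe.  The
	 * clk_ops structure, however, is referenced for the clock's
	 * lifetime, which is why it is devm-allocated above.
	 */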
	init.name = child_name;
	init.ops = ops;
	init.flags = 0;
	co->hw.init = &init;
	parent_names[0] = __clk_get_name(clk0);
	parent_names[1] = __clk_get_name(clk1);
	init.parent_names = parent_names;
	init.num_parents = 2;

	ops->get_parent = ti_adpll_clkout_get_parent;
	ops->determine_rate = __clk_mux_determine_rate;
	if (gate_bit) {
		co->gate.lock = &d->lock;
		co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
		co->gate.bit_idx = gate_bit;
		ops->enable = ti_adpll_clkout_enable;
		ops->disable = ti_adpll_clkout_disable;
		ops->is_enabled = ti_adpll_clkout_is_enabled;
	}

	clock = devm_clk_register(d->dev, &co->hw);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register output %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    NULL);
}
Example #2
static int clk_prcmu_opp_prepare(struct clk_hw *hw)
{
	int err;
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (!clk->opp_requested) {
		err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
						(char *)__clk_get_name(hw->clk),
						100);
		if (err) {
			pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
				__func__, __clk_get_name(hw->clk));
			return err;
		}
		clk->opp_requested = 1;
	}

	err = prcmu_request_clock(clk->cg_sel, true);
	if (err) {
		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
					(char *)__clk_get_name(hw->clk));
		clk->opp_requested = 0;
		return err;
	}

	clk->is_prepared = 1;
	return 0;
}
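
The matching teardown reverses both steps. A minimal sketch of the corresponding unprepare path, assuming the same prcmu helpers as above (the exact upstream teardown may differ):

static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
{
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (prcmu_request_clock(clk->cg_sel, false)) {
		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
			__clk_get_name(hw->clk));
		return;
	}

	if (clk->opp_requested) {
		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
					(char *)__clk_get_name(hw->clk));
		clk->opp_requested = 0;
	}

	clk->is_prepared = 0;
}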
Example #3
static int ti_adpll_init_mux(struct ti_adpll_data *d,
			     enum ti_adpll_clocks index,
			     char *name, struct clk *clk0,
			     struct clk *clk1,
			     void __iomem *reg,
			     u8 shift)
{
	const char *child_name;
	const char *parents[2];
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
	if (!child_name)
		return -EINVAL;
	parents[0] = __clk_get_name(clk0);
	parents[1] = __clk_get_name(clk1);
	clock = clk_register_mux(d->dev, child_name, parents, 2, 0,
				 reg, shift, 1, 0, &d->lock);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register mux %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
				    clk_unregister_mux);
}
Example #4
/**
 * _divisor_to_clksel() - turn clksel integer divisor into a field value
 * @clk: OMAP struct clk to use
 * @div: integer divisor to search for
 *
 * Given a struct clk of a rate-selectable clksel clock, and a clock
 * divisor, find the corresponding register field value.  Returns the
 * register field value _before_ left-shifting (i.e., LSB is at bit
 * 0); or returns 0xFFFFFFFF (~0) upon error.
 */
static u32 _divisor_to_clksel(struct clk *clk, u32 div)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	struct clk *parent;

	/* should never happen */
	WARN_ON(div == 0);

	parent = __clk_get_parent(clk);
	clks = _get_clksel_by_parent(clk, parent);
	if (!clks)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		if (clkr->div == div)
			break;
	}

	if (!clkr->div) {
		pr_err("clock: %s: could not find divisor %d for parent %s\n",
		       __clk_get_name(clk), div, __clk_get_name(parent));
		return ~0;
	}

	return clkr->val;
}
Example #5
/**
 * _clksel_to_divisor() - turn clksel field value into integer divider
 * @clk: OMAP struct clk to use
 * @field_val: register field value to find
 *
 * Given a struct clk of a rate-selectable clksel clock, and a register field
 * value to search for, find the corresponding clock divisor.  The register
 * field value should be pre-masked and shifted down so the LSB is at bit 0
 * before calling.  Returns 0 on error or returns the actual integer divisor
 * upon success.
 */
static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	struct clk *parent;

	parent = __clk_get_parent(clk);
	clks = _get_clksel_by_parent(clk, parent);
	if (!clks)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		if (clkr->val == field_val)
			break;
	}

	if (!clkr->div) {
		/* This indicates a data error */
		WARN(1, "clock: %s: could not find fieldval %d for parent %s\n",
		     __clk_get_name(clk), field_val, __clk_get_name(parent));
		return 0;
	}

	return clkr->div;
}
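
Taken together with _divisor_to_clksel() from Example #4, the two helpers are inverses over valid table entries. An illustrative round-trip check, assuming a hypothetical valid clksel clock clk:

	u32 field = _divisor_to_clksel(clk, 2);

	if (field != ~0)
		WARN_ON(_clksel_to_divisor(clk, field) != 2);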
Example #6
/**
 * omap2_clksel_set_parent() - change a clock's parent clock
 * @clk: struct clk * of the child clock
 * @new_parent: struct clk * of the new parent clock
 *
 * This function is intended to be called only by the clock framework.
 * Change the parent clock of clock @clk to @new_parent.  This is
 * intended to be used while @clk is disabled.  This function does not
 * currently check the usecount of the clock, so if multiple drivers
 * are using the clock, and the parent is changed, they will all be
 * affected without any notification.  Returns -EINVAL upon error, or
 * 0 upon success.
 */
int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent)
{
	u32 field_val = 0;
	u32 parent_div;

	if (!clk->clksel || !clk->clksel_mask)
		return -EINVAL;

	parent_div = _get_div_and_fieldval(new_parent, clk, &field_val);
	if (!parent_div)
		return -EINVAL;

	_write_clksel_reg(clk, field_val);

	clk_reparent(clk, new_parent);

	/* CLKSEL clocks follow their parents' rates, divided by a divisor */
	clk->rate = __clk_get_rate(new_parent);

	if (parent_div > 0)
		clk->rate /= parent_div;

	pr_debug("clock: %s: set parent to %s (new rate %ld)\n",
		 __clk_get_name(clk),
		 __clk_get_name(__clk_get_parent(clk)),
		 __clk_get_rate(clk));

	return 0;
}
Example #7
/**
 * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being enabled
 *
 * Increment the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 1, the clockdomain will be "enabled."
 * Only needed for clocks that don't use omap2_dflt_clk_enable() as
 * their enable function pointer.  Passes along the return value of
 * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
 * clockdomain, or 0 if clock framework-based clockdomain control is
 * not implemented.
 */
int omap2_clkops_enable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return -EINVAL;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return 0;
	}

	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);

	return ret;
}
Example #8
/**
 * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * This function is also used by the DPLL3 M2 divider code.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;
	struct clk *parent;
	unsigned long parent_rate;
	const char *clk_name;

	parent = __clk_get_parent(clk);
	parent_rate = __clk_get_rate(parent);
	clk_name = __clk_get_name(clk);

	if (!clk->clksel || !clk->clksel_mask)
		return ~0;

	pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
		 clk_name, target_rate);

	*new_div = 1;

	clks = _get_clksel_by_parent(clk, parent);
	if (!clks)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			pr_err("clock: %s: clksel_rate table not sorted\n",
			       clk_name);

		last_div = clkr->div;

		test_rate = parent_rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	if (!clkr->div) {
		pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n",
		       clk_name, target_rate, __clk_get_name(parent));
		return ~0;
	}

	*new_div = clkr->div;

	pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div,
		 (parent_rate / clkr->div));

	return parent_rate / clkr->div;
}
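
A concrete illustration, assuming a hypothetical divider table {1, 2, 4, 8} and a 96 MHz parent: a target_rate of 24000000 walks the candidate rates 96 MHz, 48 MHz, 24 MHz and stops at the first one at or below the target, so the function stores 4 in *new_div and returns 24000000. Because the loop takes the first divider whose rate reaches the target, the table must be sorted smallest-divider-first, which is exactly what the sanity check above warns about.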
Example #9
static int s5p_mfc_clock_set_rate(struct s5p_mfc_dev *dev, unsigned long rate)
{
	struct clk *clk_child = NULL;

#if defined(CONFIG_SOC_EXYNOS5430)
	if (dev->id == 0) {
		clk_child = clk_get(dev->device, "dout_aclk_mfc0_333");
		if (IS_ERR(clk_child)) {
			pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
			return PTR_ERR(clk_child);
		}

	} else if (dev->id == 1) {
		clk_child = clk_get(dev->device, "dout_aclk_mfc1_333");
		if (IS_ERR(clk_child)) {
			pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
			return PTR_ERR(clk_child);
		}
	}
#elif defined(CONFIG_SOC_EXYNOS5422)
	clk_child = clk_get(dev->device, "dout_aclk_333");
	if (IS_ERR(clk_child)) {
		pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
		return PTR_ERR(clk_child);
	}
#elif defined(CONFIG_SOC_EXYNOS5433)
	/* Do not set clock rate */
	return 0;

	/*
	clk_child = clk_get(dev->device, "dout_aclk_mfc_400");
	if (IS_ERR(clk_child)) {
		pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
		return PTR_ERR(clk_child);
	}
	*/
#endif

#ifdef CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ
	spin_lock(&int_div_lock);
#endif
	if (clk_child)
		clk_set_rate(clk_child, rate * 1000);

#ifdef CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ
	spin_unlock(&int_div_lock);
#endif

	if (clk_child)
		clk_put(clk_child);

	return 0;
}
Example #10
/**
 * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
 * @clk: struct clk * belonging to the module
 *
 * If the necessary clocks for the OMAP hardware IP block that
 * corresponds to clock @clk are enabled, then wait for the module to
 * indicate readiness (i.e., to leave IDLE).  This code does not
 * belong in the clock code and will be moved in the medium term to
 * module-dependent code.  No return value.
 */
static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
{
	void __iomem *companion_reg, *idlest_reg;
	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
	s16 prcm_mod;
	int r;

	/* Not all modules have multiple clocks that their IDLEST depends on */
	if (clk->ops->find_companion) {
		clk->ops->find_companion(clk, &companion_reg, &other_bit);
		if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit)))
			return;
	}

	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
	if (r) {
		/* IDLEST register not in the CM module */
		_wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
				     idlest_val, __clk_get_name(clk->hw.clk));
	} else {
		omap_cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
					  idlest_bit);
	}
}
Example #11
static void xgene_clk_disable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock disabled\n", __clk_get_name(hw->clk));
		/* First put the CSR in reset */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data |= pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);

		/* Second disable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data &= ~pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);
}
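
The enable path is the mirror image of this: ungate the clock first, then release the CSR reset. A minimal sketch, assuming the same xgene_clk_read()/xgene_clk_write() helpers and register layout (the upstream version may differ in details such as settle delays):

static int xgene_clk_enable(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	unsigned long flags = 0;
	u32 data;

	if (pclk->lock)
		spin_lock_irqsave(pclk->lock, flags);

	if (pclk->param.csr_reg != NULL) {
		pr_debug("%s clock enabled\n", __clk_get_name(hw->clk));
		/* First enable the clock */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_clk_offset);
		data |= pclk->param.reg_clk_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_clk_offset);

		/* Second release the CSR from reset */
		data = xgene_clk_read(pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
		data &= ~pclk->param.reg_csr_mask;
		xgene_clk_write(data, pclk->param.csr_reg +
					pclk->param.reg_csr_offset);
	}

	if (pclk->lock)
		spin_unlock_irqrestore(pclk->lock, flags);

	return 0;
}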
Example #12
static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, ad->control_reg);

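	/*
	 * Poll the IDLEST bit until the APLL reports locked, giving up
	 * after MAX_APLL_WAIT_TRIES polls of roughly 1 us each.
	 */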
	while (i < MAX_APLL_WAIT_TRIES) {
		v = ti_clk_ll_ops->clk_readl(ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			__clk_get_name(clk->hw.clk));
		return -EBUSY;
	}

	return 0;
}
Example #13
static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk_div_table *table;
	const struct clk *parent;
	unsigned int shift;

	switch (core->type) {
	case CLK_TYPE_R8A77970_SD0H:
		table = cpg_sd0h_div_table;
		shift = 8;
		break;
	case CLK_TYPE_R8A77970_SD0:
		table = cpg_sd0_div_table;
		shift = 4;
		break;
	default:
		return rcar_gen3_cpg_clk_register(dev, core, info, clks, base,
						  notifiers);
	}

	parent = clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	return clk_register_divider_table(NULL, core->name,
					  __clk_get_name(parent), 0,
					  base + CPG_SD0CKCR,
					  shift, 4, 0, table, &cpg_lock);
}
Example #14
static int __init imx_amp_power_init(void)
{
	int i;
	void __iomem *shared_mem_base;

	if (!(imx_src_is_m4_enabled() && cpu_is_imx6sx()))
		return 0;

	amp_power_mutex = imx_sema4_mutex_create(0, MCC_POWER_SHMEM_NUMBER);

	shared_mem_base = ioremap_nocache(shared_mem_paddr, shared_mem_size);
	if (!shared_mem_base) {
		pr_err("Failed to map AMP shared memory!\n");
		return 0;
	}

	if (!amp_power_mutex) {
		pr_err("Failed to create sema4 mutex!\n");
		iounmap(shared_mem_base);
		return 0;
	}

	shared_mem = (struct imx_shared_mem *)shared_mem_base;

	for (i = 0; i < ARRAY_SIZE(clks_shared); i++) {
		shared_mem->imx_clk[i].self = clks[clks_shared[i]];
		shared_mem->imx_clk[i].ca9_enabled = 1;
		pr_debug("%d: name %s, addr 0x%x\n", i,
			__clk_get_name(shared_mem->imx_clk[i].self),
			(u32)&(shared_mem->imx_clk[i]));
	}
	/* enable amp power management */
	shared_mem->ca9_valid = SHARED_MEM_MAGIC_NUMBER;

	pr_info("A9-M4 sema4 num %d, A9-M4 magic number 0x%x - 0x%x.\n",
		amp_power_mutex->gate_num, shared_mem->ca9_valid,
		shared_mem->cm4_valid);

	return 0;
}
Example #15
static int ti_adpll_init_gate(struct ti_adpll_data *d,
			      enum ti_adpll_clocks index,
			      int output_index, char *name,
			      struct clk *parent_clock,
			      void __iomem *reg,
			      u8 bit_idx,
			      u8 clk_gate_flags)
{
	const char *child_name;
	const char *parent_name;
	struct clk *clock;

	child_name = ti_adpll_clk_get_name(d, output_index, name);
	if (!child_name)
		return -EINVAL;

	parent_name = __clk_get_name(parent_clock);
	clock = clk_register_gate(d->dev, child_name, parent_name, 0,
				  reg, bit_idx, clk_gate_flags,
				  &d->lock);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register gate %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    clk_unregister_gate);
}
Example #16
/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = __clk_get_name(clk->hw.clk);

	state <<= __ffs(dd->idlest_mask);

	while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
Example #17
/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock. First checks if the clock parent is
 * reference clock (in bypass mode, the rate of the clock can't be
 * changed) and proceeds with the rate change operation.  Returns 0
 * on success, or a negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
	    __clk_get_hw(dd->clk_ref))
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 __clk_get_name(hw->clk), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}
Example #18
static void __init of_ti_clockdomain_setup(struct device_node *node)
{
	struct clk *clk;
	struct clk_hw *clk_hw;
	const char *clkdm_name = node->name;
	int i;
	int num_clks;

	num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");

	for (i = 0; i < num_clks; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			pr_err("%s: Failed get %s' clock nr %d (%ld)\n",
			       __func__, node->full_name, i, PTR_ERR(clk));
			continue;
		}
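		/*
		 * to_clk_hw_omap() below is only valid for OMAP hardware
		 * clocks; basic CCF clocks (gate/mux/divider) cannot carry
		 * a clockdomain, hence the CLK_IS_BASIC guard.
		 */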
		if (__clk_get_flags(clk) & CLK_IS_BASIC) {
			pr_warn("can't setup clkdm for basic clk %s\n",
				__clk_get_name(clk));
			continue;
		}
		clk_hw = __clk_get_hw(clk);
		to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
		omap2_init_clk_clkdm(clk_hw);
	}
}
Example #19
static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
{
	int i;
	u32 val, lock_mask;
	void __iomem *lock_addr;

	if (!(pll->params->flags & TEGRA_PLL_USE_LOCK)) {
		udelay(pll->params->lock_delay);
		return 0;
	}

	lock_addr = pll->clk_base;
	if (pll->params->flags & TEGRA_PLL_LOCK_MISC)
		lock_addr += pll->params->misc_reg;
	else
		lock_addr += pll->params->base_reg;

	lock_mask = pll->params->lock_mask;

	for (i = 0; i < pll->params->lock_delay; i++) {
		val = readl_relaxed(lock_addr);
		if ((val & lock_mask) == lock_mask) {
			udelay(PLL_POST_LOCK_DELAY);
			return 0;
		}
		udelay(2); /* timeout = 2 * lock time */
	}

	pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
	       __clk_get_name(pll->hw.clk));

	return -ETIMEDOUT;
}
Example #20
static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
{
	int err;
	struct clk_prcmu *clk = to_clk_prcmu(hw);

	if (!clk->opp_requested) {
		err = prcmu_request_ape_opp_100_voltage(true);
		if (err) {
			pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
				__func__, __clk_get_name(hw->clk));
			return err;
		}
		clk->opp_requested = 1;
	}

	err = prcmu_request_clock(clk->cg_sel, true);
	if (err) {
		prcmu_request_ape_opp_100_voltage(false);
		clk->opp_requested = 0;
		return err;
	}

	clk->is_prepared = 1;
	return 0;
}
Example #21
/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware, and call into the OMAP
 * clockdomain code to "disable" the corresponding clockdomain if all
 * clocks/hwmods in that clockdomain are now disabled.  No return
 * value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (!clk->enable_reg) {
		/*
		 * 'independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}
Example #22
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
Example #23
static int mxs_saif_mclk_init(struct platform_device *pdev)
{
	struct mxs_saif *saif = platform_get_drvdata(pdev);
	struct device_node *np = pdev->dev.of_node;
	struct clk *clk;
	int ret;

	clk = clk_register_divider(&pdev->dev, "mxs_saif_mclk",
				   __clk_get_name(saif->clk), 0,
				   saif->base + SAIF_CTRL,
				   BP_SAIF_CTRL_BITCLK_MULT_RATE, 3,
				   0, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret == -EEXIST)
			return 0;
		dev_err(&pdev->dev, "failed to register mclk: %d\n", ret);
		return ret;
	}

	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (ret)
		return ret;

	return 0;
}
Example #24
/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, r);
			return r;
		}
	}

	parent = __clk_get_parent(hw->clk);

	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
Example #25
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = __clk_get_name(hw->clk);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return 0;
}
Example #26
static void clk_gate_disable(struct clk_hw *hw)
{
#if !defined(CONFIG_MTK_LEGACY) /* FIXME: only for bring up */
	printk("[CCF] %s: %s\n", __func__, __clk_get_name(hw->clk));
	return;
#endif /* !defined(CONFIG_MTK_LEGACY) */
	clk_gate_endisable(hw, 0);
}
Example #27
static void __init mvebu_clk_gating_setup(
	struct device_node *np, const struct mvebu_soc_descr *descr)
{
	struct mvebu_gating_ctrl *ctrl;
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		return;

	spin_lock_init(&ctrl->lock);

	/*
	 * Count, allocate, and register clock gates
	 */
	for (n = 0; descr[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kcalloc(ctrl->num_gates, sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates)) {
		kfree(ctrl);
		return;
	}

	for (n = 0; n < ctrl->num_gates; n++) {
		u8 flags = 0;
		const char *parent =
			(descr[n].parent) ? descr[n].parent : default_parent;

		/*
		 * On Armada 370, the DDR clock is a special case: it
		 * isn't taken by any driver, but should anyway be
		 * kept enabled, so we mark it as IGNORE_UNUSED for
		 * now.
		 */
		if (!strcmp(descr[n].name, "ddr"))
			flags |= CLK_IGNORE_UNUSED;

		ctrl->gates[n] = clk_register_gate(NULL, descr[n].name, parent,
				   flags, base, descr[n].bit_idx, 0, &ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}
	of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
}
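
For completeness, a sketch of the provider callback registered above, assuming the lookup matches cell 0 of the clock specifier against each gate's bit index (the upstream translation may differ):

static struct clk *mvebu_clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));

		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}

	return ERR_PTR(-ENODEV);
}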
Example #28
/**
 * _get_clksel_by_parent() - return clksel struct for a given clk & parent
 * @clk: OMAP struct clk ptr to inspect
 * @src_clk: OMAP struct clk ptr of the parent clk to search for
 *
 * Scan the struct clksel array associated with the clock to find
 * the element associated with the supplied parent clock address.
 * Returns a pointer to the struct clksel on success or NULL on error.
 */
static const struct clksel *_get_clksel_by_parent(struct clk *clk,
						  struct clk *src_clk)
{
	const struct clksel *clks;

	for (clks = clk->clksel; clks->parent; clks++)
		if (clks->parent == src_clk)
			break; /* Found the requested parent */

	if (!clks->parent) {
		/* This indicates a data problem */
		WARN(1, "clock: %s: could not find parent clock %s in clksel array\n",
		     __clk_get_name(clk), __clk_get_name(src_clk));
		return NULL;
	}

	return clks;
}
Example #29
static void clk_prcmu_unprepare(struct clk_hw *hw)
{
	struct clk_prcmu *clk = to_clk_prcmu(hw);
	if (prcmu_request_clock(clk->cg_sel, false))
		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
			__clk_get_name(hw->clk));
	else
		clk->is_prepared = 0;
}
Example #30
static int xgene_clk_is_enabled(struct clk_hw *hw)
{
	struct xgene_clk *pclk = to_xgene_clk(hw);
	u32 data;

	/* Without a CSR register the clock is assumed to be always on */
	if (pclk->param.csr_reg == NULL)
		return 1;

	pr_debug("%s clock checking\n", __clk_get_name(hw->clk));
	data = xgene_clk_read(pclk->param.csr_reg +
				pclk->param.reg_clk_offset);
	pr_debug("%s clock is %s\n", __clk_get_name(hw->clk),
		data & pclk->param.reg_clk_mask ? "enabled" :
						"disabled");

	return data & pclk->param.reg_clk_mask ? 1 : 0;
}