Example #1
/**
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @rate: target rate for the DPLL
 * @min_rate: lowest rate acceptable for the DPLL
 * @max_rate: highest rate acceptable for the DPLL
 * @best_parent_rate: pointer for returning best parent rate
 * @best_parent_clk: pointer for returning best parent clock
 *
 * Determines which DPLL mode to use for reaching a desired target rate.
 * Checks whether the DPLL shall be in bypass or locked mode, and if
 * locked, calculates the M,N values for the DPLL via round-rate.
 * Returns a positive clock rate on success, or a negative error value
 * on failure.
 */
long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long min_rate,
				       unsigned long max_rate,
				       unsigned long *best_parent_rate,
				       struct clk_hw **best_parent_clk)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (__clk_get_rate(dd->clk_bypass) == rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		*best_parent_clk = __clk_get_hw(dd->clk_bypass);
	} else {
		rate = omap2_dpll_round_rate(hw, rate, best_parent_rate);
		*best_parent_clk = __clk_get_hw(dd->clk_ref);
	}

	*best_parent_rate = rate;

	return rate;
}
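For context: the common clock framework only invokes a determine_rate callback once it is installed in a clk_ops table. A minimal sketch of that wiring, assuming companion OMAP3 non-core DPLL handlers of this kernel era (the exact ops set is an assumption, not the verbatim upstream table):

/* Sketch only: names taken from the surrounding examples where possible. */
static const struct clk_ops dpll_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.round_rate	= &omap2_dpll_round_rate,
	.determine_rate	= &omap3_noncore_dpll_determine_rate,
};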
Example #2
/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	if (clk_hw_get_rate(hw) ==
	    clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
		WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != __clk_get_hw(dd->clk_ref));
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
Example #3
/**
 * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Determines which DPLL mode to use for reaching a desired rate.
 * Checks whether the DPLL shall be in bypass or locked mode, and if
 * locked, calculates the M,N values for the DPLL via round-rate.
 * Returns 0 on success and a negative error value otherwise.
 */
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
	} else {
		req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
						&req->best_parent_rate);
		req->best_parent_hw = __clk_get_hw(dd->clk_ref);
	}

	req->best_parent_rate = req->rate;

	return 0;
}
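The clk_rate_request variant above packs the five loose parameters of Example #1 into one object. For reference, a sketch of the field set this callback reads and fills, an assumption about this era's layout (check include/linux/clk-provider.h in your tree):

struct clk_rate_request {
	unsigned long rate;		/* requested rate; updated in place */
	unsigned long min_rate;		/* lower boundary */
	unsigned long max_rate;		/* upper boundary */
	unsigned long best_parent_rate;	/* chosen parent's rate */
	struct clk_hw *best_parent_hw;	/* chosen parent */
};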
Example #4
/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock. First checks that the clock's parent is
 * the reference clock (in bypass mode, the rate of the clock can't be
 * changed), then proceeds with the rate change operation. Returns 0
 * on success, or a negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
	    __clk_get_hw(dd->clk_ref))
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 __clk_get_name(hw->clk), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}
Example #5
/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. In case of a failure (clk-ref
 * or clk-bypass is missing), the clock is added to the retry list and
 * initialization is retried at a later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
                                  struct device_node *node)
{
    struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
    struct dpll_data *dd = clk_hw->dpll_data;
    struct clk *clk;

    clk = of_clk_get(node, 0);
    if (IS_ERR(clk)) {
        pr_debug("clk-ref missing for %s, retry later\n",
                 node->name);
        if (!ti_clk_retry_init(node, hw, _register_dpll))
            return;

        goto cleanup;
    }

    dd->clk_ref = __clk_get_hw(clk);

    clk = of_clk_get(node, 1);

    if (IS_ERR(clk)) {
        pr_debug("clk-bypass missing for %s, retry later\n",
                 node->name);
        if (!ti_clk_retry_init(node, hw, _register_dpll))
            return;

        goto cleanup;
    }

    dd->clk_bypass = __clk_get_hw(clk);

    /* register the clock */
    clk = clk_register(NULL, &clk_hw->hw);

    if (!IS_ERR(clk)) {
        omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
        kfree(clk_hw->hw.init->parent_names);
        kfree(clk_hw->hw.init);
        return;
    }

cleanup:
    kfree(clk_hw->dpll_data);
    kfree(clk_hw->hw.init->parent_names);
    kfree(clk_hw->hw.init);
    kfree(clk_hw);
}
Example #6
/**
 * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
 *         from the HSDivider PWRDN problem.  Implements Errata ID: i556.
 * @clk: DPLL output struct clk
 *
 * 3630 only: the dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
 * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with their reset
 * value after their respective PWRDN bits are set.  Any dummy write
 * (any value different from the read value) to the corresponding
 * CM_CLKSEL register will refresh the dividers.
 */
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
	struct clk_divider *parent;
	struct clk_hw *parent_hw;
	u32 dummy_v, orig_v;
	struct clk_hw_omap *omap_clk = to_clk_hw_omap(clk);
	int ret;

	/* Clear PWRDN bit of HSDIVIDER */
	ret = omap2_dflt_clk_enable(clk);

	parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
	parent = to_clk_divider(parent_hw);

	/* Restore the dividers */
	if (!ret) {
		orig_v = omap2_clk_readl(omap_clk, parent->reg);
		dummy_v = orig_v;

		/* Write any other value different from the Read value */
		dummy_v ^= (1 << parent->shift);
		omap2_clk_writel(dummy_v, omap_clk, parent->reg);

		/* Write the original divider */
		omap2_clk_writel(orig_v, omap_clk, parent->reg);
	}

	return ret;
}
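Examples #8 and #9 below repeat this workaround with different register accessors. Stripped of the driver plumbing, the i556 sequence reduces to the following sketch, where reg and shift are hypothetical stand-ins for the HSDIVIDER's CM_CLKSEL register and divider field offset:

/* i556 workaround core (sketch): toggle one divider bit, then write
 * the original value back so the divider reloads its programmed value. */
static void hsdiv_refresh(void __iomem *reg, u32 shift)
{
	u32 orig_v = readl(reg);

	writel(orig_v ^ BIT(shift), reg);	/* dummy write, any different value */
	writel(orig_v, reg);			/* restore the real divider */
}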
Example #7
static void __init of_ti_clockdomain_setup(struct device_node *node)
{
	struct clk *clk;
	struct clk_hw *clk_hw;
	const char *clkdm_name = node->name;
	int i;
	int num_clks;

	num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");

	for (i = 0; i < num_clks; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			pr_err("%s: Failed get %s' clock nr %d (%ld)\n",
			       __func__, node->full_name, i, PTR_ERR(clk));
			continue;
		}
		if (__clk_get_flags(clk) & CLK_IS_BASIC) {
			pr_warn("can't setup clkdm for basic clk %s\n",
				__clk_get_name(clk));
			continue;
		}
		clk_hw = __clk_get_hw(clk);
		to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
		omap2_init_clk_clkdm(clk_hw);
	}
}
Example #8
/**
 * omap36xx_gate_clk_enable_with_hsdiv_restore - enable clocks suffering
 *         from the HSDivider PWRDN problem.  Implements Errata ID: i556.
 * @clk: DPLL output struct clk
 *
 * 3630 only: the dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
 * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with their reset
 * value after their respective PWRDN bits are set.  Any dummy write
 * (any value different from the read value) to the corresponding
 * CM_CLKSEL register will refresh the dividers.
 */
static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
	struct clk_divider *parent;
	struct clk_hw *parent_hw;
	u32 dummy_v, orig_v;
	int ret;

	/* Clear PWRDN bit of HSDIVIDER */
	ret = omap2_dflt_clk_enable(clk);

	/* Parent is the x2 node, get parent of parent for the m2 div */
	parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
	parent = to_clk_divider(parent_hw);

	/* Restore the dividers */
	if (!ret) {
		orig_v = ti_clk_ll_ops->clk_readl(parent->reg);
		dummy_v = orig_v;

		/* Write any other value different from the Read value */
		dummy_v ^= (1 << parent->shift);
		ti_clk_ll_ops->clk_writel(dummy_v, parent->reg);

		/* Write the original divider */
		ti_clk_ll_ops->clk_writel(orig_v, parent->reg);
	}

	return ret;
}
Example #9
/**
 * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
 *         from the HSDivider PWRDN problem.  Implements Errata ID: i556.
 * @clk: DPLL output struct clk
 *
 * 3630 only: the dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
 * dpll4_m5_ck & dpll4_m6_ck dividers get loaded with their reset
 * value after their respective PWRDN bits are set.  Any dummy write
 * (any value different from the read value) to the corresponding
 * CM_CLKSEL register will refresh the dividers.
 */
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
{
	struct clk_hw_omap *parent;
	struct clk_hw *parent_hw;
	u32 dummy_v, orig_v, clksel_shift;
	int ret;

	/* Clear PWRDN bit of HSDIVIDER */
	ret = omap2_dflt_clk_enable(clk);

	parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
	parent = to_clk_hw_omap(parent_hw);

	/* Restore the dividers */
	if (!ret) {
		clksel_shift = __ffs(parent->clksel_mask);
		orig_v = __raw_readl(parent->clksel_reg);
		dummy_v = orig_v;

		/* Write any other value different from the Read value */
		dummy_v ^= (1 << clksel_shift);
		__raw_writel(dummy_v, parent->clksel_reg);

		/* Write the original divider */
		__raw_writel(orig_v, parent->clksel_reg);
	}

	return ret;
}
Example #10
/**
 * omap2_clk_allow_idle - enable autoidle on an OMAP clock
 * @clk: struct clk * to enable autoidle for
 *
 * Enable autoidle on an OMAP clock.
 */
int omap2_clk_allow_idle(struct clk *clk)
{
	struct clk_hw_omap *c;

	c = to_clk_hw_omap(__clk_get_hw(clk));
	if (c->ops && c->ops->allow_idle)
		c->ops->allow_idle(c);
	return 0;
}
Example #11
/**
 * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
 * @clk: struct clk * to initialize
 *
 * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
 * temporarily for autoidle handling, until this support can be
 * integrated into the common clock framework code in some way.  No
 * return value.
 */
void omap2_init_clk_hw_omap_clocks(struct clk *clk)
{
	struct clk_hw_omap *c;

	if (__clk_get_flags(clk) & CLK_IS_BASIC)
		return;

	c = to_clk_hw_omap(__clk_get_hw(clk));
	list_add(&c->node, &clk_hw_omap_clocks);
}
Example #12
void clk_unregister_fixed_factor(struct clk *clk)
{
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	clk_unregister(clk);
	kfree(to_clk_fixed_factor(hw));
}
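Examples #14, #15 and #17 follow the same shape. The underlying pattern, sketched here with a hypothetical wrapper type (struct my_clk stands in for clk_fixed_factor, clk_gate, kona_clk, and so on): fetch the clk_hw before unregistering, then free the container the clk_hw is embedded in.

struct my_clk {			/* hypothetical container */
	struct clk_hw hw;
};
#define to_my_clk(_hw) container_of(_hw, struct my_clk, hw)

static void my_clk_unregister(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);

	if (!hw)
		return;

	clk_unregister(clk);	/* remove the clk from the framework... */
	kfree(to_my_clk(hw));	/* ...then free the driver's container */
}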
Example #13
/**
 * omap2_clk_allow_idle - enable autoidle on an OMAP clock
 * @clk: struct clk * to enable autoidle for
 *
 * Enable autoidle on an OMAP clock.
 */
int omap2_clk_allow_idle(struct clk *clk)
{
	struct clk_hw_omap *c;

	if (__clk_get_flags(clk) & CLK_IS_BASIC)
		return -EINVAL;

	c = to_clk_hw_omap(__clk_get_hw(clk));
	if (c->ops && c->ops->allow_idle)
		c->ops->allow_idle(c);
	return 0;
}
Example #14
void clk_unregister_gate(struct clk *clk)
{
	struct clk_gate *gate;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	gate = to_clk_gate(hw);

	clk_unregister(clk);
	kfree(gate);
}
Example #15
void clk_unregister_composite(struct clk *clk)
{
	struct clk_composite *composite;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	composite = to_clk_composite(hw);

	clk_unregister(clk);
	kfree(composite);
}
Example #16
static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	struct clk_gating_ctrl *ctrl = (struct clk_gating_ctrl *)data;
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}
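The ctrl pointer is recovered from the data argument because this function is registered as an OF clock provider callback. A sketch of that registration (the wrapper function name is hypothetical):

static void __init my_gating_clk_setup(struct device_node *np,
				       struct clk_gating_ctrl *ctrl)
{
	/* 'ctrl' comes back to clk_gating_get_src() as 'data' */
	of_clk_add_provider(np, clk_gating_get_src, ctrl);
}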
Example #17
static void kona_clk_teardown(struct clk *clk)
{
	struct clk_hw *hw;
	struct kona_clk *bcm_clk;

	if (!clk)
		return;

	hw = __clk_get_hw(clk);
	if (!hw) {
		pr_err("%s: clk %p has null hw pointer\n", __func__, clk);
		return;
	}
	clk_unregister(clk);

	bcm_clk = to_kona_clk(hw);
	bcm_clk_teardown(bcm_clk);
}
Example #18
void tegra_periph_reset_assert(struct clk *c)
{
	struct clk_hw *hw = __clk_get_hw(c);
	struct tegra_clk_periph *periph = to_clk_periph(hw);
	struct tegra_clk_periph_gate *gate;

	if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
		gate = to_clk_periph_gate(hw);
		if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
			WARN_ON(1);
			return;
		}
	} else {
		gate = &periph->gate;
	}

	tegra_periph_reset(gate, 1);
}
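The two magic checks make the casts safe: a bare clk_hw pointer does not record which container it is embedded in, so each candidate container keeps a magic word that is probed after container_of(). A stripped-down sketch of the idea, with hypothetical types and values:

#define FOO_MAGIC 0x18221223	/* hypothetical type tag */

struct foo_clk {
	struct clk_hw hw;
	u32 magic;
};

static bool hw_is_foo(struct clk_hw *hw)
{
	/* container_of() itself cannot fail; the magic word tells us
	 * whether the enclosing allocation really is a struct foo_clk */
	return container_of(hw, struct foo_clk, hw)->magic == FOO_MAGIC;
}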
Example #19
/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @clk: DPLL output struct clk
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;
	struct clk *parent;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			parent = __clk_get_parent(hw->clk);
			hw = __clk_get_hw(parent);
		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return 0;
	}

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = __raw_readl(dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}
Example #20
/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;
	struct clk *parent;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			parent = __clk_get_parent(hw->clk);
			hw = __clk_get_hw(parent);
		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}
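With the walk factored out, the recalc from Example #19 collapses to a plain caller. A sketch of that refactored form, reusing only code already shown above:

unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	u32 v;
	struct clk_hw_omap *pclk;

	pclk = omap3_find_clkoutx2_dpll(hw);
	if (!pclk)
		return 0;

	dd = pclk->dpll_data;
	WARN_ON(!dd->enable_mask);

	v = __raw_readl(dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		return parent_rate;

	return parent_rate * 2;
}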
Example #21
static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long min_rate,
					unsigned long max_rate,
					unsigned long *best_parent_rate,
					struct clk_hw **best_parent_p)
{
	struct clk_composite *composite = to_clk_composite(hw);
	const struct clk_ops *rate_ops = composite->rate_ops;
	const struct clk_ops *mux_ops = composite->mux_ops;
	struct clk_hw *rate_hw = composite->rate_hw;
	struct clk_hw *mux_hw = composite->mux_hw;
	struct clk *parent;
	unsigned long parent_rate;
	long tmp_rate, best_rate = 0;
	unsigned long rate_diff;
	unsigned long best_rate_diff = ULONG_MAX;
	int i;

	if (rate_hw && rate_ops && rate_ops->determine_rate) {
		__clk_hw_set_clk(rate_hw, hw);
		return rate_ops->determine_rate(rate_hw, rate, min_rate,
						max_rate,
						best_parent_rate,
						best_parent_p);
	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
		   mux_hw && mux_ops && mux_ops->set_parent) {
		*best_parent_p = NULL;

		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
			parent = clk_get_parent(mux_hw->clk);
			*best_parent_p = __clk_get_hw(parent);
			*best_parent_rate = __clk_get_rate(parent);

			return rate_ops->round_rate(rate_hw, rate,
						    best_parent_rate);
		}

		for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
			parent = clk_get_parent_by_index(mux_hw->clk, i);
			if (!parent)
				continue;

			parent_rate = __clk_get_rate(parent);

			tmp_rate = rate_ops->round_rate(rate_hw, rate,
							&parent_rate);
			if (tmp_rate < 0)
				continue;

			rate_diff = abs(rate - tmp_rate);

			if (!rate_diff || !*best_parent_p
				       || best_rate_diff > rate_diff) {
				*best_parent_p = __clk_get_hw(parent);
				*best_parent_rate = parent_rate;
				best_rate_diff = rate_diff;
				best_rate = tmp_rate;
			}

			if (!rate_diff)
				return rate;
		}

		return best_rate;
	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
		__clk_hw_set_clk(mux_hw, hw);
		return mux_ops->determine_rate(mux_hw, rate, min_rate,
					       max_rate, best_parent_rate,
					       best_parent_p);
	} else {
		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
		return 0;
	}
}
Example #22
static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct device_node *np;
	int i, count;
	u32 freq;
	struct clk *clk;
	const struct clk_hw *hwclk;
	struct cpufreq_frequency_table *table;
	struct cpu_data *data;
	unsigned int cpu = policy->cpu;
	u64 u64temp;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err_np;

	policy->clk = of_clk_get(np, 0);
	if (IS_ERR(policy->clk)) {
		pr_err("%s: no clock information\n", __func__);
		goto err_nomem2;
	}

	hwclk = __clk_get_hw(policy->clk);
	count = clk_hw_get_num_parents(hwclk);

	data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
	if (!data->pclk)
		goto err_nomem2;

	table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto err_pclk;

	for (i = 0; i < count; i++) {
		clk = clk_hw_get_parent_by_index(hwclk, i)->clk;
		data->pclk[i] = clk;
		freq = clk_get_rate(clk);
		table[i].frequency = freq / 1000;
		table[i].driver_data = i;
	}
	freq_table_redup(table, count);
	freq_table_sort(table, count);
	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	data->table = table;

	/* update ->cpus if we have cluster, no harm if not */
	set_affected_cpus(policy);
	policy->driver_data = data;

	/* Minimum transition latency is 12 platform clocks */
	u64temp = 12ULL * NSEC_PER_SEC;
	do_div(u64temp, get_bus_freq());
	policy->cpuinfo.transition_latency = u64temp + 1;

	of_node_put(np);

	return 0;

err_pclk:
	kfree(data->pclk);
err_nomem2:
	kfree(data);
err_np:
	of_node_put(np);

	return -ENODEV;
}
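To make the latency math concrete: twelve platform clock periods are converted to nanoseconds, and the +1 absorbs the truncation of the integer division. A worked check assuming a 600 MHz platform bus:

/* assuming get_bus_freq() == 600000000 */
u64 u64temp = 12ULL * NSEC_PER_SEC;	/* 12 000 000 000 */
do_div(u64temp, 600000000);		/* u64temp == 20 */
/* policy->cpuinfo.transition_latency = 20 + 1 = 21 ns */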
Example #23
struct clk *ti_clk_register_dpll(struct ti_clk *setup)
{
    struct clk_hw_omap *clk_hw;
    struct clk_init_data init = { NULL };
    struct dpll_data *dd;
    struct clk *clk;
    struct ti_clk_dpll *dpll;
    const struct clk_ops *ops = &omap3_dpll_ck_ops;
    struct clk *clk_ref;
    struct clk *clk_bypass;

    dpll = setup->data;

    if (dpll->num_parents < 2)
        return ERR_PTR(-EINVAL);

    clk_ref = clk_get_sys(NULL, dpll->parents[0]);
    clk_bypass = clk_get_sys(NULL, dpll->parents[1]);

    if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
        return ERR_PTR(-EAGAIN);

    dd = kzalloc(sizeof(*dd), GFP_KERNEL);
    clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
    if (!dd || !clk_hw) {
        clk = ERR_PTR(-ENOMEM);
        goto cleanup;
    }

    clk_hw->dpll_data = dd;
    clk_hw->ops = &clkhwops_omap3_dpll;
    clk_hw->hw.init = &init;
    clk_hw->flags = MEMMAP_ADDRESSING;

    init.name = setup->name;
    init.ops = ops;

    init.num_parents = dpll->num_parents;
    init.parent_names = dpll->parents;

    dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
    dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
    dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
    dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);

    dd->modes = dpll->modes;
    dd->div1_mask = dpll->div1_mask;
    dd->idlest_mask = dpll->idlest_mask;
    dd->mult_mask = dpll->mult_mask;
    dd->autoidle_mask = dpll->autoidle_mask;
    dd->enable_mask = dpll->enable_mask;
    dd->sddiv_mask = dpll->sddiv_mask;
    dd->dco_mask = dpll->dco_mask;
    dd->max_divider = dpll->max_divider;
    dd->min_divider = dpll->min_divider;
    dd->max_multiplier = dpll->max_multiplier;
    dd->auto_recal_bit = dpll->auto_recal_bit;
    dd->recal_en_bit = dpll->recal_en_bit;
    dd->recal_st_bit = dpll->recal_st_bit;

    dd->clk_ref = __clk_get_hw(clk_ref);
    dd->clk_bypass = __clk_get_hw(clk_bypass);

    if (dpll->flags & CLKF_CORE)
        ops = &omap3_dpll_core_ck_ops;

    if (dpll->flags & CLKF_PER)
        ops = &omap3_dpll_per_ck_ops;

    if (dpll->flags & CLKF_J_TYPE)
        dd->flags |= DPLL_J_TYPE;

    clk = clk_register(NULL, &clk_hw->hw);

    if (!IS_ERR(clk))
        return clk;

cleanup:
    kfree(dd);
    kfree(clk_hw);
    return clk;
}
Example #24
static int of_dra7_atl_clk_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct dra7_atl_clock_info *cinfo;
	int i;
	int ret = 0;

	if (!node)
		return -ENODEV;

	cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->iobase = of_iomap(node, 0);
	cinfo->dev = &pdev->dev;
	pm_runtime_enable(cinfo->dev);
	pm_runtime_irq_safe(cinfo->dev);

	pm_runtime_get_sync(cinfo->dev);
	atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);

	for (i = 0; i < DRA7_ATL_INSTANCES; i++) {
		struct device_node *cfg_node;
		char prop[5];
		struct dra7_atl_desc *cdesc;
		struct of_phandle_args clkspec;
		struct clk *clk;
		int rc;

		rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
						NULL, i, &clkspec);

		if (rc) {
			pr_err("%s: failed to lookup atl clock %d\n", __func__,
			       i);
			return -EINVAL;
		}

		clk = of_clk_get_from_provider(&clkspec);

		cdesc = to_atl_desc(__clk_get_hw(clk));
		cdesc->cinfo = cinfo;
		cdesc->id = i;

		/* Get configuration for the ATL instances */
		snprintf(prop, sizeof(prop), "atl%u", i);
		cfg_node = of_find_node_by_name(node, prop);
		if (cfg_node) {
			ret = of_property_read_u32(cfg_node, "bws",
						   &cdesc->bws);
			ret |= of_property_read_u32(cfg_node, "aws",
						    &cdesc->aws);
			if (!ret) {
				cdesc->valid = true;
				atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i),
					  cdesc->bws);
				atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
					  cdesc->aws);
			}
		}

		cdesc->probed = true;
		/*
		 * Enable the clock if it has been asked prior to loading the
		 * hw driver
		 */
		if (cdesc->enabled)
			atl_clk_enable(__clk_get_hw(clk));
	}
	pm_runtime_put_sync(cinfo->dev);

	return ret;
}