unsigned long tegra_clk_measure_input_freq(void)
{
	u32 clock_autodetect;

	if (osc_input_freq)
		return osc_input_freq;

	clk_writel(OSC_FREQ_DET_TRIG | 1, OSC_FREQ_DET);
	do {} while (clk_readl(OSC_FREQ_DET_STATUS) & OSC_FREQ_DET_BUSY);
	clock_autodetect = clk_readl(OSC_FREQ_DET_STATUS);

	if (clock_autodetect >= 732 - 3 && clock_autodetect <= 732 + 3)
		osc_input_freq = 12000000;
	else if (clock_autodetect >= 794 - 3 && clock_autodetect <= 794 + 3)
		osc_input_freq = 13000000;
	else if (clock_autodetect >= 1172 - 3 && clock_autodetect <= 1172 + 3)
		osc_input_freq = 19200000;
	else if (clock_autodetect >= 1587 - 3 && clock_autodetect <= 1587 + 3)
		osc_input_freq = 26000000;
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	else if (clock_autodetect >= 1025 - 3 && clock_autodetect <= 1025 + 3)
		osc_input_freq = 16800000;
	else if (clock_autodetect >= 2344 - 3 && clock_autodetect <= 2344 + 3)
		osc_input_freq = 38400000;
	else if (clock_autodetect >= 2928 - 3 && clock_autodetect <= 2928 + 3)
		osc_input_freq = 48000000;
#endif
	else {
		pr_err("%s: Unexpected clock autodetect value %d", __func__,
			clock_autodetect);
		BUG();
	}
	return osc_input_freq;
}
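The detect windows above line up with counting oscillator cycles over two periods of the 32.768 kHz reference clock; that is an inference from the numbers rather than from the TRM, but it makes the magic constants easy to sanity-check. A minimal sketch under that assumption (DIV_ROUND_CLOSEST comes from <linux/kernel.h>):

static unsigned long osc_expected_count(unsigned long osc_hz)
{
	/* hypothetical helper: cycles counted over two 32.768 kHz periods */
	return DIV_ROUND_CLOSEST(osc_hz * 2, 32768);
}

/* osc_expected_count(12000000) ~= 732, (19200000) ~= 1172, (26000000) ~= 1587 */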
Example #2
static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct mstp_clock_group *group = clock->group;
	u32 value;

	if (group->mstpsr)
		value = clk_readl(group->mstpsr);
	else
		value = clk_readl(group->smstpcr);

	return !!(value & BIT(clock->bit_index));
}
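For context, an is_enabled helper like this is normally wired into a clk_ops table alongside enable/disable callbacks; a sketch, assuming cpg_mstp_clock_enable()/cpg_mstp_clock_disable() are thin wrappers around the endisable helper shown in a later example:

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable		= cpg_mstp_clock_enable,	/* assumed: endisable(hw, true) */
	.disable	= cpg_mstp_clock_disable,	/* assumed: endisable(hw, false) */
	.is_enabled	= cpg_mstp_clock_is_enabled,
};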
Example #3
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	u32 val, m, n;
	u64 ret;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	m = (val & fd->mmask) >> fd->mshift;
	n = (val & fd->nmask) >> fd->nshift;

	if (!n || !m)
		return parent_rate;

	ret = (u64)parent_rate * m;
	do_div(ret, n);

	return ret;
}
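The recalculated rate is simply parent_rate scaled by m/n; a worked example with hypothetical field values to illustrate the do_div() arithmetic:

/*
 * Worked example (hypothetical register contents):
 *   parent_rate = 24000000, m = 1, n = 3
 *   ret = 24000000 * 1 = 24000000; do_div(ret, 3) -> 8000000
 * i.e. the fractional divider scales the parent by the ratio m/n.
 */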
Example #4
static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	u32 val;

	/*
	 * Divider field is write only, so divider stat field must
	 * be read so divider field can be set accordingly.
	 */
	val = clk_readl(gate->reg);
	if (val & LPC18XX_CCU_DIVSTAT)
		val |= LPC18XX_CCU_DIV;

	if (enable) {
		val |= LPC18XX_CCU_RUN;
	} else {
		/*
	 * To safely disable a branch clock a sequence of two separate
		 * writes must be used. First write should set the AUTO bit
		 * and the next write should clear the RUN bit.
		 */
		val |= LPC18XX_CCU_AUTO;
		clk_writel(val, gate->reg);

		val &= ~LPC18XX_CCU_RUN;
	}

	clk_writel(val, gate->reg);

	return 0;
}
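The endisable helper is typically exposed through thin enable/disable wrappers in the clk_ops; a sketch of what those wrappers would look like (names assumed, not taken from the snippet above):

static int lpc18xx_ccu_gate_enable(struct clk_hw *hw)
{
	return lpc18xx_ccu_gate_endisable(hw, true);
}

static void lpc18xx_ccu_gate_disable(struct clk_hw *hw)
{
	lpc18xx_ccu_gate_endisable(hw, false);
}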
Example #5
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;

	rational_best_approximation(rate, parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
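rational_best_approximation() (from lib/rational.c) picks the m/n pair closest to rate/parent_rate while keeping both values within the register field widths. A worked example with assumed 8-bit fields:

/*
 * Worked example (assumed mwidth = nwidth = 8):
 *   rational_best_approximation(250000000, 600000000, 255, 255, &m, &n)
 *   -> m = 5, n = 12, since 250 MHz / 600 MHz reduces exactly to 5/12
 * Both values fit the 8-bit fields, so the programmed ratio is exact.
 */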
/*
 * It works on following logic:
 *
 * For enabling clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So, result is always: enable xor set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long uninitialized_var(flags);
	u32 reg;

	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_readl(gate->reg);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_writel(reg, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}
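In the common clk-gate code this helper is reached through the enable/disable callbacks, which differ only in the value they pass; roughly:

static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);

	return 0;
}

static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}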
Example #7
/* clk_m functions */
static unsigned long tegra2_clk_m_autodetect_rate(struct clk *c)
{
	u32 auto_clock_control = clk_readl(OSC_CTRL) & ~OSC_CTRL_OSC_FREQ_MASK;

	c->rate = clk_measure_input_freq();
	switch (c->rate) {
	case 12000000:
		auto_clock_control |= OSC_CTRL_OSC_FREQ_12MHZ;
		break;
	case 13000000:
		auto_clock_control |= OSC_CTRL_OSC_FREQ_13MHZ;
		break;
	case 19200000:
		auto_clock_control |= OSC_CTRL_OSC_FREQ_19_2MHZ;
		break;
	case 26000000:
		auto_clock_control |= OSC_CTRL_OSC_FREQ_26MHZ;
		break;
	default:
		pr_err("%s: Unexpected clock rate %ld", __func__, c->rate);
		BUG();
	}
	clk_writel(auto_clock_control, OSC_CTRL);
	return c->rate;
}
Example #8
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long div;
	unsigned n, m;
	u32 val;

	div = gcd(parent_rate, rate);
	m = rate / div;
	n = parent_rate / div;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
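Unlike the rational_best_approximation() variant in Example #5, this gcd-based version does not bound m and n to the register field widths, so an exact ratio can need more bits than the hardware has. A worked example with assumed 8-bit fields:

/*
 * Worked example (assumed 8-bit m/n fields):
 *   rate = 460800, parent_rate = 24000000
 *   div = gcd(24000000, 460800) = 38400 -> m = 12, n = 625
 *   n = 625 needs 10 bits, so the shifted value spills past the n field
 *   and the programmed ratio is wrong; bounding m and n to the field
 *   widths (as in Example #5) avoids this.
 */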
Example #9
static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct div6_clock *clock = to_div6_clock(hw);
	unsigned int div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;

	return parent_rate / div;
}
Example #10
static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	value = clk_readl(priv->base + MSTPSR(clock->index / 32));

	return !(value & BIT(clock->index % 32));
}
Example #11
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->mstp_lock, flags);

	value = clk_readl(priv->base + SMSTPCR(reg));
	if (enable)
		value &= ~bitmask;
	else
		value |= bitmask;
	clk_writel(value, priv->base + SMSTPCR(reg));

	spin_unlock_irqrestore(&priv->mstp_lock, flags);

	if (!enable)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(clk_readl(priv->base + MSTPSR(reg)) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + SMSTPCR(reg), bit);
		return -ETIMEDOUT;
	}

	return 0;
}
Example #12
unsigned long clk_measure_input_freq(void)
{
	u32 clock_autodetect;
	clk_writel(OSC_FREQ_DET_TRIG | 1, OSC_FREQ_DET);
	do {} while (clk_readl(OSC_FREQ_DET_STATUS) & OSC_FREQ_DET_BUSY);
	clock_autodetect = clk_readl(OSC_FREQ_DET_STATUS);
	if (clock_autodetect >= 732 - 3 && clock_autodetect <= 732 + 3) {
		return 12000000;
	} else if (clock_autodetect >= 794 - 3 && clock_autodetect <= 794 + 3) {
		return 13000000;
	} else if (clock_autodetect >= 1172 - 3 && clock_autodetect <= 1172 + 3) {
		return 19200000;
	} else if (clock_autodetect >= 1587 - 3 && clock_autodetect <= 1587 + 3) {
		return 26000000;
	} else {
		pr_err("%s: Unexpected clock autodetect value %d", __func__, clock_autodetect);
		BUG();
		return 0;
	}
}
Example #13
static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	struct div6_clock *clock = to_div6_clock(hw);
	unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);

	clock->div = div;

	/* Only program the new divisor if the clock isn't stopped. */
	if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP))
		clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);

	return 0;
}
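The cached clock->div matters because stopping a DIV6 clock clobbers the divisor field (see the comment in Example #16's init function), so the enable path has to restore it. A sketch of that enable callback, consistent with the register layout used above:

static int cpg_div6_clock_enable(struct clk_hw *hw)
{
	struct div6_clock *clock = to_div6_clock(hw);
	u32 val;

	/* clear the stop bit and rewrite the cached divisor in one go */
	val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
	    | CPG_DIV6_DIV(clock->div - 1);
	clk_writel(val, clock->reg);

	return 0;
}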
Example #14
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct mstp_clock_group *group = clock->group;
	u32 bitmask = BIT(clock->bit_index);
	unsigned long flags;
	unsigned int i;
	u32 value;

	spin_lock_irqsave(&group->lock, flags);

	value = clk_readl(group->smstpcr);
	if (enable)
		value &= ~bitmask;
	else
		value |= bitmask;
	clk_writel(value, group->smstpcr);

	spin_unlock_irqrestore(&group->lock, flags);

	if (!enable || !group->mstpsr)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(clk_readl(group->mstpsr) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		pr_err("%s: failed to enable %p[%d]\n", __func__,
		       group->smstpcr, clock->bit_index);
		return -ETIMEDOUT;
	}

	return 0;
}
static int clk_gate_is_enabled(struct clk_hw *hw)
{
	u32 reg;
	struct clk_gate *gate = to_clk_gate(hw);

	reg = clk_readl(gate->reg);

	/* if a set bit disables this clk, flip it before masking */
	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
		reg ^= BIT(gate->bit_idx);

	reg &= BIT(gate->bit_idx);

	return reg ? 1 : 0;
}
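The XOR makes the flag-inverted case fall out naturally; a quick worked example with bit_idx = 3:

/*
 * Worked example, bit_idx = 3, register reads 0x08 (bit 3 set):
 *   plain gate:               0x08 & BIT(3) != 0           -> enabled
 *   CLK_GATE_SET_TO_DISABLE: (0x08 ^ BIT(3)) & BIT(3) == 0 -> disabled
 */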
Example #16
static void __init cpg_div6_clock_init(struct device_node *np)
{
	struct clk_init_data init;
	struct div6_clock *clock;
	const char *parent_name;
	const char *name;
	struct clk *clk;
	int ret;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		pr_err("%s: failed to allocate %s DIV6 clock\n",
		       __func__, np->name);
		return;
	}

	/* Remap the clock register and read the divisor. Disabling the
	 * clock overwrites the divisor, so we need to cache its value for the
	 * enable operation.
	 */
	clock->reg = of_iomap(np, 0);
	if (clock->reg == NULL) {
		pr_err("%s: failed to map %s DIV6 clock register\n",
		       __func__, np->name);
		goto error;
	}

	clock->div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;

	/* Parse the DT properties. */
	ret = of_property_read_string(np, "clock-output-names", &name);
	if (ret < 0) {
		pr_err("%s: failed to get %s DIV6 clock output name\n",
		       __func__, np->name);
		goto error;
	}

	parent_name = of_clk_get_parent_name(np, 0);
	if (parent_name == NULL) {
		pr_err("%s: failed to get %s DIV6 clock parent name\n",
		       __func__, np->name);
		goto error;
	}

	/* Register the clock. */
	init.name = name;
	init.ops = &cpg_div6_clock_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
		       __func__, np->name, PTR_ERR(clk));
		goto error;
	}

	of_clk_add_provider(np, of_clk_src_simple_get, clk);

	return;

error:
	if (clock->reg)
		iounmap(clock->reg);
	kfree(clock);
}
Example #17
static void __init zynq_clk_setup(struct device_node *np)
{
	int i;
	u32 tmp;
	int ret;
	struct clk *clk;
	char *clk_name;
	unsigned int fclk_enable = 0;
	const char *clk_output_name[clk_max];
	const char *cpu_parents[4];
	const char *periph_parents[4];
	const char *swdt_ext_clk_mux_parents[2];
	const char *can_mio_mux_parents[NUM_MIO_PINS];

	pr_info("Zynq clock init\n");

	/* get clock output names from DT */
	for (i = 0; i < clk_max; i++) {
		if (of_property_read_string_index(np, "clock-output-names",
				  i, &clk_output_name[i])) {
			pr_err("%s: clock output name not in DT\n", __func__);
			BUG();
		}
	}
	cpu_parents[0] = clk_output_name[armpll];
	cpu_parents[1] = clk_output_name[armpll];
	cpu_parents[2] = clk_output_name[ddrpll];
	cpu_parents[3] = clk_output_name[iopll];
	periph_parents[0] = clk_output_name[iopll];
	periph_parents[1] = clk_output_name[iopll];
	periph_parents[2] = clk_output_name[armpll];
	periph_parents[3] = clk_output_name[ddrpll];

	of_property_read_u32(np, "fclk-enable", &fclk_enable);

	/* ps_clk */
	ret = of_property_read_u32(np, "ps-clk-frequency", &tmp);
	if (ret) {
		pr_warn("ps_clk frequency not specified, using 33 MHz.\n");
		tmp = 33333333;
	}
	ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, CLK_IS_ROOT,
			tmp);

	ret = of_property_read_u32(np, "fclk-enable", &fclk_enable);
	if (ret)
		fclk_enable = 0xf;

	/* PLLs */
	clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
			SLCR_PLL_STATUS, 0, &armpll_lock);
	clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
			armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
			SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);

	clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
			SLCR_PLL_STATUS, 1, &ddrpll_lock);
	clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
			ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
			SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);

	clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
			SLCR_PLL_STATUS, 2, &iopll_lock);
	clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
			iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
			SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);

	/* CPU clocks */
	tmp = readl(SLCR_621_TRUE) & 1;
	clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
			CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
			&armclk_lock);
	clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
			SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);

	clks[cpu_6or4x] = clk_register_gate(NULL, clk_output_name[cpu_6or4x],
			"cpu_div", CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
			SLCR_ARM_CLK_CTRL, 24, 0, &armclk_lock);

	clk = clk_register_fixed_factor(NULL, "cpu_3or2x_div", "cpu_div", 0,
			1, 2);
	clks[cpu_3or2x] = clk_register_gate(NULL, clk_output_name[cpu_3or2x],
			"cpu_3or2x_div", CLK_IGNORE_UNUSED,
			SLCR_ARM_CLK_CTRL, 25, 0, &armclk_lock);

	clk = clk_register_fixed_factor(NULL, "cpu_2x_div", "cpu_div", 0, 1,
			2 + tmp);
	clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
			"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
			26, 0, &armclk_lock);

	clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
			4 + 2 * tmp);
	clks[cpu_1x] = clk_register_gate(NULL, clk_output_name[cpu_1x],
			"cpu_1x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 27,
			0, &armclk_lock);

	/* Timers */
	swdt_ext_clk_mux_parents[0] = clk_output_name[cpu_1x];
	for (i = 0; i < ARRAY_SIZE(swdt_ext_clk_input_names); i++) {
		int idx = of_property_match_string(np, "clock-names",
				swdt_ext_clk_input_names[i]);
		if (idx >= 0)
			swdt_ext_clk_mux_parents[i + 1] =
				of_clk_get_parent_name(np, idx);
		else
			swdt_ext_clk_mux_parents[i + 1] = dummy_nm;
	}
	clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
			swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT |
			CLK_SET_RATE_NO_REPARENT, SLCR_SWDT_CLK_SEL, 0, 1, 0,
			&swdtclk_lock);

	/* DDR clocks */
	clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
			SLCR_DDR_CLK_CTRL, 26, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
	clks[ddr2x] = clk_register_gate(NULL, clk_output_name[ddr2x],
			"ddr2x_div", 0, SLCR_DDR_CLK_CTRL, 1, 0, &ddrclk_lock);
	clk_prepare_enable(clks[ddr2x]);
	clk = clk_register_divider(NULL, "ddr3x_div", "ddrpll", 0,
			SLCR_DDR_CLK_CTRL, 20, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &ddrclk_lock);
	clks[ddr3x] = clk_register_gate(NULL, clk_output_name[ddr3x],
			"ddr3x_div", 0, SLCR_DDR_CLK_CTRL, 0, 0, &ddrclk_lock);
	clk_prepare_enable(clks[ddr3x]);

	clk = clk_register_divider(NULL, "dci_div0", "ddrpll", 0,
			SLCR_DCI_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &dciclk_lock);
	clk = clk_register_divider(NULL, "dci_div1", "dci_div0",
			CLK_SET_RATE_PARENT, SLCR_DCI_CLK_CTRL, 20, 6,
			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
			&dciclk_lock);
	clks[dci] = clk_register_gate(NULL, clk_output_name[dci], "dci_div1",
			CLK_SET_RATE_PARENT, SLCR_DCI_CLK_CTRL, 0, 0,
			&dciclk_lock);
	clk_prepare_enable(clks[dci]);

	/* Peripheral clocks */
	for (i = fclk0; i <= fclk3; i++) {
		int enable = !!(fclk_enable & BIT(i - fclk0));
		zynq_clk_register_fclk(i, clk_output_name[i],
				SLCR_FPGA0_CLK_CTRL + 0x10 * (i - fclk0),
				periph_parents, enable);
	}

	zynq_clk_register_periph_clk(lqspi, 0, clk_output_name[lqspi], NULL,
			SLCR_LQSPI_CLK_CTRL, periph_parents, 0);

	zynq_clk_register_periph_clk(smc, 0, clk_output_name[smc], NULL,
			SLCR_SMC_CLK_CTRL, periph_parents, 0);

	zynq_clk_register_periph_clk(pcap, 0, clk_output_name[pcap], NULL,
			SLCR_PCAP_CLK_CTRL, periph_parents, 0);

	zynq_clk_register_periph_clk(sdio0, sdio1, clk_output_name[sdio0],
			clk_output_name[sdio1], SLCR_SDIO_CLK_CTRL,
			periph_parents, 1);

	zynq_clk_register_periph_clk(uart0, uart1, clk_output_name[uart0],
			clk_output_name[uart1], SLCR_UART_CLK_CTRL,
			periph_parents, 1);

	zynq_clk_register_periph_clk(spi0, spi1, clk_output_name[spi0],
			clk_output_name[spi1], SLCR_SPI_CLK_CTRL,
			periph_parents, 1);

	for (i = 0; i < ARRAY_SIZE(gem0_emio_input_names); i++) {
		int idx = of_property_match_string(np, "clock-names",
				gem0_emio_input_names[i]);
		if (idx >= 0)
			gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
					idx);
	}
	clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
			CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
			&gem0clk_lock);
	clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
			SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
	clk = clk_register_divider(NULL, "gem0_div1", "gem0_div0",
			CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
			&gem0clk_lock);
	clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
			SLCR_GEM0_CLK_CTRL, 6, 1, 0,
			&gem0clk_lock);
	clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
			"gem0_emio_mux", CLK_SET_RATE_PARENT,
			SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);

	for (i = 0; i < ARRAY_SIZE(gem1_emio_input_names); i++) {
		int idx = of_property_match_string(np, "clock-names",
				gem1_emio_input_names[i]);
		if (idx >= 0)
			gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
					idx);
	}
	clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
			CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
			&gem1clk_lock);
	clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
			SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
	clk = clk_register_divider(NULL, "gem1_div1", "gem1_div0",
			CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
			&gem1clk_lock);
	clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
			SLCR_GEM1_CLK_CTRL, 6, 1, 0,
			&gem1clk_lock);
	clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
			"gem1_emio_mux", CLK_SET_RATE_PARENT,
			SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);

	tmp = strlen("mio_clk_00x");
	clk_name = kmalloc(tmp, GFP_KERNEL);
	for (i = 0; i < NUM_MIO_PINS; i++) {
		int idx;

		snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
		idx = of_property_match_string(np, "clock-names", clk_name);
		if (idx >= 0)
			can_mio_mux_parents[i] = of_clk_get_parent_name(np,
						idx);
		else
			can_mio_mux_parents[i] = dummy_nm;
	}
	kfree(clk_name);
	clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
			&canclk_lock);
	clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
			SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
	clk = clk_register_divider(NULL, "can_div1", "can_div0",
			CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 20, 6,
			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
			&canclk_lock);
	clk = clk_register_gate(NULL, "can0_gate", "can_div1",
			CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 0, 0,
			&canclk_lock);
	clk = clk_register_gate(NULL, "can1_gate", "can_div1",
			CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
			&canclk_lock);
	clk = clk_register_mux(NULL, "can0_mio_mux",
			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
			&canmioclk_lock);
	clk = clk_register_mux(NULL, "can1_mio_mux",
			can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
			0, &canmioclk_lock);
	clks[can0] = clk_register_mux(NULL, clk_output_name[can0],
			can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 6, 1, 0,
			&canmioclk_lock);
	clks[can1] = clk_register_mux(NULL, clk_output_name[can1],
			can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
			CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 22, 1,
			0, &canmioclk_lock);

	for (i = 0; i < ARRAY_SIZE(dbgtrc_emio_input_names); i++) {
		int idx = of_property_match_string(np, "clock-names",
				dbgtrc_emio_input_names[i]);
		if (idx >= 0)
			dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
					idx);
	}
	clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
			CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
			&dbgclk_lock);
	clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
			SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
	clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
			CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
			&dbgclk_lock);
	clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
			"dbg_emio_mux", CLK_SET_RATE_PARENT, SLCR_DBG_CLK_CTRL,
			0, 0, &dbgclk_lock);
	clks[dbg_apb] = clk_register_gate(NULL, clk_output_name[dbg_apb],
			clk_output_name[cpu_1x], 0, SLCR_DBG_CLK_CTRL, 1, 0,
			&dbgclk_lock);

	/* leave debug clocks in the state the bootloader set them up to */
	tmp = clk_readl(SLCR_DBG_CLK_CTRL);
	if (tmp & DBG_CLK_CTRL_CLKACT_TRC)
		if (clk_prepare_enable(clks[dbg_trc]))
			pr_warn("%s: trace clk enable failed\n", __func__);
	if (tmp & DBG_CLK_CTRL_CPU_1XCLKACT)
		if (clk_prepare_enable(clks[dbg_apb]))
			pr_warn("%s: debug APB clk enable failed\n", __func__);

	/* One gated clock for all APER clocks. */
	clks[dma] = clk_register_gate(NULL, clk_output_name[dma],
			clk_output_name[cpu_2x], 0, SLCR_APER_CLK_CTRL, 0, 0,
			&aperclk_lock);
	clks[usb0_aper] = clk_register_gate(NULL, clk_output_name[usb0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 2, 0,
			&aperclk_lock);
	clks[usb1_aper] = clk_register_gate(NULL, clk_output_name[usb1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 3, 0,
			&aperclk_lock);
	clks[gem0_aper] = clk_register_gate(NULL, clk_output_name[gem0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 6, 0,
			&aperclk_lock);
	clks[gem1_aper] = clk_register_gate(NULL, clk_output_name[gem1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 7, 0,
			&aperclk_lock);
	clks[sdio0_aper] = clk_register_gate(NULL, clk_output_name[sdio0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 10, 0,
			&aperclk_lock);
	clks[sdio1_aper] = clk_register_gate(NULL, clk_output_name[sdio1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 11, 0,
			&aperclk_lock);
	clks[spi0_aper] = clk_register_gate(NULL, clk_output_name[spi0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 14, 0,
			&aperclk_lock);
	clks[spi1_aper] = clk_register_gate(NULL, clk_output_name[spi1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 15, 0,
			&aperclk_lock);
	clks[can0_aper] = clk_register_gate(NULL, clk_output_name[can0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 16, 0,
			&aperclk_lock);
	clks[can1_aper] = clk_register_gate(NULL, clk_output_name[can1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 17, 0,
			&aperclk_lock);
	clks[i2c0_aper] = clk_register_gate(NULL, clk_output_name[i2c0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 18, 0,
			&aperclk_lock);
	clks[i2c1_aper] = clk_register_gate(NULL, clk_output_name[i2c1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 19, 0,
			&aperclk_lock);
	clks[uart0_aper] = clk_register_gate(NULL, clk_output_name[uart0_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 20, 0,
			&aperclk_lock);
	clks[uart1_aper] = clk_register_gate(NULL, clk_output_name[uart1_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 21, 0,
			&aperclk_lock);
	clks[gpio_aper] = clk_register_gate(NULL, clk_output_name[gpio_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 22, 0,
			&aperclk_lock);
	clks[lqspi_aper] = clk_register_gate(NULL, clk_output_name[lqspi_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 23, 0,
			&aperclk_lock);
	clks[smc_aper] = clk_register_gate(NULL, clk_output_name[smc_aper],
			clk_output_name[cpu_1x], 0, SLCR_APER_CLK_CTRL, 24, 0,
			&aperclk_lock);

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		if (IS_ERR(clks[i])) {
			pr_err("Zynq clk %d: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));
			BUG();
		}
	}

	clk_data.clks = clks;
	clk_data.clk_num = ARRAY_SIZE(clks);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
Example #18
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
		const char *clk_name, void __iomem *fclk_ctrl_reg,
		const char **parents, int enable)
{
	struct clk *clk;
	u32 enable_reg;
	char *mux_name;
	char *div0_name;
	char *div1_name;
	spinlock_t *fclk_lock;
	spinlock_t *fclk_gate_lock;
	void __iomem *fclk_gate_reg = fclk_ctrl_reg + 8;

	fclk_lock = kmalloc(sizeof(*fclk_lock), GFP_KERNEL);
	if (!fclk_lock)
		goto err;
	fclk_gate_lock = kmalloc(sizeof(*fclk_gate_lock), GFP_KERNEL);
	if (!fclk_gate_lock)
		goto err_fclk_gate_lock;
	spin_lock_init(fclk_lock);
	spin_lock_init(fclk_gate_lock);

	mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name);
	if (!mux_name)
		goto err_mux_name;
	div0_name = kasprintf(GFP_KERNEL, "%s_div0", clk_name);
	if (!div0_name)
		goto err_div0_name;
	div1_name = kasprintf(GFP_KERNEL, "%s_div1", clk_name);
	if (!div1_name)
		goto err_div1_name;

	clk = clk_register_mux(NULL, mux_name, parents, 4,
			CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
			fclk_lock);

	clk = clk_register_divider(NULL, div0_name, mux_name,
			0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO, fclk_lock);

	clk = clk_register_divider(NULL, div1_name, div0_name,
			CLK_SET_RATE_PARENT, fclk_ctrl_reg, 20, 6,
			CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
			fclk_lock);

	clks[fclk] = clk_register_gate(NULL, clk_name,
			div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
			0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
	enable_reg = clk_readl(fclk_gate_reg) & 1;
	if (enable && !enable_reg) {
		if (clk_prepare_enable(clks[fclk]))
			pr_warn("%s: FCLK%u enable failed\n", __func__,
					fclk - fclk0);
	}
	kfree(mux_name);
	kfree(div0_name);
	kfree(div1_name);

	return;

err_div1_name:
	kfree(div0_name);
err_div0_name:
	kfree(mux_name);
err_mux_name:
	kfree(fclk_gate_lock);
err_fclk_gate_lock:
	kfree(fclk_lock);
err:
	clks[fclk] = ERR_PTR(-ENOMEM);
}
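For orientation, the per-FCLK topology this function builds (names come from the kasprintf() calls above) is:

/*
 *   <clk_name>_mux   (4-way mux, fclk_ctrl_reg bits [5:4])
 *     -> <clk_name>_div0  (divider, fclk_ctrl_reg bits [13:8])
 *       -> <clk_name>_div1  (divider, fclk_ctrl_reg bits [25:20])
 *         -> <clk_name>     (gate, fclk_gate_reg bit 0, set-to-disable)
 */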
Example #19
static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
{
	struct div6_clock *clock = to_div6_clock(hw);

	return !(clk_readl(clock->reg) & CPG_DIV6_CKSTP);
}