/*
 * Register a MediaTek mux clock, optionally paired with a gate bit in
 * the same register.  When @gate_bit is within MAX_MUX_GATE_BIT the
 * composite also gets a set-to-disable gate; otherwise it is mux-only.
 *
 * Returns the registered clk or an ERR_PTR; helper structs are freed
 * on registration failure.
 */
struct clk *mtk_clk_register_mux(const char *name,
		const char **parent_names,
		u8 num_parents,
		void __iomem *base_addr,
		u8 shift,
		u8 width,
		u8 gate_bit)
{
	const struct clk_ops *gate_ops = NULL;
	struct clk_hw *gate_hw = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux;
	struct clk *clk;

#if MT_CCF_DEBUG
	pr_debug("name: %s, num_parents: %d, gate_bit: %d\n",
		name, (int)num_parents, (int)gate_bit);
#endif /* MT_CCF_DEBUG */

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->reg = base_addr;
	mux->mask = BIT(width) - 1;
	mux->shift = shift;
	mux->flags = 0;
	mux->lock = &clk_ops_lock;

	if (gate_bit <= MAX_MUX_GATE_BIT) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			kfree(mux);
			return ERR_PTR(-ENOMEM);
		}

		gate->reg = base_addr;
		gate->bit_idx = gate_bit;
		/* writing the bit disables the clock on this IP */
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = &clk_ops_lock;

		gate_hw = &gate->hw;
		gate_ops = &clk_gate_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     &mux->hw, &clk_mux_ops,
				     NULL, NULL,
				     gate_hw, gate_ops,
				     CLK_IGNORE_UNUSED);
	if (IS_ERR(clk)) {
		kfree(gate);
		kfree(mux);
	}

	return clk;
}
/*
 * Set up the sun4i MOD1 clock: a parent mux plus an enable gate that
 * share one register.  Registers the result as a DT clock provider;
 * on any failure all intermediate resources are released and the
 * function returns silently.
 */
static void __init sun4i_mod1_clk_setup(struct device_node *node)
{
	const char *parents[4];
	const char *clk_name = node->name;
	struct clk_gate *gate;
	struct clk_mux *mux;
	void __iomem *reg;
	struct clk *clk;
	int nparents;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg))
		return;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto out_unmap;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto out_free_mux;

	of_property_read_string(node, "clock-output-names", &clk_name);
	nparents = of_clk_parent_fill(node, parents, SUN4I_MOD1_MAX_PARENTS);

	gate->reg = reg;
	gate->bit_idx = SUN4I_MOD1_ENABLE;
	gate->lock = &mod1_lock;

	mux->reg = reg;
	mux->shift = SUN4I_MOD1_MUX;
	mux->mask = BIT(SUN4I_MOD1_MUX_WIDTH) - 1;
	mux->lock = &mod1_lock;

	clk = clk_register_composite(NULL, clk_name, parents, nparents,
				     &mux->hw, &clk_mux_ops,
				     NULL, NULL,
				     &gate->hw, &clk_gate_ops,
				     CLK_SET_RATE_PARENT);
	if (IS_ERR(clk))
		goto out_free_gate;

	of_clk_add_provider(node, of_clk_src_simple_get, clk);
	return;

out_free_gate:
	kfree(gate);
out_free_mux:
	kfree(mux);
out_unmap:
	iounmap(reg);
}
/*
 * Register a fractional-divider branch clock, optionally gated.
 *
 * Fixes over the previous version:
 *  - validate @muxdiv_offset before any allocation, so the gate struct
 *    cannot leak on the -EINVAL path;
 *  - free the gate if the divider allocation fails;
 *  - free both helper structs if clk_register_composite() fails.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* a fractional divider register is mandatory for this branch type */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	/* 16-bit numerator in the upper half-word, denominator below */
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);
	if (IS_ERR(clk)) {
		kfree(div);
		kfree(gate);
	}

	return clk;
}
/*
 * Register a fixed-factor branch clock (mult/div), optionally combined
 * with a gate.  With no gate (gate_offset == 0) a plain fixed-factor
 * clock is registered instead of a composite.
 */
static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk_fixed_factor *factor;
	struct clk_gate *gate;
	struct clk *clk;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0)
		return clk_register_fixed_factor(NULL, name, parent_names[0],
						 flags, mult, div);

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	factor = kzalloc(sizeof(*factor), GFP_KERNEL);
	if (!factor) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	factor->mult = mult;
	factor->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &factor->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		kfree(factor);
		kfree(gate);
	}

	return clk;
}
/*
 * Register the main oscillator as a gateable fixed-rate clock.
 *
 * The rate comes from the "clock-frequency" DT property; the gate bit
 * lives in the register mapped from the node.
 *
 * Fixes over the previous version: the of_iomap() result is now
 * checked (previously a NULL mapping was stored in gate->reg without
 * any validation), and the mapping is unmapped when registration
 * fails instead of being leaked.
 */
static void __init sun4i_osc_clk_setup(struct device_node *node)
{
	struct clk *clk;
	struct clk_fixed_rate *fixed;
	struct clk_gate *gate;
	const char *clk_name = node->name;
	void __iomem *reg;
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return;

	/* allocate fixed-rate and gate clock structs */
	fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
	if (!fixed)
		return;

	gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
	if (!gate)
		goto err_free_fixed;

	of_property_read_string(node, "clock-output-names", &clk_name);

	/* map the gate register; bail out instead of keeping a NULL reg */
	reg = of_iomap(node, 0);
	if (!reg)
		goto err_free_gate;

	/* set up gate and fixed rate properties */
	gate->reg = reg;
	gate->bit_idx = SUNXI_OSC24M_GATE;
	gate->lock = &hosc_lock;
	fixed->fixed_rate = rate;

	clk = clk_register_composite(NULL, clk_name,
			NULL, 0,
			NULL, NULL,
			&fixed->hw, &clk_fixed_rate_ops,
			&gate->hw, &clk_gate_ops, 0);
	if (IS_ERR(clk))
		goto err_unmap;

	of_clk_add_provider(node, of_clk_src_simple_get, clk);
	return;

err_unmap:
	iounmap(reg);
err_free_gate:
	kfree(gate);
err_free_fixed:
	kfree(fixed);
}
/*
 * Register one CCU branch clock: a gate at bit 0 of the branch register,
 * optionally preceded by a read-only 1-bit divider (bit 27) when the
 * branch has CCU_BRANCH_HAVE_DIV2.  Branches feeding the CPU or SDRAM
 * are enabled immediately.
 */
static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *branch,
						 void __iomem *reg_base,
						 const char *parent)
{
	struct clk_divider *div = NULL;
	struct clk_hw *div_hw = NULL;
	const struct clk_ops *div_ops = NULL;

	if (branch->flags & CCU_BRANCH_HAVE_DIV2) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			return;

		div->reg = branch->offset + reg_base;
		div->flags = CLK_DIVIDER_READ_ONLY;
		div->shift = 27;
		div->width = 1;

		div_hw = &div->hw;
		div_ops = &clk_divider_ops;
	}

	branch->gate.reg = branch->offset + reg_base;
	branch->gate.bit_idx = 0;

	branch->clk = clk_register_composite(NULL, branch->name, &parent, 1,
					     NULL, NULL,
					     div_hw, div_ops,
					     &branch->gate.hw,
					     &lpc18xx_ccu_gate_ops, 0);
	if (IS_ERR(branch->clk)) {
		kfree(div);
		pr_warn("%s: failed to register %s\n", __func__, branch->name);
		return;
	}

	/* Grab essential branch clocks for CPU and SDRAM */
	if (branch->offset == CLK_CPU_EMC || branch->offset == CLK_CPU_CORE ||
	    branch->offset == CLK_CPU_CREG || branch->offset == CLK_CPU_EMCDIV)
		clk_prepare_enable(branch->clk);
}
/*
 * Register every CKEN-based composite clock in @clks and add a clkdev
 * lookup for each.  The same hw is used for both the mux and rate parts
 * (the pxa cken ops share one clk_hw).  Always returns 0.
 */
int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nb_clks; idx++) {
		struct pxa_clk_cken *pclk = &clks[idx];

		pclk->gate.lock = &lock;
		clk = clk_register_composite(NULL, pclk->name,
					     pclk->parent_names, 2,
					     &pclk->hw, &cken_mux_ops,
					     &pclk->hw, &cken_rate_ops,
					     &pclk->gate.hw, &clk_gate_ops,
					     pclk->flags);
		clkdev_pxa_register(pclk->ckid, pclk->con_id,
				    pclk->dev_id, clk);
	}

	return 0;
}
/*
 * Register a composite clock (optional mux, divider and gate) described
 * by @clk_conf, with register parameters taken from its composite_conf.
 *
 * Fix: the gate register offset previously used div_parm.reg_off even
 * though the surrounding check reads gate_parm; it now uses
 * gate_parm.reg_off as intended.  All intermediate allocations are
 * released on any failure via the error label.
 */
static struct clk * __init
meson_clk_register_composite(const struct clk_conf *clk_conf,
			     void __iomem *clk_base)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	const struct clk_ops *mux_ops = NULL;
	const struct composite_conf *composite_conf;

	composite_conf = clk_conf->conf.composite;

	if (clk_conf->num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = clk_base + clk_conf->reg_off
				+ composite_conf->mux_parm.reg_off;
		mux->shift = composite_conf->mux_parm.shift;
		mux->mask = BIT(composite_conf->mux_parm.width) - 1;
		mux->flags = composite_conf->mux_flags;
		mux->lock = &clk_lock;
		mux->table = composite_conf->mux_table;
		/* read-only muxes must not be reparented by the CCF */
		mux_ops = (composite_conf->mux_flags & CLK_MUX_READ_ONLY) ?
			  &clk_mux_ro_ops : &clk_mux_ops;
	}

	if (MESON_PARM_APPLICABLE(&composite_conf->div_parm)) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			clk = ERR_PTR(-ENOMEM);
			goto error;
		}

		div->reg = clk_base + clk_conf->reg_off
				+ composite_conf->div_parm.reg_off;
		div->shift = composite_conf->div_parm.shift;
		div->width = composite_conf->div_parm.width;
		div->lock = &clk_lock;
		div->flags = composite_conf->div_flags;
		div->table = composite_conf->div_table;
	}

	if (MESON_PARM_APPLICABLE(&composite_conf->gate_parm)) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			clk = ERR_PTR(-ENOMEM);
			goto error;
		}

		/* was div_parm.reg_off: copy-paste error, use the gate's offset */
		gate->reg = clk_base + clk_conf->reg_off
				+ composite_conf->gate_parm.reg_off;
		gate->bit_idx = composite_conf->gate_parm.shift;
		gate->flags = composite_conf->gate_flags;
		gate->lock = &clk_lock;
	}

	clk = clk_register_composite(NULL, clk_conf->clk_name,
				     clk_conf->clks_parent,
				     clk_conf->num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, &clk_divider_ops,
				     gate ? &gate->hw : NULL, &clk_gate_ops,
				     clk_conf->flags);
	if (IS_ERR(clk))
		goto error;

	return clk;

error:
	kfree(gate);
	kfree(div);
	kfree(mux);

	return clk;
}
/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[div]-[gate]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 *
 * Fixes over the previous version: each allocation failure now frees
 * the structs allocated before it, and a failed clk_register_composite()
 * frees all three, instead of leaking them.
 */
static struct clk *amlogic_clk_register_branch(const char *name,
		const char **parent_names, u8 num_parents,
		void __iomem *base, int mux_offset, u8 mux_shift,
		u8 mux_width, u8 mux_flags, int div_offset, u8 div_shift,
		u8 div_width, u8 div_flags, struct clk_div_table *div_table,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + mux_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		/*
		 * NOTE(review): clk_mux_rw_ops is unusual (the common CCF
		 * name is clk_mux_ops) — confirm it is declared in this tree.
		 */
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ?
			  &clk_mux_ro_ops : &clk_mux_rw_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			kfree(mux);
			return ERR_PTR(-ENOMEM);
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			kfree(gate);
			kfree(mux);
			return ERR_PTR(-ENOMEM);
		}

		div->flags = div_flags;
		div->reg = base + div_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);
	if (IS_ERR(clk)) {
		kfree(div);
		kfree(gate);
		kfree(mux);
	}

	return clk;
}
/*
 * Register the sun7i A20 GMAC clock: a two-parent index-bit mux plus a
 * gate bit in the same register.  Also adds a clkdev lookup under the
 * clock's output name.  All resources are released on failure.
 */
static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
{
	const char *parents[SUN7I_A20_GMAC_PARENTS];
	const char *clk_name = node->name;
	struct clk_gate *gate;
	struct clk_mux *mux;
	void __iomem *reg;
	struct clk *clk;

	if (of_property_read_string(node, "clock-output-names", &clk_name))
		return;

	/* allocate mux and gate clock structs */
	mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
	if (!mux)
		return;

	gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
	if (!gate)
		goto err_free_mux;

	/* gmac clock requires exactly 2 parents */
	parents[0] = of_clk_get_parent_name(node, 0);
	parents[1] = of_clk_get_parent_name(node, 1);
	if (!parents[0] || !parents[1])
		goto err_free_gate;

	reg = of_iomap(node, 0);
	if (!reg)
		goto err_free_gate;

	/* set up gate and fixed rate properties */
	gate->reg = reg;
	gate->bit_idx = SUN7I_A20_GMAC_GPIT;
	gate->lock = &gmac_lock;
	mux->reg = reg;
	mux->mask = SUN7I_A20_GMAC_MASK;
	mux->flags = CLK_MUX_INDEX_BIT;
	mux->lock = &gmac_lock;

	clk = clk_register_composite(NULL, clk_name, parents,
				     SUN7I_A20_GMAC_PARENTS,
				     &mux->hw, &clk_mux_ops,
				     NULL, NULL,
				     &gate->hw, &clk_gate_ops, 0);
	if (IS_ERR(clk))
		goto err_unmap;

	of_clk_add_provider(node, of_clk_src_simple_get, clk);
	clk_register_clkdev(clk, clk_name, NULL);
	return;

err_unmap:
	iounmap(reg);
err_free_gate:
	kfree(gate);
err_free_mux:
	kfree(mux);
}
/*
 * Set up PLL2 and its four fixed-factor outputs (1x, 2x, 4x, 8x).
 *
 * The base PLL is registered as a composite of a multiplier and a gate,
 * fed by a registered pre-divider.  @post_div_offset adjusts the value
 * programmed into the hardware post-divider field (see the PLL2-1x
 * comment below).  On any failure, everything acquired so far is
 * released in reverse order and the function returns silently.
 */
static void __init sun4i_pll2_setup(struct device_node *node,
				    int post_div_offset)
{
	const char *clk_name = node->name, *parent;
	struct clk **clks, *base_clk, *prediv_clk;
	struct clk_onecell_data *clk_data;
	struct clk_multiplier *mult;
	struct clk_gate *gate;
	void __iomem *reg;
	u32 val;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg))
		return;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_unmap;

	clks = kcalloc(SUN4I_PLL2_OUTPUTS, sizeof(struct clk *), GFP_KERNEL);
	if (!clks)
		goto err_free_data;

	parent = of_clk_get_parent_name(node, 0);

	/* Pre-divider sits between the parent and the multiplier */
	prediv_clk = clk_register_divider(NULL, "pll2-prediv",
					  parent, 0, reg,
					  SUN4I_PLL2_PRE_DIV_SHIFT,
					  SUN4I_PLL2_PRE_DIV_WIDTH,
					  CLK_DIVIDER_ONE_BASED |
					  CLK_DIVIDER_ALLOW_ZERO,
					  &sun4i_a10_pll2_lock);
	if (IS_ERR(prediv_clk)) {
		pr_err("Couldn't register the prediv clock\n");
		goto err_free_array;
	}

	/* Setup the gate part of the PLL2 */
	gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
	if (!gate)
		goto err_unregister_prediv;

	gate->reg = reg;
	gate->bit_idx = SUN4I_PLL2_ENABLE;
	gate->lock = &sun4i_a10_pll2_lock;

	/* Setup the multiplier part of the PLL2 */
	mult = kzalloc(sizeof(struct clk_multiplier), GFP_KERNEL);
	if (!mult)
		goto err_free_gate;

	mult->reg = reg;
	mult->shift = SUN4I_PLL2_N_SHIFT;
	mult->width = 7;
	mult->flags = CLK_MULTIPLIER_ZERO_BYPASS |
		      CLK_MULTIPLIER_ROUND_CLOSEST;
	mult->lock = &sun4i_a10_pll2_lock;

	/* The base PLL is prediv -> multiplier, gated by the enable bit */
	parent = __clk_get_name(prediv_clk);
	base_clk = clk_register_composite(NULL, "pll2-base",
					  &parent, 1,
					  NULL, NULL,
					  &mult->hw, &clk_multiplier_ops,
					  &gate->hw, &clk_gate_ops,
					  CLK_SET_RATE_PARENT);
	if (IS_ERR(base_clk)) {
		pr_err("Couldn't register the base multiplier clock\n");
		goto err_free_multiplier;
	}

	parent = __clk_get_name(base_clk);

	/*
	 * PLL2-1x
	 *
	 * This is supposed to have a post divider, but we won't need
	 * to use it, we just need to initialise it to 4, and use a
	 * fixed divider.
	 */
	val = readl(reg);
	val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
	val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) <<
		SUN4I_PLL2_POST_DIV_SHIFT;
	writel(val, reg);

	of_property_read_string_index(node, "clock-output-names",
				      SUN4I_A10_PLL2_1X, &clk_name);
	clks[SUN4I_A10_PLL2_1X] =
		clk_register_fixed_factor(NULL, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1,
					  SUN4I_PLL2_POST_DIV_VALUE);
	WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_1X]));

	/*
	 * PLL2-2x
	 *
	 * This clock doesn't use the post divider, and really is just
	 * a fixed divider from the PLL2 base clock.
	 */
	of_property_read_string_index(node, "clock-output-names",
				      SUN4I_A10_PLL2_2X, &clk_name);
	clks[SUN4I_A10_PLL2_2X] =
		clk_register_fixed_factor(NULL, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 2);
	WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_2X]));

	/* PLL2-4x */
	of_property_read_string_index(node, "clock-output-names",
				      SUN4I_A10_PLL2_4X, &clk_name);
	clks[SUN4I_A10_PLL2_4X] =
		clk_register_fixed_factor(NULL, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 1);
	WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_4X]));

	/* PLL2-8x */
	of_property_read_string_index(node, "clock-output-names",
				      SUN4I_A10_PLL2_8X, &clk_name);
	clks[SUN4I_A10_PLL2_8X] =
		clk_register_fixed_factor(NULL, clk_name, parent,
					  CLK_SET_RATE_PARENT, 2, 1);
	WARN_ON(IS_ERR(clks[SUN4I_A10_PLL2_8X]));

	/* Expose all four outputs through a onecell provider */
	clk_data->clks = clks;
	clk_data->clk_num = SUN4I_PLL2_OUTPUTS;
	of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);

	return;

err_free_multiplier:
	kfree(mult);
err_free_gate:
	kfree(gate);
err_unregister_prediv:
	clk_unregister_divider(prediv_clk);
err_free_array:
	kfree(clks);
err_free_data:
	kfree(clk_data);
err_unmap:
	iounmap(reg);
}
/*
 * Register a fractional-divider branch clock whose gate, divider and
 * optional child mux all live in one rockchip_clk_frac container, so a
 * single kfree() releases everything if registration fails.
 *
 * When @child is given (must be a mux branch), a mux clock is registered
 * on top and, if this fractional clock is one of the mux's parents, a
 * rate-change notifier is installed on the fractional clock.
 */
static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* a fractional divider register is mandatory for this branch type */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		/* gate is embedded in frac, no separate allocation needed */
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* 16-bit numerator in the upper half-word, denominator below */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* find where this fractional clock appears among the mux parents */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		/* init only needs to live until clk_register() returns */
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		/*
		 * On mux failure the already-registered fractional clock is
		 * still returned; only the child lookup/notifier are skipped.
		 */
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
/*
 * Register a MediaTek composite clock (optional mux, divider and gate)
 * from the static description in @mc.  A negative shift in @mc disables
 * the corresponding component; without a mux the single parent from
 * mc->parent is used.
 *
 * Fixes over the previous version: the err_out path now also frees the
 * gate (it leaked when the divider allocation failed), and a failed
 * clk_register_composite() now frees the divider as well.
 */
struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
		void __iomem *base, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL;
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
			     *div_ops = NULL;
	const char * const *parent_names;
	const char *parent;
	int num_parents;
	int ret;

	if (mc->mux_shift >= 0) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + mc->mux_reg;
		mux->mask = BIT(mc->mux_width) - 1;
		mux->shift = mc->mux_shift;
		mux->lock = lock;

		mux_hw = &mux->hw;
		mux_ops = &clk_mux_ops;

		parent_names = mc->parent_names;
		num_parents = mc->num_parents;
	} else {
		parent = mc->parent;
		parent_names = &parent;
		num_parents = 1;
	}

	if (mc->gate_shift >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_out;
		}

		gate->reg = base + mc->gate_reg;
		gate->bit_idx = mc->gate_shift;
		/* setting the bit disables the clock on this IP */
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = lock;

		gate_hw = &gate->hw;
		gate_ops = &clk_gate_ops;
	}

	if (mc->divider_shift >= 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_out;
		}

		div->reg = base + mc->divider_reg;
		div->shift = mc->divider_shift;
		div->width = mc->divider_width;
		div->lock = lock;

		div_hw = &div->hw;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, mc->name, parent_names,
				     num_parents,
				     mux_hw, mux_ops,
				     div_hw, div_ops,
				     gate_hw, gate_ops,
				     mc->flags);
	if (IS_ERR(clk)) {
		kfree(div);
		kfree(gate);
		kfree(mux);
	}

	return clk;

err_out:
	kfree(gate);
	kfree(mux);

	return ERR_PTR(ret);
}
/*
 * Register the sun8i A23 MBUS clock: a mux, a divider and an enable
 * gate sharing one register.  The clock is exported as a DT provider
 * and then permanently enabled, since the memory bus must never stop.
 * On failure every acquired resource is released in reverse order,
 * including the memory region requested by of_io_request_and_map().
 */
static void __init sun8i_a23_mbus_setup(struct device_node *node)
{
	int num_parents = of_clk_get_parent_count(node);
	const char **parents;
	const char *clk_name = node->name;
	struct resource res;
	struct clk_divider *div;
	struct clk_gate *gate;
	struct clk_mux *mux;
	struct clk *clk;
	void __iomem *reg;
	int err;

	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	if (!parents)
		return;

	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(reg)) {
		pr_err("Could not get registers for sun8i-mbus-clk\n");
		goto err_free_parents;
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto err_unmap;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto err_free_div;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto err_free_mux;

	of_property_read_string(node, "clock-output-names", &clk_name);
	of_clk_parent_fill(node, parents, num_parents);

	gate->reg = reg;
	gate->bit_idx = SUN8I_MBUS_ENABLE;
	gate->lock = &sun8i_a23_mbus_lock;

	div->reg = reg;
	div->shift = SUN8I_MBUS_DIV_SHIFT;
	div->width = SUN8I_MBUS_DIV_WIDTH;
	div->lock = &sun8i_a23_mbus_lock;

	mux->reg = reg;
	mux->shift = SUN8I_MBUS_MUX_SHIFT;
	mux->mask = SUN8I_MBUS_MUX_MASK;
	mux->lock = &sun8i_a23_mbus_lock;

	clk = clk_register_composite(NULL, clk_name, parents, num_parents,
				     &mux->hw, &clk_mux_ops,
				     &div->hw, &clk_divider_ops,
				     &gate->hw, &clk_gate_ops,
				     0);
	if (IS_ERR(clk))
		goto err_free_gate;

	err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	if (err)
		goto err_unregister_clk;

	kfree(parents); /* parents is deep copied */

	/* The MBUS clocks needs to be always enabled */
	__clk_get(clk);
	clk_prepare_enable(clk);

	return;

err_unregister_clk:
	/* TODO: The composite clock stuff will leak a bit here. */
	clk_unregister(clk);
err_free_gate:
	kfree(gate);
err_free_mux:
	kfree(mux);
err_free_div:
	kfree(div);
err_unmap:
	iounmap(reg);
	/* also release the region claimed by of_io_request_and_map() */
	of_address_to_resource(node, 0, &res);
	release_mem_region(res.start, resource_size(&res));
err_free_parents:
	kfree(parents);
}