/*
 * Allocate and register an NPCM7xx PLL clock.  @pllcon is the mapped PLL
 * control register; the PLL has exactly one parent.  Returns the registered
 * clk_hw on success or an ERR_PTR on failure.  init lives on the stack:
 * clk_hw_register() copies what it needs before returning.
 */
static struct clk_hw *
npcm7xx_clk_register_pll(void __iomem *pllcon, const char *name,
			 const char *parent_name, unsigned long flags)
{
	struct npcm7xx_clk_pll *pll;
	struct clk_init_data init;
	int err;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pr_debug("%s reg, name=%s, p=%s\n", __func__, name, parent_name);

	init.name = name;
	init.ops = &npcm7xx_clk_pll_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = flags;

	pll->pllcon = pllcon;
	pll->hw.init = &init;

	err = clk_hw_register(NULL, &pll->hw);
	if (err) {
		kfree(pll);
		return ERR_PTR(err);
	}

	return &pll->hw;
}
/*
 * Register a PLL clock described only by a bare clk_hw and caller-supplied
 * ops.  With no parent name the clock is registered as a root clock.
 * Returns the clk_hw on success or an ERR_PTR on failure.
 */
struct clk_hw *__init clk_hw_register_pll(struct device *dev, const char *name,
		const char *parent_name, const struct clk_ops *ops,
		unsigned long flags)
{
	struct clk_init_data init;
	struct clk_hw *hw;
	int err;

	/* allocate the bare clk_hw for this PLL */
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = ops;
	init.flags = flags | CLK_IS_BASIC;
	if (parent_name) {
		init.parent_names = &parent_name;
		init.num_parents = 1;
	} else {
		init.parent_names = NULL;
		init.num_parents = 0;
	}
	hw->init = &init;

	/* register the clock */
	err = clk_hw_register(dev, hw);
	if (err) {
		kfree(hw);
		return ERR_PTR(err);
	}

	return hw;
}
/** * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with * the clock framework * @dev: device that is registering this clock * @name: name of this clock * @parent_name: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate * @fixed_accuracy: non-adjustable clock rate */ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate, unsigned long fixed_accuracy) { struct clk_fixed_rate *fixed; struct clk_hw *hw; struct clk_init_data init; int ret; /* allocate fixed-rate clock */ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL); if (!fixed) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &clk_fixed_rate_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); /* struct clk_fixed_rate assignments */ fixed->fixed_rate = fixed_rate; fixed->fixed_accuracy = fixed_accuracy; fixed->hw.init = &init; /* register the clock */ hw = &fixed->hw; ret = clk_hw_register(dev, hw); if (ret) { kfree(fixed); hw = ERR_PTR(ret); } return hw; }
/*
 * Parse an H8S/2678 PLL clock node from the device tree and register it.
 * Maps two registers (divide control at index 0, multiply control at
 * index 1) and registers a single-parent clock.  On any failure all
 * acquired resources are released via the goto-cleanup chain and the
 * function returns silently (registration is best-effort at init time).
 */
static void __init h8s2678_pll_clk_setup(struct device_node *node)
{
	unsigned int num_parents;
	const char *clk_name = node->name;
	const char *parent_name;
	struct pll_clock *pll_clock;
	struct clk_init_data init;
	int ret;

	num_parents = of_clk_get_parent_count(node);
	if (!num_parents) {
		/* kernel log messages must be newline-terminated */
		pr_err("%s: no parent found\n", clk_name);
		return;
	}

	pll_clock = kzalloc(sizeof(*pll_clock), GFP_KERNEL);
	if (!pll_clock)
		return;

	pll_clock->sckcr = of_iomap(node, 0);
	if (pll_clock->sckcr == NULL) {
		pr_err("%s: failed to map divide register\n", clk_name);
		goto free_clock;
	}

	pll_clock->pllcr = of_iomap(node, 1);
	if (pll_clock->pllcr == NULL) {
		pr_err("%s: failed to map multiply register\n", clk_name);
		goto unmap_sckcr;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init.name = clk_name;
	init.ops = &pll_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	pll_clock->hw.init = &init;

	ret = clk_hw_register(NULL, &pll_clock->hw);
	if (ret) {
		pr_err("%s: failed to register %s div clock (%d)\n",
		       __func__, clk_name, ret);
		goto unmap_pllcr;
	}

	of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clock->hw);
	return;

unmap_pllcr:
	iounmap(pll_clock->pllcr);
unmap_sckcr:
	iounmap(pll_clock->sckcr);
free_clock:
	kfree(pll_clock);
}
/*
 * Register one SAM9x60 PLL identified by @id (bounded by PLL_MAX_ID).
 * The current divider/multiplier settings are read back from the PMC via
 * @regmap so the framework starts with the hardware's real state.
 * Returns the registered clk_hw or an ERR_PTR on failure.
 */
struct clk_hw * __init
sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
			 const char *name, const char *parent_name, u8 id,
			 const struct clk_pll_characteristics *characteristics)
{
	struct sam9x60_pll *pll;
	struct clk_init_data init;
	unsigned int ctrl;
	int err;

	if (id > PLL_MAX_ID)
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &pll_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = CLK_SET_RATE_GATE;

	pll->id = id;
	pll->hw.init = &init;
	pll->characteristics = characteristics;
	pll->regmap = regmap;
	pll->lock = lock;

	/* select this PLL, then read back its current div/mul settings */
	regmap_write(regmap, PMC_PLL_UPDT, id);
	regmap_read(regmap, PMC_PLL_CTRL0, &ctrl);
	pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, ctrl);
	regmap_read(regmap, PMC_PLL_CTRL1, &ctrl);
	pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, ctrl);

	err = clk_hw_register(NULL, &pll->hw);
	if (err) {
		kfree(pll);
		return ERR_PTR(err);
	}

	return &pll->hw;
}
/**
 * clk_hw_register_gate - register a gate clock with the clock framework
 * @dev: device that is registering this clock
 * @name: name of this clock
 * @parent_name: name of this clock's parent
 * @flags: framework-specific flags for this clock
 * @reg: register address to control gating of this clock
 * @bit_idx: which bit in the register controls gating of this clock
 * @clk_gate_flags: gate-specific flags for this clock
 * @lock: shared register lock for this clock
 */
struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_gate *gate;
	struct clk_init_data init;
	int err;

	/* hi-word mask registers only have 16 usable control bits */
	if ((clk_gate_flags & CLK_GATE_HIWORD_MASK) && bit_idx > 15) {
		pr_err("gate bit exceeds LOWORD field\n");
		return ERR_PTR(-EINVAL);
	}

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_gate_ops;
	init.flags = flags | CLK_IS_BASIC;
	if (parent_name) {
		init.parent_names = &parent_name;
		init.num_parents = 1;
	} else {
		init.parent_names = NULL;
		init.num_parents = 0;
	}

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	err = clk_hw_register(dev, &gate->hw);
	if (err) {
		kfree(gate);
		return ERR_PTR(err);
	}

	return &gate->hw;
}
/*
 * Register an i.MX SCCG PLL.  @parent selects the initial parent among
 * @parent_names; @bypass1/@bypass2 are the bypass mux select values and
 * @base is the mapped PLL register block.  Returns the struct clk on
 * success or an ERR_PTR on failure.
 *
 * Fix: the original assigned pll->base twice; the redundant second
 * assignment is removed.
 */
struct clk *imx_clk_sccg_pll(const char *name,
				const char * const *parent_names,
				u8 num_parents,
				u8 parent, u8 bypass1, u8 bypass2,
				void __iomem *base,
				unsigned long flags)
{
	struct clk_sccg_pll *pll;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->parent = parent;
	pll->bypass1 = bypass1;
	pll->bypass2 = bypass2;
	pll->base = base;

	init.name = name;
	init.ops = &clk_sccg_pll_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	pll->hw.init = &init;

	hw = &pll->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw->clk;
}
static struct clk_hw * __clk_hw_register_fixed_factor(struct device *dev, struct device_node *np, const char *name, const char *parent_name, int index, unsigned long flags, unsigned int mult, unsigned int div) { struct clk_fixed_factor *fix; struct clk_init_data init = { }; struct clk_parent_data pdata = { .index = index }; struct clk_hw *hw; int ret; fix = kmalloc(sizeof(*fix), GFP_KERNEL); if (!fix) return ERR_PTR(-ENOMEM); /* struct clk_fixed_factor assignments */ fix->mult = mult; fix->div = div; fix->hw.init = &init; init.name = name; init.ops = &clk_fixed_factor_ops; init.flags = flags; if (parent_name) init.parent_names = &parent_name; else init.parent_data = &pdata; init.num_parents = 1; hw = &fix->hw; if (dev) ret = clk_hw_register(dev, hw); else ret = of_clk_hw_register(np, hw); if (ret) { kfree(fix); hw = ERR_PTR(ret); } return hw; }
/*
 * Register a fractional divider clock.  The rate is scaled by m/n, where
 * m occupies @mwidth bits at @mshift and n occupies @nwidth bits at
 * @nshift of register @reg; @lock serializes register access.  A NULL
 * @parent_name registers a root clock.  Returns the clk_hw or an ERR_PTR.
 */
struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
		u8 clk_divider_flags, spinlock_t *lock)
{
	struct clk_fractional_divider *fd;
	struct clk_init_data init;
	int err;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_fractional_divider_ops;
	init.flags = flags | CLK_IS_BASIC;
	if (parent_name) {
		init.parent_names = &parent_name;
		init.num_parents = 1;
	} else {
		init.parent_names = NULL;
		init.num_parents = 0;
	}

	fd->reg = reg;
	fd->mshift = mshift;
	fd->mwidth = mwidth;
	fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
	fd->nshift = nshift;
	fd->nwidth = nwidth;
	fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
	fd->flags = clk_divider_flags;
	fd->lock = lock;
	fd->hw.init = &init;

	err = clk_hw_register(dev, &fd->hw);
	if (err) {
		kfree(fd);
		return ERR_PTR(err);
	}

	return &fd->hw;
}
static int kona_clk_setup(struct kona_clk *bcm_clk) { int ret; struct clk_init_data *init_data = &bcm_clk->init_data; switch (bcm_clk->type) { case bcm_clk_peri: ret = peri_clk_setup(bcm_clk->u.data, init_data); if (ret) return ret; break; default: pr_err("%s: clock type %d invalid for %s\n", __func__, (int)bcm_clk->type, init_data->name); return -EINVAL; } /* Make sure everything makes sense before we set it up */ if (!kona_clk_valid(bcm_clk)) { pr_err("%s: clock data invalid for %s\n", __func__, init_data->name); ret = -EINVAL; goto out_teardown; } bcm_clk->hw.init = init_data; ret = clk_hw_register(NULL, &bcm_clk->hw); if (ret) { pr_err("%s: error registering clock %s (%d)\n", __func__, init_data->name, ret); goto out_teardown; } return 0; out_teardown: bcm_clk_teardown(bcm_clk); return ret; }
/*
 * clk_hw_register_composite - build one clock out of optional mux, rate
 * and gate components.  Each component contributes its ops to the
 * composite's own clk_ops table (composite->ops), which is populated
 * field by field below and then installed as init.ops.  Any component
 * pair (hw + ops) may be NULL to omit that capability.  Returns the
 * registered clk_hw or an ERR_PTR on failure.
 */
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
			const char * const *parent_names, int num_parents,
			struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
			struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
			struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
			unsigned long flags)
{
	struct clk_hw *hw;
	struct clk_init_data init;
	struct clk_composite *composite;
	struct clk_ops *clk_composite_ops;
	int ret;

	composite = kzalloc(sizeof(*composite), GFP_KERNEL);
	if (!composite)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	hw = &composite->hw;

	/* the ops table lives inside the composite and is filled per component */
	clk_composite_ops = &composite->ops;

	if (mux_hw && mux_ops) {
		/* a mux component must at least report its current parent */
		if (!mux_ops->get_parent) {
			hw = ERR_PTR(-EINVAL);
			goto err;
		}

		composite->mux_hw = mux_hw;
		composite->mux_ops = mux_ops;
		clk_composite_ops->get_parent = clk_composite_get_parent;
		if (mux_ops->set_parent)
			clk_composite_ops->set_parent = clk_composite_set_parent;
		if (mux_ops->determine_rate)
			clk_composite_ops->determine_rate = clk_composite_determine_rate;
	}

	if (rate_hw && rate_ops) {
		/* a rate component must at least report its current rate */
		if (!rate_ops->recalc_rate) {
			hw = ERR_PTR(-EINVAL);
			goto err;
		}
		clk_composite_ops->recalc_rate = clk_composite_recalc_rate;

		/* determine_rate takes precedence over round_rate */
		if (rate_ops->determine_rate)
			clk_composite_ops->determine_rate =
				clk_composite_determine_rate;
		else if (rate_ops->round_rate)
			clk_composite_ops->round_rate =
				clk_composite_round_rate;

		/* .set_rate requires either .round_rate or .determine_rate */
		if (rate_ops->set_rate) {
			if (rate_ops->determine_rate || rate_ops->round_rate)
				clk_composite_ops->set_rate =
						clk_composite_set_rate;
			else
				WARN(1, "%s: missing round_rate op is required\n",
						__func__);
		}

		composite->rate_hw = rate_hw;
		composite->rate_ops = rate_ops;
	}

	/* atomic rate+parent changes need both components to support writes */
	if (mux_hw && mux_ops && rate_hw && rate_ops) {
		if (mux_ops->set_parent && rate_ops->set_rate)
			clk_composite_ops->set_rate_and_parent =
			clk_composite_set_rate_and_parent;
	}

	if (gate_hw && gate_ops) {
		/* a gate component needs the full enable/disable/query trio */
		if (!gate_ops->is_enabled || !gate_ops->enable ||
		    !gate_ops->disable) {
			hw = ERR_PTR(-EINVAL);
			goto err;
		}

		composite->gate_hw = gate_hw;
		composite->gate_ops = gate_ops;
		clk_composite_ops->is_enabled = clk_composite_is_enabled;
		clk_composite_ops->enable = clk_composite_enable;
		clk_composite_ops->disable = clk_composite_disable;
	}

	init.ops = clk_composite_ops;
	composite->hw.init = &init;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		hw = ERR_PTR(ret);
		goto err;
	}

	/* point each component back at the registered clk */
	if (composite->mux_hw)
		composite->mux_hw->clk = hw->clk;

	if (composite->rate_hw)
		composite->rate_hw->clk = hw->clk;

	if (composite->gate_hw)
		composite->gate_hw->clk = hw->clk;

	return hw;

err:
	kfree(composite);
	return hw;
}