/*
 * rk808_clkout_probe() - register the RK808 PMIC's two clock outputs.
 *
 * Registers "rk808-clkout1" and "rk808-clkout2" (names optionally
 * overridden via the "clock-output-names" DT property) and exposes them
 * through a onecell OF clock provider.
 *
 * NOTE: a single stack-allocated clk_init_data is reused for both
 * registrations.  This relies on the clk core copying the init data
 * inside devm_clk_register(), so the .name/.ops overwrites for clkout2
 * must stay *after* clkout1 has been registered — do not reorder.
 *
 * Returns 0 on success or a negative errno.
 */
static int rk808_clkout_probe(struct platform_device *pdev)
{
	/* The MFD parent owns the rk808 state; I2C client gives us the dev. */
	struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
	struct i2c_client *client = rk808->i2c;
	struct device_node *node = client->dev.of_node;
	struct clk_init_data init = {};
	struct clk **clk_table;
	struct rk808_clkout *rk808_clkout;

	rk808_clkout = devm_kzalloc(&client->dev, sizeof(*rk808_clkout),
				    GFP_KERNEL);
	if (!rk808_clkout)
		return -ENOMEM;

	rk808_clkout->rk808 = rk808;

	clk_table = devm_kcalloc(&client->dev, RK808_NR_OUTPUT,
				 sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		return -ENOMEM;

	/* Both outputs are root clocks: no parents. */
	init.parent_names = NULL;
	init.num_parents = 0;
	init.name = "rk808-clkout1";
	init.ops = &rk808_clkout1_ops;
	rk808_clkout->clkout1_hw.init = &init;

	/* optional override of the clockname */
	of_property_read_string_index(node, "clock-output-names", 0,
				      &init.name);

	clk_table[0] = devm_clk_register(&client->dev,
					 &rk808_clkout->clkout1_hw);
	if (IS_ERR(clk_table[0]))
		return PTR_ERR(clk_table[0]);

	/* Reuse `init` for the second output (safe: core copied it above). */
	init.name = "rk808-clkout2";
	init.ops = &rk808_clkout2_ops;
	rk808_clkout->clkout2_hw.init = &init;

	/* optional override of the clockname */
	of_property_read_string_index(node, "clock-output-names", 1,
				      &init.name);

	clk_table[1] = devm_clk_register(&client->dev,
					 &rk808_clkout->clkout2_hw);
	if (IS_ERR(clk_table[1]))
		return PTR_ERR(clk_table[1]);

	rk808_clkout->clk_data.clks = clk_table;
	rk808_clkout->clk_data.clk_num = RK808_NR_OUTPUT;

	return of_clk_add_provider(node, of_clk_src_onecell_get,
				   &rk808_clkout->clk_data);
}
int msm_hdmi_pll_8960_init(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct hdmi_pll_8960 *pll; struct clk *clk; int i; /* sanity check: */ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate)) return -EINVAL; pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); if (!pll) return -ENOMEM; pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL"); if (IS_ERR(pll->mmio)) { dev_err(dev, "failed to map pll base\n"); return -ENOMEM; } pll->pdev = pdev; pll->clk_hw.init = &pll_init; clk = devm_clk_register(dev, &pll->clk_hw); if (IS_ERR(clk)) { dev_err(dev, "failed to register pll clock\n"); return -EINVAL; } return 0; }
static struct clk * scpi_clk_ops_init(struct device *dev, const struct of_device_id *match, struct scpi_clk *sclk, const char *name) { struct clk_init_data init; struct clk *clk; unsigned long min = 0, max = 0; init.name = name; init.flags = CLK_IS_ROOT; init.num_parents = 0; init.ops = match->data; sclk->hw.init = &init; sclk->scpi_ops = get_scpi_ops(); if (init.ops == &scpi_dvfs_ops) { sclk->info = sclk->scpi_ops->dvfs_get_info(sclk->id); if (IS_ERR(sclk->info)) return NULL; } else if (init.ops == &scpi_clk_ops) { if (sclk->scpi_ops->clk_get_range(sclk->id, &min, &max) || !max) return NULL; } else { return NULL; } clk = devm_clk_register(dev, &sclk->hw); if (!IS_ERR(clk) && max) clk_hw_set_rate_range(&sclk->hw, min, max); return clk; }
/*
 * ti_adpll_init_clkout() - register one ADPLL output clock.
 * @d:		ADPLL instance data
 * @index:	internal clock index to store the result under
 * @output_index: index into "clock-output-names" for this output
 * @gate_bit:	CLKCTRL gate bit, or 0 when the output cannot be gated
 * @name:	human-readable name used only in the error message
 * @clk0:	first parent clock
 * @clk1:	second parent clock
 *
 * Builds a per-output clk_ops table at runtime: every output gets mux
 * (get_parent/determine_rate) callbacks, and gate callbacks are added
 * only when @gate_bit is non-zero.  The ops struct must be allocated
 * (devm) rather than static because each output may get a different
 * callback set.
 *
 * Returns 0 on success or a negative errno.
 */
static int ti_adpll_init_clkout(struct ti_adpll_data *d,
				enum ti_adpll_clocks index,
				int output_index, int gate_bit,
				char *name, struct clk *clk0,
				struct clk *clk1)
{
	struct ti_adpll_clkout_data *co;
	struct clk_init_data init;
	struct clk_ops *ops;
	const char *parent_names[2];
	const char *child_name;
	struct clk *clock;
	int err;

	co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
	if (!co)
		return -ENOMEM;
	co->adpll = d;

	/* The output's clock name comes from DT and is mandatory. */
	err = of_property_read_string_index(d->np,
					    "clock-output-names",
					    output_index,
					    &child_name);
	if (err)
		return err;

	ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	init.name = child_name;
	init.ops = ops;
	init.flags = 0;
	co->hw.init = &init;
	parent_names[0] = __clk_get_name(clk0);
	parent_names[1] = __clk_get_name(clk1);
	init.parent_names = parent_names;
	init.num_parents = 2;

	/* All outputs behave as a two-parent mux. */
	ops->get_parent = ti_adpll_clkout_get_parent;
	ops->determine_rate = __clk_mux_determine_rate;

	/* Gate support only for outputs with a CLKCTRL gate bit. */
	if (gate_bit) {
		co->gate.lock = &d->lock;
		co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
		co->gate.bit_idx = gate_bit;
		ops->enable = ti_adpll_clkout_enable;
		ops->disable = ti_adpll_clkout_disable;
		ops->is_enabled = ti_adpll_clkout_is_enabled;
	}

	clock = devm_clk_register(d->dev, &co->hw);
	if (IS_ERR(clock)) {
		dev_err(d->dev, "failed to register output %s: %li\n",
			name, PTR_ERR(clock));
		return PTR_ERR(clock);
	}

	return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
				    NULL);
}
/*
 * ti_adpll_init_dco() - register the ADPLL DCO (digitally controlled
 * oscillator) clock and its internal N2 input divider.
 *
 * When the DCO itself is not one of the DT-named outputs
 * (output_index < 0) the generated clock name gets a "dco" postfix;
 * otherwise the DT-provided output name is used as-is.
 *
 * Returns 0 on success or a negative errno.
 */
static int ti_adpll_init_dco(struct ti_adpll_data *d)
{
	struct clk_init_data init;
	struct clk *clock;
	const char *postfix;
	int width, err;

	d->outputs.clks = devm_kcalloc(d->dev, MAX_ADPLL_OUTPUTS,
				       sizeof(struct clk *),
				       GFP_KERNEL);
	if (!d->outputs.clks)
		return -ENOMEM;

	if (d->c->output_index < 0)
		postfix = "dco";
	else
		postfix = NULL;

	init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix);
	if (!init.name)
		return -EINVAL;

	init.parent_names = d->parent_names;
	init.num_parents = d->c->nr_max_inputs;
	init.ops = &ti_adpll_ops;
	/* Rate depends on hardware state, so never cache it. */
	init.flags = CLK_GET_RATE_NOCACHE;
	d->dco.hw.init = &init;

	/* Type-S PLLs have a wider (5-bit) N2 divider field. */
	if (d->c->is_type_s)
		width = 5;
	else
		width = 4;

	/* Internal input clock divider N2 */
	err = ti_adpll_init_divider(d, TI_ADPLL_N2, -ENODEV, "n2",
				    d->parent_clocks[TI_ADPLL_CLKINP],
				    d->regs + ADPLL_MN2DIV_OFFSET,
				    ADPLL_MN2DIV_N2, width, 0);
	if (err)
		return err;

	clock = devm_clk_register(d->dev, &d->dco.hw);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO,
				    d->c->output_index, init.name, NULL);
}
/*
 * hdmi_phy_8960_init() - allocate and initialize the 8960 HDMI PHY.
 *
 * When the common clock framework is available, also registers the PHY
 * PLL; on PLL registration failure the partially constructed PHY is torn
 * down via hdmi_phy_8960_destroy() (pll is cleared first so destroy does
 * not touch a half-registered clock).
 *
 * Returns the PHY on success or an ERR_PTR() on failure.
 */
struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
	struct hdmi_phy_8960 *phy_8960;
	struct hdmi_phy *phy = NULL;
	int ret;

#ifdef CONFIG_COMMON_CLK
	int i;

	/* sanity check: freqtbl must be sorted highest-rate first */
	for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
		if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
			return ERR_PTR(-EINVAL);
#endif

	phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
	if (!phy_8960) {
		ret = -ENOMEM;
		goto fail;
	}

	phy = &phy_8960->base;

	phy->funcs = &hdmi_phy_8960_funcs;

	phy_8960->hdmi = hdmi;

#ifdef CONFIG_COMMON_CLK
	phy_8960->pll_hw.init = &pll_init;
	phy_8960->pll = devm_clk_register(&hdmi->pdev->dev,
					  &phy_8960->pll_hw);
	if (IS_ERR(phy_8960->pll)) {
		ret = PTR_ERR(phy_8960->pll);
		/* clear before destroy so teardown skips the pll */
		phy_8960->pll = NULL;
		goto fail;
	}
#endif

	return phy;

fail:
	if (phy)
		hdmi_phy_8960_destroy(phy);
	return ERR_PTR(ret);
}
/* * The SD/eMMC IP block has an internal mux and divider used for * generating the MMC clock. Use the clock framework to create and * manage these clocks. */ static int meson_mmc_clk_init(struct meson_host *host) { struct clk_init_data init; char clk_name[32]; int i, ret = 0; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; unsigned int mux_parent_count = 0; const char *clk_div_parents[1]; u32 clk_reg, cfg; /* get the mux parents */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { char name[16]; snprintf(name, sizeof(name), "clkin%d", i); host->mux_parent[i] = devm_clk_get(host->dev, name); if (IS_ERR(host->mux_parent[i])) { ret = PTR_ERR(host->mux_parent[i]); if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER) dev_err(host->dev, "Missing clock %s\n", name); host->mux_parent[i] = NULL; return ret; } mux_parent_names[i] = __clk_get_name(host->mux_parent[i]); mux_parent_count++; } /* create the mux */ snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); init.name = clk_name; init.ops = &clk_mux_ops; init.flags = 0; init.parent_names = mux_parent_names; init.num_parents = mux_parent_count; host->mux.reg = host->regs + SD_EMMC_CLOCK; host->mux.shift = CLK_SRC_SHIFT; host->mux.mask = CLK_SRC_MASK; host->mux.flags = 0; host->mux.table = NULL; host->mux.hw.init = &init; host->mux_clk = devm_clk_register(host->dev, &host->mux.hw); if (WARN_ON(IS_ERR(host->mux_clk))) return PTR_ERR(host->mux_clk); /* create the divider */ snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL); init.ops = &clk_divider_ops; init.flags = CLK_SET_RATE_PARENT; clk_div_parents[0] = __clk_get_name(host->mux_clk); init.parent_names = clk_div_parents; init.num_parents = ARRAY_SIZE(clk_div_parents); host->cfg_div.reg = host->regs + SD_EMMC_CLOCK; host->cfg_div.shift = CLK_DIV_SHIFT; host->cfg_div.width = CLK_DIV_WIDTH; host->cfg_div.hw.init = &init; host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | 
CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO; host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw); if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk))) return PTR_ERR(host->cfg_div_clk); /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ clk_reg = 0; clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT; clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT; clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT; clk_reg &= ~CLK_ALWAYS_ON; writel(clk_reg, host->regs + SD_EMMC_CLOCK); /* Ensure clock starts in "auto" mode, not "always on" */ cfg = readl(host->regs + SD_EMMC_CFG); cfg &= ~CFG_CLK_ALWAYS_ON; cfg |= CFG_AUTO_CLK; writel(cfg, host->regs + SD_EMMC_CFG); ret = clk_prepare_enable(host->cfg_div_clk); if (ret) return ret; /* Get the nearest minimum clock to 400KHz */ host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000); ret = meson_mmc_clk_set(host, host->mmc->f_min); if (!ret) clk_disable_unprepare(host->cfg_div_clk); return ret; }
/*
 * mtk_hdmi_phy_probe() - probe the MT8173 HDMI PHY.
 *
 * Maps the PHY registers, registers the HDMI PLL with the common clock
 * framework (clock name taken from "clock-output-names", parent taken
 * from the "pll_ref" clock), reads the bias/impedance tuning values from
 * DT, creates the PHY and exposes the PLL through an OF clock provider.
 *
 * NOTE: clk_init.parent_names points at the stack variable ref_clk_name;
 * this relies on devm_clk_register() copying the parent names before the
 * function returns.
 *
 * Returns 0 on success or a negative errno.
 */
static int mtk_hdmi_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_hdmi_phy *hdmi_phy;
	struct resource *mem;
	struct clk *ref_clk;
	const char *ref_clk_name;
	struct clk_init_data clk_init = {
		.ops = &mtk_hdmi_pll_ops,
		.num_parents = 1,
		.parent_names = (const char * const *)&ref_clk_name,
		.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
	};
	struct phy *phy;
	struct phy_provider *phy_provider;
	int ret;

	hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
	if (!hdmi_phy)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdmi_phy->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(hdmi_phy->regs)) {
		ret = PTR_ERR(hdmi_phy->regs);
		dev_err(dev, "Failed to get memory resource: %d\n", ret);
		return ret;
	}

	ref_clk = devm_clk_get(dev, "pll_ref");
	if (IS_ERR(ref_clk)) {
		ret = PTR_ERR(ref_clk);
		dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
			ret);
		return ret;
	}
	ref_clk_name = __clk_get_name(ref_clk);

	/* The PLL's clock name must come from DT. */
	ret = of_property_read_string(dev->of_node, "clock-output-names",
				      &clk_init.name);
	if (ret < 0) {
		dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
		return ret;
	}

	hdmi_phy->pll_hw.init = &clk_init;
	hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
	if (IS_ERR(hdmi_phy->pll)) {
		ret = PTR_ERR(hdmi_phy->pll);
		dev_err(dev, "Failed to register PLL: %d\n", ret);
		return ret;
	}

	/* Analog bias tuning values, board specific. */
	ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
				   &hdmi_phy->ibias);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
				   &hdmi_phy->ibias_up);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
		return ret;
	}

	dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
	hdmi_phy->drv_imp_clk = 0x30;
	hdmi_phy->drv_imp_d2 = 0x30;
	hdmi_phy->drv_imp_d1 = 0x30;
	hdmi_phy->drv_imp_d0 = 0x30;

	phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
	if (IS_ERR(phy)) {
		dev_err(dev, "Failed to create HDMI PHY\n");
		return PTR_ERR(phy);
	}
	phy_set_drvdata(phy, hdmi_phy);

	phy_provider = devm_of_phy_provider_register(dev,
						     of_phy_simple_xlate);
	if (IS_ERR(phy_provider))
		return PTR_ERR(phy_provider);

	hdmi_phy->dev = dev;
	return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
				   hdmi_phy->pll);
}

/* Nothing to undo here: all resources are device-managed. */
static int mtk_hdmi_phy_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id mtk_hdmi_phy_match[] = {
	{ .compatible = "mediatek,mt8173-hdmi-phy", },
	{},
};
/*
 * clk_reg_sysctrl() - allocate and register one AB8500 sysctrl clock.
 *
 * The reg_sel/reg_mask/reg_bits arrays carry one register triple per
 * parent; index 0 is always written (even for parentless clocks) and any
 * additional parents fill the remaining slots.
 *
 * Returns the registered clk or an ERR_PTR() on failure.
 */
static struct clk *clk_reg_sysctrl(struct device *dev,
				   const char *name,
				   const char **parent_names,
				   u8 num_parents,
				   u16 *reg_sel,
				   u8 *reg_mask,
				   u8 *reg_bits,
				   unsigned long rate,
				   unsigned long enable_delay_us,
				   unsigned long flags,
				   struct clk_ops *clk_sysctrl_ops)
{
	struct clk_sysctrl *sysctrl;
	struct clk_init_data clk_sysctrl_init = {
		.name = name,
		.ops = clk_sysctrl_ops,
		.flags = flags,
		.parent_names = parent_names,
		.num_parents = num_parents,
	};
	struct clk *registered;
	int i;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (!name || (num_parents > SYSCTRL_MAX_NUM_PARENTS)) {
		dev_err(dev, "clk_sysctrl: invalid arguments passed\n");
		return ERR_PTR(-EINVAL);
	}

	sysctrl = devm_kzalloc(dev, sizeof(*sysctrl), GFP_KERNEL);
	if (!sysctrl) {
		dev_err(dev, "clk_sysctrl: could not allocate clk\n");
		return ERR_PTR(-ENOMEM);
	}

	/* set main clock registers */
	sysctrl->reg_sel[0] = reg_sel[0];
	sysctrl->reg_bits[0] = reg_bits[0];
	sysctrl->reg_mask[0] = reg_mask[0];

	/* handle clocks with more than one parent */
	for (i = 1; i < num_parents; i++) {
		sysctrl->reg_sel[i] = reg_sel[i];
		sysctrl->reg_bits[i] = reg_bits[i];
		sysctrl->reg_mask[i] = reg_mask[i];
	}

	sysctrl->parent_index = 0;
	sysctrl->rate = rate;
	sysctrl->enable_delay_us = enable_delay_us;
	sysctrl->dev = dev;
	sysctrl->hw.init = &clk_sysctrl_init;

	registered = devm_clk_register(sysctrl->dev, &sysctrl->hw);
	if (IS_ERR(registered))
		dev_err(dev, "clk_sysctrl: clk_register failed\n");

	return registered;
}
static int krait_add_div(struct device *dev, int id, const char *s, unsigned int offset) { struct krait_div2_clk *div; struct clk_init_data init = { .num_parents = 1, .ops = &krait_div2_clk_ops, .flags = CLK_SET_RATE_PARENT, }; const char *p_names[1]; struct clk *clk; div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); if (!div) return -ENOMEM; div->width = 2; div->shift = 6; div->lpl = id >= 0; div->offset = offset; div->hw.init = &init; init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s); if (!init.name) return -ENOMEM; init.parent_names = p_names; p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); if (!p_names[0]) { kfree(init.name); return -ENOMEM; } clk = devm_clk_register(dev, &div->hw); kfree(p_names[0]); kfree(init.name); return PTR_ERR_OR_ZERO(clk); } static int krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned int offset, bool unique_aux) { int ret; struct krait_mux_clk *mux; static const char *sec_mux_list[] = { "acpu_aux", "qsb", }; struct clk_init_data init = { .parent_names = sec_mux_list, .num_parents = ARRAY_SIZE(sec_mux_list), .ops = &krait_mux_clk_ops, .flags = CLK_SET_RATE_PARENT, }; struct clk *clk; mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); if (!mux) return -ENOMEM; mux->offset = offset; mux->lpl = id >= 0; mux->mask = 0x3; mux->shift = 2; mux->parent_map = sec_mux_map; mux->hw.init = &init; mux->safe_sel = 0; init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); if (!init.name) return -ENOMEM; if (unique_aux) { sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s); if (!sec_mux_list[0]) { clk = ERR_PTR(-ENOMEM); goto err_aux; } } clk = devm_clk_register(dev, &mux->hw); ret = krait_notifier_register(dev, clk, mux); if (ret) goto unique_aux; unique_aux: if (unique_aux) kfree(sec_mux_list[0]); err_aux: kfree(init.name); return PTR_ERR_OR_ZERO(clk); }
static struct clk * krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned int offset) { int ret; struct krait_mux_clk *mux; const char *p_names[3]; struct clk_init_data init = { .parent_names = p_names, .num_parents = ARRAY_SIZE(p_names), .ops = &krait_mux_clk_ops, .flags = CLK_SET_RATE_PARENT, }; struct clk *clk; mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); if (!mux) return ERR_PTR(-ENOMEM); mux->mask = 0x3; mux->shift = 0; mux->offset = offset; mux->lpl = id >= 0; mux->parent_map = pri_mux_map; mux->hw.init = &init; mux->safe_sel = 2; init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s); if (!init.name) return ERR_PTR(-ENOMEM); p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); if (!p_names[0]) { clk = ERR_PTR(-ENOMEM); goto err_p0; } p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s); if (!p_names[1]) { clk = ERR_PTR(-ENOMEM); goto err_p1; } p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); if (!p_names[2]) { clk = ERR_PTR(-ENOMEM); goto err_p2; } clk = devm_clk_register(dev, &mux->hw); ret = krait_notifier_register(dev, clk, mux); if (ret) goto err_p3; err_p3: kfree(p_names[2]); err_p2: kfree(p_names[1]); err_p1: kfree(p_names[0]); err_p0: kfree(init.name); return clk; }
/*
 * clk_register_gpio() - request a control GPIO and register a GPIO clock.
 *
 * Works both with (@dev != NULL, device-managed allocation/request/
 * registration) and without a device (plain kzalloc/gpio_request_one/
 * clk_register, with manual cleanup on failure).
 *
 * Returns the registered clk or an ERR_PTR() on failure.
 */
static struct clk *clk_register_gpio(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned gpio, bool active_low, unsigned long flags,
		const struct clk_ops *clk_gpio_ops)
{
	struct clk_gpio *cg;
	struct clk *ret_clk;
	struct clk_init_data init = {};
	unsigned long request_flags;
	int status;

	cg = dev ? devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL)
		 : kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);

	/* Request the line parked in its logically-inactive state. */
	request_flags = active_low ?
			(GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH) :
			GPIOF_OUT_INIT_LOW;

	status = dev ? devm_gpio_request_one(dev, gpio, request_flags, name)
		     : gpio_request_one(gpio, request_flags, name);
	if (status) {
		if (status != -EPROBE_DEFER)
			pr_err("%s: %s: Error requesting clock control gpio %u\n",
			       __func__, name, gpio);
		if (!dev)
			kfree(cg);
		return ERR_PTR(status);
	}

	init.name = name;
	init.ops = clk_gpio_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	cg->gpiod = gpio_to_desc(gpio);
	cg->hw.init = &init;

	ret_clk = dev ? devm_clk_register(dev, &cg->hw)
		      : clk_register(NULL, &cg->hw);

	/* Without a device there is no devres: undo by hand on failure. */
	if (IS_ERR(ret_clk) && !dev) {
		gpiod_put(cg->gpiod);
		kfree(cg);
	}

	return ret_clk;
}
static int hi6220_stub_clk_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct clk_init_data init; struct hi6220_stub_clk *stub_clk; struct clk *clk; struct device_node *np = pdev->dev.of_node; int ret; stub_clk = devm_kzalloc(dev, sizeof(*stub_clk), GFP_KERNEL); if (!stub_clk) return -ENOMEM; stub_clk->dfs_map = syscon_regmap_lookup_by_phandle(np, "hisilicon,hi6220-clk-sram"); if (IS_ERR(stub_clk->dfs_map)) { dev_err(dev, "failed to get sram regmap\n"); return PTR_ERR(stub_clk->dfs_map); } stub_clk->hw.init = &init; stub_clk->dev = dev; stub_clk->id = HI6220_STUB_ACPU0; /* Use mailbox client with blocking mode */ stub_clk->cl.dev = dev; stub_clk->cl.tx_done = NULL; stub_clk->cl.tx_block = true; stub_clk->cl.tx_tout = 500; stub_clk->cl.knows_txdone = false; /* Allocate mailbox channel */ stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0); if (IS_ERR(stub_clk->mbox)) { dev_err(dev, "failed get mailbox channel\n"); return PTR_ERR(stub_clk->mbox); }; init.name = "acpu0"; init.ops = &hi6220_stub_clk_ops; init.num_parents = 0; init.flags = CLK_IS_ROOT; clk = devm_clk_register(dev, &stub_clk->hw); if (IS_ERR(clk)) return PTR_ERR(clk); ret = of_clk_add_provider(np, of_clk_src_simple_get, clk); if (ret) { dev_err(dev, "failed to register OF clock provider\n"); return ret; } /* initialize buffer to zero */ regmap_write(stub_clk->dfs_map, ACPU_DFS_FLAG, 0x0); regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, 0x0); regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, 0x0); dev_dbg(dev, "Registered clock '%s'\n", init.name); return 0; }
static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { struct clk_init_data init; int i, ret; struct device *dev = &dwmac->pdev->dev; char clk_name[32]; const char *clk_div_parents[1]; const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; /* get the mux parents from DT */ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { char name[16]; snprintf(name, sizeof(name), "clkin%d", i); dwmac->m250_mux_parent[i] = devm_clk_get(dev, name); if (IS_ERR(dwmac->m250_mux_parent[i])) { ret = PTR_ERR(dwmac->m250_mux_parent[i]); if (ret != -EPROBE_DEFER) dev_err(dev, "Missing clock %s\n", name); return ret; } mux_parent_names[i] = __clk_get_name(dwmac->m250_mux_parent[i]); } /* create the m250_mux */ snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev)); init.name = clk_name; init.ops = &clk_mux_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = mux_parent_names; init.num_parents = MUX_CLK_NUM_PARENTS; dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0; dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; dwmac->m250_mux.flags = 0; dwmac->m250_mux.table = NULL; dwmac->m250_mux.hw.init = &init; dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw); if (WARN_ON(IS_ERR(dwmac->m250_mux_clk))) return PTR_ERR(dwmac->m250_mux_clk); /* create the m250_div */ snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev)); init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); init.ops = &clk_divider_ops; init.flags = CLK_SET_RATE_PARENT; clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk); init.parent_names = clk_div_parents; init.num_parents = ARRAY_SIZE(clk_div_parents); dwmac->m250_div.reg = dwmac->regs + PRG_ETH0; dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; dwmac->m250_div.hw.init = &init; dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | CLK_DIVIDER_ROUND_CLOSEST; dwmac->m250_div_clk = devm_clk_register(dev, 
&dwmac->m250_div.hw); if (WARN_ON(IS_ERR(dwmac->m250_div_clk))) return PTR_ERR(dwmac->m250_div_clk); /* create the fixed_div2 */ snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev)); init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); init.ops = &clk_fixed_factor_ops; init.flags = CLK_SET_RATE_PARENT; clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk); init.parent_names = clk_div_parents; init.num_parents = ARRAY_SIZE(clk_div_parents); dwmac->fixed_div2.mult = 1; dwmac->fixed_div2.div = 2; dwmac->fixed_div2.hw.init = &init; dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw); if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk))) return PTR_ERR(dwmac->fixed_div2_clk); /* create the rgmii_tx_en */ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en", dev_name(dev)); init.ops = &clk_gate_ops; init.flags = CLK_SET_RATE_PARENT; clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk); init.parent_names = clk_div_parents; init.num_parents = ARRAY_SIZE(clk_div_parents); dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0; dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN; dwmac->rgmii_tx_en.hw.init = &init; dwmac->rgmii_tx_en_clk = devm_clk_register(dev, &dwmac->rgmii_tx_en.hw); if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk))) return PTR_ERR(dwmac->rgmii_tx_en_clk); return 0; }