/*
 * Register the common and SoC-variant-specific clocks of the
 * AT91SAM9x5 family, then install the clkdev lookup table.
 */
static void __init at91sam9x5_register_clocks(void)
{
	int idx;

	/* Clocks present on every SAM9x5 variant. */
	for (idx = 0; idx < ARRAY_SIZE(periph_clocks); idx++)
		clk_register(periph_clocks[idx]);

	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));

	/* USART3: G25 and X25 only. */
	if (cpu_is_at91sam9g25() || cpu_is_at91sam9x25())
		clk_register(&usart3_clk);

	/* First Ethernet MAC: every variant except the G15. */
	if (cpu_is_at91sam9g25() || cpu_is_at91sam9x25() ||
	    cpu_is_at91sam9g35() || cpu_is_at91sam9x35())
		clk_register(&macb0_clk);

	/* LCD controller: G15, G35 and X35 only. */
	if (cpu_is_at91sam9g15() || cpu_is_at91sam9g35() ||
	    cpu_is_at91sam9x35())
		clk_register(&lcdc_clk);

	/* Image sensor interface: G25 only. */
	if (cpu_is_at91sam9g25())
		clk_register(&isi_clk);

	/* Second Ethernet MAC: X25 only. */
	if (cpu_is_at91sam9x25())
		clk_register(&macb1_clk);

	/* CAN controllers: X25 and X35 only. */
	if (cpu_is_at91sam9x25() || cpu_is_at91sam9x35()) {
		clk_register(&can0_clk);
		clk_register(&can1_clk);
	}

	clk_register(&pck0);
	clk_register(&pck1);
}
/*
 * clk_sp810_of_setup - parse an SP810 DT node and register its
 * timerclken output clocks.
 * @node: the SP810 device node
 *
 * Each SP810 muxes between a "refclk" and a "timclk" parent; both must
 * be resolvable from the DT or the node is skipped.
 */
void __init clk_sp810_of_setup(struct device_node *node)
{
	struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL);
	const char *parent_names[2];
	char name[12];
	struct clk_init_data init;
	static int instance;
	int i;

	if (!sp810) {
		pr_err("Failed to allocate memory for SP810!\n");
		return;
	}

	sp810->refclk_index = of_property_match_string(node, "clock-names",
			"refclk");
	parent_names[0] = of_clk_get_parent_name(node, sp810->refclk_index);

	sp810->timclk_index = of_property_match_string(node, "clock-names",
			"timclk");
	parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index);

	/*
	 * Fix: of_clk_get_parent_name() returns a string pointer or NULL;
	 * the old "<= 0" test compared a pointer against an integer.  Test
	 * for NULL, and free the allocation instead of leaking it.
	 */
	if (!parent_names[0] || !parent_names[1]) {
		pr_warn("Failed to obtain parent clocks for SP810!\n");
		kfree(sp810);
		return;
	}

	sp810->node = node;
	sp810->base = of_iomap(node, 0);
	spin_lock_init(&sp810->lock);

	init.name = name;
	init.ops = &clk_sp810_timerclken_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = parent_names;
	init.num_parents = ARRAY_SIZE(parent_names);

	/* init (including the name buffer) is copied by clk_register(). */
	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);

		sp810->timerclken[i].sp810 = sp810;
		sp810->timerclken[i].channel = i;
		sp810->timerclken[i].hw.init = &init;

		sp810->timerclken[i].clk = clk_register(NULL,
				&sp810->timerclken[i].hw);
		WARN_ON(IS_ERR(sp810->timerclken[i].clk));
	}

	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
	instance++;
}
/*
 * vexpress_osc_of_setup - register a Versatile Express config-bus
 * oscillator described by a DT node.
 * @node: the oscillator device node
 */
void __init vexpress_osc_of_setup(struct device_node *node)
{
	struct clk_init_data init;
	struct vexpress_osc *osc;
	struct clk *clk;
	u32 range[2];

	osc = kzalloc(sizeof(*osc), GFP_KERNEL);
	if (!osc)
		/*
		 * Fix: the old code jumped to "error", which dereferenced
		 * osc->func on a NULL osc.  Nothing to clean up yet.
		 */
		return;

	osc->func = vexpress_config_func_get_by_node(node);
	if (!osc->func) {
		pr_err("Failed to obtain config func for node '%s'!\n",
				node->name);
		goto error;
	}

	/* Optional rate limits; left zero when the property is absent. */
	if (of_property_read_u32_array(node, "freq-range", range,
			ARRAY_SIZE(range)) == 0) {
		osc->rate_min = range[0];
		osc->rate_max = range[1];
	}

	/*
	 * Fix: init lives on the stack, so init.name is garbage unless
	 * the property read stores into it.  Pre-clear it so the fallback
	 * to node->name actually triggers when the property is missing.
	 */
	init.name = NULL;
	of_property_read_string(node, "clock-output-names", &init.name);
	if (!init.name)
		init.name = node->name;

	init.ops = &vexpress_osc_ops;
	init.flags = CLK_IS_ROOT;
	init.num_parents = 0;

	osc->hw.init = &init;

	clk = clk_register(NULL, &osc->hw);
	if (IS_ERR(clk)) {
		pr_err("Failed to register clock '%s'!\n", init.name);
		goto error;
	}

	of_clk_add_provider(node, of_clk_src_simple_get, clk);

	pr_debug("Registered clock '%s'\n", init.name);

	return;

error:
	if (osc->func)
		vexpress_config_func_put(osc->func);
	kfree(osc);
}
/*
 * imx_clk_pllv3 - register an i.MX PLLv3-type clock.
 * @type:        PLL flavour; selects the clk_ops implementation
 * @name:        clock name
 * @parent_name: single parent clock name
 * @base:        MMIO address of the PLL control register
 * @div_mask:    mask of the divider bits within the control register
 * @always_on:   flag stored for the ops to consult
 *
 * Returns the registered clk, or an ERR_PTR on failure.
 */
struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
			  const char *parent_name, void __iomem *base,
			  u32 div_mask, bool always_on)
{
	struct clk_pllv3 *pll;
	const struct clk_ops *ops;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/* Pick the ops matching the PLL flavour; generic ops otherwise. */
	switch (type) {
	case IMX_PLLV3_SYS:
		ops = &clk_pllv3_sys_ops;
		break;
	case IMX_PLLV3_USB:
		ops = &clk_pllv3_ops;
		/*
		 * NOTE(review): powerup_set presumably inverts the power
		 * bit polarity for USB PLLs — confirm in clk_pllv3_ops.
		 */
		pll->powerup_set = true;
		break;
	case IMX_PLLV3_AV:
		ops = &clk_pllv3_av_ops;
		break;
	case IMX_PLLV3_ENET:
		ops = &clk_pllv3_enet_ops;
		break;
	case IMX_PLLV3_MLB:
		ops = &clk_pllv3_mlb_ops;
		break;
	default:
		ops = &clk_pllv3_ops;
	}
	pll->base = base;
	pll->div_mask = div_mask;
	pll->always_on = always_on;

	init.name = name;
	init.ops = ops;
	/* Rate can change behind the framework's back; don't cache it. */
	init.flags = CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	/* init is copied by clk_register(), so a stack variable is fine. */
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/*
 * Register @nr hwblk clocks from the @clks array, assigning each the
 * shared hwblk ops first.  Stops at the first registration failure and
 * returns that status (0 on full success).
 */
int __init sh_hwblk_clk_register(struct clk *clks, int nr)
{
	int status = 0;
	int idx;

	for (idx = 0; idx < nr && !status; idx++) {
		struct clk *cur = &clks[idx];

		cur->ops = &sh_hwblk_clk_ops;
		status |= clk_register(cur);
	}

	return status;
}
/* Register all AT572D940HF clocks with the AT91 clock framework. */
static void __init at572d940hf_register_clocks(void)
{
	int n;

	/* Peripheral clocks first... */
	for (n = 0; n < ARRAY_SIZE(periph_clocks); n++)
		clk_register(periph_clocks[n]);

	/* ...then programmable, mAgicV memory and host clocks. */
	clk_register(&pck0);
	clk_register(&pck1);
	clk_register(&pck2);
	clk_register(&pck3);

	clk_register(&mAgicV_mem_clk);

	clk_register(&hck0);
	clk_register(&hck1);
}
/*
 * at91_clk_register_main_osc - register the AT91 main oscillator clock.
 * @pmc:         PMC register accessor
 * @irq:         PMC interrupt used to wait for oscillator readiness
 * @name:        clock name
 * @parent_name: name of the external input clock
 * @bypass:      true to bypass the oscillator with an external clock
 *
 * Returns the registered clk, or an ERR_PTR on failure.
 */
static struct clk * __init
at91_clk_register_main_osc(struct at91_pmc *pmc,
			   unsigned int irq,
			   const char *name,
			   const char *parent_name,
			   bool bypass)
{
	int ret;
	struct clk_main_osc *osc;
	struct clk *clk = NULL;
	struct clk_init_data init;

	if (!pmc || !irq || !name || !parent_name)
		return ERR_PTR(-EINVAL);

	osc = kzalloc(sizeof(*osc), GFP_KERNEL);
	if (!osc)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &main_osc_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = CLK_IGNORE_UNUSED;

	osc->hw.init = &init;
	osc->pmc = pmc;
	osc->irq = irq;

	init_waitqueue_head(&osc->wait);
	irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
	ret = request_irq(osc->irq, clk_main_osc_irq_handler,
			  IRQF_TRIGGER_HIGH, name, osc);
	if (ret) {
		/* Fix: osc was leaked on this error path. */
		kfree(osc);
		return ERR_PTR(ret);
	}

	if (bypass)
		pmc_write(pmc, AT91_CKGR_MOR,
			  (pmc_read(pmc, AT91_CKGR_MOR) &
			   ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
			  AT91_PMC_OSCBYPASS | AT91_PMC_KEY);

	clk = clk_register(NULL, &osc->hw);
	if (IS_ERR(clk)) {
		free_irq(irq, osc);
		kfree(osc);
	}

	return clk;
}
static __init struct clk *__socfpga_pll_init(struct device_node *node, const struct clk_ops *ops) { u32 reg; struct clk *clk; struct socfpga_pll *pll_clk; const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; struct device_node *clkmgr_np; int rc; int i = 0; of_property_read_u32(node, "reg", ®); pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); if (WARN_ON(!pll_clk)) return NULL; clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); clk_mgr_base_addr = of_iomap(clkmgr_np, 0); BUG_ON(!clk_mgr_base_addr); pll_clk->hw.reg = clk_mgr_base_addr + reg; of_property_read_string(node, "clock-output-names", &clk_name); init.name = clk_name; init.ops = ops; init.flags = 0; while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = of_clk_get_parent_name(node, i)) != NULL) i++; init.num_parents = i; init.parent_names = parent_name; pll_clk->hw.hw.init = &init; pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; clk_pll_ops.enable = clk_gate_ops.enable; clk_pll_ops.disable = clk_gate_ops.disable; clk = clk_register(NULL, &pll_clk->hw.hw); if (WARN_ON(IS_ERR(clk))) { kfree(pll_clk); return NULL; } rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); return clk; }
/* Set up the r8a7779 clock tree; panics if any registration fails. */
void __init r8a7779_clock_init(void)
{
	int status = 0;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(main_clks) && !status; idx++)
		status = clk_register(main_clks[idx]);

	if (!status)
		status = sh_clk_div4_register(div4_clks, DIV4_NR,
					      &div4_table);

	if (!status)
		status = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	for (idx = 0; idx < ARRAY_SIZE(late_main_clks) && !status; idx++)
		status = clk_register(late_main_clks[idx]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (status)
		panic("failed to setup r8a7779 clocks\n");

	shmobile_clk_init();
}
/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes DPLL registration process. In case a failure (clk-ref or
 * clk-bypass is missing), the clock is added to retry list and
 * the initialization is retried on later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	/* Reference clock: index 0 of the node's "clocks". */
	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %s, retry later\n",
			 node->name);
		/*
		 * NOTE(review): a zero return from ti_clk_retry_init()
		 * appears to mean the node was queued for retry, so the
		 * allocations must stay alive — confirm in its definition.
		 */
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	/* Bypass clock: index 1. */
	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		/* init data was only needed for registration; drop it. */
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}
/*
 * r8a7790_clock_init - set up the r8a7790 clock tree.
 *
 * The root clock and PLL multipliers are selected from the MD13/MD14
 * mode pins, the local-bus and QSPI ratios from further mode pins,
 * then all clocks are registered.  Panics on any failure.
 */
void __init r8a7790_clock_init(void)
{
	u32 mode = rcar_gen2_read_mode_pins();
	int k, ret = 0;

	/* Root source and multipliers from the MD14:MD13 pin pair. */
	switch (mode & (MD(14) | MD(13))) {
	case 0:
		R8A7790_CLOCK_ROOT(15, &extal_clk, 172, 208, 106, 88);
		break;
	case MD(13):
		R8A7790_CLOCK_ROOT(20, &extal_clk, 130, 156, 80, 66);
		break;
	case MD(14):
		R8A7790_CLOCK_ROOT(26 / 2, &extal_div2_clk, 200, 240, 122, 102);
		break;
	case MD(13) | MD(14):
		R8A7790_CLOCK_ROOT(30 / 2, &extal_div2_clk, 172, 208, 106, 88);
		break;
	}

	/* Local bus ratio selected by MD18. */
	if (mode & (MD(18)))
		SH_CLK_SET_RATIO(&lb_clk_ratio, 1, 36);
	else
		SH_CLK_SET_RATIO(&lb_clk_ratio, 1, 24);

	/* QSPI ratio: 1/16 only when MD3:MD1 is exactly MD2. */
	if ((mode & (MD(3) | MD(2) | MD(1))) == MD(2))
		SH_CLK_SET_RATIO(&qspi_clk_ratio, 1, 16);
	else
		SH_CLK_SET_RATIO(&qspi_clk_ratio, 1, 20);

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		shmobile_clk_init();
	else
		panic("failed to setup r8a7790 clocks\n");
}
/**
 * clk_register_zynq_pll() - Register PLL with the clock framework
 * @name:	PLL name
 * @parent:	Parent clock name
 * @pll_ctrl:	Pointer to PLL control register
 * @pll_status:	Pointer to PLL status register
 * @lock_index:	Bit index to this PLL's lock status bit in @pll_status
 * @lock:	Register lock
 *
 * Return: handle to the registered clock, or an ERR_PTR on failure.
 */
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
		void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
		spinlock_t *lock)
{
	struct zynq_pll *pll;
	struct clk *ret_clk;
	u32 ctrl;
	const char *parents[1] = { parent };
	unsigned long irqflags = 0;
	struct clk_init_data initd = {
		.name = name,
		.parent_names = parents,
		.ops = &zynq_pll_ops,
		.num_parents = 1,
		.flags = 0,
	};

	pll = kmalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll) {
		pr_err("%s: Could not allocate Zynq PLL clk.\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	/* Populate the struct */
	pll->hw.init = &initd;
	pll->pll_ctrl = pll_ctrl;
	pll->pll_status = pll_status;
	pll->lockbit = lock_index;
	pll->lock = lock;

	/* Clear the bypass-qualify bits under the register lock. */
	spin_lock_irqsave(pll->lock, irqflags);
	ctrl = readl(pll->pll_ctrl);
	ctrl &= ~PLLCTRL_BPQUAL_MASK;
	writel(ctrl, pll->pll_ctrl);
	spin_unlock_irqrestore(pll->lock, irqflags);

	ret_clk = clk_register(NULL, &pll->hw);
	if (WARN_ON(IS_ERR(ret_clk)))
		kfree(pll);

	return ret_clk;
}
/*
 * clk_reg_prcc - allocate and register a PRCC clock.
 * @name:         clock name (required)
 * @parent_name:  parent clock name, or NULL for a root clock
 * @phy_base:     physical base address of the PRCC register bank
 * @cg_sel:       clock gate select value for this clock
 * @flags:        framework flags for clk_init_data
 * @clk_prcc_ops: ops implementing this PRCC clock variant
 *
 * Returns the registered clk, or ERR_PTR(-ENOMEM)/-EINVAL on failure.
 */
static struct clk *clk_reg_prcc(const char *name, const char *parent_name, resource_size_t phy_base, u32 cg_sel, unsigned long flags, struct clk_ops *clk_prcc_ops)
{
	struct clk_prcc *clk;
	struct clk_init_data clk_prcc_init;
	struct clk *clk_reg;

	if (!name) {
		pr_err("clk_prcc: %s invalid arguments passed\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	clk = kzalloc(sizeof(struct clk_prcc), GFP_KERNEL);
	if (!clk) {
		pr_err("clk_prcc: %s could not allocate clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	clk->base = ioremap(phy_base, SZ_4K);
	if (!clk->base)
		goto free_clk;

	clk->cg_sel = cg_sel;
	/* NOTE(review): assumes the gate starts enabled — confirm vs hw. */
	clk->is_enabled = 1;

	clk_prcc_init.name = name;
	clk_prcc_init.ops = clk_prcc_ops;
	clk_prcc_init.flags = flags;
	clk_prcc_init.parent_names = (parent_name ? &parent_name : NULL);
	clk_prcc_init.num_parents = (parent_name ? 1 : 0);
	clk->hw.init = &clk_prcc_init;

	clk_reg = clk_register(NULL, &clk->hw);
	if (IS_ERR_OR_NULL(clk_reg))
		goto unmap_clk;

	return clk_reg;

unmap_clk:
	iounmap(clk->base);
free_clk:
	kfree(clk);
	pr_err("clk_prcc: %s failed to register clk\n", __func__);
	return ERR_PTR(-ENOMEM);
}
/*
 * Register the SPEAr3xx clock lookups: the common set plus the table
 * matching the detected board, then initialise the clock core.
 */
void __init spear3xx_clk_init(void)
{
	struct clk_lookup *board_lookups;
	int board_cnt;
	int n;

	if (machine_is_spear300()) {
		board_cnt = ARRAY_SIZE(spear300_clk_lookups);
		board_lookups = spear300_clk_lookups;
	} else if (machine_is_spear310()) {
		board_cnt = ARRAY_SIZE(spear310_clk_lookups);
		board_lookups = spear310_clk_lookups;
	} else {
		/* Default: SPEAr320. */
		board_cnt = ARRAY_SIZE(spear320_clk_lookups);
		board_lookups = spear320_clk_lookups;
	}

	for (n = 0; n < ARRAY_SIZE(spear_clk_lookups); n++)
		clk_register(&spear_clk_lookups[n]);

	for (n = 0; n < board_cnt; n++)
		clk_register(&board_lookups[n]);

	clk_init();
}
/* Register this SoC's clocks; returns 0 on success or an error code. */
int __init arch_clk_init(void)
{
	int status = 0;
	int n;

	for (n = 0; n < ARRAY_SIZE(clks); n++)
		status |= clk_register(clks[n]);

	if (!status)
		status = sh_clk_div4_register(div4_clks,
					      ARRAY_SIZE(div4_clks),
					      &div4_table);

	if (!status)
		status = sh_clk_mstp32_register(mstp_clks,
						ARRAY_SIZE(mstp_clks));

	return status;
}
/* Board init for AAEC-2000: register clock, AMBA devices and flash. */
static int __init aaec2000_init(void)
{
	int n;

	clk_register(&aaec2000_clcd_clk);

	for (n = 0; n < ARRAY_SIZE(amba_devs); n++)
		amba_device_register(amba_devs[n], &iomem_resource);

	platform_device_register(&aaec2000_flash_device);

	return 0;
}
/*
 * clk_reg_prcmu - allocate and register a PRCMU-controlled clock.
 * @name:          clock name (required)
 * @parent_name:   parent clock name, or NULL for a root clock
 * @cg_sel:        PRCMU clock gate selector for this clock
 * @rate:          if non-zero, initial rate requested from the PRCMU
 * @flags:         framework flags for clk_init_data
 * @clk_prcmu_ops: ops implementing this PRCMU clock variant
 *
 * Returns the registered clk, or ERR_PTR(-ENOMEM)/-EINVAL on failure.
 */
static struct clk *clk_reg_prcmu(const char *name, const char *parent_name, u8 cg_sel, unsigned long rate, unsigned long flags, struct clk_ops *clk_prcmu_ops)
{
	struct clk_prcmu *clk;
	struct clk_init_data clk_prcmu_init;
	struct clk *clk_reg;

	if (!name) {
		pr_err("clk_prcmu: %s invalid arguments passed\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	clk = kzalloc(sizeof(struct clk_prcmu), GFP_KERNEL);
	if (!clk) {
		pr_err("clk_prcmu: %s could not allocate clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	clk->cg_sel = cg_sel;
	/*
	 * NOTE(review): assumes the clock starts prepared and enabled —
	 * confirm against the PRCMU firmware's boot state.
	 */
	clk->is_prepared = 1;
	clk->is_enabled = 1;
	clk->opp_requested = 0;
	/* "rate" can be used for changing the initial frequency */
	if (rate)
		prcmu_set_clock_rate(cg_sel, rate);

	clk_prcmu_init.name = name;
	clk_prcmu_init.ops = clk_prcmu_ops;
	clk_prcmu_init.flags = flags;
	clk_prcmu_init.parent_names = (parent_name ? &parent_name : NULL);
	clk_prcmu_init.num_parents = (parent_name ? 1 : 0);
	clk->hw.init = &clk_prcmu_init;

	clk_reg = clk_register(NULL, &clk->hw);
	if (IS_ERR_OR_NULL(clk_reg))
		goto free_clk;

	return clk_reg;

free_clk:
	kfree(clk);
	pr_err("clk_prcmu: %s failed to register clk\n", __func__);
	return ERR_PTR(-ENOMEM);
}
/*
 * at91_clk_register_sam9x5_main - register the SAM9x5 main clock.
 * @pmc:          PMC register accessor
 * @irq:          PMC interrupt used to wait on parent switches
 * @name:         clock name
 * @parent_names: candidate parent clock names (required, non-empty)
 * @num_parents:  number of entries in @parent_names
 *
 * Returns the registered clk, or an ERR_PTR on failure.
 */
static struct clk * __init
at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
			      unsigned int irq,
			      const char *name,
			      const char **parent_names,
			      int num_parents)
{
	int ret;
	struct clk_sam9x5_main *clkmain;
	struct clk *clk = NULL;
	struct clk_init_data init;

	if (!pmc || !irq || !name)
		return ERR_PTR(-EINVAL);

	if (!parent_names || !num_parents)
		return ERR_PTR(-EINVAL);

	clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL);
	if (!clkmain)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &sam9x5_main_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_PARENT_GATE;

	clkmain->hw.init = &init;
	clkmain->pmc = pmc;
	clkmain->irq = irq;
	/* Record the current parent from the MOSCEN bit of CKGR_MOR. */
	clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
			     AT91_PMC_MOSCEN);
	init_waitqueue_head(&clkmain->wait);
	irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
	ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
			  IRQF_TRIGGER_HIGH, name, clkmain);
	if (ret) {
		/* Fix: clkmain was leaked on this error path. */
		kfree(clkmain);
		return ERR_PTR(ret);
	}

	clk = clk_register(NULL, &clkmain->hw);
	if (IS_ERR(clk)) {
		free_irq(clkmain->irq, clkmain);
		kfree(clkmain);
	}

	return clk;
}
/*
 * clk_mux - allocate and register a register-backed mux clock.
 * @name:        clock name
 * @reg:         MMIO register holding the mux field
 * @shift:       bit offset of the mux field within @reg
 * @width:       width of the mux field in bits
 * @parents:     array of parent clock names
 * @num_parents: number of entries in @parents
 * @flags:       framework flags
 *
 * Returns the clk on success; on registration failure the allocation
 * is released and an ERR_PTR is returned.
 */
struct clk *clk_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char **parents, u8 num_parents, unsigned flags)
{
	struct clk *m;
	int ret;

	m = clk_mux_alloc(name, reg, shift, width, parents, num_parents, flags);

	ret = clk_register(m);
	if (ret) {
		free(to_clk_mux(m));
		return ERR_PTR(ret);
	}

	return m;
}
/*
 * at91_clk_register_system - register an AT91 PMC system clock.
 * @pmc:         PMC register accessor
 * @name:        clock name
 * @parent_name: single parent clock name (required)
 * @id:          system clock id, must not exceed SYSTEM_MAX_ID
 * @irq:         PMC interrupt for this clock, or 0 if none
 *
 * Returns the registered clk, or an ERR_PTR on failure.
 */
static struct clk * __init
at91_clk_register_system(struct at91_pmc *pmc, const char *name,
			 const char *parent_name, u8 id, int irq)
{
	struct clk_system *sys;
	struct clk *clk = NULL;
	struct clk_init_data init;
	int ret;

	if (!parent_name || id > SYSTEM_MAX_ID)
		return ERR_PTR(-EINVAL);

	sys = kzalloc(sizeof(*sys), GFP_KERNEL);
	if (!sys)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &system_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	/*
	 * CLK_IGNORE_UNUSED is used to avoid ddrck switch off.
	 * TODO : we should implement a driver supporting at91 ddr controller
	 * (see drivers/memory) which would request and enable the ddrck clock.
	 * When this is done we will be able to remove CLK_IGNORE_UNUSED flag.
	 */
	init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;

	sys->id = id;
	sys->hw.init = &init;
	sys->pmc = pmc;
	sys->irq = irq;
	if (irq) {
		init_waitqueue_head(&sys->wait);
		irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
		ret = request_irq(sys->irq, clk_system_irq_handler,
				  IRQF_TRIGGER_HIGH, name, sys);
		if (ret) {
			/* Fix: sys was leaked on this error path. */
			kfree(sys);
			return ERR_PTR(ret);
		}
	}

	clk = clk_register(NULL, &sys->hw);
	if (IS_ERR(clk)) {
		/* Fix: the requested irq was leaked on this error path. */
		if (irq)
			free_irq(sys->irq, sys);
		kfree(sys);
	}

	return clk;
}
/*
 * r8a73a4_clock_init - set up the r8a73a4 clock tree.
 *
 * Selects the main clock's parent from CKSCR bits 29:28, registers all
 * clocks and the clkdev lookups, and panics on any failure.
 */
void __init r8a73a4_clock_init(void)
{
	void __iomem *reg;
	int k, ret = 0;
	u32 ckscr;

	/* NOTE(review): -1 appears to mean "lock not held" — confirm. */
	atomic_set(&frqcr_lock, -1);

	/* Read CKSCR once through a temporary mapping. */
	reg = ioremap_nocache(CKSCR, PAGE_SIZE);
	BUG_ON(!reg);
	ckscr = ioread32(reg);
	iounmap(reg);

	/* Bits 29:28 select the main clock's parent oscillator. */
	switch ((ckscr >> 28) & 0x3) {
	case 0:
		main_clk.parent = &extal1_clk;
		break;
	case 1:
		main_clk.parent = &extal1_div2_clk;
		break;
	case 2:
		main_clk.parent = &extal2_clk;
		break;
	case 3:
		main_clk.parent = &extal2_div2_clk;
		break;
	}

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		shmobile_clk_init();
	else
		panic("failed to setup r8a73a4 clocks\n");
}
/*
 * _tegra_clk_register_periph - register a composite Tegra peripheral
 * clock (mux + optional divider + gate sharing one register at @offset).
 *
 * Ops are selected from the gate flags: TEGRA_PERIPH_NO_DIV makes the
 * rate follow the parent (CLK_SET_RATE_PARENT), TEGRA_PERIPH_NO_GATE
 * drops the gating ops.  Returns the clk or an ERR_PTR.
 */
static struct clk *_tegra_clk_register_periph(const char *name,
			const char **parent_names, int num_parents,
			struct tegra_clk_periph *periph,
			void __iomem *clk_base, u32 offset,
			unsigned long flags)
{
	struct clk *clk;
	struct clk_init_data init;
	struct tegra_clk_periph_regs *bank;
	bool div = !(periph->gate.flags & TEGRA_PERIPH_NO_DIV);

	if (periph->gate.flags & TEGRA_PERIPH_NO_DIV) {
		flags |= CLK_SET_RATE_PARENT;
		init.ops = &tegra_clk_periph_nodiv_ops;
	} else if (periph->gate.flags & TEGRA_PERIPH_NO_GATE)
		init.ops = &tegra_clk_periph_no_gate_ops;
	else
		init.ops = &tegra_clk_periph_ops;

	init.name = name;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* Register bank holding this clock's enable bit. */
	bank = get_reg_bank(periph->gate.clk_num);
	if (!bank)
		return ERR_PTR(-EINVAL);

	/* Data in .init is copied by clk_register(), so stack variable OK */
	periph->hw.init = &init;
	periph->magic = TEGRA_CLK_PERIPH_MAGIC;
	periph->mux.reg = clk_base + offset;
	periph->divider.reg = div ? (clk_base + offset) : NULL;
	periph->gate.clk_base = clk_base;
	periph->gate.regs = bank;
	periph->gate.enable_refcnt = periph_clk_enb_refcnt;

	clk = clk_register(NULL, &periph->hw);
	if (IS_ERR(clk))
		return clk;

	/* Link the sub-clock hw structs to the registered clk. */
	periph->mux.hw.clk = clk;
	periph->divider.hw.clk = div ? clk : NULL;
	periph->gate.hw.clk = clk;

	return clk;
}
/**
 * sunxi_clk_register_factors - register a sunxi factors clock with the
 * clock framework
 * @dev:       device registering this clock (may be NULL)
 * @base:      MMIO base; register offsets in @init_data are relative
 *             to it
 * @lock:      shared register spinlock for this clock
 * @init_data: per-clock configuration: name, parents, flags, register
 *             offsets, factor config and the get_factors/calc_rate
 *             callbacks
 *
 * Returns the registered clk, or an ERR_PTR on failure (the factors
 * allocation is freed in that case).
 */
struct clk *sunxi_clk_register_factors(struct device *dev, void __iomem *base, spinlock_t *lock, struct factor_init_data* init_data)
{
	struct sunxi_clk_factors *factors;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the factors */
	factors = kzalloc(sizeof(struct sunxi_clk_factors), GFP_KERNEL);
	if (!factors) {
		pr_err("%s: could not allocate factors clk\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

#ifdef __SUNXI_ALL_CLK_IGNORE_UNUSED__
	init_data->flags |= CLK_IGNORE_UNUSED;
#endif

	init.name = init_data->name;
	/* Fall back to the generic factors ops unless private ops given. */
	init.ops = init_data->priv_ops?(init_data->priv_ops):(&clk_factors_ops);
	factors->priv_regops = init_data->priv_regops?(init_data->priv_regops):NULL;
	init.flags = init_data->flags;
	init.parent_names = init_data->parent_names;
	init.num_parents = init_data->num_parents;

	/* struct clk_factors assignments */
	factors->reg = base + init_data->reg;
	factors->lock_reg = base + init_data->lock_reg;
	factors->lock_bit = init_data->lock_bit;
	factors->pll_lock_ctrl_reg = base + init_data->pll_lock_ctrl_reg;
	factors->lock_en_bit = init_data->lock_en_bit;
	factors->lock_mode = init_data->lock_mode;
	factors->config = init_data->config;
	/* sdmpat starts as an offset; rebase it onto the mapped base. */
	factors->config->sdmpat = (unsigned long __force)(base + factors->config->sdmpat);
	factors->lock = lock;
	factors->hw.init = &init;
	factors->get_factors = init_data->get_factors;
	factors->calc_rate = init_data->calc_rate;
	factors->flags = init_data->flags;

	/* register the clock */
	clk = clk_register(dev, &factors->hw);
	if (IS_ERR(clk))
		kfree(factors);

	return clk;
}
/*
 * of_dra7_atl_clock_setup - DT setup for a DRA7 ATL clock: registers
 * the clock and adds it as an OF clock provider.
 * @node: the ATL clock device node (must have exactly one parent)
 */
static void __init of_dra7_atl_clock_setup(struct device_node *node)
{
	struct dra7_atl_desc *clk_hw = NULL;
	struct clk_init_data init = { 0 };
	const char **parent_names = NULL;
	struct clk *clk;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw) {
		pr_err("%s: could not allocate dra7_atl_desc\n", __func__);
		return;
	}

	clk_hw->hw.init = &init;
	clk_hw->divider = 1;
	init.name = node->name;
	init.ops = &atl_clk_ops;
	init.flags = CLK_IGNORE_UNUSED;
	init.num_parents = of_clk_get_parent_count(node);

	if (init.num_parents != 1) {
		pr_err("%s: atl clock %s must have 1 parent\n", __func__, node->name);
		goto cleanup;
	}

	/* Single-entry parent-name array, freed once registration copies it. */
	parent_names = kzalloc(sizeof(char *), GFP_KERNEL);

	if (!parent_names)
		goto cleanup;

	parent_names[0] = of_clk_get_parent_name(node, 0);

	init.parent_names = parent_names;

	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		/* init data (incl. parent names) is copied by clk_register() */
		kfree(parent_names);
		return;
	}
cleanup:
	kfree(parent_names);
	kfree(clk_hw);
}
/* Register this SoC's clocks and clkdev lookups; returns 0 or error. */
int __init arch_clk_init(void)
{
	int status = 0;
	int n;

	for (n = 0; n < ARRAY_SIZE(main_clks) && !status; n++)
		status = clk_register(main_clks[n]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!status)
		status = sh_clk_div4_register(div4_clks, DIV4_NR,
					      &div4_table);

	if (!status)
		status = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return status;
}
/* Register the AT91SAM9261 peripheral, programmable and host clocks. */
static void __init at91sam9261_register_clocks(void)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(periph_clocks); n++)
		clk_register(periph_clocks[n]);

	clk_register(&pck0);
	clk_register(&pck1);
	clk_register(&pck2);
	clk_register(&pck3);

	clk_register(&hck0);
	clk_register(&hck1);
}
/* Register this SoC's clocks and clkdev lookups; returns 0 or error. */
int __init arch_clk_init(void)
{
	int status = 0;
	int n;

	for (n = 0; n < ARRAY_SIZE(clks); n++)
		status |= clk_register(clks[n]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!status)
		status = sh_clk_div4_register(div4_clks,
					      ARRAY_SIZE(div4_clks),
					      &div4_table);

	if (!status)
		status = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return status;
}
/* Register a Versatile Express oscillator as a root clock. */
struct clk * __init v2m_osc_register(const char *name, struct v2m_osc *osc)
{
	struct clk_init_data init = {
		.name = name,
		.ops = &v2m_osc_ops,
		.flags = CLK_IS_ROOT,
		.num_parents = 0,
	};

	/* Sanity-check the oscillator addressing fields. */
	WARN_ON(osc->site > 2);
	WARN_ON(osc->stack > 15);
	WARN_ON(osc->osc > 4095);

	osc->hw.init = &init;

	return clk_register(NULL, &osc->hw);
}
/*
 * at91_clk_register_system - register an AT91 PMC system clock.
 * @pmc:         PMC register accessor
 * @name:        clock name
 * @parent_name: single parent clock name (required)
 * @id:          system clock id, must not exceed SYSTEM_MAX_ID
 * @irq:         PMC interrupt for this clock, or 0 if none
 *
 * Returns the registered clk, or an ERR_PTR on failure.
 */
static struct clk * __init at91_clk_register_system(struct at91_pmc *pmc, const char *name, const char *parent_name, u8 id, int irq)
{
	struct clk_system *sys;
	struct clk *clk = NULL;
	struct clk_init_data init;
	int ret;

	if (!parent_name || id > SYSTEM_MAX_ID)
		return ERR_PTR(-EINVAL);

	sys = kzalloc(sizeof(*sys), GFP_KERNEL);
	if (!sys)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &system_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = CLK_SET_RATE_PARENT;

	sys->id = id;
	sys->hw.init = &init;
	sys->pmc = pmc;
	sys->irq = irq;
	if (irq) {
		/* Keep the irq disabled until the ops need it. */
		init_waitqueue_head(&sys->wait);
		irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
		ret = request_irq(sys->irq, clk_system_irq_handler, IRQF_TRIGGER_HIGH, name, sys);
		if (ret) {
			kfree(sys);
			return ERR_PTR(ret);
		}
	}

	clk = clk_register(NULL, &sys->hw);
	if (IS_ERR(clk)) {
		/*
		 * NOTE(review): free_irq() is also reached when irq == 0
		 * and no irq was requested — confirm this path is safe.
		 */
		free_irq(sys->irq, sys);
		kfree(sys);
	}

	return clk;
}
/*
 * at91_clk_register_main_rc_osc - register the AT91 main RC oscillator.
 * @pmc:       PMC register accessor
 * @irq:       PMC interrupt used to wait for oscillator readiness
 * @name:      clock name
 * @frequency: fixed output frequency reported to the framework (required)
 * @accuracy:  accuracy value reported to the framework, may be 0
 *
 * This is a root clock (no parents).  Returns the registered clk, or
 * an ERR_PTR on failure.
 */
static struct clk * __init
at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
			      unsigned int irq,
			      const char *name,
			      u32 frequency, u32 accuracy)
{
	int ret;
	struct clk_main_rc_osc *osc;
	struct clk *clk = NULL;
	struct clk_init_data init;

	if (!pmc || !irq || !name || !frequency)
		return ERR_PTR(-EINVAL);

	osc = kzalloc(sizeof(*osc), GFP_KERNEL);
	if (!osc)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &main_rc_osc_ops;
	init.parent_names = NULL;
	init.num_parents = 0;
	init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;

	osc->hw.init = &init;
	osc->pmc = pmc;
	osc->irq = irq;
	osc->frequency = frequency;
	osc->accuracy = accuracy;

	init_waitqueue_head(&osc->wait);
	irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
	ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
			  IRQF_TRIGGER_HIGH, name, osc);
	if (ret) {
		/* Fix: osc was leaked on this error path. */
		kfree(osc);
		return ERR_PTR(ret);
	}

	clk = clk_register(NULL, &osc->hw);
	if (IS_ERR(clk)) {
		free_irq(irq, osc);
		kfree(osc);
	}

	return clk;
}