static void __init omap_clk_register_apll(struct clk_hw *hw,
					  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	struct clk *clk;

	ad->clk_ref = of_clk_get(node, 0);
	ad->clk_bypass = of_clk_get(node, 1);

	if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
		pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	clk = clk_register(NULL, &clk_hw->hw);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}
/*
 * FIXME - setting the parent every time .prepare is invoked is inefficient.
 * This is better handled by a dedicated clock tree configuration mechanism
 * at init-time. Revisit this later when such a mechanism exists.
 */
static int clk_sp810_timerclken_prepare(struct clk_hw *hw)
{
	struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
	struct clk_sp810 *sp810 = timerclken->sp810;
	struct clk *old_parent = __clk_get_parent(hw->clk);
	struct clk *new_parent;

	if (!sp810->refclk)
		sp810->refclk = of_clk_get(sp810->node, sp810->refclk_index);
	if (!sp810->timclk)
		sp810->timclk = of_clk_get(sp810->node, sp810->timclk_index);

	if (WARN_ON(IS_ERR(sp810->refclk) || IS_ERR(sp810->timclk)))
		return -ENOENT;

	/* Select the fastest parent */
	if (clk_get_rate(sp810->refclk) > clk_get_rate(sp810->timclk))
		new_parent = sp810->refclk;
	else
		new_parent = sp810->timclk;

	/* Switch the parent if necessary */
	if (old_parent != new_parent) {
		clk_prepare(new_parent);
		clk_set_parent(hw->clk, new_parent);
		clk_unprepare(old_parent);
	}

	return 0;
}
int asoc_simple_card_parse_clk(struct device_node *node,
			       struct device_node *dai_of_node,
			       struct asoc_simple_dai *simple_dai)
{
	struct clk *clk;
	u32 val;

	/*
	 * Parse dai->sysclk, which comes from
	 *   "clocks = <&xxx>" (if the system has a common clock),
	 *   or "system-clock-frequency = <xxx>",
	 *   or the device's module clock.
	 */
	clk = of_clk_get(node, 0);
	if (!IS_ERR(clk)) {
		simple_dai->sysclk = clk_get_rate(clk);
		simple_dai->clk = clk;
	} else if (!of_property_read_u32(node, "system-clock-frequency",
					 &val)) {
		simple_dai->sysclk = val;
	} else {
		clk = of_clk_get(dai_of_node, 0);
		if (!IS_ERR(clk))
			simple_dai->sysclk = clk_get_rate(clk);
	}

	return 0;
}
static int asoc_simple_card_sub_parse_of(struct device_node *np,
					 struct asoc_simple_dai *dai,
					 struct device_node **p_node,
					 const char **name,
					 int *args_count)
{
	struct of_phandle_args args;
	struct clk *clk;
	u32 val;
	int ret;

	/*
	 * Get the node via "sound-dai = <&phandle port>";
	 * it will be used as xxx_of_node on soc_bind_dai_link().
	 */
	ret = of_parse_phandle_with_args(np, "sound-dai",
					 "#sound-dai-cells", 0, &args);
	if (ret)
		return ret;

	*p_node = args.np;

	if (args_count)
		*args_count = args.args_count;

	/* Get dai->name */
	ret = snd_soc_of_get_dai_name(np, name);
	if (ret < 0)
		return ret;

	/* Parse TDM slot */
	ret = snd_soc_of_parse_tdm_slot(np, &dai->slots, &dai->slot_width);
	if (ret)
		return ret;

	/*
	 * Parse dai->sysclk, which comes from
	 *   "clocks = <&xxx>" (if the system has a common clock),
	 *   or "system-clock-frequency = <xxx>",
	 *   or the device's module clock.
	 */
	if (of_property_read_bool(np, "clocks")) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			return ret;
		}

		dai->sysclk = clk_get_rate(clk);
	} else if (!of_property_read_u32(np, "system-clock-frequency",
					 &val)) {
		dai->sysclk = val;
	} else {
		clk = of_clk_get(args.np, 0);
		if (!IS_ERR(clk))
			dai->sysclk = clk_get_rate(clk);
	}

	return 0;
}
int sprd_iommu_mm_init(struct sprd_iommu_dev *dev,
		       struct sprd_iommu_init_data *data)
{
	int err = -1;

	/*
	 * Note: "#ifdef A && B" is invalid preprocessor syntax; the
	 * condition must use defined().  Also, of_clk_get() and clk_get()
	 * return ERR_PTR() on failure rather than NULL, so the results
	 * are checked with IS_ERR_OR_NULL() below.
	 */
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
	struct device_node *np;

	np = dev->misc_dev.this_device->of_node;
	if (!np)
		return -1;

	dev->mmu_clock = of_clk_get(np, 0);
	dev->mmu_mclock = of_clk_get(np, 1);
#else
	dev->mmu_mclock = clk_get(NULL, "clk_mm_i");
	dev->mmu_clock = clk_get(NULL, "clk_mmu");

	if (IS_ERR_OR_NULL(dev->mmu_mclock)) {
		printk("%s, can't get dev->mmu_mclock\n", __func__);
		return -1;
	}
#endif
	if (IS_ERR_OR_NULL(dev->mmu_clock)) {
		printk("%s, can't get dev->mmu_clock\n", __func__);
		return -1;
	}

	sprd_iommu_mm_enable(dev);
	err = sprd_iommu_init(dev, data);
	sprd_iommu_mm_disable(dev);

	return err;
}
static void __init sp804_of_init(struct device_node *np)
{
	static bool initialized = false;
	void __iomem *base;
	int irq;
	u32 irq_num = 0;
	struct clk *clk1, *clk2;
	const char *name = of_get_property(np, "compatible", NULL);

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Ensure timers are disabled */
	writel(0, base + TIMER_CTRL);
	writel(0, base + TIMER_2_BASE + TIMER_CTRL);

	if (initialized || !of_device_is_available(np))
		goto err;

	clk1 = of_clk_get(np, 0);
	if (IS_ERR(clk1))
		clk1 = NULL;

	/* Get the 2nd clock if the timer has 3 timer clocks */
	if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
		clk2 = of_clk_get(np, 1);
		if (IS_ERR(clk2)) {
			pr_err("sp804: %s clock not found: %d\n", np->name,
			       (int)PTR_ERR(clk2));
			clk2 = NULL;
		}
	} else {
		clk2 = clk1;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0)
		goto err;

	of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
	if (irq_num == 2) {
		__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
		__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
	} else {
		__sp804_clockevents_init(base, irq, clk1, name);
		__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
							 name, clk2, 1);
	}
	initialized = true;

	return;

err:
	iounmap(base);
}
/**
 * ttc_timer_init - Initialize the timer
 *
 * Initializes the timer hardware and registers the clock source and clock
 * event timers with the Linux kernel timer framework.
 */
static void __init ttc_timer_init(struct device_node *timer)
{
	unsigned int irq;
	void __iomem *timer_baseaddr;
	struct clk *clk_cs, *clk_ce;
	static int initialized;
	int clksel;

	if (initialized)
		return;

	initialized = 1;

	/*
	 * Get the 1st Triple Timer Counter (TTC) block from the device tree
	 * and use it. Note that the event timer uses the interrupt and it's
	 * the 2nd TTC, hence the irq_of_parse_and_map(,1).
	 */
	timer_baseaddr = of_iomap(timer, 0);
	if (!timer_baseaddr) {
		pr_err("ERROR: invalid timer base address\n");
		BUG();
	}

	irq = irq_of_parse_and_map(timer, 1);
	if (irq <= 0) {
		pr_err("ERROR: invalid interrupt number\n");
		BUG();
	}

	clksel = __raw_readl(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
	clk_cs = of_clk_get(timer, clksel);
	if (IS_ERR(clk_cs)) {
		pr_err("ERROR: timer input clock not found\n");
		BUG();
	}

	clksel = __raw_readl(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
	clk_ce = of_clk_get(timer, clksel);
	if (IS_ERR(clk_ce)) {
		pr_err("ERROR: timer input clock not found\n");
		BUG();
	}

	ttc_setup_clocksource(clk_cs, timer_baseaddr);
	ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);

	pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
}
/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. In case of a failure (clk-ref or
 * clk-bypass is missing), the clock is added to the retry list and the
 * initialization is retried at a later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}
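The kernel-doc above relies on a deferred retry list: when a parent clock is not yet registered, the node and its init callback are queued and replayed later. Below is a minimal sketch of that pattern; it assumes nothing about the real ti_clk_retry_init() internals, and every name in it is illustrative.

#include <linux/clk-provider.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Illustrative retry-list sketch, not the TI driver's actual internals. */
struct clk_init_retry {
	struct list_head	link;
	struct device_node	*node;
	struct clk_hw		*hw;
	void			(*func)(struct clk_hw *hw,
					struct device_node *node);
};

static LIST_HEAD(clk_retry_list);

/* Queue a node whose parent clocks are not registered yet. */
static int clk_retry_init(struct device_node *node, struct clk_hw *hw,
			  void (*func)(struct clk_hw *hw,
				       struct device_node *node))
{
	struct clk_init_retry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;

	entry->node = node;
	entry->hw = hw;
	entry->func = func;
	list_add_tail(&entry->link, &clk_retry_list);
	return 0;
}

/* Replay the queued callbacks once more clocks have been registered. */
static void clk_retry_pending(void)
{
	struct clk_init_retry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &clk_retry_list, link) {
		list_del(&entry->link);
		entry->func(entry->hw, entry->node);
		kfree(entry);
	}
}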
static int __init sun5i_timer_init(struct device_node *node)
{
	struct reset_control *rstc;
	void __iomem *timer_base;
	struct clk *clk;
	int irq, ret;

	timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(timer_base)) {
		pr_err("Can't map registers\n");
		return PTR_ERR(timer_base);
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("Can't parse IRQ\n");
		return -EINVAL;
	}

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("Can't get timer clock\n");
		return PTR_ERR(clk);
	}

	rstc = of_reset_control_get(node, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);

	ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
	if (ret)
		return ret;

	return sun5i_setup_clockevent(node, timer_base, clk, irq);
}
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
				  struct device *dev)
{
	struct clk *clk;
	int i;
	int error;

	dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);

	error = pm_clk_create(dev);
	if (error) {
		dev_err(dev, "pm_clk_create failed %d\n", error);
		return error;
	}

	i = 0;
	while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
		dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			clk_put(clk);
			pm_clk_destroy(dev);
			return error;
		}
	}

	return 0;
}
static void __init ath79_of_plat_time_init(void)
{
	struct device_node *np;
	struct clk *clk;
	unsigned long cpu_clk_rate;

	of_clk_init(NULL);

	np = of_get_cpu_node(0, NULL);
	if (!np) {
		pr_err("Failed to get CPU node\n");
		return;
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
		return;
	}

	cpu_clk_rate = clk_get_rate(clk);

	pr_info("CPU clock: %lu.%03lu MHz\n",
		cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000);

	mips_hpt_frequency = cpu_clk_rate / 2;

	clk_put(clk);
}
static int __init h8300_16timer_init(struct device_node *node)
{
	void __iomem *base[2];
	int ret, irq;
	unsigned int ch;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("failed to get clock for clocksource\n");
		return PTR_ERR(clk);
	}

	ret = -ENXIO;
	base[REG_CH] = of_iomap(node, 0);
	if (!base[REG_CH]) {
		pr_err("failed to map registers for clocksource\n");
		goto free_clk;
	}

	base[REG_COMM] = of_iomap(node, 1);
	if (!base[REG_COMM]) {
		pr_err("failed to map registers for clocksource\n");
		goto unmap_ch;
	}

	ret = -EINVAL;
	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		pr_err("failed to get irq for clockevent\n");
		goto unmap_comm;
	}

	of_property_read_u32(node, "renesas,channel", &ch);

	timer16_priv.mapbase = base[REG_CH];
	timer16_priv.mapcommon = base[REG_COMM];
	timer16_priv.enb = ch;
	timer16_priv.ovf = ch;
	timer16_priv.ovie = 4 + ch;

	ret = request_irq(irq, timer16_interrupt, IRQF_TIMER,
			  timer16_priv.cs.name, &timer16_priv);
	if (ret < 0) {
		pr_err("failed to request irq %d of clocksource\n", irq);
		goto unmap_comm;
	}

	clocksource_register_hz(&timer16_priv.cs, clk_get_rate(clk) / 8);

	return 0;

unmap_comm:
	iounmap(base[REG_COMM]);
unmap_ch:
	iounmap(base[REG_CH]);
free_clk:
	clk_put(clk);
	return ret;
}
static void __init of_ti_clockdomain_setup(struct device_node *node)
{
	struct clk *clk;
	struct clk_hw *clk_hw;
	const char *clkdm_name = node->name;
	int i;
	int num_clks;

	num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");

	for (i = 0; i < num_clks; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to get clock nr %d for %s (%ld)\n",
			       __func__, i, node->full_name, PTR_ERR(clk));
			continue;
		}
		if (__clk_get_flags(clk) & CLK_IS_BASIC) {
			pr_warn("can't setup clkdm for basic clk %s\n",
				__clk_get_name(clk));
			continue;
		}
		clk_hw = __clk_get_hw(clk);
		to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
		omap2_init_clk_clkdm(clk_hw);
	}
}
static void __init gic_clocksource_of_init(struct device_node *node)
{
	struct clk *clk;

	if (WARN_ON(!gic_present || !node->parent ||
		    !of_device_is_compatible(node->parent, "mti,gic")))
		return;

	clk = of_clk_get(node, 0);
	if (!IS_ERR(clk)) {
		gic_frequency = clk_get_rate(clk);
		clk_put(clk);
	} else if (of_property_read_u32(node, "clock-frequency",
					&gic_frequency)) {
		pr_err("GIC frequency not specified.\n");
		return;
	}

	gic_timer_irq = irq_of_parse_and_map(node, 0);
	if (!gic_timer_irq) {
		pr_err("GIC timer IRQ not specified.\n");
		return;
	}

	__gic_clocksource_init();
}
static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
					struct clk_bulk_data *clks)
{
	int ret;
	int i;

	for (i = 0; i < num_clks; i++)
		clks[i].clk = NULL;

	for (i = 0; i < num_clks; i++) {
		clks[i].clk = of_clk_get(np, i);
		if (IS_ERR(clks[i].clk)) {
			ret = PTR_ERR(clks[i].clk);
			pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
			       np, i, ret);
			clks[i].clk = NULL;
			goto err;
		}
	}

	return 0;

err:
	clk_bulk_put(i, clks);

	return ret;
}
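A minimal usage sketch for the bulk helper above. Since of_clk_bulk_get() is static, the caller is assumed to live in the same file; the function name and the fixed array size are illustrative only.

/* Hypothetical caller (illustrative): fetch the first two clocks the
 * node references via its "clocks" property, use them, then drop the
 * references with clk_bulk_put(). */
static int example_get_two_clocks(struct device_node *np)
{
	struct clk_bulk_data clks[2];	/* assumed clock count */
	int ret;

	ret = of_clk_bulk_get(np, ARRAY_SIZE(clks), clks);
	if (ret)
		return ret;

	/* ... clks[0].clk and clks[1].clk are valid here ... */

	clk_bulk_put(ARRAY_SIZE(clks), clks);
	return 0;
}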
static void __init h8300_tpu_init(struct device_node *node)
{
	void __iomem *base[2];
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("failed to get clock for clocksource\n");
		return;
	}

	base[CH_L] = of_iomap(node, CH_L);
	if (!base[CH_L]) {
		pr_err("failed to map registers for clocksource\n");
		goto free_clk;
	}

	base[CH_H] = of_iomap(node, CH_H);
	if (!base[CH_H]) {
		pr_err("failed to map registers for clocksource\n");
		goto unmap_L;
	}

	tpu_priv.mapbase1 = base[CH_L];
	tpu_priv.mapbase2 = base[CH_H];

	clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);

	return;

unmap_L:
	/* CH_H failed to map, so unmap the CH_L region we did map */
	iounmap(base[CH_L]);
free_clk:
	clk_put(clk);
}
void __init mxc_arch_reset_init_dt(void)
{
	struct device_node *np = NULL;

	if (cpu_is_imx6q() || cpu_is_imx6dl())
		np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
	else if (cpu_is_imx6sl())
		np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpc");

	if (np)
		of_property_read_u32(np, "fsl,wdog-reset", &wdog_source);
	pr_info("Use WDOG%d as reset source\n", wdog_source);

	np = of_find_compatible_node(NULL, NULL, "fsl,imx21-wdt");
	wdog_base = of_iomap(np, 0);
	WARN_ON(!wdog_base);

	/* Some i.MX6 boards use WDOG2 to reset board in ldo-bypass mode */
	if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
	    cpu_is_imx6sl())) {
		np = of_find_compatible_node(np, NULL, "fsl,imx21-wdt");
		wdog_base = of_iomap(np, 0);
		WARN_ON(!wdog_base);
	}

	wdog_clk = of_clk_get(np, 0);
	if (IS_ERR(wdog_clk)) {
		pr_warn("%s: failed to get wdog clock\n", __func__);
		wdog_clk = NULL;
		return;
	}

	clk_prepare(wdog_clk);
}
static void __init imx6q_1588_init(void)
{
	struct device_node *np;
	struct clk *ptp_clk;
	struct regmap *gpr;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-fec");
	if (!np) {
		pr_warn("%s: failed to find fec node\n", __func__);
		return;
	}

	ptp_clk = of_clk_get(np, 2);
	if (IS_ERR(ptp_clk)) {
		pr_warn("%s: failed to get ptp clock\n", __func__);
		goto put_node;
	}

	/*
	 * If enet_ref from ANATOP/CCM is the PTP clock source, we need to
	 * set bit IOMUXC_GPR1[21]. Otherwise the PTP clock comes from the
	 * pad (external OSC), and we need to clear the bit.
	 */
	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (!IS_ERR(gpr))
		regmap_update_bits(gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_ENET_CLK_SEL_MASK,
				   IMX6Q_GPR1_ENET_CLK_SEL_ANATOP);
	else
		pr_err("failed to find fsl,imx6q-iomuxc-gpr regmap\n");

	clk_put(ptp_clk);
put_node:
	of_node_put(np);
}
static void __init sun5i_timer_init(struct device_node *node)
{
	struct reset_control *rstc;
	void __iomem *timer_base;
	struct clk *clk;
	int irq;

	timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	/* of_io_request_and_map() returns ERR_PTR() on failure, not NULL */
	if (IS_ERR(timer_base))
		panic("Can't map registers");

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0)
		panic("Can't parse IRQ");

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk))
		panic("Can't get timer clock");

	rstc = of_reset_control_get(node, NULL);
	if (!IS_ERR(rstc))
		reset_control_deassert(rstc);

	sun5i_setup_clocksource(node, timer_base, clk, irq);
	sun5i_setup_clockevent(node, timer_base, clk, irq);
}
static void __init tango_clocksource_init(struct device_node *np)
{
	struct clk *clk;
	int xtal_freq, ret;

	xtal_in_cnt = of_iomap(np, 0);
	if (xtal_in_cnt == NULL) {
		pr_err("%s: invalid address\n", np->full_name);
		return;
	}

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("%s: invalid clock\n", np->full_name);
		return;
	}

	xtal_freq = clk_get_rate(clk);
	delay_timer.freq = xtal_freq;
	delay_timer.read_current_timer = read_xtal_counter;

	ret = clocksource_register_hz(&tango_xtal, xtal_freq);
	if (ret != 0) {
		pr_err("%s: registration failed\n", np->full_name);
		return;
	}

	sched_clock_register(read_sched_clock, 32, xtal_freq);
	register_current_timer_delay(&delay_timer);
}
static int __init armada_375_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "fixed");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
		timer_clk = clk_get_rate(clk);
	} else {
		/*
		 * This fallback is required in order to retain proper
		 * devicetree backwards compatibility.
		 */
		clk = of_clk_get(np, 0);

		/* Must have at least a clock */
		if (IS_ERR(clk)) {
			pr_err("Failed to get clock\n");
			return PTR_ERR(clk);
		}

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	return armada_370_xp_timer_common_init(np);
}
static int __init nps_get_timer_clk(struct device_node *node,
				    unsigned long *timer_freq,
				    struct clk **clk)
{
	int ret;

	*clk = of_clk_get(node, 0);
	ret = PTR_ERR_OR_ZERO(*clk);
	if (ret) {
		pr_err("timer missing clk\n");
		return ret;
	}

	ret = clk_prepare_enable(*clk);
	if (ret) {
		pr_err("Couldn't enable parent clk\n");
		clk_put(*clk);
		return ret;
	}

	*timer_freq = clk_get_rate(*clk);
	if (!(*timer_freq)) {
		pr_err("Couldn't get clk rate\n");
		clk_disable_unprepare(*clk);
		clk_put(*clk);
		return -EINVAL;
	}

	return 0;
}
static int moxart_wdt_probe(struct platform_device *pdev)
{
	struct moxart_wdt_dev *moxart_wdt;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	struct clk *clk;
	int err;
	unsigned int max_timeout;
	bool nowayout = WATCHDOG_NOWAYOUT;

	moxart_wdt = devm_kzalloc(dev, sizeof(*moxart_wdt), GFP_KERNEL);
	if (!moxart_wdt)
		return -ENOMEM;

	platform_set_drvdata(pdev, moxart_wdt);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	moxart_wdt->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(moxart_wdt->base))
		return PTR_ERR(moxart_wdt->base);

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("%s: of_clk_get failed\n", __func__);
		return PTR_ERR(clk);
	}

	moxart_wdt->clock_frequency = clk_get_rate(clk);
	if (moxart_wdt->clock_frequency == 0) {
		pr_err("%s: incorrect clock frequency\n", __func__);
		return -EINVAL;
	}

	max_timeout = UINT_MAX / moxart_wdt->clock_frequency;

	moxart_wdt->dev.info = &moxart_wdt_info;
	moxart_wdt->dev.ops = &moxart_wdt_ops;
	moxart_wdt->dev.timeout = max_timeout;
	moxart_wdt->dev.min_timeout = 1;
	moxart_wdt->dev.max_timeout = max_timeout;
	moxart_wdt->dev.parent = dev;

	watchdog_init_timeout(&moxart_wdt->dev, heartbeat, dev);
	watchdog_set_nowayout(&moxart_wdt->dev, nowayout);
	watchdog_set_drvdata(&moxart_wdt->dev, moxart_wdt);

	err = watchdog_register_device(&moxart_wdt->dev);
	if (err)
		return err;

	moxart_restart_ctx = moxart_wdt;
	arm_pm_restart = moxart_wdt_restart;

	dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
		moxart_wdt->dev.timeout, nowayout);

	return 0;
}
static void __init armada_375_timer_init(struct device_node *np)
{
	struct clk *clk;

	clk = of_clk_get_by_name(np, "fixed");
	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);
		timer_clk = clk_get_rate(clk);
	} else {
		/*
		 * This fallback is required in order to retain proper
		 * devicetree backwards compatibility.
		 */
		clk = of_clk_get(np, 0);

		/* Must have at least a clock */
		BUG_ON(IS_ERR(clk));
		clk_prepare_enable(clk);

		timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	armada_370_xp_timer_common_init(np);
}
static int hisi_pwm_probe(struct platform_device *pdev)
{
	struct device_node *np = NULL;
	int ret = 0;

	HISI_FB_DEBUG("+.\n");

	BUG_ON(pdev == NULL);

	np = of_find_compatible_node(NULL, NULL, DTS_COMP_PWM_NAME);
	if (!np) {
		HISI_FB_ERR("NOT FOUND device node %s!\n", DTS_COMP_PWM_NAME);
		ret = -ENXIO;
		goto err_return;
	}

	/* get pwm reg base */
	hisifd_pwm_base = of_iomap(np, 0);

#if !defined(CONFIG_ARCH_HI3630FPGA) && !defined(CONFIG_HISI_3635_FPGA) \
	&& !defined(CONFIG_HISI_3650_FPGA)
	/* pwm pinctrl init */
	ret = pinctrl_cmds_tx(pdev, pwm_pinctrl_init_cmds,
			      ARRAY_SIZE(pwm_pinctrl_init_cmds));
	if (ret != 0) {
		HISI_FB_ERR("Init pwm pinctrl failed! ret=%d.\n", ret);
		goto err_return;
	}

	/* get blpwm clk resource */
	g_dss_pwm_clk = of_clk_get(np, 0);
	if (IS_ERR(g_dss_pwm_clk)) {
		HISI_FB_ERR("%s clock not found: %d!\n",
			    np->name, (int)PTR_ERR(g_dss_pwm_clk));
		ret = -ENXIO;
		goto err_return;
	}

	ret = clk_set_rate(g_dss_pwm_clk, DEFAULT_PWM_CLK_RATE);
	if (ret != 0) {
		HISI_FB_ERR("dss_pwm_clk clk_set_rate(%lu) failed, error=%d!\n",
			    DEFAULT_PWM_CLK_RATE, ret);
		ret = -EINVAL;
		goto err_return;
	}

	HISI_FB_INFO("dss_pwm_clk:[%lu]->[%lu].\n",
		     DEFAULT_PWM_CLK_RATE, clk_get_rate(g_dss_pwm_clk));
#endif

	hisi_fb_device_set_status0(DTS_PWM_READY);

	HISI_FB_DEBUG("-.\n");

	return 0;

err_return:
	return ret;
}
static void __init mxs_timer_init(struct device_node *np)
{
	struct clk *timer_clk;
	int irq;

	mxs_timrot_base = of_iomap(np, 0);
	WARN_ON(!mxs_timrot_base);

	timer_clk = of_clk_get(np, 0);
	if (IS_ERR(timer_clk)) {
		pr_err("%s: failed to get clk\n", __func__);
		return;
	}

	clk_prepare_enable(timer_clk);

	/*
	 * Initialize timers to a known state
	 */
	stmp_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL);

	/* get timrot version */
	timrot_major_version = __raw_readl(mxs_timrot_base +
			(of_device_is_compatible(np, "fsl,imx23-timrot") ?
						MX23_TIMROT_VERSION_OFFSET :
						MX28_TIMROT_VERSION_OFFSET));
	timrot_major_version >>= BP_TIMROT_MAJOR_VERSION;

	/* one for clock_event */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
			BM_TIMROT_TIMCTRLn_UPDATE |
			BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));

	/* another for clocksource */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS) |
			BM_TIMROT_TIMCTRLn_RELOAD,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));

	/* set clocksource timer fixed count to the maximum */
	if (timrot_is_v1())
		__raw_writel(0xffff,
			     mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
	else
		__raw_writel(0xffffffff,
			     mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));

	/* init and register the timer to the framework */
	mxs_clocksource_init(timer_clk);
	mxs_clockevent_init(timer_clk);

	/* Make irqs happen */
	irq = irq_of_parse_and_map(np, 0);
	setup_irq(irq, &mxs_timer_irq);
}
static int alt_fpga_bridge_probe(struct platform_device *pdev)
{
	struct altera_hps2fpga_data *priv;
	const struct of_device_id *of_id;
	struct device *dev = &pdev->dev;
	uint32_t init_val;
	int rc;
	struct clk *clk;

	of_id = of_match_device(altera_fpga_of_match, dev);
	priv = (struct altera_hps2fpga_data *)of_id->data;

	WARN_ON(!priv);

	priv->np = dev->of_node;
	priv->pdev = pdev;

	priv->bridge_reset = devm_reset_control_get(dev, priv->name);
	if (IS_ERR(priv->bridge_reset)) {
		dev_err(dev, "Could not get %s reset control!\n", priv->name);
		return PTR_ERR(priv->bridge_reset);
	}

	priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
	if (IS_ERR(priv->l3reg)) {
		dev_err(dev, "regmap for altr,l3regs lookup failed.\n");
		return PTR_ERR(priv->l3reg);
	}

	clk = of_clk_get(pdev->dev.of_node, 0);
	if (IS_ERR(clk)) {
		dev_err(dev, "no clock specified\n");
		return PTR_ERR(clk);
	}

	rc = clk_prepare_enable(clk);
	if (rc) {
		dev_err(dev, "could not enable clock\n");
		return -EBUSY;
	}

	rc = register_fpga_bridge(pdev, &altera_hps2fpga_br_ops,
				  priv->name, priv);
	if (rc)
		return rc;

	if (of_property_read_u32(priv->np, "init-val", &init_val)) {
		dev_info(&priv->pdev->dev, "init-val not specified\n");
	} else if (init_val > 1) {
		dev_warn(&priv->pdev->dev, "invalid init-val %u > 1\n",
			 init_val);
	} else {
		dev_info(&priv->pdev->dev, "%s bridge\n",
			 (init_val ? "enabling" : "disabling"));
		_alt_hps2fpga_enable_set(priv, init_val);
	}

	return rc;
}
static int __init integrator_ap_timer_init_of(struct device_node *node)
{
	const char *path;
	void __iomem *base;
	int err;
	int irq;
	struct clk *clk;
	unsigned long rate;
	struct device_node *pri_node;
	struct device_node *sec_node;

	base = of_io_request_and_map(node, 0, "integrator-timer");
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("No clock for %pOFn\n", node);
		return PTR_ERR(clk);
	}
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);
	writel(0, base + TIMER_CTRL);

	err = of_property_read_string(of_aliases,
				      "arm,timer-primary", &path);
	if (err) {
		pr_warn("Failed to read property\n");
		return err;
	}

	pri_node = of_find_node_by_path(path);

	err = of_property_read_string(of_aliases,
				      "arm,timer-secondary", &path);
	if (err) {
		pr_warn("Failed to read property\n");
		return err;
	}

	sec_node = of_find_node_by_path(path);

	if (node == pri_node)
		/* The primary timer lacks IRQ, use as clocksource */
		return integrator_clocksource_init(rate, base);

	if (node == sec_node) {
		/* The secondary timer will drive the clock event */
		irq = irq_of_parse_and_map(node, 0);
		return integrator_clockevent_init(rate, base, irq);
	}

	pr_info("Timer @%p unused\n", base);
	clk_disable_unprepare(clk);

	return 0;
}
/* initialize the kernel jiffy timer source */
static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
	unsigned long rate;
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("Failed to enable clock\n");
		return ret;
	}

	rate = clk_get_rate(clk);

	if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
		pr_err("Invalid clock rate\n");
		return -EINVAL;
	}

	sirfsoc_timer_base = of_iomap(np, 0);
	if (!sirfsoc_timer_base) {
		pr_err("unable to map timer cpu registers\n");
		return -ENXIO;
	}

	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);

	writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1,
		       sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
	writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);

	ret = clocksource_register_hz(&sirfsoc_clocksource,
				      PRIMA2_CLOCK_FREQ);
	if (ret) {
		pr_err("Failed to register clocksource\n");
		return ret;
	}

	sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);

	ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
	if (ret) {
		pr_err("Failed to setup irq\n");
		return ret;
	}

	sirfsoc_clockevent_init();

	return 0;
}
static void __init mvebu_clk_gating_setup(struct device_node *np,
					  const struct mvebu_soc_descr *descr)
{
	struct mvebu_gating_ctrl *ctrl;
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	base = of_iomap(np, 0);

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		return;

	spin_lock_init(&ctrl->lock);

	/*
	 * Count, allocate, and register clock gates
	 */
	for (n = 0; descr[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates)) {
		kfree(ctrl);
		return;
	}

	for (n = 0; n < ctrl->num_gates; n++) {
		u8 flags = 0;
		const char *parent =
			(descr[n].parent) ? descr[n].parent : default_parent;

		/*
		 * On Armada 370, the DDR clock is a special case: it
		 * isn't taken by any driver, but should anyway be
		 * kept enabled, so we mark it as IGNORE_UNUSED for
		 * now.
		 */
		if (!strcmp(descr[n].name, "ddr"))
			flags |= CLK_IGNORE_UNUSED;

		ctrl->gates[n] =
			clk_register_gate(NULL, descr[n].name, parent, flags,
					  base, descr[n].bit_idx, 0,
					  &ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
}