static int gdsc_attach(struct generic_pm_domain *domain, struct device *dev) { int ret, i = 0, j = 0; struct gdsc *sc = domain_to_gdsc(domain); struct of_phandle_args clkspec; struct device_node *np = dev->of_node; if (!sc->clock_count) return 0; ret = pm_clk_create(dev); if (ret) { dev_dbg(dev, "pm_clk_create failed %d\n", ret); return ret; } sc->clks = devm_kcalloc(dev, sc->clock_count, sizeof(sc->clks), GFP_KERNEL); if (!sc->clks) return -ENOMEM; while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec)) { if (match(clkspec.args[0], sc->clocks, sc->clock_count)) { sc->clks[j] = of_clk_get_from_provider(&clkspec); pm_clk_add_clk(dev, sc->clks[j]); j++; } else if (clkspec.args[0] == sc->root_clock) sc->root_clk = of_clk_get_from_provider(&clkspec); i++; } return 0; };
/*
 * Register clkdev lookups for the legacy (non-DT) PCIe devices from the
 * kirkwood gating-clock provider, and keep the ethernet gates enabled.
 */
static void __init kirkwood_legacy_clk_init(void)
{
	struct device_node *np = of_find_compatible_node(
		NULL, NULL, "marvell,kirkwood-gating-clock");
	struct of_phandle_args clkspec;
	struct clk *clk;

	clkspec.np = np;
	clkspec.args_count = 1;

	clkspec.args[0] = CGC_BIT_PEX0;
	orion_clkdev_add("0", "pcie",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_PEX1;
	orion_clkdev_add("1", "pcie",
			 of_clk_get_from_provider(&clkspec));

	/*
	 * The ethernet interfaces forget the MAC address assigned by
	 * u-boot if the clocks are turned off. Until proper DT support
	 * is available we always enable them for now.
	 */
	clkspec.args[0] = CGC_BIT_GE0;
	clk = of_clk_get_from_provider(&clkspec);
	clk_prepare_enable(clk);

	clkspec.args[0] = CGC_BIT_GE1;
	clk = of_clk_get_from_provider(&clkspec);
	clk_prepare_enable(clk);

	/* drop the ref taken by of_find_compatible_node() (was leaked) */
	of_node_put(np);
}
/*
 * imx_obtain_fixed_clock_from_dt - look up a fixed clock node under /clocks
 * @name: leaf node name of the fixed clock
 *
 * Builds the "/clocks/<name>" path, resolves the devtree node and asks the
 * clock provider for the clock.
 *
 * Returns the clock, ERR_PTR(-ENODEV) when the node does not exist, or
 * NULL when the path string cannot be allocated.
 * NOTE(review): callers checking only IS_ERR() will miss the NULL case —
 * confirm call sites.
 */
static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
{
	unsigned int len = 0;
	struct vmm_devtree_phandle_args phandle;
	struct clk *clk = ERR_PTR(-ENODEV);
	char *path;

	len = strlen(name) + strlen("/clocks/") + 1;
	/* kmalloc() takes (size, flags); the original had them swapped */
	if (NULL == (path = kmalloc(len, GFP_KERNEL))) {
		vmm_printf("Failed to allocate fixed clock \"%s\" path "
			   "string\n", name);
		return NULL;
	}
	sprintf(path, "/clocks/%s", name);

	phandle.np = vmm_devtree_getnode(path);
	kfree(path);

	if (phandle.np) {
		clk = of_clk_get_from_provider(&phandle);
		of_node_put(phandle.np);
	}

	return clk;
}
/* Resolve the clock described by @clkspec and, if found, gate and drop it. */
static void disable_clock(struct of_phandle_args *clkspec)
{
	struct clk *clk = of_clk_get_from_provider(clkspec);

	if (IS_ERR(clk))
		return;

	clk_disable_unprepare(clk);
	clk_put(clk);
}
/*
 * There are still devices that don't even know about DT,
 * get clock gates here and add a clock lookup.
 */
static void __init dove_legacy_clk_init(void)
{
	struct device_node *np = of_find_compatible_node(NULL, NULL,
					"marvell,dove-gating-clock");
	struct of_phandle_args clkspec;

	clkspec.np = np;
	clkspec.args_count = 1;

	clkspec.args[0] = CLOCK_GATING_BIT_GBE;
	orion_clkdev_add(NULL, "mv643xx_eth_port.0",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
	orion_clkdev_add("0", "pcie",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
	orion_clkdev_add("1", "pcie",
			 of_clk_get_from_provider(&clkspec));

	/* drop the ref taken by of_find_compatible_node() (was leaked) */
	of_node_put(np);
}
/*
 * Attach callback for the CPG/MSSR clock domain: scan the device's
 * "clocks" phandles for one managed by CPG/MSSR and, if found, hook
 * it up to the device via the PM clock framework.
 */
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct of_phandle_args clkspec;
	struct clk *clk;
	unsigned int idx;
	int ret;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	for (idx = 0; ; idx++) {
		if (of_parse_phandle_with_args(dev->of_node, "clocks",
					       "#clock-cells", idx, &clkspec))
			return 0;	/* no suitable clock: nothing to do */
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			break;
		of_node_put(clkspec.np);
	}

	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = pm_clk_create(dev);
	if (ret) {
		dev_err(dev, "pm_clk_create failed %d\n", ret);
		goto err_put;
	}

	ret = pm_clk_add_clk(dev, clk);
	if (ret) {
		dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, ret);
		goto err_destroy;
	}

	return 0;

err_destroy:
	pm_clk_destroy(dev);
err_put:
	clk_put(clk);
	return ret;
}
/*
 * Attach callback for the MSTP clock domain: look for an MSTP clock
 * (or the zb_clk special case) among the device's "clocks" phandles
 * and register it with the PM clock framework.
 */
int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	unsigned int idx;
	int ret;

	for (idx = 0; ; idx++) {
		if (of_parse_phandle_with_args(np, "clocks", "#clock-cells",
					       idx, &clkspec))
			return 0;	/* no MSTP clock: nothing to manage */

		if (of_device_is_compatible(clkspec.np,
					    "renesas,cpg-mstp-clocks"))
			break;

		/* BSC on r8a73a4/sh73a0 uses zb_clk instead of an mstp clock */
		if (of_node_name_eq(clkspec.np, "zb_clk"))
			break;

		of_node_put(clkspec.np);
	}

	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = pm_clk_create(dev);
	if (ret) {
		dev_err(dev, "pm_clk_create failed %d\n", ret);
		goto err_put;
	}

	ret = pm_clk_add_clk(dev, clk);
	if (ret) {
		dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, ret);
		goto err_destroy;
	}

	return 0;

err_destroy:
	pm_clk_destroy(dev);
err_put:
	clk_put(clk);
	return ret;
}
static void _add_clkdev(struct omap_device *od, const char *clk_alias, const char *clk_name) { struct clk *r; int rc; if (!clk_alias || !clk_name) return; dev_dbg(&od->pdev->dev, "Creating %s -> %s\n", clk_alias, clk_name); r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias); if (!IS_ERR(r)) { dev_dbg(&od->pdev->dev, "alias %s already exists\n", clk_alias); clk_put(r); return; } r = clk_get_sys(NULL, clk_name); if (IS_ERR(r)) { struct of_phandle_args clkspec; clkspec.np = of_find_node_by_name(NULL, clk_name); r = of_clk_get_from_provider(&clkspec); rc = clk_register_clkdev(r, clk_alias, dev_name(&od->pdev->dev)); } else { rc = clk_add_alias(clk_alias, dev_name(&od->pdev->dev), clk_name, NULL); } if (rc) { if (rc == -ENODEV || rc == -ENOMEM) dev_err(&od->pdev->dev, "clkdev_alloc for %s failed\n", clk_alias); else dev_err(&od->pdev->dev, "clk_get for %s failed\n", clk_name); } }
/*
 * Resolve the fixed clock named @name by looking up its devtree node at
 * "/clocks/<name>" and querying the registered clock provider.
 *
 * Returns the clock, ERR_PTR(-ENOMEM) if the path string cannot be
 * allocated, or ERR_PTR(-ENODEV) if no such node exists.
 */
static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
{
	struct of_phandle_args phandle;
	struct clk *clk;
	char *path;

	path = kasprintf(GFP_KERNEL, "/clocks/%s", name);
	if (!path)
		return ERR_PTR(-ENOMEM);

	phandle.np = of_find_node_by_path(path);
	kfree(path);

	if (!phandle.np)
		return ERR_PTR(-ENODEV);

	clk = of_clk_get_from_provider(&phandle);
	of_node_put(phandle.np);

	return clk;
}
/**
 * omap_dt_clocks_register - register DT duplicate clocks during boot
 * @oclks: list of clocks to register
 *
 * Register duplicate or non-standard DT clock entries during boot. By
 * default, DT clocks are found based on their node name. If any
 * additional con-id / dev-id -> clock mapping is required, use this
 * function to list these.
 */
void __init omap_dt_clocks_register(struct omap_dt_clk oclks[])
{
	struct omap_dt_clk *c;
	struct device_node *node;
	struct clk *clk;
	/* zero-init: args/args_count were previously read uninitialized */
	struct of_phandle_args clkspec = { 0 };

	for (c = oclks; c->node_name != NULL; c++) {
		node = of_find_node_by_name(NULL, c->node_name);
		clkspec.np = node;
		clk = of_clk_get_from_provider(&clkspec);
		/* drop the ref taken by of_find_node_by_name() (was leaked) */
		of_node_put(node);

		if (!IS_ERR(clk)) {
			c->lk.clk = clk;
			clkdev_add(&c->lk);
		} else {
			pr_warn("%s: failed to lookup clock node %s\n",
				__func__, c->node_name);
		}
	}
}
/*
 * Register clkdev lookups mapping the legacy (non-DT) kirkwood platform
 * devices to their gates in the kirkwood gating-clock provider.
 */
static void __init kirkwood_legacy_clk_init(void)
{
	struct device_node *np = of_find_compatible_node(
		NULL, NULL, "marvell,kirkwood-gating-clock");
	struct of_phandle_args clkspec;

	clkspec.np = np;
	clkspec.args_count = 1;

	clkspec.args[0] = CGC_BIT_GE0;
	orion_clkdev_add(NULL, "mv643xx_eth_port.0",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_PEX0;
	orion_clkdev_add("0", "pcie",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_USB0;
	orion_clkdev_add(NULL, "orion-ehci.0",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_PEX1;
	orion_clkdev_add("1", "pcie",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_GE1;
	orion_clkdev_add(NULL, "mv643xx_eth_port.1",
			 of_clk_get_from_provider(&clkspec));

	clkspec.args[0] = CGC_BIT_SDIO;
	orion_clkdev_add(NULL, "mvsdio",
			 of_clk_get_from_provider(&clkspec));

	/* drop the ref taken by of_find_compatible_node() (was leaked) */
	of_node_put(np);
}
static int of_dra7_atl_clk_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct dra7_atl_clock_info *cinfo; int i; int ret = 0; if (!node) return -ENODEV; cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL); if (!cinfo) return -ENOMEM; cinfo->iobase = of_iomap(node, 0); cinfo->dev = &pdev->dev; pm_runtime_enable(cinfo->dev); pm_runtime_irq_safe(cinfo->dev); pm_runtime_get_sync(cinfo->dev); atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX); for (i = 0; i < DRA7_ATL_INSTANCES; i++) { struct device_node *cfg_node; char prop[5]; struct dra7_atl_desc *cdesc; struct of_phandle_args clkspec; struct clk *clk; int rc; rc = of_parse_phandle_with_args(node, "ti,provided-clocks", NULL, i, &clkspec); if (rc) { pr_err("%s: failed to lookup atl clock %d\n", __func__, i); return -EINVAL; } clk = of_clk_get_from_provider(&clkspec); cdesc = to_atl_desc(__clk_get_hw(clk)); cdesc->cinfo = cinfo; cdesc->id = i; /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); cfg_node = of_find_node_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); ret |= of_property_read_u32(cfg_node, "aws", &cdesc->aws); if (!ret) { cdesc->valid = true; atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i), cdesc->bws); atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i), cdesc->aws); } } cdesc->probed = true; /* * Enable the clock if it has been asked prior to loading the * hw driver */ if (cdesc->enabled) atl_clk_enable(__clk_get_hw(clk)); } pm_runtime_put_sync(cinfo->dev); return ret; }