/*
 * imx_src_init - map the i.MX SRC (System Reset Controller) block and
 * register it with the reset-controller framework.
 *
 * On i.MX6SX the SW_OPEN_VG_RST bit position doubles as m4c_non_sclr_rst,
 * so its state is captured in m4_is_enabled before the SCR is rewritten.
 * Warm-reset generation is then disabled so every reset is a cold reset.
 */
void __init imx_src_init(void)
{
	struct device_node *node;
	u32 scr;

	node = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!node)
		return;

	src_base = of_iomap(node, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = node;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	scr = readl_relaxed(src_base + SRC_SCR);

	/* bit 4 is m4c_non_sclr_rst on i.MX6SX */
	m4_is_enabled = cpu_is_imx6sx() &&
			!(scr & (1 << BP_SRC_SCR_SW_OPEN_VG_RST));

	scr &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(scr, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}
/*
 * imx_src_init - map the i.MX SRC block, register the reset controller,
 * and force every warm reset source to produce a cold reset.
 */
void __init imx_src_init(void)
{
	struct device_node *src_np;
	u32 scr;

	src_np = of_find_compatible_node(NULL, NULL, "fsl,imx51-src");
	if (!src_np)
		return;

	src_base = of_iomap(src_np, 0);
	WARN_ON(!src_base);

	imx_reset_controller.of_node = src_np;
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		reset_controller_register(&imx_reset_controller);

	/*
	 * force warm reset sources to generate cold reset
	 * for a more reliable restart
	 */
	spin_lock(&scr_lock);
	scr = readl_relaxed(src_base + SRC_SCR);
	scr &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
	writel_relaxed(scr, src_base + SRC_SCR);
	spin_unlock(&scr_lock);
}
static int imx_src_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { int ret = VMM_OK; struct vmm_devtree_node *np = dev->node; u32 val; ret = vmm_devtree_request_regmap(np, (virtual_addr_t *)&src_base, 0, "i.MX Reset Control"); if (VMM_OK != ret) { vmm_printf("Failed to retrive %s register mapping\n"); return ret; } imx_reset_controller.node = np; #ifdef CONFIG_RESET_CONTROLLER reset_controller_register(&imx_reset_controller); #endif /* CONFIG_RESET_CONTROLLER */ /* * force warm reset sources to generate cold reset * for a more reliable restart */ spin_lock(&scr_lock); val = readl_relaxed(src_base + SRC_SCR); val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE); writel_relaxed(val, src_base + SRC_SCR); spin_unlock(&scr_lock); return 0; }
/*
 * ralink_rst_init - look up the rt2880-reset device-tree node and
 * register the reset controller.  If the node is absent, registration
 * is skipped and an error is logged.
 */
void ralink_rst_init(void)
{
	reset_dev.of_node = of_find_compatible_node(NULL, NULL,
						    "ralink,rt2880-reset");
	if (!reset_dev.of_node)
		/* FIX: message previously lacked the terminating newline,
		 * leaving the log line open for unrelated continuations. */
		pr_err("Failed to find reset controller node\n");
	else
		reset_controller_register(&reset_dev);
}
/*
 * tegra_clk_init_rst_controller - remember the CAR register base and
 * expose @num module reset lines for @np through the reset-controller
 * framework.
 */
void tegra_clk_init_rst_controller(void __iomem *base, struct device_node *np,
				   unsigned int num)
{
	/* Stash the Clock-And-Reset register base for the reset ops. */
	car_base = base;

	rst_ctlr.nr_resets = num;
	rst_ctlr.of_node = np;

	reset_controller_register(&rst_ctlr);
}
/*
 * qcom_cc_really_probe - common probe tail for Qualcomm clock controllers.
 *
 * Registers every regmap-backed clock described by @desc, publishes them
 * as a onecell OF clock provider, then registers the accompanying reset
 * controller described by @desc against @regmap.
 *
 * Returns 0 on success or a negative errno; on reset-controller
 * registration failure the clock provider is removed again so the DT
 * node is left clean.
 */
int qcom_cc_really_probe(struct platform_device *pdev,
			 const struct qcom_cc_desc *desc, struct regmap *regmap)
{
	int i, ret;
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct clk_onecell_data *data;
	struct clk **clks;
	struct qcom_reset_controller *reset;
	struct qcom_cc *cc;
	size_t num_clks = desc->num_clks;
	struct clk_regmap **rclks = desc->clks;

	/* One allocation holds the container plus the trailing clk array. */
	cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
			  GFP_KERNEL);
	if (!cc)
		return -ENOMEM;

	clks = cc->clks;
	data = &cc->data;
	data->clks = clks;
	data->clk_num = num_clks;

	for (i = 0; i < num_clks; i++) {
		/* Gaps in the descriptor table become -ENOENT slots so the
		 * onecell lookup rejects them cleanly. */
		if (!rclks[i]) {
			clks[i] = ERR_PTR(-ENOENT);
			continue;
		}
		clk = devm_clk_register_regmap(dev, rclks[i]);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
		clks[i] = clk;
	}

	ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
	if (ret)
		return ret;

	reset = &cc->reset;
	reset->rcdev.of_node = dev->of_node;
	reset->rcdev.ops = &qcom_reset_ops;
	reset->rcdev.owner = dev->driver->owner;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->regmap = regmap;
	reset->reset_map = desc->resets;
	platform_set_drvdata(pdev, &reset->rcdev);

	ret = reset_controller_register(&reset->rcdev);
	/* Unwind the clock provider if the reset controller cannot be
	 * registered; the devm allocations are released by the core. */
	if (ret)
		of_clk_del_provider(dev->of_node);

	return ret;
}
static int syscfg_reset_controller_register(struct device *dev, const struct syscfg_reset_controller_data *data) { struct syscfg_reset_controller *rc; size_t size; int i, err; rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL); if (!rc) return -ENOMEM; size = sizeof(struct syscfg_reset_channel) * data->nr_channels; rc->channels = devm_kzalloc(dev, size, GFP_KERNEL); if (!rc->channels) return -ENOMEM; rc->rst.ops = &syscfg_reset_ops, rc->rst.of_node = dev->of_node; rc->rst.nr_resets = data->nr_channels; rc->active_low = data->active_low; for (i = 0; i < data->nr_channels; i++) { struct regmap *map; struct regmap_field *f; const char *compatible = data->channels[i].compatible; map = syscon_regmap_lookup_by_compatible(compatible); if (IS_ERR(map)) return PTR_ERR(map); f = devm_regmap_field_alloc(dev, map, data->channels[i].reset); if (IS_ERR(f)) return PTR_ERR(f); rc->channels[i].reset = f; if (!data->wait_for_ack) continue; f = devm_regmap_field_alloc(dev, map, data->channels[i].ack); if (IS_ERR(f)) return PTR_ERR(f); rc->channels[i].ack = f; } err = reset_controller_register(&rc->rst); if (!err) dev_info(dev, "registered\n"); return err; }
static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; int ret; scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); if (!scm) return -ENOMEM; scm->core_clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(scm->core_clk)) { if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) return PTR_ERR(scm->core_clk); scm->core_clk = NULL; } if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(scm->iface_clk)) { if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to acquire iface clk\n"); return PTR_ERR(scm->iface_clk); } scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(scm->bus_clk)) { if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to acquire bus clk\n"); return PTR_ERR(scm->bus_clk); } } scm->reset.ops = &qcom_scm_pas_reset_ops; scm->reset.nr_resets = 1; scm->reset.of_node = pdev->dev.of_node; reset_controller_register(&scm->reset); /* vote for max clk rate for highest performance */ ret = clk_set_rate(scm->core_clk, INT_MAX); if (ret) return ret; __scm = scm; __scm->dev = &pdev->dev; __qcom_scm_init(); return 0; }
void mmp_clk_reset_register(struct device_node *np, struct mmp_clk_reset_cell *cells, int nr_resets) { struct mmp_clk_reset_unit *unit; unit = kzalloc(sizeof(*unit), GFP_KERNEL); if (!unit) return; unit->cells = cells; unit->rcdev.of_reset_n_cells = 1; unit->rcdev.nr_resets = nr_resets; unit->rcdev.ops = &mmp_clk_reset_ops; unit->rcdev.of_node = np; unit->rcdev.of_xlate = mmp_of_reset_xlate; reset_controller_register(&unit->rcdev); }
/*
 * tegra_add_of_provider - publish the Tegra clock table as a onecell OF
 * clock provider and register the CAR reset controller, which exposes
 * 32 reset lines per clock register bank.
 *
 * Clocks that failed to register are logged; empty slots are turned
 * into -EINVAL sentinels so the onecell lookup rejects them.
 */
void __init tegra_add_of_provider(struct device_node *np)
{
	int idx;

	for (idx = 0; idx < clk_num; idx++) {
		if (IS_ERR(clks[idx])) {
			pr_err("Tegra clk %d: register failed with %ld\n",
			       idx, PTR_ERR(clks[idx]));
		}
		if (!clks[idx])
			clks[idx] = ERR_PTR(-EINVAL);
	}

	clk_data.clks = clks;
	clk_data.clk_num = clk_num;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

	rst_ctlr.of_node = np;
	rst_ctlr.nr_resets = clk_num * 32;
	reset_controller_register(&rst_ctlr);
}