int dsi_pll_clock_register_lpm(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc; if (!pdev || !pdev->dev.of_node) { pr_err("Invalid input parameters\n"); return -EINVAL; } if (!pll_res || !pll_res->pll_base) { pr_err("Invalid PLL resources\n"); return -EPROBE_DEFER; } /* Set client data to mux, div and vco clocks */ byte_clk_src.priv = pll_res; pixel_clk_src.priv = pll_res; byte_mux_8916.priv = pll_res; indirect_path_div2_clk_8916.priv = pll_res; analog_postdiv_clk_8916.priv = pll_res; dsi_vco_clk_8916.priv = pll_res; pll_res->vco_delay = VCO_DELAY_USEC; /* Set clock source operations */ pixel_clk_src_ops = clk_ops_slave_div; pixel_clk_src_ops.prepare = dsi_pll_div_prepare; analog_postdiv_clk_ops = clk_ops_div; analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare; byte_clk_src_ops = clk_ops_div; byte_clk_src_ops.prepare = dsi_pll_div_prepare; byte_mux_clk_ops = clk_ops_gen_mux; byte_mux_clk_ops.prepare = dsi_pll_mux_prepare; if ((pll_res->target_id == MDSS_PLL_TARGET_8916) || (pll_res->target_id == MDSS_PLL_TARGET_8939) || (pll_res->target_id == MDSS_PLL_TARGET_8909) || (pll_res->target_id == MDSS_PLL_TARGET_8952)) { rc = of_msm_clock_register(pdev->dev.of_node, mdss_dsi_pllcc_8916, ARRAY_SIZE(mdss_dsi_pllcc_8916)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else { pr_err("Invalid target ID\n"); rc = -EINVAL; } if (!rc) pr_info("Registered DSI PLL clocks successfully\n"); return rc; }
/*
 * dsi_pll_clock_register() - compatible-string driven DSI PLL clock
 * registration.  Validates pdev/pll_res, reads the DT "compatible"
 * property, hands pll_res to the shared mux/div clocks, installs the
 * div/mux prepare() hooks, then branches per compatible string: the
 * 8916 branch reparents the postdiv/mux/pixel clocks onto the 8916 VCO
 * and registers mdss_dsi_pllcc_8916 (-EPROBE_DEFER on failure).
 *
 * NOTE(review): this fragment is cut off mid-function — the
 * "qcom,mdss_dsi_pll_8974" branch opens here but its body and the
 * function's tail are not on this line.  Code kept byte-identical.
 */
int dsi_pll_clock_register(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc; const char *compatible_stream = NULL; int compat_len = 0; if (!pdev || !pll_res || !pdev->dev.of_node) { pr_err("Invalid input parameters\n"); return -EINVAL; } compatible_stream = of_get_property(pdev->dev.of_node, "compatible", &compat_len); if (!compatible_stream || (compat_len <= 0)) { pr_err("Invalid compatible string\n"); return -EINVAL; } if (!pll_res || !pll_res->pll_base) { pr_err("Invalid input parameters\n"); return -EPROBE_DEFER; } /* Set client data to mux, div and vco clocks */ byte_clk_src.priv = pll_res; pixel_clk_src.priv = pll_res; byte_mux.priv = pll_res; indirect_path_div2_clk.priv = pll_res; analog_postdiv_clk.priv = pll_res; /* Set clock source operations */ pixel_clk_src_ops = clk_ops_slave_div; pixel_clk_src_ops.prepare = dsi_pll_div_prepare; analog_postdiv_clk_ops = clk_ops_div; analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare; byte_clk_src_ops = clk_ops_div; byte_clk_src_ops.prepare = dsi_pll_div_prepare; byte_mux_clk_ops = clk_ops_gen_mux; byte_mux_clk_ops.prepare = dsi_pll_mux_prepare; if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8916")) { dsi_vco_clk_8916.priv = pll_res; analog_postdiv_clk.c.parent = &dsi_vco_clk_8916.c; byte_mux.parents[0] = (struct clk_src) {&dsi_vco_clk_8916.c, 0}; byte_mux.c.parent = &dsi_vco_clk_8916.c; pixel_clk_src.c.parent = &dsi_vco_clk_8916.c; rc = of_msm_clock_register(pdev->dev.of_node, mdss_dsi_pllcc_8916, ARRAY_SIZE(mdss_dsi_pllcc_8916)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8974")) {
static int msm_rpmcc_8974_probe(struct platform_device *pdev) { struct resource *res; int ret; ret = enable_rpm_scaling(); if (ret < 0) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Unable to retrieve register base.\n"); return -ENOMEM; } virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_base) { dev_err(&pdev->dev, "Failed to map in CC registers.\n"); return -ENOMEM; } ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm_8974, ARRAY_SIZE(msm_clocks_rpm_8974)); if (ret) return ret; /* * Hold an active set vote for the PNOC AHB source. Sleep set vote is 0. */ clk_set_rate(&pnoc_keepalive_a_clk.c, 19200000); clk_prepare_enable(&pnoc_keepalive_a_clk.c); /* * Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB * source. Sleep set vote is 0. */ clk_set_rate(&mmssnoc_ahb_a_clk.c, 40000000); clk_prepare_enable(&mmssnoc_ahb_a_clk.c); /* * Hold an active set vote for CXO; this is because CXO is expected * to remain on whenever CPUs aren't power collapsed. */ clk_prepare_enable(&cxo_a_clk_src.c); dev_info(&pdev->dev, "Registered RPM clocks.\n"); return 0; }
/*
 * msm_clock_debug_probe() - register the clock measurement debug mux.
 * Reuses the generic mux ops, overriding get_rate with the measurement
 * hook, then registers the measure clock table from the DT node.
 *
 * Return: 0 on success or the of_msm_clock_register() error code.
 */
static int msm_clock_debug_probe(struct platform_device *pdev)
{
	int rc;

	clk_ops_debug_mux = clk_ops_gen_mux;
	clk_ops_debug_mux.get_rate = measure_get_rate;

	rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_measure,
				   ARRAY_SIZE(msm_clocks_measure));
	if (rc) {
		dev_err(&pdev->dev, "Failed to register debug Mux\n");
		return rc;
	}

	dev_info(&pdev->dev, "Registered Debug Mux successfully\n");
	return rc;
}
int dsi_pll_clock_register(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc; if (!pll_res || !pll_res->pll_base) { pr_err("Invalide input parameters\n"); return -EPROBE_DEFER; } /* Set client data to mux, div and vco clocks */ byte_clk_src_8974.priv = pll_res; byte_mux_8974.priv = pll_res; pixel_clk_src_8974.priv = pll_res; indirect_path_div2_clk_8974.priv = pll_res; analog_postdiv_clk_8974.priv = pll_res; dsi_vco_clk_8974.priv = pll_res; /* Set clock source operations */ pixel_clk_src_ops = clk_ops_slave_div; pixel_clk_src_ops.prepare = dsi_pll_div_prepare; analog_postdiv_clk_ops = clk_ops_div; analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare; byte_clk_src_ops = clk_ops_div; byte_clk_src_ops.prepare = dsi_pll_div_prepare; byte_mux_clk_ops = clk_ops_gen_mux; byte_mux_clk_ops.prepare = dsi_pll_mux_prepare; rc = of_msm_clock_register(pdev->dev.of_node, mdss_dsi_pllcc_8974, ARRAY_SIZE(mdss_dsi_pllcc_8974)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } return rc; }
static int msm_rpmcc_8916_probe(struct platform_device *pdev) { struct resource *res; int ret; ret = enable_rpm_scaling(); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Unable to get register base\n"); return -ENOMEM; } virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_base) { dev_err(&pdev->dev, "Failed to map CC registers\n"); return -ENOMEM; } ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm, ARRAY_SIZE(msm_clocks_rpm)); if (ret) { dev_err(&pdev->dev, "Unable to register RPM clocks\n"); return ret; } /* * Hold an active set vote for PCNOC AHB source. Sleep set vote is 0. */ clk_set_rate(&pcnoc_keepalive_a_clk.c, 19200000); clk_prepare_enable(&pcnoc_keepalive_a_clk.c); clk_prepare_enable(&xo_a_clk_src.c); dev_info(&pdev->dev, "Registered RPM clocks.\n"); return 0; }
static int msm_lpasscc_8974_probe(struct platform_device *pdev) { struct resource *res; int ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Unable to retrieve register base.\n"); return -ENOMEM; } virt_bases[LPASS_BASE] = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_bases[LPASS_BASE]) return -ENOMEM; ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_lpass_8974, ARRAY_SIZE(msm_clocks_lpass_8974)); if (ret) return ret; dev_info(&pdev->dev, "Registered LPASS clocks.\n"); return 0; }
int hdmi_20nm_pll_clock_register(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc = -ENOTSUPP; if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) { pr_err("Invalide input parameters\n"); return -EPROBE_DEFER; } /* Set client data for vco, mux and div clocks */ hdmi_20nm_vco_clk.priv = pll_res; rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8994, ARRAY_SIZE(hdmipllcc_8994)); if (rc) { pr_err("Clock register failed rc=%d\n", rc); rc = -EPROBE_DEFER; } else { pr_debug("%s: SUCCESS\n", __func__); } return rc; }
static int msm_gcc_probe(struct platform_device *pdev) { struct resource *res; int ret; u32 regval; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base"); if (!res) { dev_err(&pdev->dev, "Register base not defined\n"); return -ENOMEM; } virt_bases[GCC_BASE] = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!virt_bases[GCC_BASE]) { dev_err(&pdev->dev, "Failed to ioremap CC registers\n"); return -ENOMEM; } /*Vote for GPLL0 to turn on. Needed by acpuclock. */ regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE)); regval |= BIT(0); writel_relaxed(regval, GCC_REG_BASE(APCS_GPLL_ENA_VOTE)); ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_lookup, ARRAY_SIZE(msm_clocks_lookup)); if (ret) return ret; clk_prepare_enable(&xo_a_clk_src.c); dev_info(&pdev->dev, "Registered GCC clocks\n"); return 0; }
int dsi_pll_clock_register_hpm(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc; if (!pdev || !pdev->dev.of_node) { pr_err("Invalid input parameters\n"); return -EINVAL; } if (!pll_res || !pll_res->pll_base) { pr_err("Invalid PLL resources\n"); return -EPROBE_DEFER; } /* * Set client data to mux, div and vco clocks * This needs to be done for PLL0 and PLL1 separately * based on the cell index. */ if (!pll_res->index) { dsi_pll0_byte_clk_src.priv = pll_res; dsi_pll0_pixel_clk_src.priv = pll_res; dsi_pll0_byte_mux.priv = pll_res; dsi_pll0_indirect_path_div2_clk.priv = pll_res; dsi_pll0_analog_postdiv_clk.priv = pll_res; dsi_pll0_vco_clk.priv = pll_res; } else { dsi_pll1_byte_clk_src.priv = pll_res; dsi_pll1_pixel_clk_src.priv = pll_res; dsi_pll1_byte_mux.priv = pll_res; dsi_pll1_indirect_path_div2_clk.priv = pll_res; dsi_pll1_analog_postdiv_clk.priv = pll_res; dsi_pll1_vco_clk.priv = pll_res; } pll_res->vco_delay = VCO_DELAY_USEC; /* Set clock source operations */ pixel_clk_src_ops = clk_ops_slave_div; pixel_clk_src_ops.prepare = dsi_pll_div_prepare; analog_postdiv_clk_ops = clk_ops_div; analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare; byte_clk_src_ops = clk_ops_div; byte_clk_src_ops.prepare = dsi_pll_div_prepare; byte_mux_clk_ops = clk_ops_gen_mux; byte_mux_clk_ops.prepare = dsi_pll_mux_prepare; if ((pll_res->target_id == MDSS_PLL_TARGET_8974) || (pll_res->target_id == MDSS_PLL_TARGET_8976)) { if (!pll_res->index) rc = of_msm_clock_register(pdev->dev.of_node, dsi_pll0_cc, ARRAY_SIZE(dsi_pll0_cc)); else rc = of_msm_clock_register(pdev->dev.of_node, dsi_pll1_cc, ARRAY_SIZE(dsi_pll1_cc)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else { pr_err("Invalid target ID\n"); rc = -EINVAL; } if (!rc) pr_info("Registered DSI PLL:%d clocks successfully\n", pll_res->index); return rc; }
/*
 * NOTE(review): fragment — tail of the compatible-string DSI PLL
 * registration function (its opening/prologue is not on this line).
 * The 8916 branch reparents the postdiv/mux/pixel clocks onto the 8916
 * VCO and registers mdss_dsi_pllcc_8916; the 8974 branch registers
 * mdss_dsi_pllcc_8974; any other compatible string yields -EINVAL.
 * Registration failures are mapped to -EPROBE_DEFER.  Code kept
 * byte-identical.
 */
if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8916")) { dsi_vco_clk_8916.priv = pll_res; analog_postdiv_clk.c.parent = &dsi_vco_clk_8916.c; byte_mux.parents[0] = (struct clk_src) {&dsi_vco_clk_8916.c, 0}; byte_mux.c.parent = &dsi_vco_clk_8916.c; pixel_clk_src.c.parent = &dsi_vco_clk_8916.c; rc = of_msm_clock_register(pdev->dev.of_node, mdss_dsi_pllcc_8916, ARRAY_SIZE(mdss_dsi_pllcc_8916)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8974")) { dsi_vco_clk_8974.priv = pll_res; rc = of_msm_clock_register(pdev->dev.of_node, mdss_dsi_pllcc_8974, ARRAY_SIZE(mdss_dsi_pllcc_8974)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else { pr_err("Invalid compatible string\n"); rc = -EINVAL; } if (!rc) pr_info("Registered DSI PLL clocks successfully\n"); return rc; }
/*
 * dsi_pll_clock_register_20nm() - register the 20 nm DSI PLL clock tree
 * (8994/8992 targets) with the MSM clock framework.  Sets per-PLL
 * client data on the main and shadow clock objects (PLL0 or PLL1,
 * chosen by pll_res->index), optionally widens the VCO range to
 * 1-2 GHz when pll_en_90_phase is set, installs the clock-ops
 * prepare/rate hooks, registers the clock table for the selected PLL,
 * and registers a GDSC regulator notifier plus a deferred PLL-off work
 * item.  Returns 0 on success, -EINVAL on bad arguments or target,
 * -EPROBE_DEFER on registration failure.
 *
 * NOTE(review): this extraction lost the original line breaks; the
 * pr_err() string near the end is split across two source lines below
 * and is kept byte-identical.
 */
int dsi_pll_clock_register_20nm(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc; struct dss_vreg *pll_reg; if (!pdev || !pdev->dev.of_node) { pr_err("Invalid input parameters\n"); return -EINVAL; } if (!pll_res || !pll_res->pll_base) { pr_err("Invalid PLL resources\n"); return -EPROBE_DEFER; } /* * Set client data to mux, div and vco clocks. * This needs to be done only for PLL0 since, that is the one in * use. **/ if (!pll_res->index) { dsi0pll_byte_clk_src.priv = pll_res; dsi0pll_pixel_clk_src.priv = pll_res; dsi0pll_bypass_lp_div_mux.priv = pll_res; dsi0pll_indirect_path_div2_clk.priv = pll_res; dsi0pll_ndiv_clk.priv = pll_res; dsi0pll_fixed_hr_oclk2_div_clk.priv = pll_res; dsi0pll_hr_oclk3_div_clk.priv = pll_res; dsi0pll_vco_clk.priv = pll_res; dsi0pll_shadow_byte_clk_src.priv = pll_res; dsi0pll_shadow_pixel_clk_src.priv = pll_res; dsi0pll_shadow_bypass_lp_div_mux.priv = pll_res; dsi0pll_shadow_indirect_path_div2_clk.priv = pll_res; dsi0pll_shadow_ndiv_clk.priv = pll_res; dsi0pll_shadow_fixed_hr_oclk2_div_clk.priv = pll_res; dsi0pll_shadow_hr_oclk3_div_clk.priv = pll_res; dsi0pll_shadow_dsi_vco_clk.priv = pll_res; if (pll_res->pll_en_90_phase) { dsi0pll_vco_clk.min_rate = 1000000000; dsi0pll_vco_clk.max_rate = 2000000000; dsi0pll_shadow_dsi_vco_clk.min_rate = 1000000000; dsi0pll_shadow_dsi_vco_clk.max_rate = 2000000000; pr_debug("%s:Update VCO range: 1GHz-2Ghz", __func__); } } else { dsi1pll_byte_clk_src.priv = pll_res; dsi1pll_pixel_clk_src.priv = pll_res; dsi1pll_bypass_lp_div_mux.priv = pll_res; dsi1pll_indirect_path_div2_clk.priv = pll_res; dsi1pll_ndiv_clk.priv = pll_res; dsi1pll_fixed_hr_oclk2_div_clk.priv = pll_res; dsi1pll_hr_oclk3_div_clk.priv = pll_res; dsi1pll_vco_clk.priv = pll_res; dsi1pll_shadow_byte_clk_src.priv = pll_res; dsi1pll_shadow_pixel_clk_src.priv = pll_res; dsi1pll_shadow_bypass_lp_div_mux.priv = pll_res; dsi1pll_shadow_indirect_path_div2_clk.priv = pll_res; dsi1pll_shadow_ndiv_clk.priv = pll_res; 
/* Continuation: remaining PLL1 client data, ops setup, clock table registration and GDSC notifier hookup. */
dsi1pll_shadow_fixed_hr_oclk2_div_clk.priv = pll_res; dsi1pll_shadow_hr_oclk3_div_clk.priv = pll_res; dsi1pll_shadow_dsi_vco_clk.priv = pll_res; dsi1pll_vco_dummy_clk.priv = pll_res; if (pll_res->pll_en_90_phase) { dsi1pll_vco_clk.min_rate = 1000000000; dsi1pll_vco_clk.max_rate = 2000000000; dsi1pll_shadow_dsi_vco_clk.min_rate = 1000000000; dsi1pll_shadow_dsi_vco_clk.max_rate = 2000000000; pr_debug("%s:Update VCO range: 1GHz-2Ghz", __func__); } } pll_res->vco_delay = VCO_DELAY_USEC; /* Set clock source operations */ pixel_clk_src_ops = clk_ops_slave_div; pixel_clk_src_ops.prepare = dsi_pll_div_prepare; ndiv_clk_ops = clk_ops_div; ndiv_clk_ops.prepare = dsi_pll_div_prepare; byte_clk_src_ops = clk_ops_div; byte_clk_src_ops.prepare = dsi_pll_div_prepare; bypass_lp_div_mux_clk_ops = clk_ops_gen_mux; bypass_lp_div_mux_clk_ops.prepare = dsi_pll_mux_prepare; clk_ops_gen_mux_dsi = clk_ops_gen_mux; clk_ops_gen_mux_dsi.round_rate = parent_round_rate; clk_ops_gen_mux_dsi.set_rate = parent_set_rate; shadow_pixel_clk_src_ops = clk_ops_slave_div; shadow_pixel_clk_src_ops.prepare = dsi_pll_div_prepare; shadow_byte_clk_src_ops = clk_ops_div; shadow_byte_clk_src_ops.prepare = dsi_pll_div_prepare; if ((pll_res->target_id == MDSS_PLL_TARGET_8994) || (pll_res->target_id == MDSS_PLL_TARGET_8992)) { if (pll_res->index) { rc = of_msm_clock_register(pdev->dev.of_node, dsi1_pllcc_20nm, ARRAY_SIZE(dsi1_pllcc_20nm)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } else { rc = of_msm_clock_register(pdev->dev.of_node, dsi0_pllcc_20nm, ARRAY_SIZE(dsi0_pllcc_20nm)); if (rc) { pr_err("Clock register failed\n"); rc = -EPROBE_DEFER; } } pll_res->gdsc_cb.notifier_call = dsi_pll_regulator_notifier_call; INIT_WORK(&pll_res->pll_off, dsi_pll_off_work); pll_reg = mdss_pll_get_mp_by_reg_name(pll_res, "gdsc"); if (pll_reg) { pr_debug("Registering for gdsc regulator events\n"); if (regulator_register_notifier(pll_reg->vreg, &(pll_res->gdsc_cb))) pr_err("Regulator notification 
registration failed!\n"); } } else { pr_err("Invalid target ID\n"); rc = -EINVAL; } if (!rc) pr_info("Registered DSI PLL clocks successfully\n"); return rc; }