/*
 * Regulator disable callback for a GDSC (globally distributed switch
 * controller) power domain.
 *
 * Sequencing: memory/peripheral retention is dropped for every clock in the
 * domain first, then either the logic is collapsed via the SW_COLLAPSE bit
 * (polling until PWR_ON clears) or, for domains that skip logic collapse,
 * all domain clocks are held in reset instead.
 *
 * Returns 0 on success or the poll-timeout error code.
 */
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	/*
	 * Walk the clock list in reverse (mirror of the enable order) and
	 * allow memory/peripheral state to be lost while power is removed,
	 * but only for flags this GDSC is configured to toggle.
	 */
	for (i = sc->clock_count-1; i >= 0; i--) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	if (sc->toggle_logic) {
		/* Request software collapse and wait for the rail to drop. */
		regval = readl_relaxed(sc->gdscr);
		regval |= SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       !(regval & PWR_ON_MASK),
					       TIMEOUT_US);
		if (ret)
			dev_err(&rdev->dev, "%s disable timed out\n",
				sc->rdesc.name);
	} else {
		/*
		 * Logic collapse is skipped for this domain; emulate "off"
		 * by asserting resets on all of its clocks instead.
		 */
		for (i = sc->clock_count-1; i >= 0; i--)
			clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
		sc->resets_asserted = true;
	}

	return ret;
}
static int gfx2d_footswitch_disable(struct regulator_dev *rdev) { struct footswitch *fs = rdev_get_drvdata(rdev); struct fs_clk_data *clock; uint32_t regval, rc = 0; regval = readl_relaxed(fs->gfs_ctl_reg); if ((regval & ENABLE_BIT) == 0) return 0; rc = setup_clocks(fs); if (rc) return rc; clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN); if (fs->bus_port0) { rc = msm_bus_axi_porthalt(fs->bus_port0); if (rc) { pr_err("%s port 0 halt failed.\n", fs->desc.name); goto err; } } clk_disable_unprepare(fs->core_clk); for (clock = fs->clk_data; clock->clk; clock++) ; for (clock--; clock >= fs->clk_data; clock--) clk_reset(clock->clk, CLK_RESET_ASSERT); udelay(5); regval |= CLAMP_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); regval &= ~ENABLE_BIT; writel_relaxed(regval, fs->gfs_ctl_reg); clk_prepare_enable(fs->core_clk); restore_clocks(fs); fs->is_enabled = false; return 0; err: clk_set_flags(fs->core_clk, CLKFLAG_RETAIN); restore_clocks(fs); return rc; }
/*
 * Regulator enable callback for a GDSC power domain (variant that can
 * force-enable a set of root clocks before restoring the rail).
 *
 * Refuses to operate while the GDSC is under hardware control, then clears
 * SW_COLLAPSE and polls for PWR_ON (or, for no-logic-collapse domains,
 * deasserts the domain clock resets).  Finally re-applies memory/peripheral
 * retention on every clock in the domain.
 *
 * Returns 0 on success, -EBUSY under HW control, or a poll-timeout error.
 */
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	/* Optionally force the domain's root clocks on first. */
	if (sc->root_en) {
		for (i = 0; i < sc->clock_count; i++)
			clk_prepare_enable(sc->clocks[i]);
	}

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		/* SW toggling is invalid while the HW state machine owns it. */
		if (regval & HW_CONTROL_MASK) {
			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
				 sc->rdesc.name);
			return -EBUSY;
		}
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       regval & PWR_ON_MASK,
					       TIMEOUT_US);
		if (ret) {
			dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
				sc->rdesc.name, regval);
			/* Re-sample once more after a full timeout for debug. */
			udelay(TIMEOUT_US);
			regval = readl_relaxed(sc->gdscr);
			dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
				sc->rdesc.name, regval, TIMEOUT_US);
			return ret;
		}
	} else {
		/* No logic collapse: "on" means releasing the clock resets. */
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	/* Restore retention flags now that the rail is up. */
	for (i = 0; i < sc->clock_count; i++) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);
	return 0;
}
/*
 * Regulator enable callback for a GDSC power domain (LGE board variant).
 *
 * On CONFIG_MACH_LGE builds an alternate enable path (lge_gdsc_enable) can
 * be selected per-GDSC via sc->use_lge_workaround; otherwise the standard
 * clear-SW_COLLAPSE-and-poll-PWR_ON sequence is used.
 *
 * Returns 0 on success or a (timeout) error code.
 */
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	if (sc->toggle_logic) {
#ifdef CONFIG_MACH_LGE
		if (sc->use_lge_workaround) {
			/* Board-specific enable sequence. */
			ret = lge_gdsc_enable(sc);
			if (ret)
				return ret;
		} else
#endif
		{
			/* Standard path: release SW collapse, wait for power. */
			regval = readl_relaxed(sc->gdscr);
			regval &= ~SW_COLLAPSE_MASK;
			writel_relaxed(regval, sc->gdscr);

			ret = readl_tight_poll_timeout(sc->gdscr, regval,
						       regval & PWR_ON_MASK,
						       TIMEOUT_US);
			if (ret) {
				dev_err(&rdev->dev, "%s enable timed out\n",
					sc->rdesc.name);
				return ret;
			}
		}
	} else {
		/* No logic collapse: "on" means releasing the clock resets. */
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	/* Restore retention flags now that the rail is up. */
	for (i = 0; i < sc->clock_count; i++) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);
	return 0;
}
/*
 * Regulator disable callback for a GDSC power domain (variant with a
 * dedicated root clock and an optional GMEM clamp-IO domain register).
 *
 * The root clock (sc->root_clk_idx) is deliberately excluded from the
 * retention/reset loops and is only gated at the very end, after the
 * collapse has completed.
 *
 * Returns 0 on success, -EBUSY under HW control, or a poll-timeout error.
 */
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	/* Drop retention on every domain clock except the root clock. */
	for (i = sc->clock_count-1; i >= 0; i--) {
		if (unlikely(i == sc->root_clk_idx))
			continue;
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		/* SW toggling is invalid while the HW state machine owns it. */
		if (regval & HW_CONTROL_MASK) {
			dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
				 sc->rdesc.name);
			return -EBUSY;
		}
		regval |= SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = poll_gdsc_status(sc->gdscr, DISABLED);
		if (ret)
			dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
				sc->rdesc.name, regval);

		/* Clamp GMEM I/O after collapse, when a domain reg exists. */
		if (sc->domain_addr) {
			regval = readl_relaxed(sc->domain_addr);
			regval |= GMEM_CLAMP_IO_MASK;
			writel_relaxed(regval, sc->domain_addr);
		}
	} else {
		/* No logic collapse: hold non-root domain clocks in reset. */
		for (i = sc->clock_count-1; i >= 0; i--)
			if (likely(i != sc->root_clk_idx))
				clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
		sc->resets_asserted = true;
	}

	/* Gate the root clock last, balancing the enable path. */
	if (sc->root_en)
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);

	return ret;
}
/*
 * Bring the HSIC xHCI controller out of low-power mode.
 *
 * Mirrors mxhci_hsic_suspend(): re-asserts the periph retention flag on
 * system_clk, re-votes for bus bandwidth, disables the wakeup IRQ, restores
 * the nominal VDDCX voltage range and turns all controller clocks back on.
 *
 * Returns 0 (voltage-set failure is logged but not propagated).
 */
static int mxhci_hsic_resume(struct mxhci_hsic_hcd *mxhci)
{
	struct usb_hcd *hcd = hsic_to_hcd(mxhci);
	int ret;
	unsigned long flags;

	if (!mxhci->in_lpm) {
		dev_dbg(mxhci->dev, "%s called in !in_lpm\n", __func__);
		return 0;
	}

	/* Keep the system awake while resuming the controller. */
	pm_stay_awake(mxhci->dev);

	/* enable force-on mode for periph_on */
	clk_set_flags(mxhci->system_clk, CLKFLAG_RETAIN_PERIPH);

	/* Re-vote for bus bandwidth asynchronously. */
	if (mxhci->bus_perf_client) {
		mxhci->bus_vote = true;
		queue_work(mxhci->wq, &mxhci->bus_vote_w);
	}

	/* Tear down remote-wakeup IRQ state under the wakeup lock. */
	spin_lock_irqsave(&mxhci->wakeup_lock, flags);
	if (mxhci->wakeup_irq_enabled) {
		disable_irq_wake(mxhci->wakeup_irq);
		disable_irq_nosync(mxhci->wakeup_irq);
		mxhci->wakeup_irq_enabled = 0;
	}

	/* Release the runtime-PM reference taken by the wakeup path. */
	if (mxhci->pm_usage_cnt) {
		mxhci->pm_usage_cnt = 0;
		pm_runtime_put_noidle(mxhci->dev);
	}
	spin_unlock_irqrestore(&mxhci->wakeup_lock, flags);

	/* Restore the nominal VDDCX operating window. */
	ret = regulator_set_voltage(mxhci->hsic_vddcx,
				    mxhci->vdd_low_vol_level,
				    mxhci->vdd_high_vol_level);
	if (ret < 0)
		dev_err(mxhci->dev,
			"unable to set nominal vddcx voltage (no VDD MIN)\n");

	/* Turn the controller clocks back on (reverse of suspend order). */
	clk_prepare_enable(mxhci->system_clk);
	clk_prepare_enable(mxhci->cal_clk);
	clk_prepare_enable(mxhci->hsic_clk);
	clk_prepare_enable(mxhci->utmi_clk);
	clk_prepare_enable(mxhci->core_clk);

	/* A wakeup-capable IRQ implies resume was triggered remotely. */
	if (mxhci->wakeup_irq)
		usb_hcd_resume_root_hub(hcd);

	mxhci->in_lpm = 0;

	dev_dbg(mxhci->dev, "HSIC-USB exited from low power mode\n");
	xhci_dbg_log_event(&dbg_hsic, NULL, "Controller resumed", 0);

	return 0;
}
/*
 * Select the VFE clock source (internal vs. external) for the camera I/O
 * block by setting source-select flags on the cached camio_vfe_clk.
 * Unknown source types are ignored, as is a missing clock handle.
 */
void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype)
{
	struct clk *vfe_clk = camio_vfe_clk;

	if (vfe_clk == NULL)
		return;

	if (srctype == MSM_CAMIO_CLK_SRC_INTERNAL)
		clk_set_flags(vfe_clk, 0x00000100 << 1);
	else if (srctype == MSM_CAMIO_CLK_SRC_EXTERNAL)
		clk_set_flags(vfe_clk, 0x00000100);
}
/*
 * Regulator disable callback for the 2D graphics core footswitch.
 *
 * Ordering is hardware-mandated: memory retention is dropped and the bus
 * port halted before the core clock is gated; resets are asserted and
 * allowed to settle before the I/O clamp; the rail is collapsed only after
 * the clamp is in place.
 *
 * Returns 0 on success or a negative errno on failure.
 * NOTE(review): rc is declared uint32_t though it carries negative errno
 * values — works on two's-complement targets but should be int.
 */
static int gfx2d_footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);

	/* Wait for synchronous resets to propagate. */
	udelay(5);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = false;
	return 0;

err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
	restore_clocks(fs);
	return rc;
}
/*
 * Regulator enable callback for the 2D graphics core footswitch.
 *
 * Marks the footswitch claimed, then (unless already enabled and
 * unclamped) powers the rail following the required sequence: unhalt the
 * bus port, gate the core clock, assert domain resets, enable the rail,
 * un-clamp I/O, deassert resets, and finally re-enable the core clock
 * with memory retention on.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int gfx2d_footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Record the claim before touching hardware. */
	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since
	 * footswitch_enable() is first called before footswitch_disable()
	 * and resets should be asserted before power is restored.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);

	/* Wait for synchronous resets to propagate. */
	udelay(RESET_DELAY_US);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	/* Order the register write and let the rail charge. */
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	udelay(RESET_DELAY_US);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err:
	restore_clocks(fs);
	return rc;
}
/*
 * Probe a GDSC device-tree node and register it as a voltage regulator
 * (LGE board variant: can substitute an LGE-specific enable workaround).
 *
 * Parses the regulator name, GDSCR register, domain clocks and retention
 * properties; programs the GDSCR wait-state fields; force-enables the
 * domain up front when logic collapse is to be skipped; and finally
 * registers with the regulator framework.
 *
 * Returns 0 on success or a negative errno on failure (devm-managed
 * resources are released automatically).
 */
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	/* Monotonic counter assigning a unique regulator id per GDSC. */
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;
#ifdef CONFIG_MACH_LGE
	int use_lge_workaround = 0; /* default: all not applied */
#endif

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	/* Chain to a parent supply when the DT declares one. */
	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	/* Map the GDSC control register. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	/* A missing qcom,clock-names property means zero domain clocks. */
	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

#ifdef CONFIG_MACH_LGE
	/* Non-zero lge,use_workaround selects the LGE enable path. */
	of_property_read_u32(pdev->dev.of_node, "lge,use_workaround",
			     &use_lge_workaround);
	sc->use_lge_workaround = !(!use_lge_workaround);
#endif
	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on registers writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					   "qcom,retain-mem");
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					      "qcom,retain-periph");
	/*
	 * Seed per-clock retention flags; a domain that is already powered
	 * (PWR_ON set) keeps retention regardless of the DT properties.
	 */
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}
	sc->toggle_mem = !retain_mem;
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						  "qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
#ifdef CONFIG_MACH_LGE
		/* LGE workaround is not used if a device is good pdn revision */
		if (lge_get_board_revno() >= use_lge_workaround) {
			regval &= ~SW_COLLAPSE_MASK;
			writel_relaxed(regval, sc->gdscr);

			ret = readl_tight_poll_timeout(sc->gdscr, regval,
						       regval & PWR_ON_MASK,
						       TIMEOUT_US);
			if (ret) {
				dev_err(&pdev->dev, "%s enable timed out\n",
					sc->rdesc.name);
				return ret;
			}
		} else {
			pr_info("%s: %s is enabled only at first by lge workaround\n",
				__func__, sc->rdesc.name);
			ret = lge_gdsc_enable(sc);
			if (ret) {
				dev_err(&pdev->dev, "%s enable timed out\n",
					sc->rdesc.name);
				return ret;
			}
		}
#else /* qmc */
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       regval & PWR_ON_MASK,
					       TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
#endif
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
/*
 * Regulator disable callback for a generic footswitch power domain.
 *
 * Halts up to two bus ports, asserts resets on every clock in the domain,
 * restores the caller's clock state, then clamps the core I/O and
 * collapses the rail.  On some targets (non-MSM8930 GFX3D) the rail
 * itself is deliberately left enabled.
 *
 * Returns 0 on success or a negative errno on failure.
 * NOTE(review): rc is declared uint32_t though it carries negative errno
 * values; the err_port2_halt label actually unwinds a port-1 failure.
 */
static int footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	//if (fs->desc.id != FS_GFX3D_8064)
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN_MEM);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_porthalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 halt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);

	/* Wait for synchronous resets to propagate. */
	udelay(fs->reset_delay_us);

	/*
	 * Return clocks to their state before this function. For robustness
	 * if memory-retention across collapses is required, clocks should
	 * be disabled before asserting the clamps. Assuming clocks were off
	 * before entering footswitch_disable(), this will be true.
	 */
	restore_clocks(fs);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
#if defined(CONFIG_ARCH_MSM8930)
	writel_relaxed(regval, fs->gfs_ctl_reg);
#else
	/* On non-8930 targets the GFX3D rail is never actually collapsed. */
	if (fs->desc.id != FS_GFX3D)
		writel_relaxed(regval, fs->gfs_ctl_reg);
#endif

	fs->is_enabled = false;
	return 0;

err_port2_halt:
	/* Port 1 halt failed: undo the port 0 halt. */
	msm_bus_axi_portunhalt(fs->bus_port0);
err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
	restore_clocks(fs);
	return rc;
}
/*
 * Probe a GDSC device-tree node and register it as a voltage regulator
 * (TBS/engineering variant: specific boards may force memory/peripheral
 * retention and skip-logic-collapse for selected GDSCs).
 *
 * Returns 0 on success or a negative errno on failure (devm-managed
 * resources are released automatically).
 */
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	/* Monotonic counter assigning a unique regulator id per GDSC. */
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	/* Chain to a parent supply when the DT declares one. */
	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	/* Map the GDSC control register. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	/* A missing qcom,clock-names property means zero domain clocks. */
	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on registers writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					   "qcom,retain-mem");
	sc->toggle_mem = !retain_mem;
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					      "qcom,retain-periph");
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						  "qcom,skip-logic-collapse");
#if defined(CONFIG_MACH_TBS) && defined(CONFIG_ANDROID_ENGINEERING)
	/*
	 * Board quirk: on specific PP hardware revisions (and specific SoC
	 * versions) force retention for a fixed list of GDSCs and force
	 * skip-logic-collapse for a subset, overriding the DT settings.
	 */
	{
		static sharp_smem_common_type *smemdata = NULL;
		unsigned long hw_revision = sh_boot_get_hw_revision();
		unsigned int version = socinfo_get_version();
		const char* target_names_collapse[] = {
			"gdsc_venus",
			"gdsc_mdss"
		};
		const char* target_names_retention[] = {
			"gdsc_venus",
			"gdsc_mdss",
			"gdsc_oxili_gx"
		};

		if( smemdata == NULL ) {
			smemdata = sh_smem_get_common_address();
		}
		if( (smemdata != NULL) &&
		    ((hw_revision == HW_VERSION_PP_1) ||
		     (hw_revision == HW_VERSION_PP_2)) ) {
			if (smemdata->shdiag_rvcflg != SHDIAG_RVCFLG_ON) {
				if ( (SOCINFO_VERSION_MAJOR(version) == 2) &&
				     (SOCINFO_VERSION_MINOR(version) == 2) ) {
					/* NOTE: this inner i shadows the outer loop index. */
					int i = 0;
					for(i=0; i<(sizeof(target_names_retention)/sizeof(target_names_retention[0])); i++) {
						if( strcmp(sc->rdesc.name, target_names_retention[i]) == 0 ) {
							break;
						}
					}
					if( i != (sizeof(target_names_retention)/sizeof(target_names_retention[0])) ) {
						if( retain_mem != true ) {
							dev_err(&pdev->dev, "%s is forced to use retain_mem\n",
								sc->rdesc.name);
							retain_mem = true;
							sc->toggle_mem = !retain_mem;
						}
						if( retain_periph != true ) {
							dev_err(&pdev->dev, "%s is forced to use retain_periph\n",
								sc->rdesc.name);
							retain_periph = true;
							sc->toggle_periph = !retain_periph;
						}
					}
					for(i=0; i<(sizeof(target_names_collapse)/sizeof(target_names_collapse[0])); i++) {
						if( strcmp(sc->rdesc.name, target_names_collapse[i]) == 0 ) {
							break;
						}
					}
					if( i != (sizeof(target_names_collapse)/sizeof(target_names_collapse[0])) ) {
						if( sc->toggle_logic != false ) {
							dev_err(&pdev->dev, "%s is forced to use skip_logic_collapse\n",
								sc->rdesc.name);
							sc->toggle_logic = false;
						}
					}
				}
			}
		}
	}
#endif
	/* Domains that never collapse must be powered up once at probe. */
	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       regval & PWR_ON_MASK,
					       TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	}

	/*
	 * Seed per-clock retention flags; a domain that is already powered
	 * (PWR_ON set) keeps retention regardless of the DT properties.
	 */
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
/*
 * Put the HSIC xHCI controller into low-power mode.
 *
 * Aborts with -EBUSY if a remote wakeup is pending or the PHY fails to
 * enter LPM within PHY_LPM_WAIT_TIMEOUT_MS.  Otherwise gates all
 * controller clocks, lowers VDDCX to the no-vol floor, drops the bus
 * vote, arms the wakeup IRQ and releases the wakeup source.
 *
 * Returns 0 on success or -EBUSY when suspend must be aborted.
 */
static int mxhci_hsic_suspend(struct mxhci_hsic_hcd *mxhci)
{
	struct usb_hcd *hcd = hsic_to_hcd(mxhci);
	int ret;

	if (mxhci->in_lpm) {
		dev_dbg(mxhci->dev, "%s called in lpm\n", __func__);
		return 0;
	}

	disable_irq(hcd->irq);

	/* make sure we don't race against a remote wakeup */
	if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
	    (readl_relaxed(MSM_HSIC_PORTSC) & PORT_PLS_MASK) == XDEV_RESUME) {
		dev_dbg(mxhci->dev, "wakeup pending, aborting suspend\n");
		enable_irq(hcd->irq);
		return -EBUSY;
	}

	/* make sure HSIC phy is in LPM */
	ret = wait_for_completion_timeout(
			&mxhci->phy_in_lpm,
			msecs_to_jiffies(PHY_LPM_WAIT_TIMEOUT_MS));
	if (!ret) {
		dev_err(mxhci->dev, "HSIC phy failed to enter lpm\n");
		init_completion(&mxhci->phy_in_lpm);
		enable_irq(hcd->irq);
		return -EBUSY;
	}

	/* Re-arm the completion for the next suspend cycle. */
	init_completion(&mxhci->phy_in_lpm);

	/* Gate all controller clocks. */
	clk_disable_unprepare(mxhci->core_clk);
	clk_disable_unprepare(mxhci->utmi_clk);
	clk_disable_unprepare(mxhci->hsic_clk);
	clk_disable_unprepare(mxhci->cal_clk);
	clk_disable_unprepare(mxhci->system_clk);

	/* Drop VDDCX to its minimum-retention window. */
	ret = regulator_set_voltage(mxhci->hsic_vddcx, mxhci->vdd_no_vol_level,
				    mxhci->vdd_high_vol_level);
	if (ret < 0)
		dev_err(mxhci->dev,
			"unable to set vddcx voltage for VDD MIN\n");

	/* Release the bus-bandwidth vote asynchronously. */
	if (mxhci->bus_perf_client) {
		mxhci->bus_vote = false;
		queue_work(mxhci->wq, &mxhci->bus_vote_w);
	}

	mxhci->in_lpm = 1;

	enable_irq(hcd->irq);

	/* Arm the dedicated wakeup IRQ so a remote wakeup can resume us. */
	if (mxhci->wakeup_irq) {
		mxhci->wakeup_irq_enabled = 1;
		enable_irq_wake(mxhci->wakeup_irq);
		enable_irq(mxhci->wakeup_irq);
	}

	/* disable force-on mode for periph_on */
	clk_set_flags(mxhci->system_clk, CLKFLAG_NORETAIN_PERIPH);

	pm_relax(mxhci->dev);

	dev_dbg(mxhci->dev, "HSIC-USB in low power mode\n");
	xhci_dbg_log_event(&dbg_hsic, NULL, "Controller suspended", 0);

	return 0;
}
/*
 * Acquire, rate-set and enable (init != 0) or disable (init == 0) all
 * clocks used by the HSIC xHCI controller.
 *
 * The error labels double as the teardown path: on init failure each
 * label unwinds the clocks enabled before it, and the init==0 call jumps
 * to disable_all_clks and falls through the same chain (skipping it when
 * the controller is already in LPM, where those clocks are already off).
 *
 * Returns 0 on success or a negative errno from clk_get/clk_prepare_enable.
 */
static int mxhci_hsic_init_clocks(struct mxhci_hsic_hcd *mxhci, u32 init)
{
	int ret = 0;

	if (!init)
		goto disable_all_clks;

	/* 75Mhz system_clk required for normal hsic operation */
	mxhci->system_clk = devm_clk_get(mxhci->dev, "system_clk");
	if (IS_ERR(mxhci->system_clk)) {
		dev_err(mxhci->dev, "failed to get system_clk\n");
		ret = PTR_ERR(mxhci->system_clk);
		goto out;
	}
	clk_set_rate(mxhci->system_clk, 75000000);

	/* 60Mhz core_clk required for LINK protocol engine */
	mxhci->core_clk = devm_clk_get(mxhci->dev, "core_clk");
	if (IS_ERR(mxhci->core_clk)) {
		dev_err(mxhci->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mxhci->core_clk);
		goto out;
	}
	clk_set_rate(mxhci->core_clk, 60000000);

	/* 480Mhz main HSIC phy clk */
	mxhci->hsic_clk = devm_clk_get(mxhci->dev, "hsic_clk");
	if (IS_ERR(mxhci->hsic_clk)) {
		dev_err(mxhci->dev, "failed to get hsic_clk\n");
		ret = PTR_ERR(mxhci->hsic_clk);
		goto out;
	}
	clk_set_rate(mxhci->hsic_clk, 480000000);

	/* 19.2Mhz utmi_clk ref_clk required to shut off HSIC PLL */
	mxhci->utmi_clk = devm_clk_get(mxhci->dev, "utmi_clk");
	if (IS_ERR(mxhci->utmi_clk)) {
		dev_err(mxhci->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mxhci->utmi_clk);
		goto out;
	}
	clk_set_rate(mxhci->utmi_clk, 19200000);

	/* 32Khz phy sleep clk */
	mxhci->phy_sleep_clk = devm_clk_get(mxhci->dev, "phy_sleep_clk");
	if (IS_ERR(mxhci->phy_sleep_clk)) {
		dev_err(mxhci->dev, "failed to get phy_sleep_clk\n");
		ret = PTR_ERR(mxhci->phy_sleep_clk);
		goto out;
	}
	clk_set_rate(mxhci->phy_sleep_clk, 32000);

	/* 10MHz cal_clk required for calibration of I/O pads */
	/* NOTE(review): the rate actually programmed below is 9.6 MHz. */
	mxhci->cal_clk = devm_clk_get(mxhci->dev, "cal_clk");
	if (IS_ERR(mxhci->cal_clk)) {
		dev_err(mxhci->dev, "failed to get cal_clk\n");
		ret = PTR_ERR(mxhci->cal_clk);
		goto out;
	}
	clk_set_rate(mxhci->cal_clk, 9600000);

	ret = clk_prepare_enable(mxhci->system_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable system_clk\n");
		goto out;
	}

	/* enable force-on mode for periph_on */
	clk_set_flags(mxhci->system_clk, CLKFLAG_RETAIN_PERIPH);

	ret = clk_prepare_enable(mxhci->core_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable core_clk\n");
		goto err_core_clk;
	}

	ret = clk_prepare_enable(mxhci->hsic_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable hsic_clk\n");
		goto err_hsic_clk;
	}

	ret = clk_prepare_enable(mxhci->utmi_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable utmi_clk\n");
		goto err_utmi_clk;
	}

	ret = clk_prepare_enable(mxhci->cal_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable cal_clk\n");
		goto err_cal_clk;
	}

	ret = clk_prepare_enable(mxhci->phy_sleep_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable phy_sleep_clk\n");
		goto err_phy_sleep_clk;
	}

	return 0;

disable_all_clks:
	clk_disable_unprepare(mxhci->phy_sleep_clk);
	/* In LPM the remaining clocks are already gated; stop here. */
	if (mxhci->in_lpm)
		goto out;
err_phy_sleep_clk:
	clk_disable_unprepare(mxhci->cal_clk);
err_cal_clk:
	clk_disable_unprepare(mxhci->utmi_clk);
err_utmi_clk:
	clk_disable_unprepare(mxhci->hsic_clk);
err_hsic_clk:
	clk_disable_unprepare(mxhci->core_clk);
err_core_clk:
	clk_disable_unprepare(mxhci->system_clk);
out:
	return ret;
}
static int gdsc_probe(struct platform_device *pdev) { static atomic_t gdsc_count = ATOMIC_INIT(-1); struct regulator_config reg_config = {}; struct regulator_init_data *init_data; struct resource *res; struct gdsc *sc; uint32_t regval; bool retain_mem, retain_periph, support_hw_trigger; int i, ret; sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL); if (sc == NULL) return -ENOMEM; init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); if (init_data == NULL) return -ENOMEM; if (of_get_property(pdev->dev.of_node, "parent-supply", NULL)) init_data->supply_regulator = "parent"; ret = of_property_read_string(pdev->dev.of_node, "regulator-name", &sc->rdesc.name); if (ret) return ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -EINVAL; sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (sc->gdscr == NULL) return -ENOMEM; sc->clock_count = of_property_count_strings(pdev->dev.of_node, "clock-names"); if (sc->clock_count == -EINVAL) { sc->clock_count = 0; } else if (IS_ERR_VALUE(sc->clock_count)) { dev_err(&pdev->dev, "Failed to get clock names\n"); return -EINVAL; } sc->clocks = devm_kzalloc(&pdev->dev, sizeof(struct clk *) * sc->clock_count, GFP_KERNEL); if (!sc->clocks) return -ENOMEM; sc->root_en = of_property_read_bool(pdev->dev.of_node, "qcom,enable-root-clk"); for (i = 0; i < sc->clock_count; i++) { const char *clock_name; of_property_read_string_index(pdev->dev.of_node, "clock-names", i, &clock_name); sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name); if (IS_ERR(sc->clocks[i])) { int rc = PTR_ERR(sc->clocks[i]); if (rc != -EPROBE_DEFER) dev_err(&pdev->dev, "Failed to get %s\n", clock_name); return rc; } } sc->rdesc.id = atomic_inc_return(&gdsc_count); sc->rdesc.ops = &gdsc_ops; sc->rdesc.type = REGULATOR_VOLTAGE; sc->rdesc.owner = THIS_MODULE; platform_set_drvdata(pdev, sc); /* * Disable HW trigger: collapse/restore occur based on registers writes. 
* Disable SW override: Use hardware state-machine for sequencing. */ regval = readl_relaxed(sc->gdscr); regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK); /* Configure wait time between states. */ regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK); regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; writel_relaxed(regval, sc->gdscr); retain_mem = of_property_read_bool(pdev->dev.of_node, "qcom,retain-mem"); sc->toggle_mem = !retain_mem; retain_periph = of_property_read_bool(pdev->dev.of_node, "qcom,retain-periph"); sc->toggle_periph = !retain_periph; sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node, "qcom,skip-logic-collapse"); support_hw_trigger = of_property_read_bool(pdev->dev.of_node, "qcom,support-hw-trigger"); if (support_hw_trigger) { init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE; init_data->constraints.valid_modes_mask |= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST; } if (!sc->toggle_logic) { regval &= ~SW_COLLAPSE_MASK; writel_relaxed(regval, sc->gdscr); ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK, TIMEOUT_US); if (ret) { dev_err(&pdev->dev, "%s enable timed out: 0x%x\n", sc->rdesc.name, regval); return ret; } } for (i = 0; i < sc->clock_count; i++) { if (retain_mem || (regval & PWR_ON_MASK)) clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM); else clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM); if (retain_periph || (regval & PWR_ON_MASK)) clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH); else clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH); } reg_config.dev = &pdev->dev; reg_config.init_data = init_data; reg_config.driver_data = sc; reg_config.of_node = pdev->dev.of_node; sc->rdev = regulator_register(&sc->rdesc, ®_config); if (IS_ERR(sc->rdev)) { dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n", sc->rdesc.name); return PTR_ERR(sc->rdev); } return 0; }
/*
 * Regulator enable callback for a generic footswitch power domain.
 *
 * Unhalts up to two bus ports, asserts domain resets, powers the rail
 * (except GFX3D on non-MSM8930 targets, whose rail is never toggled),
 * un-clamps I/O, deasserts resets (GFX3D gets an extra core reset
 * toggle), then re-enables memory retention and restores clock state.
 *
 * Returns 0 on success or a negative errno on failure.
 * NOTE(review): the err_port2_halt label actually unwinds a port-1
 * failure by re-halting port 0.
 */
static int footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Record the claim before touching hardware. */
	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_portunhalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 unhalt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since
	 * footswitch_enable() is first called before footswitch_disable()
	 * and resets should be asserted before power is restored.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);

	/* Wait for synchronous resets to propagate. */
	udelay(fs->reset_delay_us);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
#if defined(CONFIG_ARCH_MSM8930)
	writel_relaxed(regval, fs->gfs_ctl_reg);
#else
	/* On non-8930 targets the GFX3D rail is never actually toggled. */
	if (fs->desc.id != FS_GFX3D)
		writel_relaxed(regval, fs->gfs_ctl_reg);
#endif
	/* Wait for the rail to fully charge. */
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);

	/* Toggle core reset again after first power-on (required for GFX3D). */
	if (fs->desc.id == FS_GFX3D) {
		clk_reset(fs->core_clk, CLK_RESET_ASSERT);
		udelay(fs->reset_delay_us);
		clk_reset(fs->core_clk, CLK_RESET_DEASSERT);
		udelay(fs->reset_delay_us);
	}

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err_port2_halt:
	/* Port 1 unhalt failed: re-halt port 0. */
	msm_bus_axi_porthalt(fs->bus_port0);
err:
	restore_clocks(fs);
	return rc;
}
/*
 * Regulator enable callback for the 2D graphics core footswitch
 * (uncommented duplicate of the variant above).
 *
 * Sequence: claim, bail out if already enabled and unclamped, unhalt the
 * bus port, gate the core clock, assert domain resets, power the rail,
 * un-clamp I/O, deassert resets, then re-enable the core clock with
 * retention on and restore the caller's clock state.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int gfx2d_footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Record the claim before touching hardware. */
	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Already enabled and unclamped: nothing to do. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Ensure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt the bus port in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Gate the core clock before toggling power. */
	clk_disable_unprepare(fs->core_clk);

	/* Assert resets on every clock in the domain (walk to end, then back). */
	for (clock = fs->clk_data; clock->clk; clock++)
		;
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(RESET_DELAY_US);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	/* Order the register write and let the rail charge. */
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	udelay(RESET_DELAY_US);

	/* Re-enable core clock with memory retention. */
	clk_prepare_enable(fs->core_clk);
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err:
	restore_clocks(fs);
	return rc;
}