static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	for (i = sc->clock_count - 1; i >= 0; i--) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		regval |= SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       !(regval & PWR_ON_MASK),
					       TIMEOUT_US);
		if (ret)
			dev_err(&rdev->dev, "%s disable timed out\n",
				sc->rdesc.name);
	} else {
		for (i = sc->clock_count - 1; i >= 0; i--)
			clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
		sc->resets_asserted = true;
	}

	return ret;
}
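All of these ops manipulate single request/status bits in the GDSCR. A minimal sketch of the kind of definitions the driver header carries, assuming an MSM-era bit layout (the exact positions are SoC-specific and must come from the real header):

#include <linux/bitops.h>

/* Assumed GDSCR bit layout (MSM-era); verify against the SoC documentation. */
#define PWR_ON_MASK		BIT(31)	/* read-only status: rail is up */
#define SW_OVERRIDE_MASK	BIT(2)	/* bypass the HW state machine */
#define HW_CONTROL_MASK		BIT(1)	/* hand collapse to the HW trigger */
#define SW_COLLAPSE_MASK	BIT(0)	/* request software collapse */

#define TIMEOUT_US		100	/* poll budget for PWR_ON to settle */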
Example #2
static int lge_gdsc_disable(struct gdsc *sc)
{
	uint32_t regval;
	int ret;

	regval = readl_relaxed(sc->gdscr);
	regval |= SW_OVERRIDE_MASK;
	writel_relaxed(regval, sc->gdscr);

	regval = readl_relaxed(sc->gdscr);
	regval |= CLK_DISABLE_MASK;
	writel_relaxed(regval, sc->gdscr);

	regval |= RETAIN_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval &= ~RESTORE_MASK;
	regval |= CLAMP_IO_MASK | SAVE_MASK;
	writel_relaxed(regval, sc->gdscr);

	regval = readl_relaxed(sc->gdscr);
	regval |= SW_COLLAPSE_MASK;
	writel_relaxed(regval, sc->gdscr);

	ret = readl_tight_poll_timeout(sc->gdscr, regval,
				       !(regval & PWR_ON_MASK), TIMEOUT_US_LGE);
	if (ret)
		pr_err("%s: %s disable timed out\n", __func__, sc->rdesc.name);
	return ret;
}
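Both disable paths spin on readl_tight_poll_timeout() until PWR_ON drops. A sketch of what such a tight (non-sleeping) poll helper looks like, in the spirit of the mach-msm iopoll header rather than the exact in-tree macro:

#include <linux/io.h>
#include <linux/ktime.h>

/* Sketch: busy-read @addr into @val until @cond holds or @timeout_us expires. */
#define readl_tight_poll_timeout(addr, val, cond, timeout_us)		\
({									\
	ktime_t __end = ktime_add_us(ktime_get(), timeout_us);		\
	int __ret = 0;							\
	for (;;) {							\
		(val) = readl_relaxed(addr);				\
		if (cond)						\
			break;						\
		if (ktime_compare(ktime_get(), __end) > 0) {		\
			(val) = readl_relaxed(addr); /* final read */	\
			__ret = (cond) ? 0 : -ETIMEDOUT;		\
			break;						\
		}							\
	}								\
	__ret;								\
})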
Example #3
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	if (sc->root_en) {
		for (i = 0; i < sc->clock_count; i++)
			clk_prepare_enable(sc->clocks[i]);
	}

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		if (regval & HW_CONTROL_MASK) {
			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
				 sc->rdesc.name);
			return -EBUSY;
		}

		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
				sc->rdesc.name, regval);
			udelay(TIMEOUT_US);
			regval = readl_relaxed(sc->gdscr);
			dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
				sc->rdesc.name, regval, TIMEOUT_US);
			return ret;
		}
	} else {
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}
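These ops are not called directly; consumers reach them through the regulator framework. A hypothetical client (the supply name "vdd_gdsc" and the function are invented for illustration):

#include <linux/regulator/consumer.h>

/* Hypothetical consumer: power the domain around a hardware access. */
static int example_use_domain(struct device *dev)
{
	struct regulator *gdsc = devm_regulator_get(dev, "vdd_gdsc");
	int ret;

	if (IS_ERR(gdsc))
		return PTR_ERR(gdsc);

	ret = regulator_enable(gdsc);	/* lands in gdsc_enable() above */
	if (ret)
		return ret;

	/* ... touch hardware inside the powered domain ... */

	return regulator_disable(gdsc);	/* lands in gdsc_disable() */
}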
Example #4
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	if (sc->toggle_logic) {
#ifdef CONFIG_MACH_LGE
		if (sc->use_lge_workaround) {
			ret = lge_gdsc_enable(sc);
			if (ret)
				return ret;
		} else
#endif
		{
			regval = readl_relaxed(sc->gdscr);
			regval &= ~SW_COLLAPSE_MASK;
			writel_relaxed(regval, sc->gdscr);

			ret = readl_tight_poll_timeout(sc->gdscr, regval,
						regval & PWR_ON_MASK, TIMEOUT_US);
			if (ret) {
				dev_err(&rdev->dev, "%s enable timed out\n",
					sc->rdesc.name);
				return ret;
			}
		}
	} else {
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}
Example #5
static int gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_config reg_config = {};
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph, support_hw_trigger;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
					    "clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;

	sc->root_en = of_property_read_bool(pdev->dev.of_node,
						"qcom,enable-root-clk");

	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
					      i, &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-mem");
	sc->toggle_mem = !retain_mem;
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-periph");
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						"qcom,skip-logic-collapse");
	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
						    "qcom,support-hw-trigger");
	if (support_hw_trigger) {
		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
		init_data->constraints.valid_modes_mask |=
				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
	}

	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
				sc->rdesc.name, regval);
			return ret;
		}
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	reg_config.dev = &pdev->dev;
	reg_config.init_data = init_data;
	reg_config.driver_data = sc;
	reg_config.of_node = pdev->dev.of_node;
	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
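The probe reads everything from the device node, so the node shape is implied by the property names above. A hypothetical example, shown as a comment (label, address, and clock name are invented):

/*
 * Hypothetical devicetree node matching the properties parsed above:
 *
 *	gdsc_venus: qcom,gdsc@fd8c1024 {
 *		regulator-name = "gdsc_venus";
 *		reg = <0xfd8c1024 0x4>;
 *		clock-names = "core_clk";
 *		qcom,retain-mem;
 *		qcom,retain-periph;
 *		qcom,support-hw-trigger;
 *	};
 */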
Example #6
static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int ret;

	regval = readl_relaxed(sc->gdscr);

	/*
	 * HW control can only be enabled or disabled while the GDSC is on
	 * (i.e. while SW_COLLAPSE is clear).
	 */
	if (regval & SW_COLLAPSE_MASK) {
		dev_err(&rdev->dev, "can't enable hw collapse now\n");
		return -EBUSY;
	}

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* Turn on HW trigger mode */
		regval |= HW_CONTROL_MASK;
		writel_relaxed(regval, sc->gdscr);
		/*
		 * There may be a race with internal HW trigger signal,
		 * that will result in GDSC going through a power down and
		 * up cycle.  In case HW trigger signal is controlled by
		 * firmware that also poll same status bits as we do, FW
		 * might read an 'on' status before the GDSC can finish
		 * power cycle.  We wait 1us before returning to ensure
		 * FW can't immediately poll the status bit.
		 */
		mb();
		udelay(1);
		break;

	case REGULATOR_MODE_NORMAL:
		/* Turn off HW trigger mode */
		regval &= ~HW_CONTROL_MASK;
		writel_relaxed(regval, sc->gdscr);
		/*
		 * There may be a race with internal HW trigger signal,
		 * that will result in GDSC going through a power down and
		 * up cycle.  If we poll too early, status bit will
		 * indicate 'on' before the GDSC can finish the power cycle.
		 * Account for this case by waiting 1us before polling.
		 */
		mb();
		udelay(1);
		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
				sc->rdesc.name, regval);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
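Since probe only advertises REGULATOR_CHANGE_MODE when the node has qcom,support-hw-trigger, a client can flip the domain between SW and HW control with regulator_set_mode(). A hypothetical sketch (the client function is invented):

/* Hypothetical client: hand collapse control to the HW trigger and back. */
static int example_hw_handoff(struct regulator *gdsc)
{
	/* FAST = HW trigger owns the domain (sets HW_CONTROL_MASK). */
	int ret = regulator_set_mode(gdsc, REGULATOR_MODE_FAST);

	if (ret)
		return ret;

	/* ... firmware may now power-cycle the domain on its own ... */

	/* NORMAL = back to software control; set_mode re-polls PWR_ON. */
	return regulator_set_mode(gdsc, REGULATOR_MODE_NORMAL);
}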
Example #7
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;
#ifdef CONFIG_MACH_LGE
	int use_lge_workaround = 0; /* default: workaround not applied */
#endif

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
					    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}
#ifdef CONFIG_MACH_LGE
	of_property_read_u32(pdev->dev.of_node, "lge,use_workaround",
			&use_lge_workaround);
	sc->use_lge_workaround = !!use_lge_workaround;
#endif
	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-mem");
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-periph");
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}
	sc->toggle_mem = !retain_mem;
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						"qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
#ifdef CONFIG_MACH_LGE
		/* Skip the LGE workaround on boards with a good power-domain revision */
		if (lge_get_board_revno() >= use_lge_workaround) {
			regval &= ~SW_COLLAPSE_MASK;
			writel_relaxed(regval, sc->gdscr);

			ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
			if (ret) {
				dev_err(&pdev->dev, "%s enable timed out\n",
						sc->rdesc.name);
				return ret;
			}
		} else {
			pr_info("%s: %s is initially enabled via the LGE workaround\n",
					__func__, sc->rdesc.name);
			ret = lge_gdsc_enable(sc);
			if (ret) {
				dev_err(&pdev->dev, "%s enable timed out\n",
						sc->rdesc.name);
				return ret;
			}
		}
#else /* qmc */
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
#endif
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
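The u32 read above ("lge,use_workaround") encodes the first board revision that no longer needs the manual sequence; a hypothetical node fragment (the revision value is invented):

/*
 * Hypothetical devicetree fragment for the LGE path (value invented):
 *
 *	lge,use_workaround = <5>;
 *
 * At probe, boards with lge_get_board_revno() below this value are
 * brought up through lge_gdsc_enable() instead of the plain
 * SW_COLLAPSE handshake.
 */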
Example #8
static int lge_gdsc_enable(struct gdsc *sc)
{
	uint32_t regval;
	int ret;
	int retry_count = 0;

retry_enable:
	regval = readl_relaxed(sc->gdscr);
	regval |= SW_OVERRIDE_MASK;
	writel_relaxed(regval, sc->gdscr);

	regval = readl_relaxed(sc->gdscr);
	regval |= PD_ARES_MASK | EN_FEW_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);
	udelay(15);

	regval &= ~PD_ARES_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);

	regval |= EN_RESET_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);
	udelay(1);

	regval = readl_relaxed(sc->gdscr);
	regval &= ~SW_COLLAPSE_MASK;
	writel_relaxed(regval, sc->gdscr);

	ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
				       TIMEOUT_US_LGE);
	if (ret) {
		pr_err("%s: %s enable timed out, state: 0x%08x, retry count: %d\n",
				__func__, sc->rdesc.name, readl_relaxed(sc->gdscr),
				retry_count + 1);
		lge_gdsc_disable(sc);
		if (++retry_count <= MAX_RETRY_COUNT)
			goto retry_enable;
		pr_err("%s: %s fail to enable\n", __func__, sc->rdesc.name);
		BUG();
		return ret;
	}

	regval = readl_relaxed(sc->gdscr);
	regval &= ~CLAMP_IO_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);

	regval &= ~RETAIN_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);

	regval &= ~SAVE_MASK;
	regval |= RESTORE_MASK;
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);

	regval &= ~(CLK_DISABLE_MASK);
	writel_relaxed(regval, sc->gdscr);
	regval = readl_relaxed(sc->gdscr);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}
Example #9
static int pil_pronto_reset(struct pil_desc *pil)
{
	u32 reg;
	int rc;
	struct pronto_data *drv = dev_get_drvdata(pil->dev);
	void __iomem *base = drv->base;
	phys_addr_t start_addr = pil_get_entry_addr(pil);

	/* Deassert reset to subsystem and wait for propagation */
	reg = readl_relaxed(drv->reset_base);
	reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
	writel_relaxed(reg, drv->reset_base);
	mb();
	udelay(2);

	/* Configure boot address */
	writel_relaxed(start_addr >> 16, base +
			PRONTO_PMU_CCPU_BOOT_REMAP_ADDR);

	/* Use the high vector table */
	reg = readl_relaxed(base + PRONTO_PMU_CCPU_CTL);
	reg |= PRONTO_PMU_CCPU_CTL_REMAP_EN | PRONTO_PMU_CCPU_CTL_HIGH_IVT;
	writel_relaxed(reg, base + PRONTO_PMU_CCPU_CTL);

	/* Turn on AHB clock of common_ss */
	reg = readl_relaxed(base + PRONTO_PMU_COMMON_AHB_CBCR);
	reg |= PRONTO_PMU_COMMON_AHB_CBCR_CLK_EN;
	writel_relaxed(reg, base + PRONTO_PMU_COMMON_AHB_CBCR);

	/* Turn on CPU clock of common_ss */
	reg = readl_relaxed(base + PRONTO_PMU_COMMON_CPU_CBCR);
	reg |= PRONTO_PMU_COMMON_CPU_CBCR_CLK_EN;
	writel_relaxed(reg, base + PRONTO_PMU_COMMON_CPU_CBCR);

	/* Enable A2XB bridge */
	reg = readl_relaxed(base + PRONTO_PMU_COMMON_CSR);
	reg |= PRONTO_PMU_COMMON_CSR_A2XB_CFG_EN;
	writel_relaxed(reg, base + PRONTO_PMU_COMMON_CSR);

	/* Enable common_ss power */
	reg = readl_relaxed(base + PRONTO_PMU_COMMON_GDSCR);
	reg &= ~PRONTO_PMU_COMMON_GDSCR_SW_COLLAPSE;
	writel_relaxed(reg, base + PRONTO_PMU_COMMON_GDSCR);

	/* Wait for AHB clock to be on */
	rc = readl_tight_poll_timeout(base + PRONTO_PMU_COMMON_AHB_CBCR,
				      reg,
				      !(reg & PRONTO_PMU_COMMON_AHB_CLK_OFF),
				      CLK_UPDATE_TIMEOUT_US);
	if (rc) {
		dev_err(pil->dev, "pronto common ahb clk enable timeout\n");
		return rc;
	}

	/* Wait for CPU clock to be on */
	rc = readl_tight_poll_timeout(base + PRONTO_PMU_COMMON_CPU_CBCR,
				      reg,
				      !(reg & PRONTO_PMU_COMMON_CPU_CLK_OFF),
				      CLK_UPDATE_TIMEOUT_US);
	if (rc) {
		dev_err(pil->dev, "pronto common cpu clk enable timeout\n");
		return rc;
	}

	/* Deassert ARM9 software reset */
	reg = readl_relaxed(base + PRONTO_PMU_SOFT_RESET);
	reg &= ~PRONTO_PMU_SOFT_RESET_CRCM_CCPU_SOFT_RESET;
	writel_relaxed(reg, base + PRONTO_PMU_SOFT_RESET);

	return 0;
}
Example #10
static int __devinit gdsc_probe(struct platform_device *pdev)
{
    static atomic_t gdsc_count = ATOMIC_INIT(-1);
    struct regulator_init_data *init_data;
    struct resource *res;
    struct gdsc *sc;
    uint32_t regval;
    bool retain_mem, retain_periph;
    int i, ret;

    sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
    if (sc == NULL)
        return -ENOMEM;

    init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
    if (init_data == NULL)
        return -ENOMEM;

    if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
        init_data->supply_regulator = "parent";

    ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
                                  &sc->rdesc.name);
    if (ret)
        return ret;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res == NULL)
        return -EINVAL;
    sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (sc->gdscr == NULL)
        return -ENOMEM;

    sc->clock_count = of_property_count_strings(pdev->dev.of_node,
                      "qcom,clock-names");
    if (sc->clock_count == -EINVAL) {
        sc->clock_count = 0;
    } else if (IS_ERR_VALUE(sc->clock_count)) {
        dev_err(&pdev->dev, "Failed to get clock names\n");
        return -EINVAL;
    }

    sc->clocks = devm_kzalloc(&pdev->dev,
                              sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
    if (!sc->clocks)
        return -ENOMEM;
    for (i = 0; i < sc->clock_count; i++) {
        const char *clock_name;
        of_property_read_string_index(pdev->dev.of_node,
                                      "qcom,clock-names", i,
                                      &clock_name);
        sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
        if (IS_ERR(sc->clocks[i])) {
            int rc = PTR_ERR(sc->clocks[i]);
            if (rc != -EPROBE_DEFER)
                dev_err(&pdev->dev, "Failed to get %s\n",
                        clock_name);
            return rc;
        }
    }

    sc->rdesc.id = atomic_inc_return(&gdsc_count);
    sc->rdesc.ops = &gdsc_ops;
    sc->rdesc.type = REGULATOR_VOLTAGE;
    sc->rdesc.owner = THIS_MODULE;
    platform_set_drvdata(pdev, sc);

    /*
     * Disable HW trigger: collapse/restore occur based on register writes.
     * Disable SW override: use the hardware state machine for sequencing.
     */
    regval = readl_relaxed(sc->gdscr);
    regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

    /* Configure wait time between states. */
    regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
    regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
    writel_relaxed(regval, sc->gdscr);

    retain_mem = of_property_read_bool(pdev->dev.of_node,
                                       "qcom,retain-mem");
    sc->toggle_mem = !retain_mem;
    retain_periph = of_property_read_bool(pdev->dev.of_node,
                                          "qcom,retain-periph");
    sc->toggle_periph = !retain_periph;
    sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
                       "qcom,skip-logic-collapse");

#if defined(CONFIG_MACH_TBS) && defined(CONFIG_ANDROID_ENGINEERING)
    {
        static sharp_smem_common_type *smemdata = NULL;
        unsigned long hw_revision = sh_boot_get_hw_revision();
        unsigned int version = socinfo_get_version();
        const char *target_names_collapse[] = {
            "gdsc_venus", "gdsc_mdss"
        };
        const char *target_names_retention[] = {
            "gdsc_venus", "gdsc_mdss", "gdsc_oxili_gx"
        };

        if (smemdata == NULL)
            smemdata = sh_smem_get_common_address();

        if (smemdata != NULL &&
            (hw_revision == HW_VERSION_PP_1 || hw_revision == HW_VERSION_PP_2) &&
            smemdata->shdiag_rvcflg != SHDIAG_RVCFLG_ON &&
            SOCINFO_VERSION_MAJOR(version) == 2 &&
            SOCINFO_VERSION_MINOR(version) == 2) {
            int i;

            for (i = 0; i < ARRAY_SIZE(target_names_retention); i++) {
                if (strcmp(sc->rdesc.name, target_names_retention[i]) == 0)
                    break;
            }
            if (i != ARRAY_SIZE(target_names_retention)) {
                if (!retain_mem) {
                    dev_err(&pdev->dev, "%s is forced to use retain_mem\n",
                            sc->rdesc.name);
                    retain_mem = true;
                    sc->toggle_mem = false;
                }
                if (!retain_periph) {
                    dev_err(&pdev->dev, "%s is forced to use retain_periph\n",
                            sc->rdesc.name);
                    retain_periph = true;
                    sc->toggle_periph = false;
                }
            }

            for (i = 0; i < ARRAY_SIZE(target_names_collapse); i++) {
                if (strcmp(sc->rdesc.name, target_names_collapse[i]) == 0)
                    break;
            }
            if (i != ARRAY_SIZE(target_names_collapse) && sc->toggle_logic) {
                dev_err(&pdev->dev, "%s is forced to use skip_logic_collapse\n",
                        sc->rdesc.name);
                sc->toggle_logic = false;
            }
        }
    }
#endif

    if (!sc->toggle_logic) {
        regval &= ~SW_COLLAPSE_MASK;
        writel_relaxed(regval, sc->gdscr);

        ret = readl_tight_poll_timeout(sc->gdscr, regval,
                                       regval & PWR_ON_MASK, TIMEOUT_US);
        if (ret) {
            dev_err(&pdev->dev, "%s enable timed out\n",
                    sc->rdesc.name);
            return ret;
        }
    }

    for (i = 0; i < sc->clock_count; i++) {
        if (retain_mem || (regval & PWR_ON_MASK))
            clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
        else
            clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

        if (retain_periph || (regval & PWR_ON_MASK))
            clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
        else
            clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
    }

    sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
                                  pdev->dev.of_node);
    if (IS_ERR(sc->rdev)) {
        dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
                sc->rdesc.name);
        return PTR_ERR(sc->rdev);
    }

    return 0;
}