static void __pll_clk_disable_reg(void __iomem *mode_reg)
{
	u32 mode = readl_relaxed(mode_reg);
	mode &= ~PLL_MODE_MASK;
	writel_relaxed(mode, mode_reg);
}
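This helper illustrates the read-modify-write pattern that recurs throughout these examples: read a register with readl_relaxed(), modify the bits, and write the result back with writel_relaxed(), with no implicit memory barriers. A minimal generic sketch of the same pattern (mmio_clear_bits() and its mask parameter are illustrative, not part of the driver above):

static void mmio_clear_bits(void __iomem *reg, u32 mask)
{
	u32 val = readl_relaxed(reg);

	val &= ~mask;
	writel_relaxed(val, reg);
}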
Example #2
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags;
	u32 config;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + CORE_VENDOR_SPEC);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
	msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
					clk_get_rate(msm_host->xo_clk));
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
					clk_get_rate(msm_host->xo_clk));

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	if (msm_host->use_14lpp_dll_reset) {
		msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG_2);
	}

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
		 CORE_DLL_LOCK)) {
		/* wait at most 50 us for the LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
			       mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}
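The open-coded lock wait above could also be written with the iopoll helpers that appear later on this page; because this function holds a spinlock with interrupts disabled, the atomic variant would be needed. A hedged sketch using the same registers (unlock and error paths elided):

	u32 status;
	int ret;

	/* poll DLL_STATUS for the LOCK bit: 1 us per step, 50 us total */
	ret = readl_relaxed_poll_timeout_atomic(host->ioaddr + CORE_DLL_STATUS,
						status, status & CORE_DLL_LOCK,
						1, 50);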
Example #3
static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * The DLL is not required for clock <= 100MHz.
		 * Thus, make sure the DLL is disabled when not required.
		 */
		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}
Example #4
static inline void flush_fifo(struct rockchip_spi *rs)
{
	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
}
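flush_fifo() drains the receive FIFO by polling the RX FIFO level register (RXFLR) and discarding one data word per iteration until the level reads zero. The loop is unbounded; if the hardware could wedge, a bounded variant along these lines (the retry limit is an arbitrary illustration) avoids a hang:

static int flush_fifo_bounded(struct rockchip_spi *rs)
{
	unsigned int tries = 10000;	/* arbitrary safety bound */

	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR)) {
		if (!--tries)
			return -ETIMEDOUT;
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
	}
	return 0;
}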
Example #5
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 irq_status, irq_ack = 0;
	int retry = 10;
	int pwr_state = 0, io_level = 0;


	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
	irq_status &= INT_MASK;

	writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status register
	 * happen at the same time. Hence, retry up to 10 times to make sure
	 * the status register is cleared. Otherwise, this will result in a
	 * spurious power IRQ, causing system instability.
	 */
	while (irq_status & readl_relaxed(msm_host->core_mem +
				CORE_PWRCTL_STATUS)) {
		if (retry == 0) {
			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
					mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		writel_relaxed(irq_status,
				msm_host->core_mem + CORE_PWRCTL_CLEAR);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
	}
	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW) {
		io_level = REQ_IO_LOW;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}
	if (irq_status & CORE_PWRCTL_IO_HIGH) {
		io_level = REQ_IO_HIGH;
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register whether it succeeded or not. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
Example #6
int hdmi_pll_enable(void)
{
	unsigned int val;
	u32 ahb_en_reg, ahb_enabled;
	unsigned int timeout_count;
	int pll_lock_retry = 10;

	ahb_en_reg = readl_relaxed(AHB_EN_REG);
	ahb_enabled = ahb_en_reg & BIT(4);
	if (!ahb_enabled) {
		writel_relaxed(ahb_en_reg | BIT(4), AHB_EN_REG);
		/* Make sure iface clock is enabled before register access */
		mb();
	}

	/* Assert PLL S/W reset */
	writel_relaxed(0x8D, HDMI_PHY_PLL_LOCKDET_CFG2);
	writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
	writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
	/* Wait for a short time before de-asserting
	 * to allow the hardware to complete its job.
	 * This much delay should be fine for hardware
	 * to assert and de-assert.
	 */
	udelay(10);
	/* De-assert PLL S/W reset */
	writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);

	val = readl_relaxed(HDMI_PHY_REG_12);
	val |= BIT(5);
	/* Assert PHY S/W reset */
	writel_relaxed(val, HDMI_PHY_REG_12);
	val &= ~BIT(5);
	/* Wait for a short time before de-asserting
	 * to allow the hardware to complete its job.
	 * This much delay should be fine for hardware
	 * to assert and de-assert.
	 */
	udelay(10);
	/* De-assert PHY S/W reset */
	writel_relaxed(val, HDMI_PHY_REG_12);
	writel_relaxed(0x3f, HDMI_PHY_REG_2);

	val = readl_relaxed(HDMI_PHY_REG_12);
	val |= PWRDN_B;
	writel_relaxed(val, HDMI_PHY_REG_12);
	/* Wait 10 us for enabling global power for PHY */
	mb();
	udelay(10);

	val = readl_relaxed(HDMI_PHY_PLL_PWRDN_B);
	val |= PLL_PWRDN_B;
	val &= ~PD_PLL;
	writel_relaxed(val, HDMI_PHY_PLL_PWRDN_B);
	writel_relaxed(0x80, HDMI_PHY_REG_2);

	timeout_count = 1000;
	while (!(readl_relaxed(HDMI_PHY_PLL_STATUS0) & BIT(0)) &&
			timeout_count && pll_lock_retry) {
		if (--timeout_count == 0) {
			/*
			 * PLL has still not locked.
			 * Do a software reset and try again
			 * Assert PLL S/W reset first
			 */
			writel_relaxed(0x8D, HDMI_PHY_PLL_LOCKDET_CFG2);

			/* Wait for a short time before de-asserting
			 * to allow the hardware to complete its job.
			 * This much delay should be fine for hardware
			 * to assert and de-assert.
			 */
			udelay(10);
			writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);

			/*
			 * Wait for a short duration for the PLL calibration
			 * before checking if the PLL gets locked
			 */
			udelay(350);

			timeout_count = 1000;
			pll_lock_retry--;
		}
		udelay(1);
	}

	if (!ahb_enabled)
		writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);

	if (!pll_lock_retry) {
		pr_err("%s: HDMI PLL not locked\n", __func__);
		hdmi_pll_disable();
		return -EAGAIN;
	}

	hdmi_pll_on = 1;
	return 0;
}
Example #7
static int msm_ehci_resume(struct msm_hcd *mhcd)
{
	struct msm_usb_host_platform_data *pdata;
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	unsigned long timeout;
	unsigned temp;
	int ret;

	pdata = mhcd->dev->platform_data;

	if (!atomic_read(&mhcd->in_lpm)) {
		dev_dbg(mhcd->dev, "%s called in !in_lpm\n", __func__);
		return 0;
	}

	if (mhcd->pmic_gpio_dp_irq_enabled) {
		disable_irq_wake(mhcd->pmic_gpio_dp_irq);
		disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
		mhcd->pmic_gpio_dp_irq_enabled = 0;
	}
	wake_lock(&mhcd->wlock);

	/* Vote for TCXO when waking up the phy */
	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
	if (ret)
		dev_err(mhcd->dev, "%s failed to vote for "
			"TCXO D0 buffer%d\n", __func__, ret);

	clk_prepare_enable(mhcd->core_clk);
	clk_prepare_enable(mhcd->iface_clk);

	if (!pdata->mpm_xo_wakeup_int)
		msm_ehci_config_vddcx(mhcd, 1);

	temp = readl_relaxed(USB_USBCMD);
	temp &= ~ASYNC_INTR_CTRL;
	temp &= ~ULPI_STP_CTRL;
	writel_relaxed(temp, USB_USBCMD);

	if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
		goto skip_phy_resume;

	temp = readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD;
	writel_relaxed(temp, USB_PORTSC);

	timeout = jiffies + usecs_to_jiffies(PHY_RESUME_TIMEOUT_USEC);
	while ((readl_relaxed(USB_PORTSC) & PORTSC_PHCD) ||
			!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_SYNC_STATE)) {
		if (time_after(jiffies, timeout)) {
			/* This is a fatal error. Reset the link and PHY */
			dev_err(mhcd->dev, "Unable to resume USB. Resetting the h/w\n");
			msm_hsusb_reset(mhcd);
			break;
		}
		udelay(1);
	}

skip_phy_resume:

	usb_hcd_resume_root_hub(hcd);
	atomic_set(&mhcd->in_lpm, 0);

	if (mhcd->async_int) {
		mhcd->async_int = false;
		pm_runtime_put_noidle(mhcd->dev);
		enable_irq(hcd->irq);
	}

	if (atomic_read(&mhcd->pm_usage_cnt)) {
		atomic_set(&mhcd->pm_usage_cnt, 0);
		pm_runtime_put_noidle(mhcd->dev);
	}

	dev_info(mhcd->dev, "EHCI USB exited from low power mode\n");

	return 0;
}
Example #8
static int __init gicv2m_init_one(struct device_node *node,
				  struct irq_domain *parent)
{
	int ret;
	struct v2m_data *v2m;
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, 0, &v2m->res);
	if (ret) {
		pr_err("Failed to allocate v2m resource.\n");
		goto err_free_v2m;
	}

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
	    !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
		pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			v2m->spi_start, v2m->nr_spis);
	} else {
		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		ret = -ENOMEM;
		goto err_free_bm;
	}

	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(node,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		ret = -ENOMEM;
		goto err_free_domains;
	}

	spin_lock_init(&v2m->msi_cnt_lock);

	pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
		(unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis));

	return 0;

err_free_domains:
	if (plat_domain)
		irq_domain_remove(plat_domain);
	if (pci_domain)
		irq_domain_remove(pci_domain);
	if (inner_domain)
		irq_domain_remove(inner_domain);
err_free_bm:
	kfree(v2m->bm);
err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}
Example #9
void tegra_init_cache(bool init)
{
#ifdef CONFIG_CACHE_L2X0
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;
	u32 speedo;
	u32 tmp;

#ifdef CONFIG_TRUSTED_FOUNDATIONS
	/* issue the SMC to enable the L2 */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	tegra_cache_smc(true, aux_ctrl);

	/* after init, reread aux_ctrl and register handlers */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	l2x0_init(p, aux_ctrl, 0xFFFFFFFF);

	/* override outer_disable() with our disable */
	outer_cache.disable = tegra_l2x0_disable;
#else
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		/* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
		speedo = tegra_cpu_speedo_id();
		if (speedo == 4 || speedo == 5 || speedo == 6) {
			writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
		} else {
			writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
		}
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	if (init) {
		l2x0_init(p, aux_ctrl, 0x8200c3fe);
	} else {
		tmp = aux_ctrl;
		aux_ctrl = readl(p + L2X0_AUX_CTRL);
		aux_ctrl &= 0x8200c3fe;
		aux_ctrl |= tmp;
		writel(aux_ctrl, p + L2X0_AUX_CTRL);
	}
	l2x0_enable();
#endif
#endif
}
Example #10
static unsigned int u2o_get(void __iomem *base, unsigned int offset)
{
	return readl_relaxed(base + offset);
}
Example #11
static void u2o_write(void __iomem *base, unsigned int offset,
		unsigned int value)
{
	writel_relaxed(value, base + offset);
	readl_relaxed(base + offset);
}
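u2o_write() reads the register back immediately after writing it; the dummy read forces the posted write out to the device before the function returns. The two accessors compose naturally into set/clear helpers, for example (a sketch in the same style; the names are illustrative):

static void u2o_set(void __iomem *base, unsigned int offset,
		unsigned int value)
{
	u2o_write(base, offset, u2o_get(base, offset) | value);
}

static void u2o_clear(void __iomem *base, unsigned int offset,
		unsigned int value)
{
	u2o_write(base, offset, u2o_get(base, offset) & ~value);
}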
Example #12
static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
{
	u32 n_regval = 0, m_regval = 0, d_regval = 0;
	u32 cfg_regval;
	struct clk_freq_tbl *freq;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Get values of m, n, d, div and src_sel registers. */
	if (has_mnd) {
		m_regval = readl_relaxed(M_REG(rcg));
		n_regval = readl_relaxed(N_REG(rcg));
		d_regval = readl_relaxed(D_REG(rcg));

		/*
		 * The n and d values stored in the frequency tables are sign
		 * extended to 32 bits. The n and d values in the registers are
		 * sign extended to 8 or 16 bits. Sign extend the values read
		 * from the registers so that they can be compared to the
		 * values in the frequency tables.
		 */
		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
	}

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
				| MND_MODE_MASK;

	/* If mnd counter is present, check if it's in use. */
	has_mnd = (has_mnd) &&
		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);

	/*
	 * Clear out the mn counter mode bits since we now want to compare only
	 * the source mux selection and pre-divider values in the registers.
	 */
	cfg_regval &= ~MND_MODE_MASK;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		if (freq->div_src_val != cfg_regval)
			continue;
		if (has_mnd) {
			if (freq->m_val != m_regval)
				continue;
			if (freq->n_val != n_regval)
				continue;
			if (freq->d_val != d_regval)
				continue;
		}
		break;
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}
Example #13
static int modem_reset(struct pil_desc *pil)
{
	u32 reg;
	const struct modem_data *drv = dev_get_drvdata(pil->dev);
	phys_addr_t start_addr = pil_get_entry_addr(pil);

	/* Put modem AHB0,1,2 clocks into reset */
	writel_relaxed(BIT(0) | BIT(1), drv->cbase + MAHB0_SFAB_PORT_RESET);
	writel_relaxed(BIT(7), drv->cbase + MAHB1_CLK_CTL);
	writel_relaxed(BIT(7), drv->cbase + MAHB2_CLK_CTL);

	/* Vote for pll8 on behalf of the modem */
	reg = readl_relaxed(drv->cbase + PLL_ENA_MARM);
	reg |= BIT(8);
	writel_relaxed(reg, drv->cbase + PLL_ENA_MARM);

	/* Wait for PLL8 to enable */
	while (!(readl_relaxed(drv->cbase + PLL8_STATUS) & BIT(16)))
		cpu_relax();

	/* Set MAHB1 divider to Div-5 to run MAHB1,2 and sfab at 79.8 MHz */
	writel_relaxed(0x4, drv->cbase + MAHB1_NS);

	/* Vote for modem AHB1 and 2 clocks to be on, on behalf of the modem */
	reg = readl_relaxed(drv->cbase + MARM_CLK_BRANCH_ENA_VOTE);
	reg |= BIT(0) | BIT(1);
	writel_relaxed(reg, drv->cbase + MARM_CLK_BRANCH_ENA_VOTE);

	/* Source marm_clk off of PLL8 */
	reg = readl_relaxed(drv->cbase + MARM_CLK_SRC_CTL);
	if ((reg & 0x1) == 0) {
		writel_relaxed(0x3, drv->cbase + MARM_CLK_SRC1_NS);
		reg |= 0x1;
	} else {
		writel_relaxed(0x3, drv->cbase + MARM_CLK_SRC0_NS);
		reg &= ~0x1;
	}
	writel_relaxed(reg | 0x2, drv->cbase + MARM_CLK_SRC_CTL);

	/*
	 * Force core on and periph on signals to remain active during halt
	 * for marm_clk and mahb2_clk
	 */
	writel_relaxed(0x6F, drv->cbase + MARM_CLK_FS);
	writel_relaxed(0x6F, drv->cbase + MAHB2_CLK_FS);

	/*
	 * Enable all of the marm_clk branches, cxo sourced marm branches,
	 * and sleep clock branches
	 */
	writel_relaxed(0x10, drv->cbase + MARM_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MAHB0_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + SFAB_MSS_S_HCLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_MODEM_CXO_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_SLP_CLK_CTL);
	writel_relaxed(0x10, drv->cbase + MSS_MARM_SYS_REF_CLK_CTL);

	/* Wait for above clocks to be turned on */
	while (readl_relaxed(drv->cbase + CLK_HALT_MSS_SMPSS_MISC_STATE) &
			(BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(4) | BIT(6)))
		cpu_relax();

	/* Take MAHB0,1,2 clocks out of reset */
	writel_relaxed(0x0, drv->cbase + MAHB2_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MAHB1_CLK_CTL);
	writel_relaxed(0x0, drv->cbase + MAHB0_SFAB_PORT_RESET);
	mb();

	/* Setup exception vector table base address */
	writel_relaxed(start_addr | 0x1, drv->base + MARM_BOOT_CONTROL);

	/* Wait for vector table to be setup */
	mb();

	/* Bring modem out of reset */
	writel_relaxed(0x0, drv->cbase + MARM_RESET);

	return 0;
}
Example #14
static int pll_clk_is_enabled(struct clk *c)
{
	return readl_relaxed(PLL_MODE_REG(to_pll_shared_clk(c))) & BIT(0);
}
Example #15
static inline u32 xgene_clk_read(void __iomem *csr)
{
	return readl_relaxed(csr);
}
Example #16
static int configure_iris_xo(bool use_48mhz_xo, int on)
{
	u32 reg = 0;
	int rc = 0;

	if (on) {
		msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
		if (!msm_riva_base) {
			pr_err("ioremap MSM_RIVA_PHYS failed\n");
			goto fail;
		}

		/* Enable IRIS XO */
		writel_relaxed(0, RIVA_PMU_CFG);
		reg = readl_relaxed(RIVA_PMU_CFG);
		reg |= RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
				RIVA_PMU_CFG_IRIS_XO_EN;
		writel_relaxed(reg, RIVA_PMU_CFG);

		/* Clear XO_MODE[b2:b1] bits. Clear implies 19.2 MHz TCXO */
		reg &= ~(RIVA_PMU_CFG_IRIS_XO_MODE);

		if (use_48mhz_xo)
			reg |= RIVA_PMU_CFG_IRIS_XO_MODE_48;

		writel_relaxed(reg, RIVA_PMU_CFG);

		/* Start IRIS XO configuration */
		reg |= RIVA_PMU_CFG_IRIS_XO_CFG;
		writel_relaxed(reg, RIVA_PMU_CFG);

		/* Wait for XO configuration to finish */
		while (readl_relaxed(RIVA_PMU_CFG) &
						RIVA_PMU_CFG_IRIS_XO_CFG_STS)
			cpu_relax();

		/* Stop IRIS XO configuration */
		reg &= ~(RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
				RIVA_PMU_CFG_IRIS_XO_CFG);
		writel_relaxed(reg, RIVA_PMU_CFG);

		if (!use_48mhz_xo) {
			wlan_clock = msm_xo_get(MSM_XO_TCXO_A0, id);
			if (IS_ERR(wlan_clock)) {
				rc = PTR_ERR(wlan_clock);
				pr_err("Failed to get MSM_XO_TCXO_A0 voter"
							" (%d)\n", rc);
				goto fail;
			}

			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_ON);
			if (rc < 0) {
				pr_err("Configuring MSM_XO_MODE_ON failed"
							" (%d)\n", rc);
				goto msm_xo_vote_fail;
			}
		}
	} else {
		if (wlan_clock != NULL && !use_48mhz_xo) {
			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
			if (rc < 0)
				pr_err("Configuring MSM_XO_MODE_OFF failed"
							" (%d)\n", rc);
		}
	}

	/* Add some delay for XO to settle */
	msleep(20);

	return rc;

msm_xo_vote_fail:
	msm_xo_put(wlan_clock);

fail:
	return rc;
}
Example #17
int hdmi_pll_set_rate(unsigned rate)
{
	unsigned int set_power_dwn = 0;
	u32 ahb_en_reg = readl_relaxed(AHB_EN_REG);
	u32 ahb_enabled = ahb_en_reg & BIT(4);

	if (!ahb_enabled) {
		writel_relaxed(ahb_en_reg | BIT(4), AHB_EN_REG);
		/* Make sure iface clock is enabled before register access */
		mb();
	}

	if (hdmi_pll_on) {
		hdmi_pll_disable();
		set_power_dwn = 1;
	}

	switch (rate) {
	case 27030000:
		/* 480p60/480i60 case */
		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
		writel_relaxed(0x38, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
		writel_relaxed(0x20, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0xFF, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0x4E, HDMI_PHY_PLL_SDM_CFG1);
		writel_relaxed(0xD7, HDMI_PHY_PLL_SDM_CFG2);
		writel_relaxed(0x03, HDMI_PHY_PLL_SDM_CFG3);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
		writel_relaxed(0x2A, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
		writel_relaxed(0x33, HDMI_PHY_PLL_VCOCAL_CFG6);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG7);
	break;

	case 25200000:
		/* 640x480p60 */
		writel_relaxed(0x32, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
		writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
		writel_relaxed(0x77, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG2);
		writel_relaxed(0xC0, HDMI_PHY_PLL_SDM_CFG3);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
		writel_relaxed(0x9A, HDMI_PHY_PLL_SSC_CFG0);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
		writel_relaxed(0x20, HDMI_PHY_PLL_SSC_CFG3);
		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
		writel_relaxed(0xF4, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
		writel_relaxed(0x33, HDMI_PHY_PLL_VCOCAL_CFG6);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG7);
	break;

	case 27000000:
		/* 576p50/576i50 case */
		writel_relaxed(0x32, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
		writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
		writel_relaxed(0x7B, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
		writel_relaxed(0xC0, HDMI_PHY_PLL_SDM_CFG3);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
		writel_relaxed(0x9A, HDMI_PHY_PLL_SSC_CFG0);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
		writel_relaxed(0x2a, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x03, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
		writel_relaxed(0x33, HDMI_PHY_PLL_VCOCAL_CFG6);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG7);
	break;

	case 74250000:
		/* 720p60/720p50/1080i60/1080i50
		 * 1080p24/1080p30/1080p25 case
		 */
		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
		writel_relaxed(0x12, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0xE6, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
	break;

	case 108000000:
		writel_relaxed(0x08, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x21, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0xF9, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x1C, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
		writel_relaxed(0x49, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0x49, HDMI_PHY_PLL_SDM_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG3);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
	break;

	case 148500000:
		/* 1080p60/1080p50 case */
		writel_relaxed(0x2, HDMI_PHY_PLL_REFCLK_CFG);
		writel_relaxed(0x2, HDMI_PHY_PLL_CHRG_PUMP_CFG);
		writel_relaxed(0x01, HDMI_PHY_PLL_LOOP_FLT_CFG0);
		writel_relaxed(0x33, HDMI_PHY_PLL_LOOP_FLT_CFG1);
		writel_relaxed(0x2C, HDMI_PHY_PLL_IDAC_ADJ_CFG);
		writel_relaxed(0x6, HDMI_PHY_PLL_I_VI_KVCO_CFG);
		writel_relaxed(0xA, HDMI_PHY_PLL_PWRDN_B);
		writel_relaxed(0x76, HDMI_PHY_PLL_SDM_CFG0);
		writel_relaxed(0x01, HDMI_PHY_PLL_SDM_CFG1);
		writel_relaxed(0x4C, HDMI_PHY_PLL_SDM_CFG2);
		writel_relaxed(0xC0, HDMI_PHY_PLL_SDM_CFG3);
		writel_relaxed(0x00, HDMI_PHY_PLL_SDM_CFG4);
		writel_relaxed(0x9A, HDMI_PHY_PLL_SSC_CFG0);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG1);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_SSC_CFG3);
		writel_relaxed(0x10, HDMI_PHY_PLL_LOCKDET_CFG0);
		writel_relaxed(0x1A, HDMI_PHY_PLL_LOCKDET_CFG1);
		writel_relaxed(0x0D, HDMI_PHY_PLL_LOCKDET_CFG2);
		writel_relaxed(0xe6, HDMI_PHY_PLL_VCOCAL_CFG0);
		writel_relaxed(0x02, HDMI_PHY_PLL_VCOCAL_CFG1);
		writel_relaxed(0x3B, HDMI_PHY_PLL_VCOCAL_CFG2);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG3);
		writel_relaxed(0x86, HDMI_PHY_PLL_VCOCAL_CFG4);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG5);
		writel_relaxed(0x33, HDMI_PHY_PLL_VCOCAL_CFG6);
		writel_relaxed(0x00, HDMI_PHY_PLL_VCOCAL_CFG7);
	break;
	}

	/* Make sure writes complete before disabling iface clock */
	mb();

	if (set_power_dwn)
		hdmi_pll_enable();

	if (!ahb_enabled)
		writel_relaxed(ahb_en_reg & ~BIT(4), AHB_EN_REG);

	return 0;
}
Example #18
static u64 notrace omap_32k_read_sched_clock(void)
{
	return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
}
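This is the read hook handed to the kernel's sched_clock layer. Registration presumably happens once sync32k_cnt_reg has been ioremapped, along these lines for a 32-bit counter ticking at 32768 Hz:

	sched_clock_register(omap_32k_read_sched_clock, 32, 32768);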
Example #19
static int msm_ehci_suspend(struct msm_hcd *mhcd)
{
	struct msm_usb_host_platform_data *pdata;
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	unsigned long timeout;
	int ret;
	u32 portsc;

	pdata = mhcd->dev->platform_data;
	if (atomic_read(&mhcd->in_lpm)) {
		dev_dbg(mhcd->dev, "%s called in lpm\n", __func__);
		return 0;
	}

	disable_irq(hcd->irq);

	/* Set the PHCD bit, only if it is not set by the controller.
	 * PHY may take some time or even fail to enter into low power
	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
	 * in the failure case.
	 */
	portsc = readl_relaxed(USB_PORTSC);
	if (!(portsc & PORTSC_PHCD)) {
		writel_relaxed(portsc | PORTSC_PHCD,
				USB_PORTSC);

		timeout = jiffies + usecs_to_jiffies(PHY_SUSPEND_TIMEOUT_USEC);
		while (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) {
			if (time_after(jiffies, timeout)) {
				dev_err(mhcd->dev, "Unable to suspend PHY\n");
				schedule_work(&mhcd->phy_susp_fail_work);
				return -ETIMEDOUT;
			}
			udelay(1);
		}
	}

	/*
	 * PHY has capability to generate interrupt asynchronously in low
	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
	 * line must be disabled till async interrupt enable bit is cleared
	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
	 * block data communication from PHY.
	 */
	writel_relaxed(readl_relaxed(USB_USBCMD) | ASYNC_INTR_CTRL |
				ULPI_STP_CTRL, USB_USBCMD);

	/*
	 * Ensure that hardware is put in low power mode before
	 * clocks are turned OFF and VDD is allowed to minimize.
	 */
	mb();

	clk_disable_unprepare(mhcd->iface_clk);
	clk_disable_unprepare(mhcd->core_clk);

	/* usb phy does not require TCXO clock, hence vote for TCXO disable */
	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
	if (ret)
		dev_err(mhcd->dev, "%s failed to devote for "
			"TCXO D0 buffer%d\n", __func__, ret);

	if (!pdata->mpm_xo_wakeup_int)
		msm_ehci_config_vddcx(mhcd, 0);

	atomic_set(&mhcd->in_lpm, 1);
	enable_irq(hcd->irq);
	if (mhcd->pmic_gpio_dp_irq) {
		mhcd->pmic_gpio_dp_irq_enabled = 1;
		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
		enable_irq(mhcd->pmic_gpio_dp_irq);
	}
	wake_unlock(&mhcd->wlock);

	dev_info(mhcd->dev, "EHCI USB in low power mode\n");

	return 0;
}
Example #20
static u32 exynos_rng_readl(struct exynos_rng_dev *rng, u32 offset)
{
	return readl_relaxed(rng->mem + offset);
}
Example #21
static int mipi_dsi_panel_msm_power(int on)
{
	int rc = 0;
	uint32_t lcdc_reset_cfg;

	/* I2C-controlled GPIO expander - initialize the GPIOs very late */
	if (unlikely(!dsi_gpio_initialized)) {
		pmapp_disp_backlight_init();

		rc = gpio_request(GPIO_DISPLAY_PWR_EN, "gpio_disp_pwr");
		if (rc < 0) {
			pr_err("failed to request gpio_disp_pwr\n");
			return rc;
		}

		if (machine_is_msm7x27a_surf() || machine_is_msm7625a_surf()
				|| machine_is_msm8625_surf()) {
			rc = gpio_direction_output(GPIO_DISPLAY_PWR_EN, 1);
			if (rc < 0) {
				pr_err("failed to enable display pwr\n");
				goto fail_gpio1;
			}

			rc = gpio_request(GPIO_BACKLIGHT_EN, "gpio_bkl_en");
			if (rc < 0) {
				pr_err("failed to request gpio_bkl_en\n");
				goto fail_gpio1;
			}

			rc = gpio_direction_output(GPIO_BACKLIGHT_EN, 1);
			if (rc < 0) {
				pr_err("failed to enable backlight\n");
				goto fail_gpio2;
			}
		}

		rc = regulator_bulk_get(NULL, ARRAY_SIZE(regs_dsi), regs_dsi);
		if (rc) {
			pr_err("%s: could not get regulators: %d\n",
					__func__, rc);
			goto fail_gpio2;
		}

		rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs_dsi),
						regs_dsi);
		if (rc) {
			pr_err("%s: could not set voltages: %d\n",
					__func__, rc);
			goto fail_vreg;
		}
		if (pmapp_disp_backlight_set_brightness(100))
			pr_err("backlight set brightness failed\n");

		dsi_gpio_initialized = 1;
	}
	if (machine_is_msm7x27a_surf() || machine_is_msm7625a_surf() ||
			machine_is_msm8625_surf()) {
		gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, on);
		gpio_set_value_cansleep(GPIO_BACKLIGHT_EN, on);
	} else if (machine_is_msm7x27a_ffa() || machine_is_msm7625a_ffa()
					|| machine_is_msm8625_ffa()) {
		if (on) {
			/* This line drives an active low pin on FFA */
			rc = gpio_direction_output(GPIO_DISPLAY_PWR_EN, !on);
			if (rc < 0)
				pr_err("failed to set direction for "
					"display pwr\n");
		} else {
			gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, !on);
			rc = gpio_direction_input(GPIO_DISPLAY_PWR_EN);
			if (rc < 0)
				pr_err("failed to set direction for "
					"display pwr\n");
		}
	}

	if (on) {
		gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 0);

		if (machine_is_msm7x27a_surf() ||
				 machine_is_msm7625a_surf() ||
				 machine_is_msm8625_surf()) {
			lcdc_reset_cfg = readl_relaxed(lcdc_reset_ptr);
			rmb();
			lcdc_reset_cfg &= ~1;

			writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr);
			msleep(20);
			wmb();
			lcdc_reset_cfg |= 1;
			writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr);
		} else {
			gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 0);
			msleep(20);
			gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 1);
		}
	} else {
		gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 1);
	}

	rc = on ? regulator_bulk_enable(ARRAY_SIZE(regs_dsi), regs_dsi) :
		  regulator_bulk_disable(ARRAY_SIZE(regs_dsi), regs_dsi);

	if (rc)
		pr_err("%s: could not %sable regulators: %d\n",
				__func__, on ? "en" : "dis", rc);

	return rc;
fail_vreg:
	regulator_bulk_free(ARRAY_SIZE(regs_dsi), regs_dsi);
fail_gpio2:
	gpio_free(GPIO_BACKLIGHT_EN);
fail_gpio1:
	gpio_free(GPIO_DISPLAY_PWR_EN);
	dsi_gpio_initialized = 0;
	return rc;
}
Example #22
static inline bool pmu_power_domain_is_on(int pd)
{
	return !(readl_relaxed(pmu_base_addr + PMU_PWRDN_ST) & BIT(pd));
}
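PMU_PWRDN_ST is a power-down status register, hence the negation: a cleared bit means the domain is on. A caller that needs to wait for a domain to come up could poll it with the iopoll helper, roughly as follows (a sketch; the 10 us / 1 ms timings are arbitrary):

static int pmu_wait_power_domain_on(int pd)
{
	u32 val;

	/* wait up to 1 ms for the power-down bit to clear */
	return readl_relaxed_poll_timeout(pmu_base_addr + PMU_PWRDN_ST, val,
					  !(val & BIT(pd)), 10, 1000);
}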
Example #23
static int rockchip_spi_transfer_one(
		struct spi_master *master,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	int ret = 1;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	rs->speed = xfer->speed_hz;
	rs->bpw = xfer->bits_per_word;
	rs->n_bytes = rs->bpw >> 3;

	rs->tx = xfer->tx_buf;
	rs->tx_end = rs->tx + xfer->len;
	rs->rx = xfer->rx_buf;
	rs->rx_end = rs->rx + xfer->len;
	rs->len = xfer->len;

	rs->tx_sg = xfer->tx_sg;
	rs->rx_sg = xfer->rx_sg;

	if (rs->tx && rs->rx)
		rs->tmode = CR0_XFM_TR;
	else if (rs->tx)
		rs->tmode = CR0_XFM_TO;
	else if (rs->rx)
		rs->tmode = CR0_XFM_RO;

	/* we need to prepare DMA before the SPI is enabled */
	if (master->can_dma && master->can_dma(master, spi, xfer))
		rs->use_dma = 1;
	else
		rs->use_dma = 0;

	rockchip_spi_config(rs);

	if (rs->use_dma) {
		if (rs->tmode == CR0_XFM_RO) {
			/* rx: dma must be prepared first */
			ret = rockchip_spi_prepare_dma(rs);
			spi_enable_chip(rs, 1);
		} else {
			/* tx or tr: spi must be enabled first */
			spi_enable_chip(rs, 1);
			ret = rockchip_spi_prepare_dma(rs);
		}
	} else {
		spi_enable_chip(rs, 1);
		ret = rockchip_spi_pio_transfer(rs);
	}

	return ret;
}
Example #24
/*
 * This function reads the hardware random bit generator directly and returns
 * the data to the caller.
 */
int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, void *data)
{
	struct platform_device *pdev;
	void __iomem *base;
	size_t currsize = 0;
	u32 val;
	u32 *retdata = data;
	int ret;
	int failed = 0;

	pdev = msm_rng_dev->pdev;
	base = msm_rng_dev->base;

	mutex_lock(&msm_rng_dev->rng_lock);

	if (msm_rng_dev->qrng_perf_client) {
		ret = msm_bus_scale_client_update_request(
				msm_rng_dev->qrng_perf_client, 1);
		if (ret)
			pr_err("bus_scale_client_update_req failed!\n");
	}
	/* enable PRNG clock */
	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock in callback\n");
		goto err;
	}
	/* read random data from h/w */
	do {
		/* check status bit if data is available */
		while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
					& 0x00000001)) {
			if (failed == 10) {
				pr_err("Data not available after retry\n");
				break;
			}
			pr_err("msm_rng:Data not available!\n");
			msleep_interruptible(10);
			failed++;
		}

		/* read FIFO */
		val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
		if (!val)
			break;	/* no data to read so just bail */

		/* write data back to callers pointer */
		*(retdata++) = val;
		currsize += 4;

	} while (currsize < Q_HW_DRBG_BLOCK_BYTES);

	/* vote to turn off clock */
	clk_disable_unprepare(msm_rng_dev->prng_clk);
err:
	if (msm_rng_dev->qrng_perf_client) {
		ret = msm_bus_scale_client_update_request(
				msm_rng_dev->qrng_perf_client, 0);
		if (ret)
			pr_err("bus_scale_client_update_req failed!\n");
	}
	mutex_unlock(&msm_rng_dev->rng_lock);

	val = 0L;
	return currsize;
}
Example #25
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	sdhci_get_of_property(pdev);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);

	if (IS_ERR(msm_host->core_mem)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(msm_host->core_mem);
		goto clk_disable;
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + CORE_VENDOR_SPEC);

	/* Set HC_MODE_EN bit in HC_MODE register */
	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));

	config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
	config |= FF_CLK_SW_RST_DIS;
	writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		      CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
	 * interrupt in GIC, any pending power irq interrupt should be
	 * acknowledged. Otherwise power irq interrupt handler would be
	 * fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before interrupt
	 * enablement in GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
			msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
Example #26
static int msm_rng_drbg_read(struct hwrng *rng,
			void *data, size_t max, bool wait)
{
	struct msm_rng_device *msm_rng_dev;
	struct platform_device *pdev;
	void __iomem *base;
	size_t currsize = 0;
	u32 val;
	u32 *retdata = data;
	int ret, ret1;
	int failed = 0;

	msm_rng_dev = (struct msm_rng_device *)rng->priv;
	pdev = msm_rng_dev->pdev;
	base = msm_rng_dev->base;

	/* no room for word data */
	if (max < 4)
		return 0;

	mutex_lock(&msm_rng_dev->rng_lock);

	/* read random data from CTR-AES based DRBG */
	if (FIPS140_DRBG_ENABLED == msm_rng_dev->fips140_drbg_enabled) {
		ret1 = fips_drbg_gen(msm_rng_dev->drbg_ctx, data, max);
		if (FIPS140_PRNG_ERR == ret1)
			panic("random number generator generator error.\n");
	} else
		ret1 = 1;

	if (msm_rng_dev->qrng_perf_client) {
		ret = msm_bus_scale_client_update_request(
				msm_rng_dev->qrng_perf_client, 1);
		if (ret)
			pr_err("bus_scale_client_update_req failed!\n");
	}

	/* read random data from h/w */
	/* enable PRNG clock */
	ret = clk_prepare_enable(msm_rng_dev->prng_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock in callback\n");
		goto err;
	}
	/* read random data from h/w */
	do {
		/* check status bit if data is available */
		while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
				& 0x00000001)) {
			if (failed == 10) {
				pr_err("Data not available after retry\n");
				break;
			}
			pr_err("msm_rng:Data not available!\n");
			msleep_interruptible(10);
			failed++;
		}

		/* read FIFO */
		val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
		if (!val)
			break;	/* no data to read so just bail */

		/* write data back to callers pointer */
		if (0 != ret1)
			*(retdata++) = val;
		currsize += 4;

		/* make sure we stay on 32bit boundary */
		if ((max - currsize) < 4)
			break;
	} while (currsize < max);
	/* vote to turn off clock */
	clk_disable_unprepare(msm_rng_dev->prng_clk);
err:
	if (msm_rng_dev->qrng_perf_client) {
		ret = msm_bus_scale_client_update_request(
				msm_rng_dev->qrng_perf_client, 0);
		if (ret)
			pr_err("bus_scale_client_update_req failed!\n");
	}

	mutex_unlock(&msm_rng_dev->rng_lock);

	return currsize;
}
Example #27
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);

	/* Perform CDC Register Initialization Sequence */

	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + CORE_DDR_200_CFG);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + CORE_DDR_200_CFG);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}
Example #28
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
	return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
}
Example #29
int dss_pll_write_config_type_a(struct dss_pll *pll,
		const struct dss_pll_clock_info *cinfo)
{
	const struct dss_pll_hw *hw = pll->hw;
	void __iomem *base = pll->base;
	int r = 0;
	u32 l;

	l = 0;
	if (hw->has_stopmode)
		l = FLD_MOD(l, 1, 0, 0);		/* PLL_STOPMODE */
	l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb);	/* PLL_REGN */
	l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb);		/* PLL_REGM */
	/* M4 */
	l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0,
			hw->mX_msb[0], hw->mX_lsb[0]);
	/* M5 */
	l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0,
			hw->mX_msb[1], hw->mX_lsb[1]);
	writel_relaxed(l, base + PLL_CONFIGURATION1);

	l = 0;
	/* M6 */
	l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0,
			hw->mX_msb[2], hw->mX_lsb[2]);
	/* M7 */
	l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0,
			hw->mX_msb[3], hw->mX_lsb[3]);
	writel_relaxed(l, base + PLL_CONFIGURATION3);

	l = readl_relaxed(base + PLL_CONFIGURATION2);
	if (hw->has_freqsel) {
		u32 f = cinfo->fint < 1000000 ? 0x3 :
			cinfo->fint < 1250000 ? 0x4 :
			cinfo->fint < 1500000 ? 0x5 :
			cinfo->fint < 1750000 ? 0x6 :
			0x7;

		l = FLD_MOD(l, f, 4, 1);	/* PLL_FREQSEL */
	} else if (hw->has_selfreqdco) {
		u32 f = cinfo->clkdco < hw->clkdco_low ? 0x2 : 0x4;

		l = FLD_MOD(l, f, 3, 1);	/* PLL_SELFREQDCO */
	}
	l = FLD_MOD(l, 1, 13, 13);		/* PLL_REFEN */
	l = FLD_MOD(l, 0, 14, 14);		/* PHY_CLKINEN */
	l = FLD_MOD(l, 0, 16, 16);		/* M4_CLOCK_EN */
	l = FLD_MOD(l, 0, 18, 18);		/* M5_CLOCK_EN */
	l = FLD_MOD(l, 1, 20, 20);		/* HSDIVBYPASS */
	if (hw->has_refsel)
		l = FLD_MOD(l, 3, 22, 21);	/* REFSEL = sysclk */
	l = FLD_MOD(l, 0, 23, 23);		/* M6_CLOCK_EN */
	l = FLD_MOD(l, 0, 25, 25);		/* M7_CLOCK_EN */
	writel_relaxed(l, base + PLL_CONFIGURATION2);

	writel_relaxed(1, base + PLL_GO);	/* PLL_GO */

	if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
		DSSERR("DSS DPLL GO bit not going down.\n");
		r = -EIO;
		goto err;
	}

	if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
		DSSERR("cannot lock DSS DPLL\n");
		r = -EIO;
		goto err;
	}

	l = readl_relaxed(base + PLL_CONFIGURATION2);
	l = FLD_MOD(l, 1, 14, 14);			/* PHY_CLKINEN */
	l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16);	/* M4_CLOCK_EN */
	l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18);	/* M5_CLOCK_EN */
	l = FLD_MOD(l, 0, 20, 20);			/* HSDIVBYPASS */
	l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23);	/* M6_CLOCK_EN */
	l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25);	/* M7_CLOCK_EN */
	writel_relaxed(l, base + PLL_CONFIGURATION2);

	r = dss_wait_hsdiv_ack(pll,
		(cinfo->mX[0] ? BIT(7) : 0) |
		(cinfo->mX[1] ? BIT(8) : 0) |
		(cinfo->mX[2] ? BIT(10) : 0) |
		(cinfo->mX[3] ? BIT(11) : 0));
	if (r) {
		DSSERR("failed to enable HSDIV clocks\n");
		goto err;
	}

err:
	return r;
}
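The register values above are assembled with FLD_MOD(), which replaces one bit field of a word while leaving the rest intact. For reference, the usual omapdss definition is equivalent to this sketch (assuming the conventional FLD_MASK/FLD_VAL helpers):

#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))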
Example #30
static int pll_vote_clk_is_enabled(struct clk *c)
{
	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
	return !!(readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask);
}