static int wait_for_mba_ready(struct device *dev)
{
	struct q6v5_data *drv = dev_get_drvdata(dev);
	int ret;
	u32 status;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
		status != 0, POLL_INTERVAL_US, PBL_MBA_WAIT_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "PBL boot timed out\n");
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status != 0, POLL_INTERVAL_US, PBL_MBA_WAIT_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "MBA boot timed out\n");
		return ret;
	}
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}
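All of the examples here revolve around the poll-with-timeout helpers from <linux/iopoll.h>. As a minimal sketch only (not part of the driver, and using the hypothetical name wait_for_pbl_status), the first wait above could be open-coded roughly like this, assuming the usual readl_poll_timeout() semantics: re-read the register, sleep about sleep_us between reads, and give up with -ETIMEDOUT once timeout_us has elapsed, leaving the last value in the output variable.

/*
 * Sketch only: an open-coded equivalent of the RMB_PBL_STATUS wait above,
 * assuming the usual <linux/iopoll.h> readl_poll_timeout() behaviour.
 * wait_for_pbl_status() is a hypothetical helper name.
 */
static int wait_for_pbl_status(struct q6v5_data *drv, u32 *status)
{
	unsigned long timeout = jiffies +
				usecs_to_jiffies(PBL_MBA_WAIT_TIMEOUT_US);

	for (;;) {
		*status = readl(drv->rmb_base + RMB_PBL_STATUS);
		if (*status != 0)
			return 0;
		if (time_after(jiffies, timeout)) {
			/* one final read so callers see the last value */
			*status = readl(drv->rmb_base + RMB_PBL_STATUS);
			return *status != 0 ? 0 : -ETIMEDOUT;
		}
		usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US * 2);
	}
}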
Example #2
static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
{
	int ret;
	u32 val, mask;

	ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
				 val, !(val & GEN_CMD_FULL), 1000,
				 CMD_PKT_STATUS_TIMEOUT_US);
	if (ret) {
		dev_err(dsi->dev, "failed to get available command FIFO\n");
		return ret;
	}

	dsi_write(dsi, DSI_GEN_HDR, hdr_val);

	mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
	ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
				 val, (val & mask) == mask,
				 1000, CMD_PKT_STATUS_TIMEOUT_US);
	if (ret) {
		dev_err(dsi->dev, "failed to write command FIFO\n");
		return ret;
	}

	return 0;
}
static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
{
	struct device *dev = drv->desc.dev;
	int ret;
	u32 status;

	/* Wait for PBL completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
				 status != 0, POLL_INTERVAL_US,
				 pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "PBL boot timed out\n");
		return ret;
	}
	if (status != STATUS_PBL_SUCCESS) {
		dev_err(dev, "PBL returned unexpected status %d\n", status);
		return -EINVAL;
	}

	/* Wait for MBA completion. */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
				 status != 0, POLL_INTERVAL_US,
				 pbl_mba_boot_timeout_ms * 1000);
	if (ret) {
		dev_err(dev, "MBA boot timed out\n");
		return ret;
	}
	if (status != STATUS_XPU_UNLOCKED &&
	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(dev, "MBA returned unexpected status %d\n", status);
		return -EINVAL;
	}

	return 0;
}
Example #4
/**
 * stmmac_mdio_read
 * @bus: points to the mii_bus structure
 * @phyaddr: MII addr
 * @phyreg: MII reg
 * Description: it reads data from the MII register from within the phy device.
 * For the 7111 GMAC, we must set bit 0 in the MII address register while
 * accessing the PHY registers.
 * Fortunately, it seems this has no drawback for the 7109 MAC.
 */
static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
{
	struct net_device *ndev = bus->priv;
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int mii_address = priv->hw->mii.addr;
	unsigned int mii_data = priv->hw->mii.data;
	u32 v;
	int data;
	u32 value = MII_BUSY;

	value |= (phyaddr << priv->hw->mii.addr_shift)
		& priv->hw->mii.addr_mask;
	value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
	value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
		& priv->hw->mii.clk_csr_mask;
	if (priv->plat->has_gmac4)
		value |= MII_GMAC4_READ;

	if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
			       100, 10000))
		return -EBUSY;

	writel(value, priv->ioaddr + mii_address);

	if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
			       100, 10000))
		return -EBUSY;

	/* Read the data from the MII data register */
	data = (int)readl(priv->ioaddr + mii_data);

	return data;
}
Example #5
static int dw_mipi_dsi_read(struct dw_mipi_dsi *dsi,
			    const struct mipi_dsi_msg *msg)
{
	int i, j, ret, len = msg->rx_len;
	u8 *buf = msg->rx_buf;
	u32 val;

	/* Wait end of the read operation */
	ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
				 val, !(val & GEN_RD_CMD_BUSY),
				 1000, CMD_PKT_STATUS_TIMEOUT_US);
	if (ret) {
		dev_err(dsi->dev, "Timeout during read operation\n");
		return ret;
	}

	for (i = 0; i < len; i += 4) {
		/* Read fifo must not be empty before all bytes are read */
		ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
					 val, !(val & GEN_PLD_R_EMPTY),
					 1000, CMD_PKT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(dsi->dev, "Read payload FIFO is empty\n");
			return ret;
		}

		val = dsi_read(dsi, DSI_GEN_PLD_DATA);
		for (j = 0; j < 4 && j + i < len; j++)
			buf[i + j] = val >> (8 * j);
	}

	return ret;
}
Example #6
/* u2-port0 should be powered on and enabled; */
int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
{
	void __iomem *ibase = ssusb->ippc_base;
	u32 value, check_val;
	int ret;

	check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
			SSUSB_REF_RST_B_STS;

	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
			(check_val == (value & check_val)), 100, 20000);
	if (ret) {
		dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
		return ret;
	}

	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
			(value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
	if (ret) {
		dev_err(ssusb->dev, "mac2 clock is not stable\n");
		return ret;
	}

	return 0;
}
Example #7
static int dw_mipi_dsi_phy_init(void *priv_data)
{
	struct dw_mipi_dsi_stm *dsi = priv_data;
	u32 val;
	int ret;

	/* Enable the regulator */
	dsi_set(dsi, DSI_WRPCR, WRPCR_REGEN | WRPCR_BGREN);
	ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_RRS,
				 SLEEP_US, TIMEOUT_US);
	if (ret)
		DRM_DEBUG_DRIVER("!TIMEOUT! waiting REGU, let's continue\n");

	/* Enable the DSI PLL & wait for its lock */
	dsi_set(dsi, DSI_WRPCR, WRPCR_PLLEN);
	ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_PLLLS,
				 SLEEP_US, TIMEOUT_US);
	if (ret)
		DRM_DEBUG_DRIVER("!TIMEOUT! waiting PLL, let's continue\n");

	/* Enable the DSI wrapper */
	dsi_set(dsi, DSI_WCR, WCR_DSIEN);

	return 0;
}
Example #8
static bool msdc_cmd_is_ready(struct msdc_host *host)
{
	int ret;
	u32 reg;

	/* The max busy time we can endure is 20ms */
	ret = readl_poll_timeout(&host->base->sdc_sts, reg,
				 !(reg & SDC_STS_CMDBUSY), 20000);

	if (ret) {
		pr_err("CMD bus busy detected\n");
		msdc_reset_hw(host);
		return false;
	}

	if (host->last_resp_type == MMC_RSP_R1b && host->last_data_write) {
		ret = readl_poll_timeout(&host->base->msdc_ps, reg,
					 reg & MSDC_PS_DAT0, 1000000);

		if (ret) {
			pr_err("Card stuck in programming state!\n");
			msdc_reset_hw(host);
			return false;
		}
	}

	return true;
}
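Note that this example, like the other U-Boot drivers further down (msdc_start_command, _stm32_qspi_poll, ftgmac100_mdio_read, msdc_reset_hw, msdc_fifo_clr), calls U-Boot's four-argument readl_poll_timeout(addr, val, cond, timeout_us), which has no sleep_us parameter. A rough sketch of the SDC_STS wait above under that assumption, using the hypothetical helper name wait_cmd_not_busy:

/*
 * Sketch only, U-Boot flavour: busy-poll SDC_STS until CMDBUSY clears or
 * roughly timeout_us microseconds have passed. wait_cmd_not_busy() is a
 * hypothetical name, not part of the driver.
 */
static int wait_cmd_not_busy(struct msdc_host *host, u32 timeout_us)
{
	u32 reg;

	while (timeout_us--) {
		reg = readl(&host->base->sdc_sts);
		if (!(reg & SDC_STS_CMDBUSY))
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}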
Example #9
/**
 * stmmac_mdio_write
 * @bus: points to the mii_bus structure
 * @phyaddr: MII addr
 * @phyreg: MII reg
 * @phydata: phy data
 * Description: it writes the data into the MII register from within the device.
 */
static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
			     u16 phydata)
{
	struct net_device *ndev = bus->priv;
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int mii_address = priv->hw->mii.addr;
	unsigned int mii_data = priv->hw->mii.data;
	u32 v;
	u32 value = MII_BUSY;

	value |= (phyaddr << priv->hw->mii.addr_shift)
		& priv->hw->mii.addr_mask;
	value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;

	value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
		& priv->hw->mii.clk_csr_mask;
	if (priv->plat->has_gmac4)
		value |= MII_GMAC4_WRITE;
	else
		value |= MII_WRITE;

	/* Wait until any existing MII operation is complete */
	if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
			       100, 10000))
		return -EBUSY;

	/* Set the MII address register to write */
	writel(phydata, priv->ioaddr + mii_data);
	writel(value, priv->ioaddr + mii_address);

	/* Wait until any existing MII operation is complete */
	return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
				  100, 10000);
}
void mdss_dsi_controller_cfg(int enable,
			     struct mdss_panel_data *pdata)
{

	u32 dsi_ctrl;
	u32 status;
	u32 sleep_us = 1000;
	u32 timeout_us = 16000;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	/*
	 * Check for CMD_MODE_DMA_BUSY.
	 *
	 * QCOM patch for the command-mode LCD ESD issue: the command engine
	 * gets blocked when the display-off command is sent during ESD
	 * testing, because panel BTA disturbs the DSI status. Reset the DSI
	 * driver when the command engine is blocked.
	 */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x02) == 0),
			     sleep_us, timeout_us)) {
		pr_info("%s: DSI status=%x failed\n", __func__, status);
		pr_info("%s: Doing sw reset\n", __func__);
		mdss_dsi_sw_reset(pdata);
	}
	/* Check for x_HS_FIFO_EMPTY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x000c),
			   status,
			   ((status & 0x11111000) == 0x11111000),
			       sleep_us, timeout_us))
		pr_info("%s: FIFO status=%x failed\n", __func__, status);

	/* Check for VIDEO_MODE_ENGINE_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x08) == 0),
			       sleep_us, timeout_us)) {
		pr_debug("%s: DSI status=%x\n", __func__, status);
		pr_debug("%s: Doing sw reset\n", __func__);
		mdss_dsi_sw_reset(pdata);
	}

	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
	if (enable)
		dsi_ctrl |= 0x01;
	else
		dsi_ctrl &= ~0x01;

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
	wmb();
}
void mdss_dsi_controller_cfg(int enable,
			     struct mdss_panel_data *pdata)
{

	u32 dsi_ctrl;
	u32 status;
	u32 sleep_us = 1000;
	u32 timeout_us = 16000;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	/* Check for CMD_MODE_DMA_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x02) == 0),
			       sleep_us, timeout_us))
		pr_info("%s: DSI status=%x failed\n", __func__, status);

	/* Check for x_HS_FIFO_EMPTY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x000c),
			   status,
			   ((status & 0x11111000) == 0x11111000),
			       sleep_us, timeout_us))
		pr_info("%s: FIFO status=%x failed\n", __func__, status);

	/* Check for VIDEO_MODE_ENGINE_BUSY */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
			   status,
			   ((status & 0x08) == 0),
			       sleep_us, timeout_us)) {
		pr_debug("%s: DSI status=%x\n", __func__, status);
		pr_debug("%s: Doing sw reset\n", __func__);
		mdss_dsi_sw_reset(pdata);
	}

	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
	if (enable)
		dsi_ctrl |= 0x01;
	else
		dsi_ctrl &= ~0x01;

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
	wmb();
}
static int pil_pronto_shutdown(struct pil_desc *pil)
{
	struct pronto_data *drv = dev_get_drvdata(pil->dev);
	int ret;
	u32 reg, status;

	/* Halt A2XB */
	writel_relaxed(1, drv->axi_halt_base + AXI_HALTREQ);
	ret = readl_poll_timeout(drv->axi_halt_base + AXI_HALTACK,
				status, status, 50, HALT_ACK_TIMEOUT_US);
	if (ret)
		dev_err(pil->dev, "Port halt timeout\n");
	else if (!readl_relaxed(drv->axi_halt_base + AXI_IDLE))
		dev_err(pil->dev, "Port halt failed\n");

	writel_relaxed(0, drv->axi_halt_base + AXI_HALTREQ);

	/* Assert reset to Pronto */
	reg = readl_relaxed(drv->reset_base);
	reg |= CLK_CTL_WCNSS_RESTART_BIT;
	writel_relaxed(reg, drv->reset_base);

	/* Wait for reset to complete */
	mb();
	usleep_range(1000, 2000);

	/* Deassert reset to subsystem and wait for propagation */
	reg = readl_relaxed(drv->reset_base);
	reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
	writel_relaxed(reg, drv->reset_base);
	mb();
	udelay(2);

	return 0;
}
Example #13
static int pil_mba_init_image(struct pil_desc *pil,
			      const u8 *metadata, size_t size)
{
	struct mba_data *drv = dev_get_drvdata(pil->dev);
	u32 status;
	int ret;

	/* Copy metadata to assigned shared buffer location */
	memcpy(drv->metadata_base, metadata, size);

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->reg_base + RMB_PMI_CODE_LENGTH);
	drv->img_length = 0;

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(drv->metadata_phys, drv->reg_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->reg_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->reg_base + RMB_MBA_STATUS, status,
				 status == STATUS_META_DATA_AUTH_SUCCESS,
				 POLL_INTERVAL_US, AUTH_TIMEOUT_US);
	if (ret)
		dev_err(pil->dev, "MBA authentication timed out\n");

	return ret;
}
Example #14
static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
{
	int ret = 0;
	u32 status, display_status_bit;

	if (dma->dma_sel == MDP3_DMA_P)
		display_status_bit = BIT(6);
	else if (dma->dma_sel == MDP3_DMA_S)
		display_status_bit = BIT(7);
	else
		return -EINVAL;

	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		display_status_bit |= BIT(11);

	intf->stop(intf);
	ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
				status,
				((status & display_status_bit) == 0),
				DMA_STOP_POLL_SLEEP_US,
				DMA_STOP_POLL_TIMEOUT_US);

	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
					MDP3_DMA_CALLBACK_TYPE_DMA_DONE);

	return ret;
}
Example #15
static int exynos5440_pcie_phy_power_off(struct phy *phy)
{
	struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
	u32 val;

	if (readl_poll_timeout(ep->phy_base + PCIE_PHY_PLL_LOCKED, val,
				(val != 0), 1, 500)) {
		dev_err(&phy->dev, "PLL Locked: 0x%x\n", val);
		return -ETIMEDOUT;
	}

	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
	val |= PCIE_PHY_COMMON_PD_CMN;
	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);

	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
	val |= PCIE_PHY_TRSV0_PD_TSV;
	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);

	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
	val |= PCIE_PHY_TRSV1_PD_TSV;
	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);

	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
	val |= PCIE_PHY_TRSV2_PD_TSV;
	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);

	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
	val |= PCIE_PHY_TRSV3_PD_TSV;
	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);

	return 0;
}
Example #16
static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
{
	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
	u32 value;
	int ret;
	int i;

	/* power down all u3 ports */
	for (i = 0; i < mtk->num_u3_ports; i++) {
		value = readl(&ippc->u3_ctrl_p[i]);
		value |= CTRL_U3_PORT_PDN;
		writel(value, &ippc->u3_ctrl_p[i]);
	}

	/* power down all u2 ports */
	for (i = 0; i < mtk->num_u2_ports; i++) {
		value = readl(&ippc->u2_ctrl_p[i]);
		value |= CTRL_U2_PORT_PDN;
		writel(value, &ippc->u2_ctrl_p[i]);
	}

	/* power down host ip */
	value = readl(&ippc->ip_pw_ctr1);
	value |= CTRL1_IP_HOST_PDN;
	writel(value, &ippc->ip_pw_ctr1);

	/* wait for host ip to sleep */
	ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
			  (value & STS1_IP_SLEEP_STS), 100, 100000);
	if (ret) {
		dev_err(mtk->dev, "ip sleep failed!!!\n");
		return ret;
	}
	return 0;
}
static int pil_femto_modem_send_rmb_advance(void __iomem *rmb_base, u32 id)
{
	int ret;
	u32 cmd = CMD_RMB_ADVANCE;
	int status;

	if (!rmb_base)
		return -EINVAL;

	/* Prepare the command */
	cmd |= id << 8;

	/* Send the MBA command */
	writel_relaxed(cmd, rmb_base + RMB_MBA_COMMAND);

	/* Wait for MBA status. */
	ret = readl_poll_timeout(rmb_base + RMB_MBA_STATUS, status,
		((status < 0) || (status == STATUS_RMB_UPDATE_ACK)),
		POLL_INTERVAL_US, TIMEOUT_US);

	if (ret)
		return ret;

	if (status != STATUS_RMB_UPDATE_ACK)
		return -EINVAL;

	return ret;
}
Example #18
static int adsp_start(struct rproc *rproc)
{
	struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
	int ret;
	unsigned int val;

	qcom_q6v5_prepare(&adsp->q6v5);

	ret = clk_prepare_enable(adsp->xo);
	if (ret)
		goto disable_irqs;

	dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
	ret = pm_runtime_get_sync(adsp->dev);
	if (ret)
		goto disable_xo_clk;

	ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
	if (ret) {
		dev_err(adsp->dev, "adsp clk_enable failed\n");
		goto disable_power_domain;
	}

	/* Program boot address */
	writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG);

	/* De-assert QDSP6 stop core. QDSP6 will execute after out of reset */
	writel(0x1, adsp->qdsp6ss_base + CORE_START_REG);

	/* Trigger boot FSM to start QDSP6 */
	writel(0x1, adsp->qdsp6ss_base + BOOT_CMD_REG);

	/* Wait for core to come out of reset */
	ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG,
			val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
	if (ret) {
		dev_err(adsp->dev, "failed to bootup adsp\n");
		goto disable_adsp_clks;
	}

	ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5 * HZ));
	if (ret == -ETIMEDOUT) {
		dev_err(adsp->dev, "start timed out\n");
		goto disable_adsp_clks;
	}

	return 0;

disable_adsp_clks:
	clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
disable_power_domain:
	dev_pm_genpd_set_performance_state(adsp->dev, 0);
	pm_runtime_put(adsp->dev);
disable_xo_clk:
	clk_disable_unprepare(adsp->xo);
disable_irqs:
	qcom_q6v5_unprepare(&adsp->q6v5);

	return ret;
}
static void select_clk_source_div(struct acpuclk_drv_data *drv_data,
	struct clkctl_acpu_speed *s)
{
	u32 regval, rc, src_div;
	void __iomem *apcs_rcg_config = drv_data->apcs_rcg_config;
	void __iomem *apcs_rcg_cmd = drv_data->apcs_rcg_cmd;
	struct acpuclk_reg_data *r = &drv_data->reg_data;

	src_div = s->src_div ? ((2 * s->src_div) - 1) : s->src_div;

	regval = readl_relaxed(apcs_rcg_config);
	regval &= ~r->cfg_src_mask;
	regval |= s->src_sel << r->cfg_src_shift;
	regval &= ~r->cfg_div_mask;
	regval |= src_div << r->cfg_div_shift;
	writel_relaxed(regval, apcs_rcg_config);

	/* Update the configuration */
	regval = readl_relaxed(apcs_rcg_cmd);
	regval |= r->update_mask;
	writel_relaxed(regval, apcs_rcg_cmd);

	/* Wait for the update to take effect */
	rc = readl_poll_timeout(apcs_rcg_cmd, regval,
		   !(regval & r->poll_mask),
		   POLL_INTERVAL_US,
		   APCS_RCG_UPDATE_TIMEOUT_US);
	if (rc)
		pr_warn("acpu rcg didn't update its configuration\n");
}
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	int ret;
	s32 status;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0,
			50, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6 && drv->q6->mba_virt) {
		/* Reclaim MBA memory. */
		dma_free_coherent(&drv->mba_mem_dev, drv->q6->mba_size,
					drv->q6->mba_virt, drv->q6->mba_phys);
		drv->q6->mba_virt = NULL;
	}

	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	return ret;
}
static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
{
	int ret = 0;
	u32 status, display_status_bit;

	if (dma->dma_sel == MDP3_DMA_P)
		display_status_bit = BIT(6);
	else if (dma->dma_sel == MDP3_DMA_S)
		display_status_bit = BIT(7);
	else
		return -EINVAL;

	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
		display_status_bit |= BIT(11);

	intf->stop(intf);
	ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
				status,
				((status & display_status_bit) == 0),
				DMA_STOP_POLL_SLEEP_US,
				DMA_STOP_POLL_TIMEOUT_US);

	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
					MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
	mdp3_irq_disable(MDP3_INTR_LCDC_UNDERFLOW);

	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, 0);
	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, 0xfffffff);

	init_completion(&dma->dma_comp);
	dma->vsync_client.handler = NULL;
	return ret;
}
Example #22
static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
	int ret;

	val = (val & ~STM32_HIZ) | STM32_ENVR;
	writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);

	/*
	 * Vrefbuf startup time depends on external capacitor: wait here for
	 * VRR to be set. That means output has reached expected value.
	 * ~650us sleep should be enough for caps up to 1.5uF. Use 10ms as
	 * arbitrary timeout.
	 */
	ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
				 val & STM32_VRR, 650, 10000);
	if (ret) {
		dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
		val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
		val = (val & ~STM32_ENVR) | STM32_HIZ;
		writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
	}

	return ret;
}
Example #23
static int dw_mipi_dsi_write(struct dw_mipi_dsi *dsi,
			     const struct mipi_dsi_packet *packet)
{
	const u8 *tx_buf = packet->payload;
	int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret;
	__le32 word;
	u32 val;

	while (len) {
		if (len < pld_data_bytes) {
			word = 0;
			memcpy(&word, tx_buf, len);
			dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word));
			len = 0;
		} else {
			memcpy(&word, tx_buf, pld_data_bytes);
			dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word));
			tx_buf += pld_data_bytes;
			len -= pld_data_bytes;
		}

		ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
					 val, !(val & GEN_PLD_W_FULL), 1000,
					 CMD_PKT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(dsi->dev,
				"failed to get available write payload FIFO\n");
			return ret;
		}
	}

	word = 0;
	memcpy(&word, packet->header, sizeof(packet->header));
	return dw_mipi_dsi_gen_pkt_hdr_write(dsi, le32_to_cpu(word));
}
Example #24
static int _stm32_qspi_poll(struct stm32_qspi_priv *priv,
			    const struct spi_mem_op *op)
{
	void (*fifo)(u8 *val, void __iomem *addr);
	u32 len = op->data.nbytes, sr;
	u8 *buf;
	int ret;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		fifo = _stm32_qspi_read_fifo;
		buf = op->data.buf.in;

	} else {
		fifo = _stm32_qspi_write_fifo;
		buf = (u8 *)op->data.buf.out;
	}

	while (len--) {
		ret = readl_poll_timeout(&priv->regs->sr, sr,
					 sr & STM32_QSPI_SR_FTF,
					 STM32_QSPI_FIFO_TIMEOUT_US);
		if (ret) {
			pr_err("fifo timeout (len:%d stat:%#x)\n", len, sr);
			return ret;
		}

		fifo(buf++, &priv->regs->dr);
	}

	return 0;
}
Example #25
/*
 * struct mii_bus functions
 */
static int ftgmac100_mdio_read(struct mii_dev *bus, int phy_addr, int dev_addr,
			       int reg_addr)
{
	struct ftgmac100_data *priv = bus->priv;
	struct ftgmac100 *ftgmac100 = priv->iobase;
	int phycr;
	int data;
	int ret;

	phycr = FTGMAC100_PHYCR_MDC_CYCTHR(MDC_CYCTHR) |
		FTGMAC100_PHYCR_PHYAD(phy_addr) |
		FTGMAC100_PHYCR_REGAD(reg_addr) |
		FTGMAC100_PHYCR_MIIRD;
	writel(phycr, &ftgmac100->phycr);

	ret = readl_poll_timeout(&ftgmac100->phycr, phycr,
				 !(phycr & FTGMAC100_PHYCR_MIIRD),
				 FTGMAC100_MDIO_TIMEOUT_USEC);
	if (ret) {
		pr_err("%s: mdio read failed (phy:%d reg:%x)\n",
		       priv->phydev->dev->name, phy_addr, reg_addr);
		return ret;
	}

	data = readl(&ftgmac100->phydata);

	return FTGMAC100_PHYDATA_MIIRDATA(data);
}
void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
{
	u32 status;
	int timeout_us = 10000;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x098, 0x01);	/* trigger BTA */
	wmb();

	/* Wait for the BTA status bit to clear */
	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
				status, ((status & 0x0010) == 0),
				0, timeout_us))
		pr_info("%s: DSI status=%x failed\n", __func__, status);

	mdss_dsi_ack_err_status(ctrl_pdata);

	pr_debug("%s: BTA done, status = %d\n", __func__, status);
}
Example #27
static int msdc_start_command(struct msdc_host *host, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	u32 rawcmd;
	u32 status;
	u32 blocks = 0;
	int ret;

	if (!msdc_cmd_is_ready(host))
		return -EIO;

	msdc_fifo_clr(host);

	host->last_resp_type = cmd->resp_type;
	host->last_data_write = 0;

	rawcmd = msdc_cmd_prepare_raw_cmd(host, cmd, data);

	if (data)
		blocks = data->blocks;

	writel(CMD_INTS_MASK, &host->base->msdc_int);
	writel(blocks, &host->base->sdc_blk_num);
	writel(cmd->cmdarg, &host->base->sdc_arg);
	writel(rawcmd, &host->base->sdc_cmd);

	ret = readl_poll_timeout(&host->base->msdc_int, status,
				 status & CMD_INTS_MASK, 1000000);

	if (ret)
		status = MSDC_INT_CMDTMO;

	return msdc_cmd_done(host, status, cmd);
}
Example #28
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
Example #29
static void msdc_reset_hw(struct msdc_host *host)
{
	u32 reg;

	setbits_le32(&host->base->msdc_cfg, MSDC_CFG_RST);

	readl_poll_timeout(&host->base->msdc_cfg, reg,
			   !(reg & MSDC_CFG_RST), 1000000);
}
Example #30
static void msdc_fifo_clr(struct msdc_host *host)
{
	u32 reg;

	setbits_le32(&host->base->msdc_fifocs, MSDC_FIFOCS_CLR);

	readl_poll_timeout(&host->base->msdc_fifocs, reg,
			   !(reg & MSDC_FIFOCS_CLR), 1000000);
}