static int pcie_phy_power_on(struct phy *phy)
{
	struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
	int err;
	u32 value;

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
	value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
	value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN |
		 XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN |
		 XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
	value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);

	err = wait_on_timeout(50 * MSECOND,
			padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1) &
			XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET);

	return err;
}
Example #2
static int sata_phy_power_on(struct phy *phy)
{
	struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
	int err;
	u32 value;

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
	value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
	value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);

	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);

	err = wait_on_timeout(50 * MSECOND,
			padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1) &
			XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET);

	return err;
}
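Every snippet on this page polls a hardware condition through barebox's wait_on_timeout() macro, which evaluates to 0 once the condition becomes true and to -ETIMEDOUT if the deadline passes first. As a reading aid, here is a minimal sketch of such a macro, modeled on the one in barebox's include/clock.h (get_time_ns() and is_timeout() are the barebox clocksource helpers):

/* Minimal busy-poll timeout macro in the style of barebox's
 * wait_on_timeout(): spin until `condition` holds or `timeout`
 * nanoseconds have elapsed. */
#define wait_on_timeout(timeout, condition)			\
({								\
	int __ret = 0;						\
	uint64_t __start = get_time_ns();			\
								\
	while (!(condition)) {					\
		if (is_timeout(__start, (timeout))) {		\
			__ret = -ETIMEDOUT;			\
			break;					\
		}						\
	}							\
	__ret;							\
})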
Example #3
static int port_send(struct eth_device *edev, void *data, int len)
{
	struct port_priv *port = edev->priv;
	struct txdesc *txdesc = port->txdesc;
	u32 cmd_sts;
	int ret;

	/* flush transmit data */
	dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);

	txdesc->cmd_sts = TXDESC_OWNED_BY_DMA;
	txdesc->cmd_sts |= TXDESC_FIRST | TXDESC_LAST;
	txdesc->cmd_sts |= TXDESC_ZERO_PADDING | TXDESC_GEN_CRC;
	txdesc->buf_ptr = data;
	txdesc->byte_cnt = len;

	/* assign tx descriptor and issue send command */
	writel((u32)txdesc, port->regs + PORT_TCQDP(UTXQ));
	writel(BIT(UTXQ), port->regs + PORT_TQC);

	/* wait for packet transmit completion */
	ret = wait_on_timeout(TRANSFER_TIMEOUT,
		      (readl(&txdesc->cmd_sts) & TXDESC_OWNED_BY_DMA) == 0);
	dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);
	if (ret) {
		dev_err(&edev->dev, "transmit timeout\n");
		return ret;
	}

	cmd_sts = readl(&txdesc->cmd_sts);
	if ((cmd_sts & TXDESC_LAST) && (cmd_sts & TXDESC_ERROR)) {
		dev_err(&edev->dev, "transmit error %d\n",
			(cmd_sts & TXDESC_ERROR_MASK) >> TXDESC_ERROR_SHIFT);
		return -EIO;
	}

	return 0;
}
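The fields written in port_send() belong to the GbE port's DMA Tx descriptor. A hedged sketch of the layout this code assumes (field names follow the usage above; the exact ordering and widths on Orion/Kirkwood silicon are not reproduced here):

/* Illustrative Tx descriptor for the Marvell GbE port; only the
 * fields touched by port_send() matter for this sketch. */
struct txdesc {
	u32 cmd_sts;		/* ownership flag, FIRST/LAST, CRC, error status */
	u16 l4i_chk;		/* L4 checksum seed (unused above) */
	u16 byte_cnt;		/* frame length handed to the DMA */
	void *buf_ptr;		/* address of the frame data */
	struct txdesc *nxtdesc;	/* next descriptor in the ring */
};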
Example #4
static int mvebu_mdio_wait_ready(struct mdio_priv *priv)
{
	int ret = wait_on_timeout(SMI_POLL_TIMEOUT,
				  (readl(priv->regs) & SMI_BUSY) == 0);

	if (ret)
		dev_err(&priv->miibus.dev, "timeout, SMI busy for too long\n");

	return ret;
}
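A typical caller brackets the actual SMI access with this ready-wait. The sketch below is illustrative only: the opcode and field encodings (SMI_OPCODE_READ, SMI_READ_VALID, the phy/register bit positions) are assumptions, not the exact mvebu layout.

/* Hypothetical MDIO read built on mvebu_mdio_wait_ready(); the bit
 * positions in the SMI register are placeholders. */
static int mvebu_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
	struct mdio_priv *priv = bus->priv;
	u32 val;
	int ret;

	ret = mvebu_mdio_wait_ready(priv);
	if (ret)
		return ret;

	/* issue the read: phy address, register number, read opcode */
	writel(SMI_OPCODE_READ | (phy_id << 16) | (regnum << 21),
	       priv->regs);

	/* wait until the controller marks the read data as valid */
	ret = wait_on_timeout(SMI_POLL_TIMEOUT,
			      (val = readl(priv->regs)) & SMI_READ_VALID);
	if (ret)
		return ret;

	return val & 0xffff;
}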
Example #5
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	void __iomem *reg_base = cqspi->iobase;

	if (wait_on_timeout(CQSPI_TIMEOUT_MS,
		CQSPI_REG_IS_IDLE(reg_base))) {
		/* Timeout, in busy mode. */
		dev_err(cqspi->dev, "QSPI is still busy after %llums timeout.\n",
			CQSPI_TIMEOUT_MS);
		return -ETIMEDOUT;
	}

	return 0;
}
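CQSPI_REG_IS_IDLE() hides a single status bit in the controller's configuration register. A sketch of a plausible definition (the offset and bit position follow the Cadence QSPI register map used by this family of drivers and should be treated as assumptions):

/* Assumed: the idle flag is the top bit of the CONFIG register. */
#define CQSPI_REG_CONFIG		0x00
#define CQSPI_REG_CONFIG_IDLE_LSB	31

#define CQSPI_REG_IS_IDLE(base)					\
	((readl((base) + CQSPI_REG_CONFIG) >>			\
	  CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)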
Example #6
static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;

	/* Write the command to CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	if (wait_on_timeout(CQSPI_TIMEOUT_MS,
		(readl(reg_base + CQSPI_REG_CMDCTRL) &
			CQSPI_REG_CMDCTRL_INPROGRESS_MASK) == 0)) {
		dev_err(cqspi->dev, "flash command execution timed out (0x%08x)\n",
			readl(reg_base + CQSPI_REG_CMDCTRL));
		return -EIO;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}
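Callers compose the CMDCTRL value before handing it to cqspi_exec_flash_cmd(). As a hedged example, a one-byte Write Enable command might be issued as below; the opcode field position (CQSPI_REG_CMDCTRL_OPCODE_LSB = 24) is an assumption taken from the Cadence register map.

/* Hypothetical helper: issue the Write Enable command (opcode 0x06)
 * through the command controller. */
#define CQSPI_REG_CMDCTRL_OPCODE_LSB	24

static int cqspi_write_enable(struct cqspi_st *cqspi)
{
	unsigned int reg = 0x06 << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	return cqspi_exec_flash_cmd(cqspi, reg);
}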
Example #7
static int mvneta_send(struct eth_device *edev, void *data, int len)
{
	struct mvneta_port *priv = edev->priv;
	struct txdesc *txdesc = priv->txdesc;
	int ret, error, last_desc;

	/* Flush transmit data */
	dma_sync_single_for_device((unsigned long)data, len, DMA_TO_DEVICE);

	memset(txdesc, 0, sizeof(*txdesc));
	/* Fill the Tx descriptor */
	txdesc->cmd_sts = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
	txdesc->buf_ptr = (u32)data;
	txdesc->byte_cnt = len;

	/* Increase the number of prepared descriptors (one), by writing
	 * to the 'NoOfWrittenDescriptors' field in the PTXSU register.
	 */
	writel(1, priv->reg + MVNETA_TXQ_UPDATE_REG(0));

	/* The controller updates the number of transmitted descriptors in
	 * the Tx port status register (PTXS).
	 */
	ret = wait_on_timeout(TRANSFER_TIMEOUT, !mvneta_pending_tx(priv));
	dma_sync_single_for_cpu((unsigned long)data, len, DMA_TO_DEVICE);
	if (ret) {
		dev_err(&edev->dev, "transmit timeout\n");
		return ret;
	}

	last_desc = readl(&txdesc->cmd_sts) & MVNETA_TXD_L_DESC;
	error = readl(&txdesc->error);
	if (last_desc && (error & MVNETA_TXD_ERROR)) {
		dev_err(&edev->dev, "transmit error %d\n",
			(error & TXD_ERROR_MASK) >> TXD_ERROR_SHIFT);
		return -EIO;
	}

	return 0;
}
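mvneta_pending_tx() is the condition polled above; a plausible implementation reads the count of queued-but-untransmitted descriptors from the Tx queue status register. The register and mask names below are assumptions in the spirit of the mvneta register set, not the verified layout.

/* Hypothetical helper: nonzero while the queue still holds
 * descriptors the controller has not yet transmitted. */
static int mvneta_pending_tx(struct mvneta_port *priv)
{
	u32 val = readl(priv->reg + MVNETA_TXQ_STATUS_REG(0));

	return val & MVNETA_TXQ_PENDING_DESC_MASK;
}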
Example #8
static int arc_emac_send(struct eth_device *edev, void *data, int length)
{
	struct arc_emac_priv *priv = edev->priv;
	struct arc_emac_bd *bd = &priv->txbd[priv->txbd_curr];
	char txbuf[EMAC_ZLEN];
	int ret;

	/* Pad short frames to minimum length */
	if (length < EMAC_ZLEN) {
		memcpy(txbuf, data, length);
		memset(txbuf + length, 0, EMAC_ZLEN - length);
		data = txbuf;
		length = EMAC_ZLEN;
	}

	dma_flush_range((unsigned long)data, (unsigned long)data + length);

	bd->data = cpu_to_le32((u32)data);
	bd->info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | length);
	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	ret = wait_on_timeout(20 * MSECOND,
			      (arc_reg_get(priv, R_STATUS) & TXINT_MASK) != 0);

	if (ret) {
		dev_err(&edev->dev, "transmit timeout\n");
		return ret;
	}

	arc_reg_set(priv, R_STATUS, TXINT_MASK);

	priv->txbd_curr++;
	priv->txbd_curr %= TX_BD_NUM;

	return 0;
}
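For reference, the buffer descriptor this driver cycles through is roughly two 32-bit words: the FOR_EMAC bit in `info` hands ownership to the MAC, which clears it on completion. Field names follow the usage above; treat the sketch as illustrative rather than the exact hardware layout.

/* Illustrative ARC EMAC buffer descriptor: `info` packs the
 * ownership bit, FIRST/LAST flags and the frame length; `data`
 * holds the buffer address. */
struct arc_emac_bd {
	__le32 info;
	__le32 data;
};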
Example #9
static int mv_sata_probe(struct device_d *dev)
{
	struct resource *iores;
	void __iomem *base;
	struct ide_port *ide;
	u32 scontrol;
	int ret, i;

	iores = dev_request_mem_resource(dev, 0);
	if (IS_ERR(iores)) {
		dev_err(dev, "Failed to request mem resources\n");
		return PTR_ERR(iores);
	}
	base = IOMEM(iores->start);

	/* disable MBus windows */
	for (i = 0; i < 4; ++i) {
		writel(0, base + REG_WINDOW_CONTROL(i));
		writel(0, base + REG_WINDOW_BASE(i));
	}

	/* enable first window */
	writel(0x7fff0e01, base + REG_WINDOW_CONTROL(0));
	writel(0, base + REG_WINDOW_BASE(0));

	writel(REG_EDMA_COMMAND__EATARST, base + REG_EDMA_COMMAND(0));
	udelay(25);
	writel(0x0, base + REG_EDMA_COMMAND(0));

	scontrol = readl(base + REG_SCONTROL(0));
	scontrol &= ~(REG_SCONTROL__DET | REG_SCONTROL__IPM);
	/* disable power management */
	scontrol |= REG_SCONTROL__IPM__PARTIAL | REG_SCONTROL__IPM__SLUMBER;

	/* perform interface communication initialization */
	writel(scontrol | REG_SCONTROL__DET__INIT, base + REG_SCONTROL(0));
	writel(scontrol, base + REG_SCONTROL(0));

	ret = wait_on_timeout(10 * MSECOND,
			      (readl(base + REG_SSTATUS(0)) & REG_SCONTROL__DET) == (REG_SCONTROL__DET__INIT | REG_SCONTROL__DET__PHYOK));
	if (ret) {
		dev_err(dev, "Failed to wait for phy (sstatus=0x%08x)\n",
			readl(base + REG_SSTATUS(0)));
		return ret;
	}

	ide = xzalloc(sizeof(*ide));

	ide->port.dev = dev;

	ata_ioports_init(&ide->io, base + REG_ATA_BASE, base + REG_ATA_BASE,
			 NULL, 4);

	dev->priv = ide;

	ret = ide_port_register(ide);
	if (ret)
		free(ide);

	return ret;
}
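The probe polls SStatus until its DET field reads (REG_SCONTROL__DET__INIT | REG_SCONTROL__DET__PHYOK), i.e. DET == 0x3, which per the SATA specification means "device detected and phy communication established". A sketch of the encodings that comparison assumes:

/* Assumed SControl/SStatus DET field encodings (SATA convention);
 * the names mirror the macros used in mv_sata_probe(). */
#define REG_SCONTROL__DET		0x0000000f	/* device detection field */
#define REG_SCONTROL__DET__INIT		0x00000001	/* device presence detected */
#define REG_SCONTROL__DET__PHYOK	0x00000002	/* phy communication established */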
Example #10
static __maybe_unused int cqspi_indirect_write_execute(struct spi_nor *nor,
						const u8 *txbuf, unsigned n_tx)
{
	int ret;
	unsigned int reg = 0;
	struct cqspi_st *cqspi = nor->priv;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	int remaining = (int)n_tx;
	unsigned int page_size;
	unsigned int write_bytes;

	page_size = nor->page_size;

	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	writel(CQSPI_REG_SRAM_THRESHOLD_BYTES, reg_base +
	       CQSPI_REG_INDIRECTWRWATERMARK);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	cqspi->irq_mask = CQSPI_IRQ_MASK_WR;
	writel(cqspi->irq_mask, reg_base + CQSPI_REG_IRQMASK);

	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);

	/* Write a page or remaining bytes. */
	write_bytes = remaining > page_size ? page_size : remaining;
	/* Fill up the data at the beginning */
	cqspi_fifo_write(ahb_base, txbuf, write_bytes);
	txbuf += write_bytes;
	remaining -= write_bytes;

	while (remaining > 0) {
		ret = wait_on_timeout(CQSPI_READ_TIMEOUT_MS,
			readl(reg_base + CQSPI_REG_IRQSTATUS) & cqspi->irq_mask);

		/* Clear all interrupts. */
		writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

		if (ret < 0) {
			dev_err(nor->dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		write_bytes = remaining > page_size ? page_size : remaining;
		cqspi_fifo_write(ahb_base, txbuf, write_bytes);
		txbuf += write_bytes;
		remaining -= write_bytes;

		writel(cqspi->irq_mask, reg_base + CQSPI_REG_IRQMASK);
	}
	ret = wait_on_timeout(CQSPI_READ_TIMEOUT_MS,
			readl(reg_base + CQSPI_REG_IRQSTATUS) & cqspi->irq_mask);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
	if (ret < 0) {
		dev_err(nor->dev, "Indirect write timeout\n");
		ret = -ETIMEDOUT;
		goto failwr;
	}

	/* Check indirect done status */
	if (wait_on_timeout(CQSPI_TIMEOUT_MS,
		readl(reg_base + CQSPI_REG_INDIRECTWR)
			& CQSPI_REG_INDIRECTWR_DONE_MASK)) {
		reg = readl(reg_base + CQSPI_REG_INDIRECTWR);
		dev_err(nor->dev,
			"Indirect write completion error 0x%08x\n", reg);
		ret = -ETIMEDOUT;
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}
Example #11
static int cqspi_indirect_read_execute(struct spi_nor *nor,
				       u8 *rxbuf, unsigned n_rx)
{
	int ret = 0;
	unsigned int reg = 0;
	unsigned int bytes_to_read = 0;
	unsigned int watermark;
	struct cqspi_st *cqspi = nor->priv;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	int remaining = (int)n_rx;

	watermark = cqspi->fifo_depth * CQSPI_FIFO_WIDTH / 2;
	writel(watermark, reg_base + CQSPI_REG_INDIRECTRDWATERMARK);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	cqspi->irq_mask = CQSPI_IRQ_MASK_RD;
	writel(cqspi->irq_mask, reg_base + CQSPI_REG_IRQMASK);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
		reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		unsigned int irq_status;

		ret = wait_on_timeout(CQSPI_READ_TIMEOUT_MS,
			readl(reg_base + CQSPI_REG_IRQSTATUS) & cqspi->irq_mask);

		irq_status = readl(reg_base + CQSPI_REG_IRQSTATUS);
		bytes_to_read = CQSPI_GET_RD_SRAM_LEVEL(reg_base);

		/* Clear all interrupts. */
		writel(irq_status, reg_base + CQSPI_REG_IRQSTATUS);

		if (ret && bytes_to_read == 0) {
			dev_err(nor->dev, "Indirect read timeout, no bytes\n");
			ret = -ETIMEDOUT;
			goto failrd;
		}

		while (bytes_to_read != 0) {
			bytes_to_read *= CQSPI_FIFO_WIDTH;
			bytes_to_read = bytes_to_read > remaining
					? remaining : bytes_to_read;
			cqspi_fifo_read(rxbuf, ahb_base, bytes_to_read);
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = CQSPI_GET_RD_SRAM_LEVEL(reg_base);
		}
	}

	/* Check indirect done status */
	if (wait_on_timeout(CQSPI_TIMEOUT_MS,
		readl(reg_base + CQSPI_REG_INDIRECTRD) &
			CQSPI_REG_INDIRECTRD_DONE_MASK)) {
		reg = readl(reg_base + CQSPI_REG_INDIRECTRD);
		dev_err(nor->dev,
			"Indirect read completion error 0x%08x\n", reg);
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}
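CQSPI_GET_RD_SRAM_LEVEL() reports how many FIFO words are ready to be drained; the loop above converts that level to bytes via CQSPI_FIFO_WIDTH. A sketch of a plausible definition (the SRAM-fill register offset and field layout follow the Cadence driver and are assumptions here):

/* Assumed: the read-partition fill level lives in the low bits of
 * the SRAM fill register. */
#define CQSPI_REG_SDRAMLEVEL		0x2c
#define CQSPI_REG_SDRAMLEVEL_RD_LSB	0
#define CQSPI_REG_SDRAMLEVEL_RD_MASK	0xffff

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)			\
	((readl((reg_base) + CQSPI_REG_SDRAMLEVEL) >>		\
	  CQSPI_REG_SDRAMLEVEL_RD_LSB) &			\
	 CQSPI_REG_SDRAMLEVEL_RD_MASK)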