Example #1
/*
 * Configure DMA descriptors and enable DMA channel for data and ECC reads
 */
static void lpc32xx_nand_dma_configure(struct mtd_info *mtd,
	dma_addr_t databuf, int eccsubpages, int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	uint32_t ecc_ctrl, *ecc_buf;
	uint32_t dataaddr, data_ctrl;
	int i;

	/* DMA buffer pointer for calculated ECC values */
	ecc_buf = (uint32_t *)host->ecc_buf_dma;

	/* Control word for the per-subpage ECC read descriptor entries */
	ecc_ctrl = DMAC_CHAN_SRC_BURST_1 |
		DMAC_CHAN_DEST_BURST_1 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1;

	/* data descriptor entry for reading/writing data */
	data_ctrl = ((mtd->writesize / eccsubpages) / 4) |
		DMAC_CHAN_SRC_BURST_4 |
		DMAC_CHAN_DEST_BURST_4 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1;

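	/*
	 * The ECC transfer size differs by direction: reads fetch a single
	 * ECC word per subpage, while writes read the ECC register
	 * LPC32XX_DMA_ECC_REP_READ times, presumably to give the hardware
	 * time to finish the calculation (an assumption, not documented here).
	 */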
	if (read) {
		data_ctrl |= DMAC_CHAN_DEST_AUTOINC;
		ecc_ctrl |= DMAC_CHAN_TRANSFER_SIZE(1);
	} else {
		data_ctrl |= DMAC_CHAN_SRC_AUTOINC;
		ecc_ctrl |= DMAC_CHAN_TRANSFER_SIZE(LPC32XX_DMA_ECC_REP_READ);
	}

	/*
	 * Only the data areas and the per-subpage hardware ECC values are
	 * transferred here. The last hardware ECC word and the OOB area
	 * are transferred later.
	 */
	dataaddr = (uint32_t)databuf;

	for (i = 0; i < eccsubpages; i++) {
		if ((i == (eccsubpages - 1)) &&
			(!host->ncfg->polled_completion))
			data_ctrl |= DMAC_CHAN_INT_TC_EN;

		if (read)
			lpc32xx_dma_queue_llist(host->dmach,
				(void *)SLC_DMA_DATA(host->io_base_dma),
				(void *)dataaddr, -1, data_ctrl);
		else
			lpc32xx_dma_queue_llist(host->dmach, (void *)dataaddr,
				(void *)SLC_DMA_DATA(host->io_base_dma), -1,
				data_ctrl);

		dataaddr += (uint32_t)chip->ecc.size;

		if (i != (eccsubpages - 1)) {
			lpc32xx_dma_queue_llist(host->dmach,
				(void *)SLC_ECC(host->io_base_dma),
				(void *)ecc_buf, -1, ecc_ctrl);
			ecc_buf++;
		}
	}
}
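
The helper above only queues descriptors; something still has to start the
channel and wait for completion. Below is a minimal sketch of that read path,
assuming hypothetical lpc32xx_dma_start_xfer() and lpc32xx_dma_wait_status()
helpers in place of the platform DMA API (the real driver completes via
interrupt or polling, depending on host->ncfg->polled_completion):

static int lpc32xx_nand_read_page_dma(struct mtd_info *mtd,
	struct nand_chip *chip, dma_addr_t databuf)
{
	struct lpc32xx_nand_host *host = chip->priv;

	/* Queue one data descriptor (plus an ECC read) per ECC subpage */
	lpc32xx_nand_dma_configure(mtd, databuf, chip->ecc.steps, 1);

	/* Hypothetical helpers: kick the channel, wait for terminal count */
	lpc32xx_dma_start_xfer(host->dmach, 0);
	return lpc32xx_dma_wait_status(host->dmach);
}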
Example #2
/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

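	/*
	 * Lowmem (directly mapped) buffers can be handed to the DMA engine
	 * as-is; anything else (e.g. highmem or vmalloc addresses) is
	 * bounced through the driver's DMA-safe buffer, copied here on
	 * writes and after the transfer on reads.
	 */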
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
			SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA may be finished here while the NAND
	 * controller still holds buffered data. After porting to the
	 * dmaengine DMA driver (amba-pl080), the DMA_FIFO-empty condition
	 * appears to always hold in testing; the check is kept for safety.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
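
For context, here is a minimal sketch of how lpc32xx_xfer() could sit inside
an ecc.read_page implementation. lpc32xx_ecc_correct() is a hypothetical
stand-in for the driver's ECC comparison/correction step, and the real driver
also handles the OOB layout details:

static int lpc32xx_nand_read_page(struct mtd_info *mtd,
				  struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct lpc32xx_nand_host *host = chip->priv;
	int ret;

	/* DMA the data area in, collecting one ECC word per subpage */
	ret = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
	if (ret)
		return ret;

	/* Read the stored OOB bytes, then correct against host->ecc_buf */
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return lpc32xx_ecc_correct(mtd, buf, host->ecc_buf); /* hypothetical */
}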