Example #1
/*
 * Remove NAND device.
 */
static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
{
	u32 tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	nand_release(mtd);

	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);
	host->dmach = -1;

	dma_free_coherent(&pdev->dev, host->dma_buf_len,
		host->data_buf, host->data_buf_dma);

	/* Force CE high */
	tmp = __raw_readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CTRL(host->io_base));

	lpc32xx_wp_enable(host);
	clk_disable(host->clk);
	clk_put(host->clk);

	iounmap(host->io_base);

	kfree(host);

	return 0;
}
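For context, a remove callback like this is normally wired into the platform driver table. Below is a minimal registration sketch; the probe routine name and the "lpc32xx-nand" device name are assumptions, not taken from the example. The __devexit_p() wrapper matches the __devexit annotation used above.

/* Hypothetical registration sketch; probe and driver names are assumed */
static struct platform_driver lpc32xx_nand_driver = {
	.probe	= lpc32xx_nand_probe,
	.remove	= __devexit_p(lpc32xx_nand_remove),
	.driver	= {
		.name	= "lpc32xx-nand",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(lpc32xx_nand_driver);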
Example #2
static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	u32 clkrate, tmp;

	/* Reset SLC controller */
	__raw_writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
	udelay(1000);

	/* Basic setup */
	__raw_writel(0, SLC_CFG(host->io_base));
	__raw_writel(0, SLC_IEN(host->io_base));
	__raw_writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
		SLC_ICR(host->io_base));

	/* Get base clock for SLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = LPC32XX_DEF_BUS_RATE;

	/* Compute clock setup values */
	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
		SLCTAC_WWIDTH(1 + (clkrate / host->ncfg->wwidth)) |
		SLCTAC_WHOLD(1 + (clkrate / host->ncfg->whold)) |
		SLCTAC_WSETUP(1 + (clkrate / host->ncfg->wsetup)) |
		SLCTAC_RDR(host->ncfg->rdr_clks) |
		SLCTAC_RWIDTH(1 + (clkrate / host->ncfg->rwidth)) |
		SLCTAC_RHOLD(1 + (clkrate / host->ncfg->rhold)) |
		SLCTAC_RSETUP(1 + (clkrate / host->ncfg->rsetup));
	__raw_writel(tmp, SLC_TAC(host->io_base));
}
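To make the timing arithmetic concrete: the `1 + (clkrate / ...)` pattern converts each requested timing value into a number of bus-clock cycles, and the `+ 1` rounds the integer division up so the resulting pulse is never shorter than requested. For example, assuming (hypothetically) clkrate = 104000000 and wwidth = 14666667 (the division implies the timing fields act as target frequencies), 104000000 / 14666667 = 7, so SLCTAC_WWIDTH is programmed with 8 cycles, i.e. a write pulse of roughly 77 ns at 104 MHz.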
Example #3
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	u32 tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Force CE high */
	tmp = __raw_readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable(host->clk);

	return 0;
}
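A resume counterpart would undo these steps in reverse. The sketch below is an assumption built from the helpers already shown: lpc32xx_wp_disable() is presumed to exist as the inverse of lpc32xx_wp_enable(), and lpc32xx_nand_setup() (Example #2) is re-run on the assumption that the controller loses its configuration while the clock is off.

/* Hypothetical resume path; lpc32xx_wp_disable() is an assumed inverse
 * of lpc32xx_wp_enable() */
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Re-enable the NAND clock */
	clk_enable(host->clk);

	/* Fresh init of the NAND controller (see Example #2) */
	lpc32xx_nand_setup(host);

	/* Exit write protect */
	lpc32xx_wp_disable(host);

	return 0;
}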
Example #4
/*
 * Remove NAND device.
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	nand_release(mtd);
	dma_release_channel(host->dma_chan);

	/* Force CE high */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	clk_disable(host->clk);
	lpc32xx_wp_enable(host);

	return 0;
}
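The dma_release_channel() call implies a matching dmaengine channel request at probe time. A minimal sketch, assuming the filter callback arrives through platform data (host->pdata->dma_filter is an assumed field, as is the "nand-slc" filter parameter):

/* Hypothetical probe-side counterpart to dma_release_channel() above */
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan)
		return -EBUSY;

	return 0;
}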
Example #5
/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_dma_xfer(struct mtd_info *mtd, uint8_t *buf,
	int eccsubpages, int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	uint32_t config, tmpreg;
	dma_addr_t buf_phy;
	int i, timeout, dma_mapped = 0, status = 0;

	/* Map DMA buffer */
	if (likely((void *)buf < high_memory)) {
		buf_phy = dma_map_single(mtd->dev.parent, buf, mtd->writesize,
			read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(mtd->dev.parent, buf_phy))) {
			dev_err(mtd->dev.parent,
				"Unable to map DMA buffer\n");
			dma_mapped = 0;
		} else {
			dma_mapped = 1;
		}
	}

	/* If a buffer can't be mapped, use the local buffer */
	if (!dma_mapped) {
		buf_phy = host->data_buf_dma;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read)
		config = DMAC_CHAN_ITC | DMAC_CHAN_IE | DMAC_CHAN_FLOW_D_P2M |
			DMAC_DEST_PERIP(0) |
			DMAC_SRC_PERIP(DMA_PERID_NAND1) | DMAC_CHAN_ENABLE;
	else
		config = DMAC_CHAN_ITC | DMAC_CHAN_IE | DMAC_CHAN_FLOW_D_M2P |
			DMAC_DEST_PERIP(DMA_PERID_NAND1) |
			DMAC_SRC_PERIP(0) | DMAC_CHAN_ENABLE;

	/* DMA mode with ECC enabled */
	tmpreg = __raw_readl(SLC_CFG(host->io_base));
	__raw_writel(SLCCFG_ECC_EN | SLCCFG_DMA_ECC | tmpreg,
		SLC_CFG(host->io_base));

	/* Clear initial ECC */
	__raw_writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Prepare DMA descriptors */
	lpc32xx_nand_dma_configure(mtd, buf_phy, chip->ecc.steps, read);

	/* Setup DMA direction and burst mode */
	if (read)
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) |
			SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	else
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) &
			~SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	__raw_writel(__raw_readl(SLC_CFG(host->io_base)) | SLCCFG_DMA_BURST,
		SLC_CFG(host->io_base));

	/* Transfer size is data area only */
	__raw_writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
		SLC_CTRL(host->io_base));

	/* Start DMA to process NAND controller DMA FIFO */
	host->dmapending = 0;
	lpc32xx_dma_start_xfer(host->dmach, config);

	/*
	 * On some systems, the DMA transfer will be very fast, so there is no
	 * point in waiting for the transfer to complete using the interrupt
	 * method. It's best to just poll the transfer here to prevent several
	 * costly context changes. This is especially true for systems that
	 * use small page devices or NAND devices with very fast access.
	 */
	if (host->ncfg->polled_completion) {
		timeout = LPC32XX_DMA_SIMPLE_TIMEOUT;
		while ((timeout > 0) && lpc32xx_dma_is_active(host->dmach))
			timeout--;
		if (timeout == 0) {
			dev_err(mtd->dev.parent,
				"DMA transfer timeout error\n");
			status = -EIO;

			/* Switch to non-polled mode */
			host->ncfg->polled_completion = false;
		}
	}

	if (!host->ncfg->polled_completion) {
		/* Wait till DMA transfer is done or timeout occurs */
		wait_event_timeout(host->dma_waitq, host->dmapending,
			msecs_to_jiffies(LPC32XX_DMA_WAIT_TIMEOUT_MS));
		if (host->dma_xfer_status != 0) {
			dev_err(mtd->dev.parent, "DMA transfer error\n");
			status = -EIO;
		}
	}

	/*
	 * The DMA is finished, but the NAND controller may still have
	 * buffered data. Wait until all the data is sent.
	 */
	timeout = LPC32XX_DMA_SIMPLE_TIMEOUT;
	while ((__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO)
		&& (timeout > 0))
		timeout--;
	if (timeout == 0) {
		dev_err(mtd->dev.parent, "FIFO held data too long\n");
		status = -EIO;
	}

	/* Read last calculated ECC value */
	if (read) {
		host->ecc_buf[chip->ecc.steps - 1] =
			__raw_readl(SLC_ECC(host->io_base));
	} else {
		for (i = 0; i < LPC32XX_DMA_ECC_REP_READ; i++)
			host->ecc_buf[chip->ecc.steps - 1] =
				__raw_readl(SLC_ECC(host->io_base));
	}

	/*
	 * For reads, get the OOB data. For writes, the data will be written
	 * later.
	 */
	if (read)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Flush DMA link list */
	lpc32xx_dma_flush_llist(host->dmach);

	if (__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
		__raw_readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	if (dma_mapped)
		dma_unmap_single(mtd->dev.parent, buf_phy, mtd->writesize,
			read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	else if (read)
		memcpy(buf, host->data_buf, mtd->writesize);

	/* Stop DMA & HW ECC */
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) &
		~SLCCTRL_DMA_START, SLC_CTRL(host->io_base));
	__raw_writel(tmpreg, SLC_CFG(host->io_base));

	return status;
}
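Example #5 waits on host->dma_waitq for host->dmapending to be set, so the legacy LPC32xx DMA API must invoke some completion handler. Below is a sketch of what that handler could look like; the callback signature and the DMA_TC_INT terminal-count flag are assumptions about the platform's private DMA API, not taken from the example.

/* Hypothetical legacy-DMA completion handler feeding the waitqueue above */
static void lpc32xx_nand_dma_irq(int channel, int cause,
				 struct lpc32xx_nand_host *host)
{
	/* Terminal count means success; anything else is reported as an error */
	host->dma_xfer_status = (cause & DMA_TC_INT) ? 0 : 1;
	host->dmapending = 1;
	wake_up(&host->dma_waitq);
}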
Example #6
/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
			SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to using the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10);
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
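Example #6 relies on lpc32xx_xmit_dma(), which is not shown. The sketch below illustrates how such a helper can be built on the dmaengine API; the host->dma_slave_config, host->sgl and host->comp fields, the completion callback, and the 1000 ms timeout are assumptions, not taken from the example.

/* Assumed completion hook: wakes the waiter in lpc32xx_xmit_dma() */
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;

	/* Point the slave config at the SLC data/ECC register for this
	 * transfer (host->dma_slave_config is an assumed field) */
	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config))
		return -ENXIO;

	/* Wrap the buffer in a single-entry scatterlist and map it */
	sg_init_one(&host->sgl, mem, len);
	if (dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
		       DMA_BIDIRECTIONAL) != 1)
		return -ENXIO;

	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc)
		goto unmap;

	/* Block until the channel signals completion; a production version
	 * should also check for timeout here */
	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

unmap:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return desc ? 0 : -ENXIO;
}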