/*
 * DMA read/write transfers with ECC support
 */
static int lpc32xx_dma_xfer(struct mtd_info *mtd, uint8_t *buf,
			    int eccsubpages, int read)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	uint32_t config, tmpreg;
	dma_addr_t buf_phy;
	int i, timeout, dma_mapped = 0, status = 0;

	/* Map DMA buffer */
	if (likely((void *) buf < high_memory)) {
		buf_phy = dma_map_single(mtd->dev.parent, buf, mtd->writesize,
					 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(mtd->dev.parent, buf_phy))) {
			dev_err(mtd->dev.parent,
				"Unable to map DMA buffer\n");
			dma_mapped = 0;
		} else
			dma_mapped = 1;
	}

	/* If a buffer can't be mapped, use the local buffer */
	if (!dma_mapped) {
		buf_phy = host->data_buf_dma;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	if (read)
		config = DMAC_CHAN_ITC | DMAC_CHAN_IE |
			DMAC_CHAN_FLOW_D_P2M |
			DMAC_DEST_PERIP(0) |
			DMAC_SRC_PERIP(DMA_PERID_NAND1) | DMAC_CHAN_ENABLE;
	else
		config = DMAC_CHAN_ITC | DMAC_CHAN_IE |
			DMAC_CHAN_FLOW_D_M2P |
			DMAC_DEST_PERIP(DMA_PERID_NAND1) |
			DMAC_SRC_PERIP(0) | DMAC_CHAN_ENABLE;

	/* DMA mode with ECC enabled */
	tmpreg = __raw_readl(SLC_CFG(host->io_base));
	__raw_writel(SLCCFG_ECC_EN | SLCCFG_DMA_ECC | tmpreg,
		     SLC_CFG(host->io_base));

	/* Clear initial ECC */
	__raw_writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Prepare DMA descriptors */
	lpc32xx_nand_dma_configure(mtd, buf_phy, chip->ecc.steps, read);

	/* Setup DMA direction and burst mode */
	if (read)
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) |
			     SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	else
		__raw_writel(__raw_readl(SLC_CFG(host->io_base)) &
			     ~SLCCFG_DMA_DIR, SLC_CFG(host->io_base));
	__raw_writel(__raw_readl(SLC_CFG(host->io_base)) | SLCCFG_DMA_BURST,
		     SLC_CFG(host->io_base));

	/* Transfer size is data area only */
	__raw_writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
		     SLC_CTRL(host->io_base));

	/* Start DMA to process NAND controller DMA FIFO */
	host->dmapending = 0;
	lpc32xx_dma_start_xfer(host->dmach, config);

	/*
	 * On some systems, the DMA transfer will be very fast, so there is no
	 * point in waiting for the transfer to complete using the interrupt
	 * method. It's best to just poll the transfer here to prevent several
	 * costly context changes. This is especially true for systems that
	 * use small page devices or NAND devices with very fast access.
	 */
	if (host->ncfg->polled_completion) {
		timeout = LPC32XX_DMA_SIMPLE_TIMEOUT;
		while ((timeout > 0) && lpc32xx_dma_is_active(host->dmach))
			timeout--;
		if (timeout == 0) {
			dev_err(mtd->dev.parent,
				"DMA transfer timeout error\n");
			status = -EIO;

			/* Switch to non-polled mode */
			host->ncfg->polled_completion = false;
		}
	}

	if (!host->ncfg->polled_completion) {
		/* Wait till DMA transfer is done or timeout occurs */
		wait_event_timeout(host->dma_waitq, host->dmapending,
				   msecs_to_jiffies(LPC32XX_DMA_WAIT_TIMEOUT_MS));
		if (host->dma_xfer_status != 0) {
			dev_err(mtd->dev.parent, "DMA transfer error\n");
			status = -EIO;
		}
	}

	/*
	 * The DMA is finished, but the NAND controller may still have
	 * buffered data. Wait until all the data is sent.
	 */
	timeout = LPC32XX_DMA_SIMPLE_TIMEOUT;
	while ((__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
	       (timeout > 0))
		timeout--;
	if (timeout == 0) {
		dev_err(mtd->dev.parent, "FIFO held data too long\n");
		status = -EIO;
	}

	/* Read last calculated ECC value */
	if (read)
		host->ecc_buf[chip->ecc.steps - 1] =
			__raw_readl(SLC_ECC(host->io_base));
	else {
		for (i = 0; i < LPC32XX_DMA_ECC_REP_READ; i++)
			host->ecc_buf[chip->ecc.steps - 1] =
				__raw_readl(SLC_ECC(host->io_base));
	}

	/*
	 * For reads, get the OOB data. For writes, the data will be written
	 * later
	 */
	if (read)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	/* Flush DMA link list */
	lpc32xx_dma_flush_llist(host->dmach);

	if (__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    __raw_readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	if (dma_mapped)
		dma_unmap_single(mtd->dev.parent, buf_phy, mtd->writesize,
				 read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	else if (read)
		memcpy(buf, host->data_buf, mtd->writesize);

	/* Stop DMA & HW ECC */
	__raw_writel(__raw_readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
		     SLC_CTRL(host->io_base));
	__raw_writel(tmpreg, SLC_CFG(host->io_base));

	return status;
}
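/*
 * For context, a minimal sketch (not part of this driver) of how a
 * page-read path could drive lpc32xx_dma_xfer() above: issue the read
 * command, DMA the data area with hardware ECC, then post-process the ECC
 * results. The function name lpc32xx_read_page_sketch is a made-up
 * illustration; only lpc32xx_dma_xfer() and the standard nand_chip
 * callbacks are taken from the code above.
 */
static int lpc32xx_read_page_sketch(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    uint8_t *buf, int page)
{
	int ret;

	/* Point the device at the requested page */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/*
	 * Pull the data area in via DMA; as a side effect of the transfer
	 * above, the hardware ECC values land in host->ecc_buf[] and the
	 * OOB bytes in chip->oob_poi
	 */
	ret = lpc32xx_dma_xfer(mtd, buf, chip->ecc.steps, 1);
	if (ret)
		return ret;

	/*
	 * A real read path would now compare the computed ECC against the
	 * ECC bytes stored in the OOB area and correct any flipped bits
	 */
	return 0;
}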
static int lpc3xxx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *rtd = substream->runtime;
	struct lpc3xxx_dma_data *prtd = rtd->private_data;
	int i, ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* Restart at the base of the DMA buffer */
		prtd->period_ptr = prtd->dma_cur = prtd->dma_buffer;
		lpc32xx_dma_flush_llist(prtd->dmach);

		/* Queue a few buffers to start DMA */
		for (i = 0; i < NUMLINKS; i++) {
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
				/* Playback: memory -> I2S TX FIFO */
				lpc32xx_dma_queue_llist_entry(prtd->dmach,
					(void *) prtd->period_ptr,
#if defined(CONFIG_SND_LPC32XX_USEI2S1)
					(void *) I2S_TX_FIFO(LPC32XX_I2S1_BASE),
#else
					(void *) I2S_TX_FIFO(LPC32XX_I2S0_BASE),
#endif
					prtd->period_size);
			} else {
				/* Capture: I2S RX FIFO -> memory */
				lpc32xx_dma_queue_llist_entry(prtd->dmach,
#if defined(CONFIG_SND_LPC32XX_USEI2S1)
					(void *) I2S_RX_FIFO(LPC32XX_I2S1_BASE),
#else
					(void *) I2S_RX_FIFO(LPC32XX_I2S0_BASE),
#endif
					(void *) prtd->period_ptr,
					prtd->period_size);
			}
			prtd->period_ptr += prtd->period_size;
		}
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		/* Drop any queued link list entries and stop the channel */
		lpc32xx_dma_flush_llist(prtd->dmach);
		lpc32xx_dma_ch_disable(prtd->dmach);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
		break;

	case SNDRV_PCM_TRIGGER_RESUME:
		break;

	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* Pause (1) or resume (0) the DMA channel */
		lpc32xx_dma_ch_pause_unpause(prtd->dmach, 1);
		break;

	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		lpc32xx_dma_ch_pause_unpause(prtd->dmach, 0);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
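/*
 * For context, a minimal sketch (not part of this driver) of the
 * companion DMA callback that keeps the link list queued by the trigger
 * above topped up. The callback name and signature, the dma_buffer_end
 * field and the playback-only/I2S0-only wiring are illustrative
 * assumptions; the other prtd fields, lpc32xx_dma_queue_llist_entry()
 * and the standard ALSA snd_pcm_period_elapsed() call are as used above.
 */
static void lpc3xxx_pcm_dma_callback_sketch(int channel, int cause,
					    void *data)
{
	struct snd_pcm_substream *substream = data;
	struct snd_pcm_runtime *rtd = substream->runtime;
	struct lpc3xxx_dma_data *prtd = rtd->private_data;

	/* One period has drained; advance and wrap the queue pointer */
	prtd->period_ptr += prtd->period_size;
	if (prtd->period_ptr >= prtd->dma_buffer_end)	/* assumed field */
		prtd->period_ptr = prtd->dma_buffer;

	/* Re-queue a fresh period so the DMA link list never runs dry */
	lpc32xx_dma_queue_llist_entry(prtd->dmach,
				      (void *) prtd->period_ptr,
				      (void *) I2S_TX_FIFO(LPC32XX_I2S0_BASE),
				      prtd->period_size);

	/* Let ALSA know a period completed so user space can refill it */
	snd_pcm_period_elapsed(substream);
}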