/* * Get DMA channel and allocate DMA descriptors memory. * Prepare DMA descriptors link lists */ static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host, int num_entries) { int ret = 0; host->dmach = DMA_CH_SLCNAND; host->dmacfg.ch = DMA_CH_SLCNAND; /* * All the DMA configuration parameters will * be overwritten in lpc32xx_nand_dma_configure(). */ host->dmacfg.tc_inten = 1; host->dmacfg.err_inten = 1; host->dmacfg.src_size = 4; host->dmacfg.src_inc = 1; host->dmacfg.src_ahb1 = 1; host->dmacfg.src_bsize = DMAC_CHAN_SRC_BURST_4; host->dmacfg.src_prph = 0; host->dmacfg.dst_size = 4; host->dmacfg.dst_inc = 0; host->dmacfg.dst_bsize = DMAC_CHAN_DEST_BURST_4; host->dmacfg.dst_ahb1 = 0; host->dmacfg.dst_prph = DMAC_DEST_PERIP(DMA_PERID_NAND1); host->dmacfg.flowctrl = DMAC_CHAN_FLOW_D_M2P; if (lpc32xx_dma_ch_get(&host->dmacfg, LPC32XX_MODNAME, &lpc3xxx_nand_dma_irq, host) < 0) { dev_err(host->mtd.dev.parent, "Error setting up SLC NAND " "DMA channel\n"); ret = -ENODEV; goto dma_ch_err; } /* * Allocate Linked list of DMA Descriptors */ host->llptr = lpc32xx_dma_alloc_llist(host->dmach, num_entries); if (host->llptr == 0) { lpc32xx_dma_ch_put(host->dmach); host->dmach = -1; dev_err(host->mtd.dev.parent, "Error allocating list buffer for SLC NAND\n"); ret = -ENOMEM; goto dma_alloc_err; } return ret; dma_alloc_err: lpc32xx_dma_ch_put(host->dmach); dma_ch_err: return ret; }
/*
 * Remove NAND device.
 *
 * Teardown mirrors probe in reverse order: unregister the MTD device,
 * release DMA resources, free the coherent work buffer, quiesce the
 * controller, then drop clock/IO mappings and free the host structure.
 */
static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
{
	u32 tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &host->mtd;

	/* Unregister the MTD/NAND device first so no new I/O can start */
	nand_release(mtd);

	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);
	host->dmach = -1; /* mark channel as released */

	/* Free the coherent data/ECC work buffer allocated in probe */
	dma_free_coherent(&pdev->dev, host->dma_buf_len, host->data_buf,
			  host->data_buf_dma);

	/* Force CE high */
	tmp = __raw_readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	__raw_writel(tmp, SLC_CTRL(host->io_base));

	/* Re-assert write protect, then stop the clock and unmap */
	lpc32xx_wp_enable(host);
	clk_disable(host->clk);
	clk_put(host->clk);
	iounmap(host->io_base);

	kfree(host);

	return 0;
}
/*
 * Release the DMA channel and descriptor list owned by this substream.
 * A channel id of -1 means nothing was ever allocated, so there is
 * nothing to do.
 */
static int lpc3xxx_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct lpc3xxx_dma_data *prtd = substream->runtime->private_data;

	/* Return the DMA channel */
	if (prtd->dmach == -1)
		return 0;

	lpc32xx_dma_ch_disable(prtd->dmach);
	lpc32xx_dma_dealloc_llist(prtd->dmach);
	lpc32xx_dma_ch_put(prtd->dmach);
	prtd->dmach = -1;

	return 0;
}
/*
 * Probe for NAND controller.
 *
 * Maps the controller registers, sets up the NAND chip callbacks for
 * the SLC hardware (syndrome ECC), allocates a coherent DMA work
 * buffer, claims a DMA channel, scans for the FLASH device and
 * registers partitions.  All acquired resources are unwound in reverse
 * order on failure via the err_exit* labels.
 */
static int __init lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev,"No memory resource found for"
			" device\n");
		return -ENXIO;
	}

	/* Allocate memory for the device structure (and zero it) */
	host = kzalloc(sizeof(struct lpc32xx_nand_host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev,"failed to allocate device structure\n");
		return -ENOMEM;
	}

	host->io_base_dma = (dma_addr_t) rc->start;
	host->io_base = ioremap(rc->start, rc->end - rc->start + 1);
	if (host->io_base == NULL) {
		dev_err(&pdev->dev,"ioremap failed\n");
		res = -EIO;
		goto err_exit1;
	}

	host->ncfg = pdev->dev.platform_data;
	if (!host->ncfg) {
		dev_err(&pdev->dev,"Missing platform data\n");
		res = -ENOENT;
		/*
		 * io_base is already mapped, so it must be unmapped here.
		 * Jump past lpc32xx_wp_enable() (it needs ncfg, which is
		 * NULL on this path).  The old code went straight to
		 * err_exit1 and leaked the ioremap mapping.
		 */
		goto err_exit_unmap;
	}

	mtd = &host->mtd;
	chip = &host->nand_chip;
	chip->priv = host;
	mtd->priv = chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, "nand_ck");
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev,"Clock failure\n");
		res = -ENOENT;
		goto err_exit2;
	}
	clk_enable(host->clk);

	/* Set NAND IO addresses and command/ready functions */
	chip->IO_ADDR_R = SLC_DATA(host->io_base);
	chip->IO_ADDR_W = SLC_DATA(host->io_base);
	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->dev_ready = lpc32xx_nand_device_ready;
	chip->chip_delay = 20; /* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);
	lpc32xx_wp_disable(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->read_byte = lpc32xx_nand_read_byte;
	chip->read_buf = lpc32xx_nand_read_buf;
	chip->write_buf = lpc32xx_nand_write_buf;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
	chip->verify_buf = lpc32xx_verify_buf;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = dma_alloc_coherent(&pdev->dev, host->dma_buf_len,
					    &host->data_buf_dma, GFP_KERNEL);
	if (host->data_buf == NULL) {
		dev_err(&pdev->dev, "Error allocating memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	/* Get free DMA channel and alloc DMA descriptor link list */
	res = lpc32xx_nand_dma_setup(host, LPC32XX_MAX_DMA_DESCRIPTORS);
	if (res) {
		/*
		 * Propagate the setup error (-ENODEV/-ENOMEM) instead of
		 * clobbering it with -EIO as the old code did.
		 */
		goto err_exit4;
	}

	init_waitqueue_head(&host->dma_waitq);

	/* Find NAND device */
	if (nand_scan_ident(mtd, 1)) {
		res = -ENXIO;
		goto err_exit5;
	}

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf_dma = host->data_buf_dma + LPC32XX_DMA_DATA_SIZE;
	host->ecc_buf = (uint32_t *) (host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		chip->ecc.layout = &lpc32xx_nand_oob_16;

	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = chip->ecc.postpad = 0;

	/* Avoid extra scan if using BBT, setup BBT support */
	if (host->ncfg->use_bbt) {
		chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;

		/*
		 * Use a custom BBT marker setup for small page FLASH that
		 * won't interfere with the ECC layout. Large and huge page
		 * FLASH use the standard layout.
		 */
		if (mtd->writesize <= 512) {
			chip->bbt_td = &bbt_smallpage_main_descr;
			chip->bbt_md = &bbt_smallpage_mirror_descr;
		}
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit5;
	}

	/* Standard layout in FLASH for bad block tables */
	if (host->ncfg->use_bbt) {
		if (nand_default_bbt(mtd) < 0)
			dev_err(&pdev->dev, "Error initializing default bad"
				" block tables\n");
	}

	res = lpc32xx_add_partitions(host);
	if (!res)
		return res;

	nand_release(mtd);

err_exit5:
	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);
	host->dmach = -1;
err_exit4:
	dma_free_coherent(&pdev->dev, host->dma_buf_len, host->data_buf,
			  host->data_buf_dma);
err_exit3:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit2:
	lpc32xx_wp_enable(host);
err_exit_unmap:
	iounmap(host->io_base);
err_exit1:
	kfree(host);

	return res;
}
static int lpc3xxx_pcm_prepare(struct snd_pcm_substream *substream) { struct lpc3xxx_dma_data *prtd = substream->runtime->private_data; /* Setup DMA channel */ if (prtd->dmach == -1) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { prtd->dmach = DMA_CH_I2S_TX; prtd->dmacfg.ch = DMA_CH_I2S_TX; prtd->dmacfg.tc_inten = 1; prtd->dmacfg.err_inten = 1; prtd->dmacfg.src_size = 4; prtd->dmacfg.src_inc = 1; #ifdef CONFIG_ARM_LPC32XX prtd->dmacfg.src_ahb1 = 1; #endif prtd->dmacfg.src_bsize = DMAC_CHAN_SRC_BURST_4; prtd->dmacfg.src_prph = 0; prtd->dmacfg.dst_size = 4; prtd->dmacfg.dst_inc = 0; prtd->dmacfg.dst_bsize = DMAC_CHAN_DEST_BURST_4; #ifdef CONFIG_ARM_LPC32XX prtd->dmacfg.dst_ahb1 = 0; #endif #if defined(CONFIG_SND_LPC32XX_USEI2S1) prtd->dmacfg.dst_prph = DMAC_DEST_PERIP(DMA_PERID_I2S1_DMA1); #else prtd->dmacfg.dst_prph = DMAC_DEST_PERIP(DMA_PERID_I2S0_DMA1); #endif prtd->dmacfg.flowctrl = DMAC_CHAN_FLOW_D_M2P; if (lpc32xx_dma_ch_get(&prtd->dmacfg, "dma_i2s_tx", &lpc3xxx_pcm_dma_irq, substream) < 0) { pr_debug(KERN_ERR "Error setting up I2S TX DMA channel\n"); return -ENODEV; } /* Allocate a linked list for audio buffers */ prtd->llptr = lpc32xx_dma_alloc_llist(prtd->dmach, NUMLINKS); if (prtd->llptr == 0) { lpc32xx_dma_ch_put(prtd->dmach); prtd->dmach = -1; pr_debug(KERN_ERR "Error allocating list buffer (I2S TX)\n"); return -ENOMEM; } } else { prtd->dmach = DMA_CH_I2S_RX; prtd->dmacfg.ch = DMA_CH_I2S_RX; prtd->dmacfg.tc_inten = 1; prtd->dmacfg.err_inten = 1; prtd->dmacfg.src_size = 4; prtd->dmacfg.src_inc = 0; #ifdef CONFIG_ARM_LPC32XX prtd->dmacfg.src_ahb1 = 1; #endif prtd->dmacfg.src_bsize = DMAC_CHAN_SRC_BURST_4; #if defined(CONFIG_SND_LPC32XX_USEI2S1) prtd->dmacfg.src_prph = DMAC_SRC_PERIP(DMA_PERID_I2S1_DMA0); #else prtd->dmacfg.src_prph = DMAC_SRC_PERIP(DMA_PERID_I2S0_DMA0); #endif prtd->dmacfg.dst_size = 4; prtd->dmacfg.dst_inc = 1; #ifdef CONFIG_ARM_LPC32XX prtd->dmacfg.dst_ahb1 = 0; #endif prtd->dmacfg.dst_bsize = DMAC_CHAN_DEST_BURST_4; 
prtd->dmacfg.dst_prph = 0; prtd->dmacfg.flowctrl = DMAC_CHAN_FLOW_D_P2M; if (lpc32xx_dma_ch_get(&prtd->dmacfg, "dma_i2s_rx", &lpc3xxx_pcm_dma_irq, substream) < 0) { pr_debug(KERN_ERR "Error setting up I2S RX DMA channel\n"); return -ENODEV; } /* Allocate a linked list for audio buffers */ prtd->llptr = lpc32xx_dma_alloc_llist(prtd->dmach, NUMLINKS); if (prtd->llptr == 0) { lpc32xx_dma_ch_put(prtd->dmach); prtd->dmach = -1; pr_debug(KERN_ERR "Error allocating list buffer (I2S RX)\n"); return -ENOMEM; } } } return 0; }