/*
 * davinci_pcm_close - release all EDMA resources held by this substream.
 *
 * Channels/slots that were never allocated hold negative values, so every
 * release is guarded.  Order matters: stop active channels first, then break
 * the PaRAM link chains, then hand slots and channels back to the EDMA core.
 * Always returns 0.
 */
static int davinci_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct davinci_runtime_data *prtd = runtime->private_data;

	/* Quiesce any in-flight DMA before tearing the setup down. */
	if (prtd->ram_channel >= 0)
		edma_stop(prtd->ram_channel);
	if (prtd->asp_channel >= 0)
		edma_stop(prtd->asp_channel);

	/* Unchain the linked PaRAM sets. */
	if (prtd->asp_link[0] >= 0)
		edma_unlink(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_unlink(prtd->asp_link[1]);
	if (prtd->ram_link >= 0)
		edma_unlink(prtd->ram_link);

	/* Return slots and channels to the EDMA allocator. */
	if (prtd->asp_link[0] >= 0)
		edma_free_slot(prtd->asp_link[0]);
	if (prtd->asp_link[1] >= 0)
		edma_free_slot(prtd->asp_link[1]);
	if (prtd->asp_channel >= 0)
		edma_free_channel(prtd->asp_channel);
	if (prtd->ram_link >= 0)
		edma_free_slot(prtd->ram_link);
	if (prtd->ram_link2 >= 0)
		edma_free_slot(prtd->ram_link2);
	if (prtd->ram_channel >= 0)
		edma_free_channel(prtd->ram_channel);

	kfree(prtd);

	return 0;
}
/*
 * davinci_spi_dma_tx_callback - EDMA completion callback for the SPI TX
 * channel.
 *
 * @lch:       EDMA logical channel that completed (unused here).
 * @ch_status: channel status reported by the EDMA core.
 * @data:      the struct spi_device registered with the transfer.
 *
 * On clean completion the channel is stopped; on error it is cleaned so it
 * can be reused.  Either way the waiter is woken and the DMA TX request line
 * is deasserted.
 */
static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;	/* no cast needed from void * in C */
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;

	davinci_spi = spi_master_get_devdata(spi->master);
	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
	/* Note: the original also fetched davinci_spi->pdata but never used
	 * it; the dead local has been dropped. */

	if (ch_status == DMA_COMPLETE)
		edma_stop(davinci_spi_dma->dma_tx_channel);
	else
		edma_clean_channel(davinci_spi_dma->dma_tx_channel);

	complete(&davinci_spi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	davinci_spi_set_dma_req(spi, 0);
}
static int edma_memtomemcpy(int count, unsigned long src_addr, unsigned long trgt_addr, int dma_ch) { int result = 0; struct edmacc_param param_set; edma_set_src(dma_ch, src_addr, INCR, W256BIT); edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT); edma_set_src_index(dma_ch, 1, 1); edma_set_dest_index(dma_ch, 1, 1); /* A Sync Transfer Mode */ edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC); //one block of one frame of one array of count bytes /* Enable the Interrupts on Channel 1 */ edma_read_slot(dma_ch, ¶m_set); param_set.opt |= ITCINTEN; param_set.opt |= TCINTEN; param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch)); edma_write_slot(dma_ch, ¶m_set); irqraised1 = 0u; dma_comp.done = 0; result = edma_start(dma_ch); if (result != 0) { printk("%s: edma copy failed \n", DEVICE_NAME); } wait_for_completion(&dma_comp); /* Check the status of the completed transfer */ if (irqraised1 < 0) { printk("%s: edma copy: Event Miss Occured!!!\n", DEVICE_NAME); edma_stop(dma_ch); result = -EAGAIN; } return result; }
/*
 * fpga_perh_init - module init: configure GPIO/GPMC/EDMA, kick one EDMA
 * read from the FPGA FIFO and verify the sequential test pattern.
 *
 * Expects fpga_buf to be filled by the EDMA transfer with 1..FPGA_FIFO_SIZE;
 * any mismatch is reported with its 1-based position.  Returns 0 on success
 * (including a failed data check, which is only logged), a negative error
 * from edma_start(), or -ETIMEDOUT if the completion ISR never fires.
 *
 * Fixes vs. the previous revision:
 *  - the wait for the ISR was an unbounded busy loop; it is now capped
 *    (~5 s) so a missing interrupt cannot hang module load forever;
 *  - the dead "if (ret == 0)" test (always true after the early return)
 *    has been removed.
 */
static int __init fpga_perh_init(void)
{
	unsigned int cnt;
	unsigned int tries = 0;
	u32 val;
	int ret;
	int chk = 0;

	gpio_store();		/* save current GPIO state */
	gpio_config();		/* GPIO initialisation */
	gpmc_config();		/* GPMC chip-select setup */
	edma_config();		/* EDMA channel setup */

	/* Dump the seven GPMC CS config registers for diagnostics. */
	for (cnt = 0; cnt < 7; cnt++) {
		val = gpmc_cs_read_reg(GPMC_FPGA_CS,
				       GPMC_CS_CONFIG1 + cnt * 0x04);
		printk("GPMC_CS3_CONFIG_%d : [%08X]\n", cnt + 1, val);
	}

	printk("Gpmc now start reading...\n");

	/* Pulse the FPGA read-reset line.  NOTE(review): the original
	 * comment said "1us" but the call is _delay_ns(1) -- confirm the
	 * intended pulse width. */
	FPGA_RRST_L;
	_delay_ns(1);
	FPGA_RRST_H;

	ret = edma_start(dma_ch);
	if (ret != 0) {
		printk("dm8168_start_dma failed, error:%d", ret);
		return ret;
	}

	/* Wait for the completion ISR to flag irqraised1, but give up after
	 * ~5 seconds instead of spinning forever. */
	while (irqraised1 == 0u) {
		_delay_ms(10);
		if (++tries > 500) {
			printk(KERN_ERR "fpga_perh_init: edma completion timeout\n");
			edma_stop(dma_ch);
			edma_free_channel(dma_ch);
			return -ETIMEDOUT;
		}
	}

	/* Verify the sequential pattern 1..FPGA_FIFO_SIZE. */
	for (cnt = 0; cnt < FPGA_FIFO_SIZE; cnt++) {
		if (fpga_buf[cnt] != cnt + 1) {
			chk = cnt + 1;	/* 1-based position of first mismatch */
			break;
		}
	}
	edma_stop(dma_ch);
	edma_free_channel(dma_ch);

	if (chk == 0) {
		printk("Gpmc&edma reading sequence data check successful!\n");
	} else {
		printk("Gpmc&edma reading data check error at: %d\n", chk);
	}

	/* Dump the first 32 words for inspection. */
	for (cnt = 0; cnt < 8; cnt++) {
		printk("[%04X] [%04X] [%04X] [%04X]\n",
		       fpga_buf[cnt * 4], fpga_buf[cnt * 4 + 1],
		       fpga_buf[cnt * 4 + 2], fpga_buf[cnt * 4 + 3]);
	}

	// gpmc_cs_free(GPMC_FPGA_CS);
	return 0;
}
int logi_dma_copy(struct drvr_mem* mem_dev, unsigned long trgt_addr, unsigned long src_addr, int count) { int result = 0; #ifdef USE_DMA_ENGINE struct dma_chan *chan; struct dma_device *dev; struct dma_async_tx_descriptor *tx; unsigned long flags; chan = mem_dev->dma.chan; dev = chan->device; flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; tx = dev->device_prep_dma_memcpy(chan, trgt_addr, src_addr, count, flags); if (!tx) { DBG_LOG("device_prep_dma_memcpy failed\n"); return -ENODEV; } irqraised1 = 0u; dma_comp.done = 0; /* set the callback and submit the transaction */ tx->callback = dma_callback; tx->callback_param = mem_dev; cookie = dmaengine_submit(tx); dma_async_issue_pending(chan); #else struct edmacc_param param_set; int dma_ch = mem_dev->dma.dma_chan; edma_set_src(dma_ch, src_addr, INCR, W256BIT); edma_set_dest(dma_ch, trgt_addr, INCR, W256BIT); edma_set_src_index(dma_ch, 1, 1); edma_set_dest_index(dma_ch, 1, 1); /* A Sync Transfer Mode */ edma_set_transfer_params(dma_ch, count, 1, 1, 1, ASYNC);//one block of one frame of one array of count bytes /* Enable the Interrupts on Channel 1 */ edma_read_slot(dma_ch, ¶m_set); param_set.opt |= ITCINTEN; param_set.opt |= TCINTEN; param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch)); edma_write_slot(dma_ch, ¶m_set); irqraised1 = 0u; dma_comp.done = 0; result = edma_start(dma_ch); if (result != 0) { DBG_LOG("edma copy failed\n"); return result; } #endif /* USE_DMA_ENGINE */ wait_for_completion(&dma_comp); /* Check the status of the completed transfer */ if (irqraised1 < 0) { DBG_LOG("edma copy: Event Miss Occured!!!\n"); #ifdef USE_DMA_ENGINE dmaengine_terminate_all(chan); #else edma_stop(dma_ch); #endif /* USE_DMA_ENGINE */ result = -EAGAIN; } return result; }