static int davinci_spi_request_dma(struct spi_device *spi) { struct davinci_spi *davinci_spi; struct davinci_spi_dma *davinci_spi_dma; struct davinci_spi_platform_data *pdata; struct device *sdev; int r; davinci_spi = spi_master_get_devdata(spi->master); davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; pdata = davinci_spi->pdata; sdev = davinci_spi->bitbang.master->dev.parent; r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, davinci_spi_dma_rx_callback, spi, davinci_spi_dma->eventq); if (r < 0) { dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); return -EAGAIN; } davinci_spi_dma->dma_rx_channel = r; r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, davinci_spi_dma_tx_callback, spi, davinci_spi_dma->eventq); if (r < 0) { edma_free_channel(davinci_spi_dma->dma_rx_channel); davinci_spi_dma->dma_rx_channel = -1; dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); return -EAGAIN; } davinci_spi_dma->dma_tx_channel = r; return 0; }
static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) { struct snd_dma_buffer *iram_dma; struct davinci_runtime_data *prtd = substream->runtime->private_data; struct davinci_pcm_dma_params *params = prtd->params; int ret; if (!params) return -ENODEV; /* Request asp master DMA channel */ ret = prtd->asp_channel = edma_alloc_channel(params->channel, davinci_pcm_dma_irq, substream, prtd->params->asp_chan_q); if (ret < 0) goto exit1; /* Request asp link channels */ ret = prtd->asp_link[0] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data; if (iram_dma) { if (request_ping_pong(substream, prtd, iram_dma) == 0) return 0; printk(KERN_WARNING "%s: dma channel allocation failed," "not using sram\n", __func__); } /* Issue transfer completion IRQ when the channel completes a * transfer, then always reload from the same slot (by a kind * of loopback link). The completion IRQ handler will update * the reload slot with a new buffer. * * REVISIT save p_ram here after setting up everything except * the buffer and its length (ccnt) ... use it as a template * so davinci_pcm_enqueue_dma() takes less time in IRQ. */ edma_read_slot(prtd->asp_link[0], &prtd->asp_params); prtd->asp_params.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel)); prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5; edma_write_slot(prtd->asp_link[0], &prtd->asp_params); return 0; exit2: edma_free_channel(prtd->asp_channel); prtd->asp_channel = -1; exit1: return ret; }
/*
 * logi_dma_open() - allocate the DMA buffer and claim a DMA channel.
 * @mem_dev:  device state; on success dma.buf (and dma.chan or
 *            dma.dma_chan, depending on the build) are populated.
 * @physbuf:  out: bus/physical address of the allocated buffer.
 *
 * Returns 0 on success or a negative errno.  On any failure after the
 * buffer allocation, the buffer is released again so the caller never
 * inherits a half-initialized state.
 */
int logi_dma_open(struct drvr_mem* mem_dev, dma_addr_t *physbuf)
{
#ifdef USE_DMA_ENGINE
	/* BUGFIX: zero-initialize so dmaengine_slave_config() does not see
	 * garbage in the fields we leave untouched. */
	struct dma_slave_config conf = {0};
	dma_cap_mask_t mask;
#endif

	/* Allocate DMA buffer */
	mem_dev->dma.buf = dma_alloc_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES,
					      &dmaphysbuf, 0);
	if (!mem_dev->dma.buf) {
		DBG_LOG("failed to allocate DMA buffer\n");
		return -ENOMEM;
	}

	*physbuf = dmaphysbuf;

#ifdef USE_DMA_ENGINE
	/* Allocate DMA channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	mem_dev->dma.chan = dma_request_channel(mask, NULL, NULL);
	if (!mem_dev->dma.chan) {
		/* BUGFIX: the coherent buffer was leaked on this path. */
		dma_free_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES,
				  mem_dev->dma.buf, dmaphysbuf);
		mem_dev->dma.buf = NULL;
		return -ENODEV;
	}

	/* Configure DMA channel */
	conf.direction = DMA_MEM_TO_MEM;
	/*conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;*/
	dmaengine_slave_config(mem_dev->dma.chan, &conf);

	DBG_LOG("Using Linux DMA Engine API");
#else
	mem_dev->dma.dma_chan = edma_alloc_channel(EDMA_CHANNEL_ANY,
						   dma_callback, NULL,
						   EVENTQ_0);
	if (mem_dev->dma.dma_chan < 0) {
		DBG_LOG("edma_alloc_channel failed for dma_ch, error: %d\n",
			mem_dev->dma.dma_chan);
		/* BUGFIX: the coherent buffer was leaked on this path. */
		dma_free_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES,
				  mem_dev->dma.buf, dmaphysbuf);
		mem_dev->dma.buf = NULL;
		return mem_dev->dma.dma_chan;
	}

	DBG_LOG("Using EDMA/DMA Engine");
	/* BUGFIX: moved inside the EDMA branch -- in the DMA-engine build
	 * dma.dma_chan is never assigned, so logging it there read an
	 * uninitialized field. */
	DBG_LOG("EDMA channel %d reserved\n", mem_dev->dma.dma_chan);
#endif /* USE_DMA_ENGINE */

	return 0;
}
static int dm_open(struct inode *inode, struct file *filp) { struct drvr_device * dev = container_of(inode->i_cdev, struct drvr_device, cdev); filp->private_data = dev; /* for other methods */ if (dev == NULL) { printk("%s: Failed to retrieve driver structure !\n", DEVICE_NAME); return -1; } if (dev->opened == 1) { printk("%s: module already opened\n", DEVICE_NAME); return 0; } if (dev->type != prog) { struct drvr_mem* mem_dev = &((dev->data).mem); request_mem_region((unsigned long) mem_dev->base_addr, FPGA_MEM_SIZE, DEVICE_NAME); mem_dev->virt_addr = ioremap_nocache(((unsigned long) mem_dev->base_addr), FPGA_MEM_SIZE); mem_dev->dma_chan = edma_alloc_channel(EDMA_CHANNEL_ANY, dma_callback, NULL, EVENTQ_0); mem_dev->dma_buf = (unsigned char *) dma_alloc_coherent(NULL, MAX_DMA_TRANSFER_IN_BYTES, &dmaphysbuf, 0); printk("EDMA channel %d reserved \n", mem_dev->dma_chan); if (mem_dev->dma_chan < 0) { printk("edma_alloc_channel failed for dma_ch, error: %d\n", mem_dev->dma_chan); return -1; } printk("mem interface opened \n"); } dev->opened = 1; return 0; }
/* 1 asp tx or rx channel using 2 parameter channels * 1 ram to/from iram channel using 1 parameter channel * * Playback * ram copy channel kicks off first, * 1st ram copy of entire iram buffer completion kicks off asp channel * asp tcc always kicks off ram copy of 1/2 iram buffer * * Record * asp channel starts, tcc kicks off ram copy */ static int request_ping_pong(struct snd_pcm_substream *substream, struct davinci_runtime_data *prtd, struct snd_dma_buffer *iram_dma) { dma_addr_t asp_src_ping; dma_addr_t asp_dst_ping; int ret; struct davinci_pcm_dma_params *params = prtd->params; /* Request ram master channel */ ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY, davinci_pcm_dma_irq, substream, prtd->params->ram_chan_q); if (ret < 0) goto exit1; /* Request ram link channel */ ret = prtd->ram_link = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; ret = prtd->asp_link[1] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit3; prtd->ram_link2 = -1; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = prtd->ram_link2 = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit4; } /* circle ping-pong buffers */ edma_link(prtd->asp_link[0], prtd->asp_link[1]); edma_link(prtd->asp_link[1], prtd->asp_link[0]); /* circle ram buffers */ edma_link(prtd->ram_link, prtd->ram_link); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { asp_src_ping = iram_dma->addr; asp_dst_ping = params->dma_addr; /* fifo */ } else { asp_src_ping = params->dma_addr; /* fifo */ asp_dst_ping = iram_dma->addr; } /* ping */ edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[0], 0, 0); edma_set_dest_index(prtd->asp_link[0], 0, 0); edma_read_slot(prtd->asp_link[0], &prtd->asp_params); prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN); prtd->asp_params.opt |= 
TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[0], &prtd->asp_params); /* pong */ edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[1], 0, 0); edma_set_dest_index(prtd->asp_link[1], 0, 0); edma_read_slot(prtd->asp_link[1], &prtd->asp_params); prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f)); /* interrupt after every pong completion */ prtd->asp_params.opt |= TCINTEN | TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[1], &prtd->asp_params); /* ram */ edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT); edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT); pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u," "for asp:%u %u %u\n", __func__, prtd->ram_channel, prtd->ram_link, prtd->ram_link2, prtd->asp_channel, prtd->asp_link[0], prtd->asp_link[1]); return 0; exit4: edma_free_channel(prtd->asp_link[1]); prtd->asp_link[1] = -1; exit3: edma_free_channel(prtd->ram_link); prtd->ram_link = -1; exit2: edma_free_channel(prtd->ram_channel); prtd->ram_channel = -1; exit1: return ret; }
static int edma_config(void) { // use AB mode, one_dma = 8KB/16bit static int acnt = 4096*2; static int bcnt = 1; static int ccnt = 1; int result = 0; unsigned int BRCnt = 0; int srcbidx = 0; int desbidx = 0; int srccidx = 0; int descidx = 0; struct edmacc_param param_set; printk("Initializing dma transfer...\n"); // set dest memory fpga_buf = dma_alloc_coherent (NULL, MAX_DMA_TRANSFER_IN_BYTES, &dmaphysdest, 0); if (!fpga_buf) { printk ("dma_alloc_coherent failed for physdest\n"); return -ENOMEM; } /* Set B count reload as B count. */ BRCnt = bcnt; /* Setting up the SRC/DES Index */ srcbidx = 0; desbidx = acnt; /* A Sync Transfer Mode */ srccidx = 0; descidx = acnt; // gpmc channel result = edma_alloc_channel (52, callback1, NULL, 0); if (result < 0) { printk ("edma_alloc_channel failed, error:%d", result); return result; } dma_ch = result; edma_set_src (dma_ch, (unsigned long)(gpmc_membase), INCR, W16BIT); edma_set_dest (dma_ch, (unsigned long)(dmaphysdest), INCR, W16BIT); edma_set_src_index (dma_ch, srcbidx, srccidx); // use fifo, set zero edma_set_dest_index (dma_ch, desbidx, descidx); // A mode // A Sync Transfer Mode edma_set_transfer_params (dma_ch, acnt, bcnt, ccnt, BRCnt, ASYNC); /* Enable the Interrupts on Channel 1 */ edma_read_slot (dma_ch, ¶m_set); param_set.opt |= (1 << ITCINTEN_SHIFT); param_set.opt |= (1 << TCINTEN_SHIFT); param_set.opt |= EDMA_TCC(EDMA_CHAN_SLOT(dma_ch)); edma_write_slot (dma_ch, ¶m_set); return 0; }
static int dma_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long args)
#endif
{
    /*
     * ioctl entry point for the user-space EDMA resource manager.
     *
     * Commands (cmd & EDMA_IOCCMDMASK):
     *   EDMA_IOCREQUESTDMA      allocate a DMA/QDMA channel or PaRAM slots
     *   EDMA_IOCREGUSER         register this file as a user of a channel
     *   EDMA_IOCRELEASEDMA      drop one registration; frees the channel
     *                           when the last registered user is removed
     *   EDMA_IOCGETVERSION      return driver version to user space
     *   EDMA_IOCGETBASEPHYSADDR return EDMA controller physical base
     *
     * Builds against either the LSP 2.10 DMA API or the newer edma API,
     * selected by the LSP_210 #define (the alternate function signature
     * for the unlocked-ioctl build is above this excerpt).
     *
     * Returns 0 on success, -errno on failure.
     */
    unsigned int __user *argp = (unsigned int __user *) args;
    int isParam = 0;   /* set when the request is for PaRAM slots, not a channel */
#if defined(LSP_210)
    int isQdma = 0;
#endif /* defined(LSP_210) */
    int result;
    int dev_id;
    int channel;
    struct EDMA_requestDmaParams dma_req;
    struct EDMA_releaseDmaParams dma_rel;
    struct list_head *registeredlistp;
    struct list_head *u;
    struct list_head *unext;
    struct registered_user *user;

    /* NOTE(review): a bad ioctl magic is only logged here, not rejected --
     * execution falls through to the switch.  Confirm this is intended. */
    if (_IOC_TYPE(cmd) != _IOC_TYPE(EDMA_IOCMAGIC)) {
        __E("dma_ioctl(): bad command type 0x%x (should be 0x%x)\n",
            _IOC_TYPE(cmd), _IOC_TYPE(EDMA_IOCMAGIC));
    }

    switch (cmd & EDMA_IOCCMDMASK) {
      case EDMA_IOCREQUESTDMA:
        __D("dma_ioctl(): EDMA_IOCREQUESTDMA called\n");

        if (copy_from_user(&dma_req, argp, sizeof(dma_req))) {
            return -EFAULT;
        }

        __D("dev_id: %d, eventq_no: %d, tcc: %d, param: %d, nParam: %d\n",
            dma_req.dev_id, dma_req.eventq_no, dma_req.tcc,
            dma_req.param, dma_req.nParam);

        dev_id = dma_req.dev_id;

        /*
         * In order to not be dependent on the LSP #defines, we need to
         * translate our EDMA interface's #defines to the LSP ones.
         */
        if (dev_id >= EDMA_QDMA0 && dev_id <= EDMA_QDMA7) {
#if defined(LSP_210)
            dev_id = EDMA_QDMA_CHANNEL_0 + (dev_id - EDMA_QDMA0);
            isQdma = 1;
#else /* defined(LSP_210) */
            /* QDMA exists only in the LSP 2.10 build. */
            __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                __FUNCTION__);
            return -EINVAL;
#endif /* defined(LSP_210) */
        } else {
            switch (dev_id) {
              case EDMA_PARAMANY:
                dev_id = EDMA_CONT_PARAMS_ANY;
                isParam = 1;
                break;
              case EDMA_PARAMFIXEDEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_EXACT;
                isParam = 1;
                break;
              case EDMA_PARAMFIXEDNOTEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_NOT_EXACT;
                isParam = 1;
                break;
              case EDMA_EDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_DMA_CHANNEL_ANY;
#else /* defined(LSP_210) */
                dev_id = EDMA_CHANNEL_ANY;
#endif /* defined(LSP_210) */
                break;
              case EDMA_QDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_QDMA_CHANNEL_ANY;
                isQdma = 1;
                break;
#else /* defined(LSP_210) */
                __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                    __FUNCTION__);
                return -EINVAL;
#endif /* defined(LSP_210) */
              default:
                /* do nothing, dev_id is an EDMA channel # */
                break;
            }
        }

#if defined(LSP_210)
        /* Translate our TCC #defines to the LSP equivalents. */
        switch (dma_req.tcc) {
          case EDMA_TCCANY:
            dma_req.tcc = EDMA_TCC_ANY;
            break;
          case EDMA_TCCSYMM:
            dma_req.tcc = EDMA_TCC_SYMM;
            break;
          default:
            /* do nothing, tcc is an EDMA TCC # */
            break;
        }
#endif /* defined(LSP_210) */

        if (isParam) {
            /* PaRAM slot allocation path. */
#if defined(LSP_210)
            __D("calling davinci_request_params(%d, %d, %d)...\n",
                dev_id, dma_req.nParam, dma_req.param);
            result = davinci_request_params(dev_id, dma_req.nParam,
                                            dma_req.param);
            __D("...returned %d\n", result);
            if (result >= 0) {
                dma_req.channel = result;
                dma_req.param = result;
                /* transform to 0-based success for below common code */
                result = 0;
            }
#else /* defined(LSP_210) */
            __D("calling edma_alloc_cont_slots(0, %d, %d, %d)...\n",
                dev_id, dma_req.param, dma_req.nParam);
            result = edma_alloc_cont_slots(0, dev_id, dma_req.param,
                                           dma_req.nParam);
            __D("...returned %d\n", result);
            if (result >= 0) {
                /* Only controller 0 is supported; hand back anything
                 * obtained from another controller. */
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained "
                        "channel %d from controller %d)\n", __FUNCTION__,
                        EDMA_CHAN_SLOT(result), EDMA_CTLR(result));
                    release_channel(result);
                } else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.param = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif /* defined(LSP_210) */
        } else {
            /* Channel allocation path. */
#if defined(LSP_210)
            if (dma_req.tcc == -1) {
                __E("%s: REQUESTDMA failed: TCC -1 supported only for PaRAM allocations\n",
                    __FUNCTION__);
                return -EINVAL;
            }

            result = davinci_request_dma(dev_id, "linuxutils DMA", NULL,
                                         (void *)NULL, &dma_req.channel,
                                         &dma_req.tcc, dma_req.eventq_no);
#else /* defined(LSP_210) */
            result = edma_alloc_channel(dev_id, NULL, NULL,
                                        dma_req.eventq_no);
            if (result >= 0) {
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d, will now free it)\n",
                        __FUNCTION__, EDMA_CHAN_SLOT(result),
                        EDMA_CTLR(result));
                    release_channel(result);
                } else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.tcc = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif /* defined(LSP_210) */
        }

        if (result) {
            __E("%s: REQUESTDMA failed: %d\n", __FUNCTION__, result);
            return -ENOMEM;
        } else {
            /* For EDMA_PARAMANY we've already assigned dma_req.param above */
            if (!isParam) {
#if defined(LSP_210)
                dma_req.param = davinci_get_param(dma_req.channel);
#else /* defined(LSP_210) */
                dma_req.param = dma_req.channel; /* one-to-one mapping */
#endif /* defined(LSP_210) */
            }

#if defined(LSP_210)
            /* Translate LSP's QDMA #s to linuxutil's QDMA #s */
            if (isQdma) {
                dma_req.channel = (dma_req.channel - EDMA_QDMA_CHANNEL_0) +
                                  EDMA_QDMA0;
            }
#endif /* defined(LSP_210) */

            __D(" dma channel %d allocated\n", dma_req.channel);
            __D("copying to user\n");

            if (copy_to_user(argp, &dma_req, sizeof(dma_req))) {
                return -EFAULT;
            }
        }

        /* Record this file as the first registered user of the channel. */
        user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
        if (!user) {
            __E("%s: REQUESTDMA failed to kmalloc registered_user struct",
                __FUNCTION__);
            release_channel(dma_req.channel);
            return -ENOMEM;
        }
        if (mutex_lock_interruptible(&edma_mutex)) {
            kfree(user);
            release_channel(dma_req.channel);
            return -ERESTARTSYS;
        }
        user->filp = filp;
        list_add(&user->element, &channels[dma_req.channel].users);
        if (isParam) {
            channels[dma_req.channel].nParam = dma_req.nParam;
            channels[dma_req.channel].isParam = 1;
        } else {
            channels[dma_req.channel].nParam = 1;
            channels[dma_req.channel].isParam = 0;
        }
        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCREGUSER:
        __D("dma_ioctl(): EDMA_IOCREGUSER called\n");

        if (get_user(channel, argp)) {
            return -EFAULT;
        }
        __D(" channel %d\n", channel);

        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);
            return -ERANGE;
        }

        registeredlistp = &channels[channel].users;
        /* A non-empty user list means the channel is currently allocated. */
        if (registeredlistp != registeredlistp->next) {
            user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
            if (!user) {
                __E("%s: REGUSER failed to kmalloc registered_user struct",
                    __FUNCTION__);
                return -ENOMEM;
            }
            if (mutex_lock_interruptible(&edma_mutex)) {
                kfree(user);
                return -ERESTARTSYS;
            }
            user->filp = filp;
            list_add(&user->element, &channels[channel].users);
            mutex_unlock(&edma_mutex);
        } else {
            __E("%s: REGUSER failed: channel %d not currently allocated\n",
                __FUNCTION__, channel);
            return -EFAULT;
        }

        break;

      case EDMA_IOCRELEASEDMA:
        __D("dma_ioctl(): EDMA_IOCRELEASEDMA called\n");

        if (copy_from_user(&dma_rel, argp, sizeof(dma_rel))) {
            return -EFAULT;
        }
        __D(" channel %d\n", dma_rel.channel);

        channel = dma_rel.channel;
        /* NOTE(review): error text says "REGUSER" but this is RELEASEDMA. */
        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);
            return -ERANGE;
        }

        /* Phase 1: under the mutex, remove at most one registration that
         * belongs to this file (multiple registers need multiple releases). */
        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }
        registeredlistp = &channels[channel].users;
        u = registeredlistp->next;
        while (u != registeredlistp) {
            unext = u->next;
            user = list_entry(u, struct registered_user, element);
            if (user->filp == filp) {
                __D(" removing registered user from channel %d list\n",
                    channel);
                list_del(u);
                kfree(user);
                /*
                 * Only remove once (we allow multiple "registers", and each
                 * one requires a corresponding "release").
                 */
                break;
            }
            u = unext;
        }
        mutex_unlock(&edma_mutex);

        /* Loop ran to completion => this file was never registered. */
        if (u == registeredlistp) {
            __E("%s: RELEASEDMA failed: file %p not registered for channel %d\n",
                __FUNCTION__, filp, channel);
            return -EFAULT;
        }

        /* Phase 2: re-take the mutex and free the channel if the list is
         * now empty.  NOTE(review): the list could change between the two
         * locked sections -- confirm that is acceptable here. */
        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }
        if (registeredlistp->next == registeredlistp) {
            __D(" no more registered users, freeing channel %d\n", channel);
            release_channel(channel);
        }
        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCGETVERSION:
        __D("GETVERSION ioctl received, returning %#x.\n", version);

        if (put_user(version, argp)) {
            return -EFAULT;
        }

        break;

      case EDMA_IOCGETBASEPHYSADDR:
        __D("GETBASEPHYSADDR ioctl received, returning %#x.\n", BASEADDR);

        if (put_user(BASEADDR, argp)) {
            __E("%s: GETBASEPHYSADDR: put_user() failed, returning -EFAULT!\n",
                __FUNCTION__);
            return -EFAULT;
        }

        break;
    }

    return 0;
}