static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) { struct snd_dma_buffer *iram_dma; struct davinci_runtime_data *prtd = substream->runtime->private_data; struct davinci_pcm_dma_params *params = prtd->params; int ret; if (!params) return -ENODEV; /* Request asp master DMA channel */ ret = prtd->asp_channel = edma_alloc_channel(params->channel, davinci_pcm_dma_irq, substream, prtd->params->asp_chan_q); if (ret < 0) goto exit1; /* Request asp link channels */ ret = prtd->asp_link[0] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; iram_dma = (struct snd_dma_buffer *)substream->dma_buffer.private_data; if (iram_dma) { if (request_ping_pong(substream, prtd, iram_dma) == 0) return 0; printk(KERN_WARNING "%s: dma channel allocation failed," "not using sram\n", __func__); } /* Issue transfer completion IRQ when the channel completes a * transfer, then always reload from the same slot (by a kind * of loopback link). The completion IRQ handler will update * the reload slot with a new buffer. * * REVISIT save p_ram here after setting up everything except * the buffer and its length (ccnt) ... use it as a template * so davinci_pcm_enqueue_dma() takes less time in IRQ. */ edma_read_slot(prtd->asp_link[0], &prtd->asp_params); prtd->asp_params.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(prtd->asp_channel)); prtd->asp_params.link_bcntrld = EDMA_CHAN_SLOT(prtd->asp_link[0]) << 5; edma_write_slot(prtd->asp_link[0], &prtd->asp_params); return 0; exit2: edma_free_channel(prtd->asp_channel); prtd->asp_channel = -1; exit1: return ret; }
/*
 * Attach or detach a completion callback for an EDMA channel.
 *
 * @lch:      combined controller/channel id (EDMA_CTLR|EDMA_CHAN_SLOT mux)
 * @callback: completion handler, or NULL to disable the interrupt
 * @data:     opaque cookie passed back to @callback
 *
 * With a NULL callback the channel's interrupt-enable bit is cleared via
 * the shadow-region 0 IECR register: (lch >> 5) selects the 32-bit register
 * word, (1 << (lch & 0x1f)) the bit within it.
 *
 * NOTE(review): this definition is truncated in the visible source — the
 * body ends after the !callback branch with no closing function brace (the
 * non-NULL-callback path, presumably writing ICR/IESR, is missing).  Code
 * left byte-identical; confirm against the full file.
 */
static inline void setup_dma_interrupt(unsigned lch, void (*callback)(unsigned channel, u16 ch_status, void *data), void *data) { unsigned ctlr; ctlr = EDMA_CTLR(lch); lch = EDMA_CHAN_SLOT(lch); if (!callback) { edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5, (1 << (lch & 0x1f))); }
/**
 * map_xbar_event_to_channel - maps a crossbar event to a DMA channel
 * according to the configuration provided
 * @event: the event number for which mapping is required
 * @channel: channel being activated
 * @xbar_event_mapping: array that has the event to channel map
 *
 * Events that are routed by default are not mapped. Only events that
 * are crossbar mapped are routed to available channels according to
 * the configuration provided
 *
 * Returns zero on success, else negative errno.
 */
int map_xbar_event_to_channel(unsigned int event, unsigned int *channel,
	struct event_to_channel_map *xbar_event_mapping)
{
	unsigned int ctrl = 0;
	unsigned int xbar_evt_no = 0;
	unsigned int val = 0;
	unsigned int offset = 0;
	unsigned int mask = 0;

	ctrl = EDMA_CTLR(event);
	/* Crossbar events are numbered after the direct-mapped channels;
	 * this index is only meaningful in the else-if branch below. */
	xbar_evt_no = event - (edma_cc[ctrl]->num_channels);

	if (event < edma_cc[ctrl]->num_channels) {
		/* Direct-mapped event: channel number == event number. */
		*channel = event;
	} else if (event < edma_cc[ctrl]->num_events) {
		*channel = xbar_event_mapping[xbar_evt_no].channel_no;
		/* confirm the range */
		if (*channel < EDMA_MAX_DMACH)
			clear_bit(*channel, edma_cc[ctrl]->edma_unused);
		/* NOTE(review): the register access below looks suspect and
		 * should be checked against the AM33xx TRM:
		 *  - offset ends up as (ch/4)*4 + ch%4, which is not 32-bit
		 *    aligned when ch%4 != 0 (unaligned __raw_readl);
		 *  - `mask` is computed but never used to shift the 0xFF
		 *    byte-select, so the low byte is always the one replaced
		 *    regardless of which of the 4 per-word event slots is
		 *    being programmed.
		 * Code left unchanged pending hardware-doc confirmation. */
		mask = (*channel)%4;
		offset = (*channel)/4;
		offset *= 4;
		offset += mask;
		val = (unsigned int)__raw_readl(AM33XX_CTRL_REGADDR(
					AM33XX_SCM_BASE_EDMA + offset));
		val = val & (~(0xFF));
		val = val | (xbar_event_mapping[xbar_evt_no].xbar_event_no);
		__raw_writel(val,
			AM33XX_CTRL_REGADDR(AM33XX_SCM_BASE_EDMA + offset));
		return 0;
	} else {
		/* Event number beyond what this controller supports. */
		return -EINVAL;
	}
	return 0;
}
/* 1 asp tx or rx channel using 2 parameter channels * 1 ram to/from iram channel using 1 parameter channel * * Playback * ram copy channel kicks off first, * 1st ram copy of entire iram buffer completion kicks off asp channel * asp tcc always kicks off ram copy of 1/2 iram buffer * * Record * asp channel starts, tcc kicks off ram copy */ static int request_ping_pong(struct snd_pcm_substream *substream, struct davinci_runtime_data *prtd, struct snd_dma_buffer *iram_dma) { dma_addr_t asp_src_ping; dma_addr_t asp_dst_ping; int ret; struct davinci_pcm_dma_params *params = prtd->params; /* Request ram master channel */ ret = prtd->ram_channel = edma_alloc_channel(EDMA_CHANNEL_ANY, davinci_pcm_dma_irq, substream, prtd->params->ram_chan_q); if (ret < 0) goto exit1; /* Request ram link channel */ ret = prtd->ram_link = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit2; ret = prtd->asp_link[1] = edma_alloc_slot( EDMA_CTLR(prtd->asp_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit3; prtd->ram_link2 = -1; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = prtd->ram_link2 = edma_alloc_slot( EDMA_CTLR(prtd->ram_channel), EDMA_SLOT_ANY); if (ret < 0) goto exit4; } /* circle ping-pong buffers */ edma_link(prtd->asp_link[0], prtd->asp_link[1]); edma_link(prtd->asp_link[1], prtd->asp_link[0]); /* circle ram buffers */ edma_link(prtd->ram_link, prtd->ram_link); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { asp_src_ping = iram_dma->addr; asp_dst_ping = params->dma_addr; /* fifo */ } else { asp_src_ping = params->dma_addr; /* fifo */ asp_dst_ping = iram_dma->addr; } /* ping */ edma_set_src(prtd->asp_link[0], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[0], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[0], 0, 0); edma_set_dest_index(prtd->asp_link[0], 0, 0); edma_read_slot(prtd->asp_link[0], &prtd->asp_params); prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f) | TCINTEN); prtd->asp_params.opt |= 
TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[0], &prtd->asp_params); /* pong */ edma_set_src(prtd->asp_link[1], asp_src_ping, INCR, W16BIT); edma_set_dest(prtd->asp_link[1], asp_dst_ping, INCR, W16BIT); edma_set_src_index(prtd->asp_link[1], 0, 0); edma_set_dest_index(prtd->asp_link[1], 0, 0); edma_read_slot(prtd->asp_link[1], &prtd->asp_params); prtd->asp_params.opt &= ~(TCCMODE | EDMA_TCC(0x3f)); /* interrupt after every pong completion */ prtd->asp_params.opt |= TCINTEN | TCCHEN | EDMA_TCC(prtd->ram_channel & 0x3f); edma_write_slot(prtd->asp_link[1], &prtd->asp_params); /* ram */ edma_set_src(prtd->ram_link, iram_dma->addr, INCR, W32BIT); edma_set_dest(prtd->ram_link, iram_dma->addr, INCR, W32BIT); pr_debug("%s: audio dma channels/slots in use for ram:%u %u %u," "for asp:%u %u %u\n", __func__, prtd->ram_channel, prtd->ram_link, prtd->ram_link2, prtd->asp_channel, prtd->asp_link[0], prtd->asp_link[1]); return 0; exit4: edma_free_channel(prtd->asp_link[1]); prtd->asp_link[1] = -1; exit3: edma_free_channel(prtd->ram_link); prtd->ram_link = -1; exit2: edma_free_channel(prtd->ram_channel); prtd->ram_channel = -1; exit1: return ret; }
static int dma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args) #endif { unsigned int __user *argp = (unsigned int __user *) args; int isParam = 0; #if defined(LSP_210) int isQdma = 0; #endif /* defined(LSP_210) */ int result; int dev_id; int channel; struct EDMA_requestDmaParams dma_req; struct EDMA_releaseDmaParams dma_rel; struct list_head *registeredlistp; struct list_head *u; struct list_head *unext; struct registered_user *user; if (_IOC_TYPE(cmd) != _IOC_TYPE(EDMA_IOCMAGIC)) { __E("dma_ioctl(): bad command type 0x%x (should be 0x%x)\n", _IOC_TYPE(cmd), _IOC_TYPE(EDMA_IOCMAGIC)); } switch (cmd & EDMA_IOCCMDMASK) { case EDMA_IOCREQUESTDMA: __D("dma_ioctl(): EDMA_IOCREQUESTDMA called\n"); if (copy_from_user(&dma_req, argp, sizeof(dma_req))) { return -EFAULT; } __D("dev_id: %d, eventq_no: %d, tcc: %d, param: %d, nParam: %d\n", dma_req.dev_id, dma_req.eventq_no, dma_req.tcc, dma_req.param, dma_req.nParam); dev_id = dma_req.dev_id; /* * In order to not be dependent on the LSP #defines, we need to * translate our EDMA interface's #defines to the LSP ones. 
*/ if (dev_id >= EDMA_QDMA0 && dev_id <= EDMA_QDMA7) { #if defined(LSP_210) dev_id = EDMA_QDMA_CHANNEL_0 + (dev_id - EDMA_QDMA0); isQdma = 1; #else /* defined(LSP_210) */ __E("%s: REQUESTDMA failed: QDMA is not supported\n", __FUNCTION__); return -EINVAL; #endif /* defined(LSP_210) */ } else { switch (dev_id) { case EDMA_PARAMANY: dev_id = EDMA_CONT_PARAMS_ANY; isParam = 1; break; case EDMA_PARAMFIXEDEXACT: dev_id = EDMA_CONT_PARAMS_FIXED_EXACT; isParam = 1; break; case EDMA_PARAMFIXEDNOTEXACT: dev_id = EDMA_CONT_PARAMS_FIXED_NOT_EXACT; isParam = 1; break; case EDMA_EDMAANY: #if defined(LSP_210) dev_id = EDMA_DMA_CHANNEL_ANY; #else /* defined(LSP_210) */ dev_id = EDMA_CHANNEL_ANY; #endif /* defined(LSP_210) */ break; case EDMA_QDMAANY: #if defined(LSP_210) dev_id = EDMA_QDMA_CHANNEL_ANY; isQdma = 1; break; #else /* defined(LSP_210) */ __E("%s: REQUESTDMA failed: QDMA is not supported\n", __FUNCTION__); return -EINVAL; #endif /* defined(LSP_210) */ default: /* do nothing, dev_id is an EDMA channel # */ break; } } #if defined(LSP_210) switch (dma_req.tcc) { case EDMA_TCCANY: dma_req.tcc = EDMA_TCC_ANY; break; case EDMA_TCCSYMM: dma_req.tcc = EDMA_TCC_SYMM; break; default: /* do nothing, tcc is an EDMA TCC # */ break; } #endif /* defined(LSP_210) */ if (isParam) { #if defined(LSP_210) __D("calling davinci_request_params(%d, %d, %d)...\n", dev_id, dma_req.nParam, dma_req.param); result = davinci_request_params(dev_id, dma_req.nParam, dma_req.param); __D("...returned %d\n", result); if (result >= 0) { dma_req.channel = result; dma_req.param = result; /* transform to 0-based success for below common code */ result = 0; } #else /* defined(LSP_210) */ __D("calling edma_alloc_cont_slots(0, %d, %d, %d)...\n", dev_id, dma_req.param, dma_req.nParam); result = edma_alloc_cont_slots(0, dev_id, dma_req.param, dma_req.nParam); __D("...returned %d\n", result); if (result >= 0) { if (EDMA_CTLR(result) != 0) { __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained 
channel %d from controller %d)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result)); release_channel(result); } else { dma_req.channel = EDMA_CHAN_SLOT(result); dma_req.param = dma_req.channel; /* transform to 0-based success for below common code */ result = 0; } } #endif /* defined(LSP_210) */ } else { #if defined(LSP_210) if (dma_req.tcc == -1) { __E("%s: REQUESTDMA failed: TCC -1 supported only for PaRAM allocations\n", __FUNCTION__); return -EINVAL; } result = davinci_request_dma(dev_id, "linuxutils DMA", NULL, (void *)NULL, &dma_req.channel, &dma_req.tcc, dma_req.eventq_no); #else /* defined(LSP_210) */ result = edma_alloc_channel(dev_id, NULL, NULL, dma_req.eventq_no); if (result >= 0) { if (EDMA_CTLR(result) != 0) { __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d, will now free it)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result)); release_channel(result); } else { dma_req.channel = EDMA_CHAN_SLOT(result); dma_req.tcc = dma_req.channel; /* transform to 0-based success for below common code */ result = 0; } } #endif /* defined(LSP_210) */ } if (result) { __E("%s: REQUESTDMA failed: %d\n", __FUNCTION__, result); return -ENOMEM; } else { /* For EDMA_PARAMANY we've already assigned dma_req.param above */ if (!isParam) { #if defined(LSP_210) dma_req.param = davinci_get_param(dma_req.channel); #else /* defined(LSP_210) */ dma_req.param = dma_req.channel; /* one-to-one mapping */ #endif /* defined(LSP_210) */ } #if defined(LSP_210) /* Translate LSP's QDMA #s to linuxutil's QDMA #s */ if (isQdma) { dma_req.channel = (dma_req.channel - EDMA_QDMA_CHANNEL_0) + EDMA_QDMA0; } #endif /* defined(LSP_210) */ __D(" dma channel %d allocated\n", dma_req.channel); __D("copying to user\n"); if (copy_to_user(argp, &dma_req, sizeof(dma_req))) { return -EFAULT; } } user = kmalloc(sizeof(struct registered_user), GFP_KERNEL); if (!user) { __E("%s: REQUESTDMA failed to kmalloc registered_user struct", 
__FUNCTION__); release_channel(dma_req.channel); return -ENOMEM; } if (mutex_lock_interruptible(&edma_mutex)) { kfree(user); release_channel(dma_req.channel); return -ERESTARTSYS; } user->filp = filp; list_add(&user->element, &channels[dma_req.channel].users); if (isParam) { channels[dma_req.channel].nParam = dma_req.nParam; channels[dma_req.channel].isParam = 1; } else { channels[dma_req.channel].nParam = 1; channels[dma_req.channel].isParam = 0; } mutex_unlock(&edma_mutex); break; case EDMA_IOCREGUSER: __D("dma_ioctl(): EDMA_IOCREGUSER called\n"); if (get_user(channel, argp)) { return -EFAULT; } __D(" channel %d\n", channel); if (channel >= NCHAN) { __E("%s: REGUSER failed: channel %d out of range\n", __FUNCTION__, channel); return -ERANGE; } registeredlistp = &channels[channel].users; if (registeredlistp != registeredlistp->next) { user = kmalloc(sizeof(struct registered_user), GFP_KERNEL); if (!user) { __E("%s: REGUSER failed to kmalloc registered_user struct", __FUNCTION__); return -ENOMEM; } if (mutex_lock_interruptible(&edma_mutex)) { kfree(user); return -ERESTARTSYS; } user->filp = filp; list_add(&user->element, &channels[channel].users); mutex_unlock(&edma_mutex); } else { __E("%s: REGUSER failed: channel %d not currently allocated\n", __FUNCTION__, channel); return -EFAULT; } break; case EDMA_IOCRELEASEDMA: __D("dma_ioctl(): EDMA_IOCRELEASEDMA called\n"); if (copy_from_user(&dma_rel, argp, sizeof(dma_rel))) { return -EFAULT; } __D(" channel %d\n", dma_rel.channel); channel = dma_rel.channel; if (channel >= NCHAN) { __E("%s: REGUSER failed: channel %d out of range\n", __FUNCTION__, channel); return -ERANGE; } if (mutex_lock_interruptible(&edma_mutex)) { return -ERESTARTSYS; } registeredlistp = &channels[channel].users; u = registeredlistp->next; while (u != registeredlistp) { unext = u->next; user = list_entry(u, struct registered_user, element); if (user->filp == filp) { __D(" removing registered user from channel %d list\n", channel); list_del(u); 
kfree(user); /* * Only remove once (we allow multiple "registers", and each * one requires a corresponding "release"). */ break; } u = unext; } mutex_unlock(&edma_mutex); if (u == registeredlistp) { __E("%s: RELEASEDMA failed: file %p not registered for channel %d\n", __FUNCTION__, filp, channel); return -EFAULT; } if (mutex_lock_interruptible(&edma_mutex)) { return -ERESTARTSYS; } if (registeredlistp->next == registeredlistp) { __D(" no more registered users, freeing channel %d\n", channel); release_channel(channel); } mutex_unlock(&edma_mutex); break; case EDMA_IOCGETVERSION: __D("GETVERSION ioctl received, returning %#x.\n", version); if (put_user(version, argp)) { return -EFAULT; } break; case EDMA_IOCGETBASEPHYSADDR: __D("GETBASEPHYSADDR ioctl received, returning %#x.\n", BASEADDR); if (put_user(BASEADDR, argp)) { __E("%s: GETBASEPHYSADDR: put_user() failed, returning -EFAULT!\n", __FUNCTION__); return -EFAULT; } break; } return 0; }
/* release_channel() must be called with the edma_mutex held
 *
 * NOTE(review): dma_ioctl()'s REQUESTDMA error paths call this *before*
 * taking edma_mutex (kmalloc-failure and controller!=0 cleanup), which
 * violates this stated requirement — confirm and fix at the call sites.
 *
 * Frees every channel or PaRAM slot in the run [chan, chan + nParam) and,
 * for controller 0, resets this module's per-channel bookkeeping.
 */
static void release_channel(int chan)
{
	int localChan;
	int i;

	/*
	 * The non-LSP_210 EDMA interface returns a "magic" value that represents
	 * the controller number and channel number muxed together in one UInt32.
	 * This module doesn't yet support a controller other than 0, however, this
	 * function needs to accommodate being called with a controller > 0 since
	 * it's called to release a channel on a controller > 0 when the
	 * REQUESTDMA ioctl() receives a controller > 0 that it can't handle and
	 * needs to clean up after itself.
	 */

	/*
	 * In order to not be dependent on the LSP #defines, we need to
	 * translate our EDMA interface's #defines to the LSP ones.
	 */
#if defined(LSP_210)
	localChan = chan;

	/* LSP namespace puts QDMA channels after the EDMA ones. */
	if (chan >= EDMA_QDMA0 && chan <= EDMA_QDMA7) {
		__D(" release_channel: translating QDMA channel %d to LSP namespace ...\n",
		    chan);
		localChan = EDMA_QDMA_CHANNEL_0 + (chan - EDMA_QDMA0);
	}

	/* A PaRAM allocation spans nParam consecutive channels; a plain
	 * channel allocation has nParam == 1 (set in dma_ioctl()). */
	for (i = 0; i < channels[localChan].nParam; i++) {
		__D(" release_channel: freeing channel %d...\n", localChan + i);
		davinci_free_dma(localChan + i);
	}

	INIT_LIST_HEAD(&channels[localChan].users);
	channels[localChan].nParam = 0;
	channels[localChan].isParam = 0;
#else /* defined(LSP_210) */
	/* Strip the controller bits for indexing the bookkeeping array;
	 * the raw (muxed) `chan` is still what the edma_* calls expect. */
	localChan = EDMA_CHAN_SLOT(chan);

	if (localChan >= EDMA_QDMA0 && localChan <= EDMA_QDMA7) {
		__E(" release_channel: QDMA is not supported: chan %d\n", chan);
		return;
	}

	for (i = 0; i < channels[localChan].nParam; i++) {
		if (channels[localChan].isParam) {
			__D(" release_channel: calling edma_free_slot(%d)...\n",
			    chan + i);
			edma_free_slot(chan + i);
		} else {
			__D(" release_channel: calling edma_free_channel(%d)...\n",
			    chan + i);
			/* Clean first so a channel aborted mid-transfer is
			 * quiesced before being returned to the allocator. */
			edma_clean_channel(chan + i);
			edma_free_channel(chan + i);
		}
	}

	/* Bookkeeping only tracks controller 0; channels released on other
	 * controllers (cleanup path) have no local state to reset. */
	if (EDMA_CTLR(chan) == 0) {
		INIT_LIST_HEAD(&channels[localChan].users);
		channels[localChan].nParam = 0;
		channels[localChan].isParam = 0;
	}
#endif /* defined(LSP_210) */
}