Exemplo n.º 1
0
/*
 * Set up the EDMA channels used by the MMC controller: a master write
 * (TX) channel, a master read (RX) channel, and one slave "link" PaRAM
 * entry for reads.  On any failure, channels already acquired are
 * released before returning the davinci_request_dma() error code.
 */
static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	enum dma_event_q queue_no = EVENTQ_0;
	int edma_chan_num, sync_dev;
	int tcc = 0;
	int r;

	/* Master DMA write (TX) channel. */
	r = davinci_request_dma(host->dma_tx_event, "MMC_WRITE",
				mmc_davinci_dma_cb, host,
				&edma_chan_num, &tcc, queue_no);
	if (r != 0) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		return r;
	}

	/* Master DMA read (RX) channel. */
	r = davinci_request_dma(host->dma_rx_event, "MMC_READ",
				mmc_davinci_dma_cb, host,
				&edma_chan_num, &tcc, queue_no);
	if (r != 0) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		goto free_master_write;
	}

	host->edma_ch_details.cnt_chanel = 0;

	/*
	 * Writes currently use single-block mode, so no slave write
	 * channel is needed.  Reads get one slave (link) channel,
	 * assuming at most two segments are handled.
	 */
	sync_dev = host->dma_rx_event;
	r = davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "LINK",
				NULL, NULL, &edma_chan_num,
				&sync_dev, queue_no);
	if (r != 0) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		goto free_master_read;
	}

	host->edma_ch_details.cnt_chanel++;
	host->edma_ch_details.chanel_num[0] = edma_chan_num;

	return 0;

free_master_read:
	davinci_free_dma(host->dma_rx_event);
free_master_write:
	davinci_free_dma(host->dma_tx_event);

	return r;
}
Exemplo n.º 2
0
/*
 * Undo davinci_acquire_dma_channels(): release both master channels,
 * then any slave (link) channel recorded in edma_ch_details.
 * Always returns 0.
 */
static int davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	davinci_free_dma(host->dma_tx_event);
	davinci_free_dma(host->dma_rx_event);

	if (host->edma_ch_details.cnt_chanel != 0) {
		davinci_free_dma(host->edma_ch_details.chanel_num[0]);
		host->edma_ch_details.cnt_chanel = 0;
	}

	return 0;
}
/*
 * PCM close: break the slave channel's self-link (made at request time
 * with davinci_dma_link_lch(slave, slave)), free both EDMA channels,
 * and release the per-stream runtime data.
 */
static int davinci_pcm_close(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd =
		substream->runtime->private_data;

	davinci_dma_unlink_lch(prtd->slave_lch, prtd->slave_lch);

	davinci_free_dma(prtd->slave_lch);
	davinci_free_dma(prtd->master_lch);

	kfree(prtd);

	return 0;
}
Exemplo n.º 4
0
/*
 * Acquire the RX and TX EDMA channels for one SPI chip select.
 * If the TX request fails, the already-acquired RX channel is
 * released and marked invalid (-1) so cleanup won't double-free.
 * Returns 0 on success, -EAGAIN if either channel is unavailable.
 */
static int davinci_spi_request_dma(struct spi_device *spi)
{
	struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
	struct davinci_spi_dma *dma_ch =
		&dspi->dma_channels[spi->chip_select];
	int tcc;

	if (davinci_request_dma(dma_ch->dma_rx_sync_dev, "MibSPI RX",
				davinci_spi_dma_rx_callback, spi,
				&dma_ch->dma_rx_channel,
				&tcc, dma_ch->eventq)) {
		pr_err("Unable to request DMA channel for MibSPI RX\n");
		return -EAGAIN;
	}

	if (davinci_request_dma(dma_ch->dma_tx_sync_dev, "MibSPI TX",
				davinci_spi_dma_tx_callback, spi,
				&dma_ch->dma_tx_channel,
				&tcc, dma_ch->eventq)) {
		/* Roll back the RX allocation. */
		davinci_free_dma(dma_ch->dma_rx_channel);
		dma_ch->dma_rx_channel = -1;
		pr_err("Unable to request DMA channel for MibSPI TX\n");
		return -EAGAIN;
	}

	return 0;
}
/*
 * Acquire the master and slave EDMA channels for a PCM substream and
 * self-link the slave channel (loopback link, undone at close time).
 * Returns 0 on success, -ENODEV if the DAI has no DMA parameters, or
 * the davinci_request_dma() error code.
 */
static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
	int tcc = TCC_ANY;
	int ret;

	if (dma_data == NULL)
		return -ENODEV;

	prtd->params = dma_data;

	/* Master channel, triggered by the DAI's sync event. */
	ret = davinci_request_dma(prtd->params->channel, prtd->params->name,
				  davinci_pcm_dma_irq, substream,
				  &prtd->master_lch, &tcc, EVENTQ_0);
	if (ret != 0)
		return ret;

	/* Slave (link) channel from any free PaRAM slot. */
	ret = davinci_request_dma(PARAM_ANY, "Link",
				  NULL, NULL, &prtd->slave_lch, &tcc, EVENTQ_0);
	if (ret != 0) {
		davinci_free_dma(prtd->master_lch);
		return ret;
	}

	/* Link the slave channel to itself (loopback). */
	davinci_dma_link_lch(prtd->slave_lch, prtd->slave_lch);

	return 0;
}
Exemplo n.º 6
0
/*
 * Tear down a copy/fill DMA channel created by DMA_copyFillCreate():
 * stop and free the EDMA channel, release the transfer-parameter
 * buffer and the bookkeeping objects, and clear the device slot.
 * Returns 0 on success, -1 for an invalid or unknown channel id.
 */
int DMA_copyFillDelete(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;

    if (prm->chId >= DMA_DEV_MAX_CH)
        return -1;

    pObj = gDMA_dev.pObj[prm->chId];
    if (pObj == NULL || pObj->copyFillObj == NULL) {
        FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
        return -1;
    }

    davinci_stop_dma( pObj->copyFillObj->chId );
    davinci_free_dma( pObj->copyFillObj->chId );

    /* kfree(NULL) is a no-op, so no guard is needed. */
    kfree(pObj->copyFillObj->transferPrm);
    kfree(pObj->copyFillObj);
    kfree(pObj);

    gDMA_dev.pObj[prm->chId] = NULL;

    return 0;
}
Exemplo n.º 7
0
/*
 * Release the RX/TX EDMA channels for one SPI chip select, if DMA is
 * in use and both channels were successfully acquired (both != -1,
 * see davinci_spi_request_dma()'s rollback).
 *
 * Fix: the original computed
 * &davinci_spi->dma_channels[spi->chip_select] BEFORE checking
 * davinci_spi->dma_channels for NULL (undefined behavior when the
 * array is NULL), and then redundantly repeated the same assignment
 * inside the guard.  The pointer is now derived only after the check.
 */
static void davinci_spi_cleanup(const struct spi_device *spi)
{
	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
	struct davinci_spi_dma *davinci_spi_dma;

	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel != -1)
				&& (davinci_spi_dma->dma_tx_channel != -1)) {
			davinci_free_dma(davinci_spi_dma->dma_tx_channel);
			davinci_free_dma(davinci_spi_dma->dma_rx_channel);
		}
	}
}
Exemplo n.º 8
0
Arquivo: dev_dma.c Projeto: 119/ipnc
/*
 * Module/device teardown: delete the character device and release the
 * reserved dev_t region.  The explicit freeing of all EDMA channels is
 * disabled (no longer needed after the FC bug fix, per the original
 * note) but kept under #if 0 for reference.
 *
 * Fix: `int i` was declared unconditionally but only used inside the
 * #if 0 region, producing an unused-variable warning; it now lives
 * inside that region.
 */
void DMA_devExit(void)
{
  dev_t   devno = MKDEV(gDMA_dev.major, 0);

#ifdef DMA_DEBUG
  printk(KERN_INFO "DMA: DMA_devExit() \n");
#endif

  cdev_del(&gDMA_dev.cdev);

  unregister_chrdev_region(devno, 1);

  /* Anshuman - Removing it as it is no more needed after FC bug fix */
#if 0
  int i;

  for(i=0; i<EDMA_MAX_PARAM_SET; i++)
    davinci_free_dma(i);

  for(i=EDMA_QDMA_CHANNEL_0; i<=EDMA_QDMA_CHANNEL_7; i++)
    davinci_free_dma(i);
#endif
}
Exemplo n.º 9
0
/*
 * Tear down a demux DMA channel set created by DMA_demuxCreate():
 * stop the master channel, free all linked channels/PaRAM slots,
 * release the address lists and bookkeeping objects, and clear the
 * device slot.
 *
 * Returns 0 on success, -1 for an invalid or unknown channel id.
 *
 * Fix: the successful teardown path previously returned -1, making it
 * indistinguishable from the error paths (the sibling
 * DMA_copyFillDelete() returns 0 on success); it now returns 0.
 */
int DMA_demuxDelete(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;
    int i;

    if (prm->chId >= DMA_DEV_MAX_CH)
    {
        FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
        return -1;
    }

    pObj = gDMA_dev.pObj[prm->chId];
    if (pObj==NULL)
    {
        FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
        return -1;
    }

    if (pObj->demuxObj==NULL)
    {
        FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
        return -1;
    }

    /* Only the master channel (index 0) triggers transfers; stop it
     * before freeing the whole chain. */
    davinci_stop_dma( pObj->demuxObj->channelDemux[0] );

    for (i=0; i<DRV_DMA_MAX_DEMUX_PARAM; i++)
        davinci_free_dma( pObj->demuxObj->channelDemux[i] );

    if (pObj->demuxObj->srcPhysAddrList!=NULL)
        kfree(pObj->demuxObj->srcPhysAddrList);

    if (pObj->demuxObj->dstPhysAddrList!=NULL)
        kfree(pObj->demuxObj->dstPhysAddrList);

    kfree(pObj->demuxObj);
    kfree(pObj);

    gDMA_dev.pObj[prm->chId] = NULL;

    return 0;
}
Exemplo n.º 10
0
/*
 * Create a "demux" DMA channel set: one master EDMA channel plus
 * (DRV_DMA_MAX_DEMUX_PARAM - 1) additional PaRAM entries, each
 * configured for AB-synchronized transfers and chained (TCCHEN) to the
 * master's TCC, with the final entry raising the completion interrupt
 * (TCINTEN).  The master channel number doubles as the device-level
 * channel id returned in prm->chId.
 *
 * Returns 0 on success, -1 on allocation failure, -EFAULT if a DMA
 * channel cannot be requested or the id is out of range.
 *
 * Fixes vs. the original:
 *  - gDMA_dev.pObj[] is now bound-checked against DMA_DEV_MAX_CH
 *    before being written, matching DMA_copyFillCreate(); previously
 *    an out-of-range master channel number caused an out-of-bounds
 *    array write.
 *  - The four duplicated cleanup ladders are folded into one
 *    goto-based error path (CERT MEM12-C style).
 */
int DMA_demuxCreate(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;
    DMA_DemuxObj *pDemuxObj;
    int tcc, i;
    int nChannels = 0;  /* channels successfully requested so far */
    int ret;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_demuxCreate() ... \n");
#endif

    pObj = kmalloc(sizeof(DMA_Obj), GFP_KERNEL);
    if (pObj==NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        return -1;
    }

    pObj->dmaMode = prm->mode;
    pObj->copyFillObj = NULL;

    pObj->demuxObj = kmalloc(sizeof(DMA_DemuxObj), GFP_KERNEL);
    if (pObj->demuxObj==NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        kfree(pObj);
        return -1;
    }

    pDemuxObj = pObj->demuxObj;
    memset(pDemuxObj, 0, sizeof(DMA_DemuxObj));

    pDemuxObj->srcPhysAddrList = kmalloc( sizeof(unsigned long)*prm->maxTransfers, GFP_KERNEL);
    pDemuxObj->dstPhysAddrList = kmalloc( sizeof(unsigned long)*prm->maxTransfers, GFP_KERNEL);

    if (pDemuxObj->srcPhysAddrList==NULL || pDemuxObj->dstPhysAddrList==NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        ret = -1;
        goto error_cleanup;
    }

    pDemuxObj->maxLines = prm->maxTransfers;

    memset(pDemuxObj->srcPhysAddrList, 0, sizeof(unsigned long)*prm->maxTransfers);
    memset(pDemuxObj->dstPhysAddrList, 0, sizeof(unsigned long)*prm->maxTransfers);

    init_completion(&pDemuxObj->dma_complete);

    tcc = EDMA_TCC_ANY;
    /* Seed value; overwritten by davinci_request_dma() on success
     * (kept from the original code — TODO confirm it is needed). */
    pDemuxObj->channelDemux[0] = 14;
    if (davinci_request_dma(EDMA_DMA_CHANNEL_ANY, "Memory DMA", DMA_demuxCallback,
                            pDemuxObj, &pDemuxObj->channelDemux[0], &tcc, DMA_DEMUX_EVENT_QUEUE))
    {
        FUNCERR( "Unable to request DMA (channel=%d).\n", pDemuxObj->channelDemux[0] );
        ret = -EFAULT;
        goto error_cleanup;
    }
    nChannels = 1;

    for (i=1; i<DRV_DMA_MAX_DEMUX_PARAM; i++)
    {
        pDemuxObj->channelDemux[i] = 15;  /* seed value, see above */
        if (davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "Memory DMA", DMA_demuxCallback,
                                pDemuxObj, &pDemuxObj->channelDemux[i], &tcc, DMA_DEMUX_EVENT_QUEUE))
        {
            FUNCERR( "Unable to request DMA (channel=%d).\n", pDemuxObj->channelDemux[i] );
            ret = -EFAULT;
            goto error_cleanup;
        }
        nChannels = i + 1;
    }

    /* The master channel number indexes gDMA_dev.pObj[]; reject it if
     * out of range (as DMA_copyFillCreate() does). */
    if (pDemuxObj->channelDemux[0] >= DMA_DEV_MAX_CH)
    {
        FUNCERR( "Invalid channel ID (channel=%d).\n", pDemuxObj->channelDemux[0] );
        ret = -EFAULT;
        goto error_cleanup;
    }

    for (i=0; i<DRV_DMA_MAX_DEMUX_PARAM; i++)
    {
        /* no link */
        pDemuxObj->dmaParamAddr[i] = davinci_get_param_addr(pDemuxObj->channelDemux[i]);

        pDemuxObj->dmaParamDemux[i].link_bcntrld = DMA_PARAM_NO_LINK;

        /* no support for 3 dimension transfers */
        pDemuxObj->dmaParamDemux[i].src_dst_cidx = 0;
        pDemuxObj->dmaParamDemux[i].ccnt = 1;
        pDemuxObj->dmaParamDemux[i].opt =   DMA_PARAM_OPT_SYNCDIM_AB
                                            | ((pDemuxObj->channelDemux[0] & 0x3f)<<12)
                                            ;
        pDemuxObj->dmaParamDemux[i].src_dst_bidx = 0;

        if (i==(DRV_DMA_MAX_DEMUX_PARAM-1))
        {
            pDemuxObj->dmaParamDemux[i].opt |= TCINTEN; // enable interrupt for last linked DMA
        }
        else
        {
            pDemuxObj->dmaParamDemux[i].opt |= TCCHEN;  // enable chaining for intermediate linked DMAs
        }

        davinci_set_dma_params(pDemuxObj->channelDemux[i], &pDemuxObj->dmaParamDemux[i]);
    }

    gDMA_dev.pObj[pDemuxObj->channelDemux[0]] = pObj;
    prm->chId = pDemuxObj->channelDemux[0];

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_demuxCreate() success ... chId is %d\n", prm->chId);
#endif

    return 0;

error_cleanup:
    /* Free channels in reverse order of acquisition. */
    while (nChannels > 0)
    {
        nChannels--;
        davinci_free_dma( pDemuxObj->channelDemux[nChannels] );
    }

    if (pDemuxObj->srcPhysAddrList)
        kfree(pDemuxObj->srcPhysAddrList);

    if (pDemuxObj->dstPhysAddrList)
        kfree(pDemuxObj->dstPhysAddrList);

    kfree(pObj->demuxObj);
    kfree(pObj);

    return ret;
}
Exemplo n.º 11
0
Arquivo: ipera_k.c Projeto: gqiao/work
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args)
{
	int                    ret;
    unsigned int __user   *argp = (unsigned int __user *) args;
	unsigned long          physp;
	unsigned long          virtp;
	unsigned int           type;
	
	
	//__D("ioctl %d received. \n", cmd);
	
    switch (cmd) {
	case IPERA_INIT_PMU_STATICS:
		init_pmu_asm();
		__D("IPERA_INIT_PMU_STATICS : returning\n");
		break;

	case IPERA_START_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));

		//set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0);
		//set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1);
		//set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2);
		//set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3);
		//start_pmu_asm();
		//__D("IPERA_START_PMU_CACHES_STATICS : returning\n");
		break;

	case IPERA_END_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//stop_pmu_asm();
		//pmu_statics[type].pmu_count		+= 1; 
		//pmu_statics[type].pmu_cycles    += get_clock_counter_asm();
		//pmu_statics[type].pmu_instr_exec    += get_pmnx_counter_asm(EVENT_COUNTER0);
		//pmu_statics[type].pmu_icache_miss    += get_pmnx_counter_asm(EVENT_COUNTER1);
		//pmu_statics[type].pmu_dcache_access    += get_pmnx_counter_asm(EVENT_COUNTER2);
		//pmu_statics[type].pmu_dcache_miss    += get_pmnx_counter_asm(EVENT_COUNTER3);
		//__D("IPERA_END_PMU_CACHES_STATICS : returning\n");
		break;
		
	case IPERA_GET_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//ret_get_cycles = pmu_statics[type].pmu_cycles;
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count);
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles);
		//__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec);
		//__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss);
		//__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_access);
		//__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss);
		//ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type]));
		break;

	case IPERA_GET_PHYS:
		get_user(virtp, argp);
		physp = ipera_get_phys(virtp);
		put_user(physp, argp);
		//__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

		
#if 0
	case IPERA_GET_CYCLES:
		__D("IPERA_GET_CYCLES : received.\n");
		cur_cycles = get_cycles();
		copy_to_user(argp, &cur_cycles, sizeof(cur_cycles));
		__D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles);
		break;

	case IPERA_GET_PHYS:
		__D("IPERA_GET_PHYS : received.\n");
		get_user(virtp, argp);
		physp = get_phys(virtp);
		put_user(physp, argp);
		__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

    case IPERA_DMACPY:
        __D("IPERA_DMACPY : received.\n");
        if (copy_from_user(&dma, argp, sizeof(dma))) {
            return -EFAULT;
        }
        err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1);
        if (err < 0) {
            __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err);
            return err;
        } else if(master_ch != 25)  __E("get channel %d \n", master_ch);
        davinci_stop_dma(master_ch);

        init_completion(&edmacompletion);
        davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth);
        davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth);
        davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx);
        davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx);
        davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode);
        davinci_get_dma_params(master_ch, &paramentry);
        davinci_set_dma_params(master_ch, &paramentry);
        davinci_start_dma(master_ch);
        wait_for_completion(&edmacompletion);
        //printk("Dma completed... \n");
        davinci_stop_dma(master_ch);
        davinci_free_dma(master_ch);
        break;
#endif

    default:
        __E("Unknown ioctl received = %d.\n", cmd);
        return -EINVAL;
    }
    return 0;
}
Exemplo n.º 12
0
/*
 * Create a copy/fill DMA channel: allocate the bookkeeping objects and
 * the per-transfer parameter buffer, request one EDMA channel, and
 * register the object under its channel id in gDMA_dev.pObj[].
 *
 * Returns 0 on success (prm->chId set to the channel id), -1 on
 * allocation failure, -EFAULT on DMA-request failure or an
 * out-of-range channel id.
 */
int DMA_copyFillCreate(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;
    DMA_CopyFillObj *pCopyFill;
    int tcc;
    int ret;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_copyFillCreate() ... \n");
#endif

    pObj = kmalloc(sizeof(DMA_Obj), GFP_KERNEL);
    if (pObj == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        return -1;
    }

    pObj->dmaMode = prm->mode;
    pObj->demuxObj = NULL;

    pObj->copyFillObj = kmalloc(sizeof(DMA_CopyFillObj), GFP_KERNEL);
    if (pObj->copyFillObj == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        kfree(pObj);
        return -1;
    }

    pCopyFill = pObj->copyFillObj;
    memset(pCopyFill, 0, sizeof(DMA_CopyFillObj));

    pCopyFill->transferPrm = kmalloc(sizeof(DRV_CopyFillPrm)*prm->maxTransfers, GFP_KERNEL);
    if (pCopyFill->transferPrm == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        kfree(pCopyFill);
        kfree(pObj);
        return -1;
    }

    pCopyFill->maxCount = prm->maxTransfers;
    memset(pCopyFill->transferPrm, 0, sizeof(DRV_CopyFillPrm)*prm->maxTransfers);

    init_completion(&pCopyFill->dma_complete);

    tcc = EDMA_TCC_ANY;
    pCopyFill->chId = 0;
    if (davinci_request_dma(EDMA_DMA_CHANNEL_ANY, "Memory DMA", DMA_copyFillCallback,
                            pCopyFill, &pCopyFill->chId, &tcc, DMA_NORMAL_EVENT_QUEUE)) {
        FUNCERR( "Unable to request DMA (channel=%d).\n", pCopyFill->chId );
        ret = -EFAULT;
        goto error_free;
    }

    /* The channel number indexes gDMA_dev.pObj[]; reject it if out of
     * range. */
    if (pCopyFill->chId >= DMA_DEV_MAX_CH) {
        FUNCERR( "Invalid channel ID (channel=%d).\n", pCopyFill->chId );
        davinci_free_dma( pCopyFill->chId );
        ret = -EFAULT;
        goto error_free;
    }

    gDMA_dev.pObj[pCopyFill->chId] = pObj;
    prm->chId = pCopyFill->chId;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_copyFillCreate() success ... chId is %d\n", prm->chId);
#endif

    return 0;

error_free:
    kfree(pCopyFill->transferPrm);
    kfree(pCopyFill);
    kfree(pObj);
    return ret;
}
Exemplo n.º 13
0
/* release_channel() must be called with the edma_mutex held.
 *
 * Releases the EDMA channel (or run of PaRAM slots) starting at `chan`,
 * covering channels[localChan].nParam consecutive entries, and resets
 * that slot's bookkeeping (users list, nParam, isParam).  Behavior is
 * compiled per LSP generation: the LSP_210 path frees via
 * davinci_free_dma(); the non-LSP_210 path uses the edma_* API and
 * does not support QDMA channels.
 */
static void release_channel(int chan)
{
    int localChan;
    int i;

    /*
     * The non-LSP_210 EDMA interface returns a "magic" value that represents
     * the controller number and channel number muxed together in one UInt32.
     * This module doesn't yet support a controller other than 0, however, this
     * function needs to accommodate being called with a controller > 0 since
     * it's called to release a channel on a controller > 0 when the
     * REQUESTDMA ioctl() receives a controller > 0 that it can't handle and
     * needs to clean up after itself.
     */
    /*
     * In order to not be dependent on the LSP #defines, we need to
     * translate our EDMA interface's #defines to the LSP ones.
     */
#if defined(LSP_210)

    /* Map our QDMA channel numbering onto the LSP's. */
    localChan = chan;
    if (chan >= EDMA_QDMA0 && chan <= EDMA_QDMA7) {
        __D("  release_channel: translating QDMA channel %d to LSP namespace ...\n", chan);
        localChan = EDMA_QDMA_CHANNEL_0 + (chan - EDMA_QDMA0);
    }

    /* Free each consecutive channel that was allocated for this request. */
    for (i = 0; i < channels[localChan].nParam; i++) {
        __D("  release_channel: freeing channel %d...\n", localChan + i);

        davinci_free_dma(localChan + i);
    }

    INIT_LIST_HEAD(&channels[localChan].users);
    channels[localChan].nParam = 0;
    channels[localChan].isParam = 0;

#else   /* defined(LSP_210) */

    localChan = EDMA_CHAN_SLOT(chan);
    if (localChan >= EDMA_QDMA0 && localChan <= EDMA_QDMA7) {
        __E("  release_channel: QDMA is not supported: chan %d\n", chan);
                
        return;
    }

    /* PaRAM-only allocations are freed as slots; real channels are
     * cleaned (pending events cleared) and then freed. */
    for (i = 0; i < channels[localChan].nParam; i++) {
        if (channels[localChan].isParam) {
            __D("  release_channel: calling edma_free_slot(%d)...\n",
                chan + i);

            edma_free_slot(chan + i);
        }
        else {
            __D("  release_channel: calling edma_free_channel(%d)...\n",
                chan + i);

            edma_clean_channel(chan + i);
            edma_free_channel(chan + i);
        }
    }

    /* Only controller 0's bookkeeping is tracked by this module. */
    if (EDMA_CTLR(chan) == 0) {
        INIT_LIST_HEAD(&channels[localChan].users);
        channels[localChan].nParam = 0;
        channels[localChan].isParam = 0;
    }

#endif  /* defined(LSP_210) */
}