Exemplo n.º 1
0
/*
 * Allocate the RX and TX EDMA channels used by this SPI chip select.
 * Returns 0 on success, -EAGAIN if either channel cannot be obtained;
 * on TX failure the already-acquired RX channel is released again.
 */
static int davinci_spi_request_dma(struct spi_device *spi)
{
	struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
	struct davinci_spi_dma *dma = &dspi->dma_channels[spi->chip_select];
	int tcc;
	int err;

	err = davinci_request_dma(dma->dma_rx_sync_dev, "MibSPI RX",
				  davinci_spi_dma_rx_callback, spi,
				  &dma->dma_rx_channel, &tcc, dma->eventq);
	if (err) {
		pr_err("Unable to request DMA channel for MibSPI RX\n");
		return -EAGAIN;
	}

	err = davinci_request_dma(dma->dma_tx_sync_dev, "MibSPI TX",
				  davinci_spi_dma_tx_callback, spi,
				  &dma->dma_tx_channel, &tcc, dma->eventq);
	if (err) {
		/* Undo the RX allocation so a later retry starts clean. */
		davinci_free_dma(dma->dma_rx_channel);
		dma->dma_rx_channel = -1;
		pr_err("Unable to request DMA channel for MibSPI TX\n");
		return -EAGAIN;
	}

	return 0;
}
/*
 * Acquire the master and slave EDMA channels backing this PCM substream.
 * Returns 0 on success, -ENODEV when the DAI supplies no DMA parameters,
 * or the davinci_request_dma() error; the master channel is released if
 * the slave request fails.
 */
static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
{
	struct davinci_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
	int tcc = TCC_ANY;
	int ret;

	if (dma_data == NULL)
		return -ENODEV;

	prtd->params = dma_data;

	/* Master channel, triggered by the DAI's sync event. */
	ret = davinci_request_dma(prtd->params->channel, prtd->params->name,
				  davinci_pcm_dma_irq, substream,
				  &prtd->master_lch, &tcc, EVENTQ_0);
	if (ret != 0)
		return ret;

	/* Slave channel: a spare PaRAM slot used only as a reload link. */
	ret = davinci_request_dma(PARAM_ANY, "Link",
				  NULL, NULL, &prtd->slave_lch, &tcc, EVENTQ_0);
	if (ret != 0) {
		davinci_free_dma(prtd->master_lch);
		return ret;
	}

	/* Link the slave channel onto itself (loopback). */
	davinci_dma_link_lch(prtd->slave_lch, prtd->slave_lch);

	return 0;
}
Exemplo n.º 3
0
/*
 * Acquire the EDMA channels needed by the MMC host: a master write
 * channel, a master read channel, and one linked slave read channel.
 * Returns 0 on success or the davinci_request_dma() error code, with
 * previously acquired channels released on failure.
 */
static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	enum dma_event_q queue_no = EVENTQ_0;
	int edma_chan_num, r, sync_dev;
	int tcc = 0;

	/* Master DMA write channel, synchronised to the TX event. */
	r = davinci_request_dma(host->dma_tx_event, "MMC_WRITE",
				mmc_davinci_dma_cb, host,
				&edma_chan_num, &tcc, queue_no);
	if (r) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		return r;
	}

	/* Master DMA read channel, synchronised to the RX event. */
	r = davinci_request_dma(host->dma_rx_event, "MMC_READ",
				mmc_davinci_dma_cb, host,
				&edma_chan_num, &tcc, queue_no);
	if (r) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		goto free_master_write;
	}

	host->edma_ch_details.cnt_chanel = 0;

	/*
	 * Data writes currently use single block mode, so no slave write
	 * channel is needed yet.  Create one slave read channel (assuming
	 * at most 2 segments are handled).
	 */
	sync_dev = host->dma_rx_event;
	r = davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "LINK",
				NULL, NULL, &edma_chan_num,
				&sync_dev, queue_no);
	if (r) {
		dev_warn(host->mmc->dev,
			 "MMC: davinci_request_dma() failed with %d\n", r);
		goto free_master_read;
	}

	host->edma_ch_details.cnt_chanel++;
	host->edma_ch_details.chanel_num[0] = edma_chan_num;

	return 0;

free_master_read:
	/* NOTE(review): freeing by sync-event number assumes the davinci
	 * EDMA maps event-synced requests to channel == event — confirm. */
	davinci_free_dma(host->dma_rx_event);
free_master_write:
	davinci_free_dma(host->dma_tx_event);

	return r;
}
Exemplo n.º 4
0
/*
 * Create a demux DMA object: allocate the object and its physical
 * address lists, request one EDMA master channel plus
 * DRV_DMA_MAX_DEMUX_PARAM-1 linked PaRAM channels, and pre-program the
 * PaRAM template for each link (interrupt on the last, chaining on the
 * intermediates).
 *
 * Returns 0 on success (prm->chId receives the master channel number),
 * -1 on memory allocation failure, -EFAULT if a DMA channel cannot be
 * obtained.  All partially acquired resources are released on failure.
 *
 * Fix: the four near-identical error-unwind blocks are consolidated
 * into a single goto-based cleanup path, and the redundant NULL checks
 * before kfree() are dropped (kfree(NULL) is a no-op).
 */
int DMA_demuxCreate(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;
    DMA_DemuxObj *pDemuxObj;
    int tcc, i;
    int ret;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_demuxCreate() ... \n");
#endif

    pObj = kmalloc(sizeof(*pObj), GFP_KERNEL);
    if (pObj == NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        return -1;
    }

    pObj->dmaMode = prm->mode;
    pObj->copyFillObj = NULL;

    pObj->demuxObj = kmalloc(sizeof(*pObj->demuxObj), GFP_KERNEL);
    if (pObj->demuxObj == NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        kfree(pObj);
        return -1;
    }

    memset(pObj->demuxObj, 0, sizeof(DMA_DemuxObj));
    pDemuxObj = pObj->demuxObj;

    pDemuxObj->srcPhysAddrList = kmalloc( sizeof(unsigned long)*prm->maxTransfers, GFP_KERNEL);
    pDemuxObj->dstPhysAddrList = kmalloc( sizeof(unsigned long)*prm->maxTransfers, GFP_KERNEL);

    if (pDemuxObj->srcPhysAddrList == NULL || pDemuxObj->dstPhysAddrList == NULL)
    {
        FUNCERR( "Unable to alloc memory\n" );
        ret = -1;
        goto err_free_lists;
    }

    pDemuxObj->maxLines = prm->maxTransfers;

    memset(pDemuxObj->srcPhysAddrList, 0, sizeof(unsigned long)*prm->maxTransfers);
    memset(pDemuxObj->dstPhysAddrList, 0, sizeof(unsigned long)*prm->maxTransfers);

    init_completion(&pDemuxObj->dma_complete);

    tcc = EDMA_TCC_ANY;
    pDemuxObj->channelDemux[0] = 14;
    if (davinci_request_dma(EDMA_DMA_CHANNEL_ANY, "Memory DMA", DMA_demuxCallback,
                            pDemuxObj, &pDemuxObj->channelDemux[0], &tcc, DMA_DEMUX_EVENT_QUEUE))
    {
        FUNCERR( "Unable to request DMA (channel=%d).\n", pDemuxObj->channelDemux[0] );
        ret = -EFAULT;
        goto err_free_lists;
    }

    for (i = 1; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
    {
        pDemuxObj->channelDemux[i] = 15;
        if (davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "Memory DMA", DMA_demuxCallback,
                                pDemuxObj, &pDemuxObj->channelDemux[i], &tcc, DMA_DEMUX_EVENT_QUEUE))
        {
            FUNCERR( "Unable to request DMA (channel=%d).\n", pDemuxObj->channelDemux[i] );
            ret = -EFAULT;
            goto err_free_channels;
        }
    }

    for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
    {
        /* no link */
        pDemuxObj->dmaParamAddr[i] = davinci_get_param_addr(pDemuxObj->channelDemux[i]);

        pDemuxObj->dmaParamDemux[i].link_bcntrld = DMA_PARAM_NO_LINK;

        /* no support for 3 dimension transfers */
        pDemuxObj->dmaParamDemux[i].src_dst_cidx = 0;
        pDemuxObj->dmaParamDemux[i].ccnt = 1;
        pDemuxObj->dmaParamDemux[i].opt = DMA_PARAM_OPT_SYNCDIM_AB
                                          | ((pDemuxObj->channelDemux[0] & 0x3f)<<12);
        pDemuxObj->dmaParamDemux[i].src_dst_bidx = 0;

        if (i == (DRV_DMA_MAX_DEMUX_PARAM-1))
        {
            pDemuxObj->dmaParamDemux[i].opt |= TCINTEN; /* interrupt on the last linked DMA */
        }
        else
        {
            pDemuxObj->dmaParamDemux[i].opt |= TCCHEN;  /* chain intermediate linked DMAs */
        }

        davinci_set_dma_params(pDemuxObj->channelDemux[i], &pDemuxObj->dmaParamDemux[i]);
    }

    gDMA_dev.pObj[pDemuxObj->channelDemux[0]] = pObj;
    prm->chId = pDemuxObj->channelDemux[0];

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_demuxCreate() success ... chId is %d\n", prm->chId);
#endif

    return 0;

err_free_channels:
    /* Free channels [0..i-1]; channelDemux[0] is included on purpose. */
    while (i)
    {
        i--;
        davinci_free_dma(pDemuxObj->channelDemux[i]);
    }
err_free_lists:
    kfree(pDemuxObj->srcPhysAddrList);   /* kfree(NULL) is safe */
    kfree(pDemuxObj->dstPhysAddrList);
    kfree(pDemuxObj);
    kfree(pObj);
    return ret;
}
Exemplo n.º 5
0
File: ipera_k.c  Project: gqiao/work
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args)
{
	int                    ret;
    unsigned int __user   *argp = (unsigned int __user *) args;
	unsigned long          physp;
	unsigned long          virtp;
	unsigned int           type;
	
	
	//__D("ioctl %d received. \n", cmd);
	
    switch (cmd) {
	case IPERA_INIT_PMU_STATICS:
		init_pmu_asm();
		__D("IPERA_INIT_PMU_STATICS : returning\n");
		break;

	case IPERA_START_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));

		//set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0);
		//set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1);
		//set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2);
		//set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3);
		//start_pmu_asm();
		//__D("IPERA_START_PMU_CACHES_STATICS : returning\n");
		break;

	case IPERA_END_PMU_CACHES_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//stop_pmu_asm();
		//pmu_statics[type].pmu_count		+= 1; 
		//pmu_statics[type].pmu_cycles    += get_clock_counter_asm();
		//pmu_statics[type].pmu_instr_exec    += get_pmnx_counter_asm(EVENT_COUNTER0);
		//pmu_statics[type].pmu_icache_miss    += get_pmnx_counter_asm(EVENT_COUNTER1);
		//pmu_statics[type].pmu_dcache_access    += get_pmnx_counter_asm(EVENT_COUNTER2);
		//pmu_statics[type].pmu_dcache_miss    += get_pmnx_counter_asm(EVENT_COUNTER3);
		//__D("IPERA_END_PMU_CACHES_STATICS : returning\n");
		break;
		
	case IPERA_GET_STATICS:
		//memset(&type, 0, sizeof(type));
		//ret = copy_from_user(&type, argp, sizeof(type));
		
		//ret_get_cycles = pmu_statics[type].pmu_cycles;
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count);
		//__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles);
		//__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec);
		//__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss);
		//__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_access);
		//__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss);
		//ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type]));
		break;

	case IPERA_GET_PHYS:
		get_user(virtp, argp);
		physp = ipera_get_phys(virtp);
		put_user(physp, argp);
		//__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

		
#if 0
	case IPERA_GET_CYCLES:
		__D("IPERA_GET_CYCLES : received.\n");
		cur_cycles = get_cycles();
		copy_to_user(argp, &cur_cycles, sizeof(cur_cycles));
		__D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles);
		break;

	case IPERA_GET_PHYS:
		__D("IPERA_GET_PHYS : received.\n");
		get_user(virtp, argp);
		physp = get_phys(virtp);
		put_user(physp, argp);
		__D("IPERA_GET_PHYS : returning %#lx\n", physp);
		break;

    case IPERA_DMACPY:
        __D("IPERA_DMACPY : received.\n");
        if (copy_from_user(&dma, argp, sizeof(dma))) {
            return -EFAULT;
        }
        err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1);
        if (err < 0) {
            __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err);
            return err;
        } else if(master_ch != 25)  __E("get channel %d \n", master_ch);
        davinci_stop_dma(master_ch);

        init_completion(&edmacompletion);
        davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth);
        davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth);
        davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx);
        davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx);
        davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode);
        davinci_get_dma_params(master_ch, &paramentry);
        davinci_set_dma_params(master_ch, &paramentry);
        davinci_start_dma(master_ch);
        wait_for_completion(&edmacompletion);
        //printk("Dma completed... \n");
        davinci_stop_dma(master_ch);
        davinci_free_dma(master_ch);
        break;
#endif

    default:
        __E("Unknown ioctl received = %d.\n", cmd);
        return -EINVAL;
    }
    return 0;
}
Exemplo n.º 6
0
/*
 * Create a copy/fill DMA object: allocate the object and its transfer
 * parameter array, then request one EDMA channel for memory transfers.
 *
 * Returns 0 on success (prm->chId receives the channel number), -1 on
 * memory allocation failure, -EFAULT on DMA request failure or when the
 * returned channel ID would overrun gDMA_dev.pObj[].
 *
 * Fix: the duplicated error-unwind blocks are consolidated into a
 * single goto-based cleanup path; behavior and return codes unchanged.
 */
int DMA_copyFillCreate(DMA_OpenClosePrm *prm)
{
    DMA_Obj *pObj;
    DMA_CopyFillObj *pCopyFill;
    int tcc;
    int ret;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_copyFillCreate() ... \n");
#endif

    pObj = kmalloc(sizeof(*pObj), GFP_KERNEL);
    if (pObj == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        return -1;
    }

    pObj->dmaMode = prm->mode;
    pObj->demuxObj = NULL;

    pObj->copyFillObj = kmalloc(sizeof(*pObj->copyFillObj), GFP_KERNEL);
    if (pObj->copyFillObj == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        kfree(pObj);
        return -1;
    }

    pCopyFill = pObj->copyFillObj;
    memset(pCopyFill, 0, sizeof(DMA_CopyFillObj));

    pCopyFill->transferPrm = kmalloc(sizeof(DRV_CopyFillPrm)*prm->maxTransfers, GFP_KERNEL);
    if (pCopyFill->transferPrm == NULL) {
        FUNCERR( "Unable to alloc memory\n" );
        ret = -1;
        goto err_free_obj;
    }

    pCopyFill->maxCount = prm->maxTransfers;
    memset(pCopyFill->transferPrm, 0, sizeof(DRV_CopyFillPrm)*prm->maxTransfers);

    init_completion(&pCopyFill->dma_complete);

    tcc = EDMA_TCC_ANY;
    pCopyFill->chId = 0;
    if (davinci_request_dma(EDMA_DMA_CHANNEL_ANY, "Memory DMA", DMA_copyFillCallback,
                            pCopyFill, &pCopyFill->chId, &tcc, DMA_NORMAL_EVENT_QUEUE))
    {
        FUNCERR( "Unable to request DMA (channel=%d).\n", pCopyFill->chId );
        ret = -EFAULT;
        goto err_free_prm;
    }

    if (pCopyFill->chId >= DMA_DEV_MAX_CH) {
        /* Channel number would overrun gDMA_dev.pObj[]; give it back. */
        FUNCERR( "Invalid channel ID (channel=%d).\n", pCopyFill->chId );
        davinci_free_dma( pCopyFill->chId );
        ret = -EFAULT;
        goto err_free_prm;
    }

    gDMA_dev.pObj[pCopyFill->chId] = pObj;

    prm->chId = pCopyFill->chId;

#ifdef DMA_DEBUG
    printk(KERN_INFO "DMA: DMA_copyFillCreate() success ... chId is %d\n", prm->chId);
#endif

    return 0;

err_free_prm:
    kfree(pCopyFill->transferPrm);
err_free_obj:
    kfree(pObj->copyFillObj);
    kfree(pObj);
    return ret;
}
Exemplo n.º 7
0
/*
 * dma_ioctl() - user-space entry point for EDMA channel management.
 *
 * Handles channel/PaRAM request (EDMA_IOCREQUESTDMA), per-channel user
 * registration (EDMA_IOCREGUSER), release (EDMA_IOCRELEASEDMA), and
 * version/base-address queries.  Two LSP generations are supported via
 * the LSP_210 conditionals: the legacy davinci_* API and the newer
 * edma_* API.
 *
 * NOTE(review): the #endif directly below the parameter list closes a
 * signature-selection #if that lies outside this excerpt — confirm
 * against the full file.
 */
static int dma_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long args)
#endif
{
    unsigned int __user *argp = (unsigned int __user *) args;
    int isParam = 0;
#if defined(LSP_210)
    int isQdma = 0;
#endif  /* defined(LSP_210) */
    int result;
    int dev_id;
    int channel;
    struct EDMA_requestDmaParams dma_req;
    struct EDMA_releaseDmaParams dma_rel;
    struct list_head *registeredlistp;
    struct list_head *u;
    struct list_head *unext;
    struct registered_user *user;

    /* Wrong magic is only logged, not rejected — the command is still
     * decoded below. */
    if (_IOC_TYPE(cmd) != _IOC_TYPE(EDMA_IOCMAGIC)) {
        __E("dma_ioctl(): bad command type 0x%x (should be 0x%x)\n",
            _IOC_TYPE(cmd), _IOC_TYPE(EDMA_IOCMAGIC));
    }

    switch (cmd & EDMA_IOCCMDMASK) {

      case EDMA_IOCREQUESTDMA:
        __D("dma_ioctl(): EDMA_IOCREQUESTDMA called\n");

        if (copy_from_user(&dma_req, argp, sizeof(dma_req))) {
            return -EFAULT;
        }

        __D("dev_id: %d, eventq_no: %d, tcc: %d, param: %d, nParam: %d\n",
            dma_req.dev_id, dma_req.eventq_no, dma_req.tcc,
            dma_req.param, dma_req.nParam);

        dev_id = dma_req.dev_id;

        /*
         * In order to not be dependent on the LSP #defines, we need to
         * translate our EDMA interface's #defines to the LSP ones.
         */
        if (dev_id >= EDMA_QDMA0 && dev_id <= EDMA_QDMA7) {
#if defined(LSP_210)
            dev_id = EDMA_QDMA_CHANNEL_0 + (dev_id - EDMA_QDMA0);
            isQdma = 1;
#else   /* defined(LSP_210) */
            __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                __FUNCTION__);

            return -EINVAL;
#endif  /* defined(LSP_210) */
        }
        else {
            switch (dev_id) {
              case EDMA_PARAMANY:
                dev_id = EDMA_CONT_PARAMS_ANY;
                isParam = 1;
                break;

              case EDMA_PARAMFIXEDEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_EXACT;
                isParam = 1;
                break;

              case EDMA_PARAMFIXEDNOTEXACT:
                dev_id = EDMA_CONT_PARAMS_FIXED_NOT_EXACT;
                isParam = 1;
                break;

              case EDMA_EDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_DMA_CHANNEL_ANY;
#else   /* defined(LSP_210) */
                dev_id = EDMA_CHANNEL_ANY;
#endif  /* defined(LSP_210) */
                break;

              case EDMA_QDMAANY:
#if defined(LSP_210)
                dev_id = EDMA_QDMA_CHANNEL_ANY;
                isQdma = 1;
                break;
#else   /* defined(LSP_210) */
                __E("%s: REQUESTDMA failed: QDMA is not supported\n",
                    __FUNCTION__);
                return -EINVAL;
#endif  /* defined(LSP_210) */

              default:
                /* do nothing, dev_id is an EDMA channel # */
                break;
            }
        }

#if defined(LSP_210)
        /* Translate interface TCC sentinels to the LSP's values. */
        switch (dma_req.tcc) {
            case EDMA_TCCANY:
                dma_req.tcc = EDMA_TCC_ANY;
                break;

            case EDMA_TCCSYMM:
                dma_req.tcc = EDMA_TCC_SYMM;
                break;

            default:
                /* do nothing, tcc is an EDMA TCC # */
                break;
        }
#endif  /* defined(LSP_210) */

        /* PaRAM-slot allocation path. */
        if (isParam) {
#if defined(LSP_210)
            __D("calling davinci_request_params(%d, %d, %d)...\n", dev_id,
                dma_req.nParam, dma_req.param);
            result = davinci_request_params(dev_id, dma_req.nParam,
                                            dma_req.param);

            __D("...returned %d\n", result);

            if (result >= 0) {
                dma_req.channel = result;
                dma_req.param = result;
                /* transform to 0-based success for below common code */
                result = 0;
            }

#else   /* defined(LSP_210) */

            __D("calling edma_alloc_cont_slots(0, %d, %d, %d)...\n",
                dev_id, dma_req.param, dma_req.nParam);
            result = edma_alloc_cont_slots(0, dev_id, dma_req.param, dma_req.nParam);

            __D("...returned %d\n", result);

            if (result >= 0) {
                /* Only controller 0 is supported; reject anything else. */
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result));

                    release_channel(result);
                }
                else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.param = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif  /* defined(LSP_210) */
        }
        else {
            /* DMA/QDMA channel allocation path. */

#if defined(LSP_210)
            if (dma_req.tcc == -1) {
                __E("%s: REQUESTDMA failed: TCC -1 supported only for PaRAM allocations\n", __FUNCTION__);

                return -EINVAL;
            }

            result = davinci_request_dma(dev_id, "linuxutils DMA",
                                         NULL, (void *)NULL,
                                         &dma_req.channel, &dma_req.tcc,
                                         dma_req.eventq_no);

#else   /* defined(LSP_210) */

            result = edma_alloc_channel(dev_id, NULL, NULL, dma_req.eventq_no);

            if (result >= 0) {
                /* Only controller 0 is supported; reject anything else. */
                if (EDMA_CTLR(result) != 0) {
                    __E("%s: REQUESTDMA failed to obtain a channel from controller 0 (obtained channel %d from controller %d, will now free it)\n", __FUNCTION__, EDMA_CHAN_SLOT(result), EDMA_CTLR(result));

                    release_channel(result);
                }
                else {
                    dma_req.channel = EDMA_CHAN_SLOT(result);
                    dma_req.tcc = dma_req.channel;
                    /* transform to 0-based success for below common code */
                    result = 0;
                }
            }
#endif  /* defined(LSP_210) */
        }

        /* Common success/failure handling for both allocation flavors. */
        if (result) {
            __E("%s: REQUESTDMA failed: %d\n", __FUNCTION__, result);

            return -ENOMEM;
        }
        else {
            /* For EDMA_PARAMANY we've already assigned dma_req.param above */
            if (!isParam) {
#if defined(LSP_210)
                dma_req.param = davinci_get_param(dma_req.channel);
#else   /* defined(LSP_210) */
                dma_req.param = dma_req.channel; /* one-to-one mapping */
#endif  /* defined(LSP_210) */
            }

#if defined(LSP_210)
            /* Translate LSP's QDMA #s to linuxutil's QDMA #s */
            if (isQdma) {
                dma_req.channel = (dma_req.channel - EDMA_QDMA_CHANNEL_0) +
                                  EDMA_QDMA0;
            }
#endif  /* defined(LSP_210) */

            __D("  dma channel %d allocated\n", dma_req.channel);

            __D("copying to user\n");

            if (copy_to_user(argp, &dma_req, sizeof(dma_req))) {
                return -EFAULT;
            }
        }

        /* Record the calling file as a user of this channel. */
        user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
        if (!user) {
            __E("%s: REQUESTDMA failed to kmalloc registered_user struct",
                __FUNCTION__);

            release_channel(dma_req.channel);

            return -ENOMEM;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            kfree(user);

            release_channel(dma_req.channel);

            return -ERESTARTSYS;
        }

        user->filp = filp;
        list_add(&user->element, &channels[dma_req.channel].users);

        if (isParam) {
            channels[dma_req.channel].nParam = dma_req.nParam;
            channels[dma_req.channel].isParam = 1;
        }
        else {
            channels[dma_req.channel].nParam = 1;
            channels[dma_req.channel].isParam = 0;
        }

        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCREGUSER:
        /* Register an additional user (filp) on an already-allocated
         * channel; each registration needs a matching RELEASEDMA. */
        __D("dma_ioctl(): EDMA_IOCREGUSER called\n");

        if (get_user(channel, argp)) {
            return -EFAULT;
        }

        __D("  channel %d\n", channel);

        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);

            return -ERANGE;
        }

        /* Non-empty user list means the channel is currently allocated. */
        registeredlistp = &channels[channel].users;
        if (registeredlistp != registeredlistp->next) {
            user = kmalloc(sizeof(struct registered_user), GFP_KERNEL);
            if (!user) {
                __E("%s: REGUSER failed to kmalloc registered_user struct",
                    __FUNCTION__);
                return -ENOMEM;
            }

            if (mutex_lock_interruptible(&edma_mutex)) {
                kfree(user);

                return -ERESTARTSYS;
            }

            user->filp = filp;
            list_add(&user->element, &channels[channel].users);

            mutex_unlock(&edma_mutex);
        }
        else {
            __E("%s: REGUSER failed: channel %d not currently allocated\n",
                __FUNCTION__, channel);

            return -EFAULT;
        }

        break;

      case EDMA_IOCRELEASEDMA:
        /* Drop one registration for this filp; free the channel when
         * the last registered user releases it. */
        __D("dma_ioctl(): EDMA_IOCRELEASEDMA called\n");

        if (copy_from_user(&dma_rel, argp, sizeof(dma_rel))) {
            return -EFAULT;
        }

        __D("  channel %d\n", dma_rel.channel);

        channel = dma_rel.channel;
        if (channel >= NCHAN) {
            __E("%s: REGUSER failed: channel %d out of range\n",
                __FUNCTION__, channel);

            return -ERANGE;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }

        registeredlistp = &channels[channel].users;
        u = registeredlistp->next;
        while (u != registeredlistp) {
            unext = u->next;

            user = list_entry(u, struct registered_user, element);
            if (user->filp == filp) {
                __D("  removing registered user from channel %d list\n",
                    channel);

                list_del(u);
                kfree(user);

                /*
                 * Only remove once (we allow multiple "registers", and each
                 * one requires a corresponding "release").
                 */
                break;
            }

            u = unext;
        }

        mutex_unlock(&edma_mutex);

        /* Loop ran off the end: this filp was never registered. */
        if (u == registeredlistp) {
            __E("%s: RELEASEDMA failed: file %p not registered for channel %d\n",
                __FUNCTION__, filp, channel);

            return -EFAULT;
        }

        if (mutex_lock_interruptible(&edma_mutex)) {
            return -ERESTARTSYS;
        }

        if (registeredlistp->next == registeredlistp) {
            __D("  no more registered users, freeing channel %d\n", channel);

            release_channel(channel);
        }

        mutex_unlock(&edma_mutex);

        break;

      case EDMA_IOCGETVERSION:
        __D("GETVERSION ioctl received, returning %#x.\n", version);

        if (put_user(version, argp)) {
            return -EFAULT;
        }

        break;

      case EDMA_IOCGETBASEPHYSADDR:
        __D("GETBASEPHYSADDR ioctl received, returning %#x.\n", BASEADDR);

        if (put_user(BASEADDR, argp)) {
            __E("%s: GETBASEPHYSADDR: put_user() failed, returning -EFAULT!\n",
                __FUNCTION__);

            return -EFAULT;
        }

        break;
    }

    return 0;
}