/*
 * davinci_reinit_chan - reset both MMC DMA event channels.
 *
 * Halts any in-flight transfer and clears channel state for the TX
 * event first, then the RX event.
 */
static void davinci_reinit_chan(struct mmc_davinci_host *host)
{
	int events[2];
	int i;

	events[0] = host->dma_tx_event;
	events[1] = host->dma_rx_event;

	for (i = 0; i < 2; i++) {
		davinci_stop_dma(events[i]);
		davinci_clean_channel(events[i]);
	}
}
static int davinci_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct davinci_runtime_data *prtd = substream->runtime->private_data; int ret = 0; spin_lock(&prtd->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: davinci_start_dma(prtd->master_lch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: davinci_stop_dma(prtd->master_lch); break; default: ret = -EINVAL; break; } spin_unlock(&prtd->lock); return ret; }
/*
 * DMA_copyFillDelete - tear down a copy/fill DMA channel object.
 *
 * Stops and frees the EDMA channel owned by the copy/fill object, releases
 * the transfer-parameter buffer and both objects, and clears the device
 * slot so the channel id can be reused.
 *
 * Returns 0 on success, -1 for an out-of-range or unopened channel id.
 */
int DMA_copyFillDelete(DMA_OpenClosePrm *prm)
{
	DMA_Obj *pObj;

	if (prm->chId >= DMA_DEV_MAX_CH) {
		/* Log here too, for consistency with DMA_demuxDelete */
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	pObj = gDMA_dev.pObj[prm->chId];
	if (pObj == NULL) {
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	if (pObj->copyFillObj == NULL) {
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	davinci_stop_dma(pObj->copyFillObj->chId);
	davinci_free_dma(pObj->copyFillObj->chId);

	/* kfree(NULL) is a no-op, so no NULL guard is needed */
	kfree(pObj->copyFillObj->transferPrm);
	kfree(pObj->copyFillObj);
	kfree(pObj);

	gDMA_dev.pObj[prm->chId] = NULL;

	return 0;
}
/*
 * DMA_copyFillCallback - EDMA completion callback for copy/fill transfers.
 *
 * Stops the channel and wakes the waiter blocked on dma_complete.
 * @data carries the owning DMA_CopyFillObj; @ch_status is not inspected.
 */
static void DMA_copyFillCallback(int channel, u16 ch_status, void *data)
{
	DMA_CopyFillObj *obj = data;

	davinci_stop_dma(obj->chId);
	complete(&obj->dma_complete);
}
/*
 * davinci_abort_dma - abort the in-flight MMC DMA transfer.
 *
 * Picks the sync event matching the current transfer direction
 * (tx_event for reads, rx_event otherwise — as the original code does),
 * then stops and cleans that channel.
 */
static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	int sync_dev = (host->data_dir == DAVINCI_MMC_DATADIR_READ)
			? host->dma_tx_event
			: host->dma_rx_event;

	davinci_stop_dma(sync_dev);
	davinci_clean_channel(sync_dev);
}
/*
 * davinci_spi_dma_tx_callback - DMA completion callback for the SPI TX channel.
 *
 * On normal completion the TX channel is stopped; on any other status it is
 * cleaned. In both cases waiters on dma_tx_completion are woken and the SPI
 * controller's DMA TX request is disabled.
 */
static void davinci_spi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
	struct davinci_spi_dma *dma = &dspi->dma_channels[spi->chip_select];

	if (ch_status == DMA_COMPLETE)
		davinci_stop_dma(dma->dma_tx_channel);
	else
		davinci_clean_channel(dma->dma_tx_channel);

	complete(&dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	davinci_spi_set_dma_req(spi, 0);
}
/*
 * DMA_demuxDelete - tear down a demux DMA channel set.
 *
 * Stops the first demux channel, frees every channel in the set, releases
 * the source/destination address lists and both objects, and clears the
 * device slot so the channel id can be reused.
 *
 * Returns 0 on success, -1 for an out-of-range or unopened channel id.
 */
int DMA_demuxDelete(DMA_OpenClosePrm *prm)
{
	DMA_Obj *pObj;
	int i;

	if (prm->chId >= DMA_DEV_MAX_CH) {
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	pObj = gDMA_dev.pObj[prm->chId];
	if (pObj == NULL) {
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	if (pObj->demuxObj == NULL) {
		FUNCERR( "Illegal parameter (chId = %d)\n", prm->chId );
		return -1;
	}

	/* Only channel [0] is stopped; presumably the remaining demux channels
	 * are chained to it — NOTE(review): confirm against the setup path. */
	davinci_stop_dma(pObj->demuxObj->channelDemux[0]);

	for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
		davinci_free_dma(pObj->demuxObj->channelDemux[i]);

	/* kfree(NULL) is a no-op, so no NULL guards are needed */
	kfree(pObj->demuxObj->srcPhysAddrList);
	kfree(pObj->demuxObj->dstPhysAddrList);
	kfree(pObj->demuxObj);
	kfree(pObj);

	gDMA_dev.pObj[prm->chId] = NULL;

	/*
	 * BUG FIX: the success path previously returned -1, making a
	 * successful delete indistinguishable from the error paths and
	 * inconsistent with DMA_copyFillDelete(), which returns 0.
	 */
	return 0;
}
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args) { int ret; unsigned int __user *argp = (unsigned int __user *) args; unsigned long physp; unsigned long virtp; unsigned int type; //__D("ioctl %d received. \n", cmd); switch (cmd) { case IPERA_INIT_PMU_STATICS: init_pmu_asm(); __D("IPERA_INIT_PMU_STATICS : returning\n"); break; case IPERA_START_PMU_CACHES_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0); //set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1); //set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2); //set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3); //start_pmu_asm(); //__D("IPERA_START_PMU_CACHES_STATICS : returning\n"); break; case IPERA_END_PMU_CACHES_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //stop_pmu_asm(); //pmu_statics[type].pmu_count += 1; //pmu_statics[type].pmu_cycles += get_clock_counter_asm(); //pmu_statics[type].pmu_instr_exec += get_pmnx_counter_asm(EVENT_COUNTER0); //pmu_statics[type].pmu_icache_miss += get_pmnx_counter_asm(EVENT_COUNTER1); //pmu_statics[type].pmu_dcache_access += get_pmnx_counter_asm(EVENT_COUNTER2); //pmu_statics[type].pmu_dcache_miss += get_pmnx_counter_asm(EVENT_COUNTER3); //__D("IPERA_END_PMU_CACHES_STATICS : returning\n"); break; case IPERA_GET_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //ret_get_cycles = pmu_statics[type].pmu_cycles; //__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count); //__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles); //__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec); //__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss); //__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned 
long)pmu_statics[type].pmu_dcache_access); //__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss); //ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type])); break; case IPERA_GET_PHYS: get_user(virtp, argp); physp = ipera_get_phys(virtp); put_user(physp, argp); //__D("IPERA_GET_PHYS : returning %#lx\n", physp); break; #if 0 case IPERA_GET_CYCLES: __D("IPERA_GET_CYCLES : received.\n"); cur_cycles = get_cycles(); copy_to_user(argp, &cur_cycles, sizeof(cur_cycles)); __D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles); break; case IPERA_GET_PHYS: __D("IPERA_GET_PHYS : received.\n"); get_user(virtp, argp); physp = get_phys(virtp); put_user(physp, argp); __D("IPERA_GET_PHYS : returning %#lx\n", physp); break; case IPERA_DMACPY: __D("IPERA_DMACPY : received.\n"); if (copy_from_user(&dma, argp, sizeof(dma))) { return -EFAULT; } err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1); if (err < 0) { __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err); return err; } else if(master_ch != 25) __E("get channel %d \n", master_ch); davinci_stop_dma(master_ch); init_completion(&edmacompletion); davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth); davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth); davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx); davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx); davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode); davinci_get_dma_params(master_ch, ¶mentry); davinci_set_dma_params(master_ch, ¶mentry); davinci_start_dma(master_ch); wait_for_completion(&edmacompletion); //printk("Dma completed... 
\n"); davinci_stop_dma(master_ch); davinci_free_dma(master_ch); break; #endif default: __E("Unknown ioctl received = %d.\n", cmd); return -EINVAL; } return 0; }