static int davinci_pcm_prepare(struct snd_pcm_substream *substream) { struct davinci_runtime_data *prtd = substream->runtime->private_data; struct paramentry_descriptor temp; prtd->period = 0; davinci_pcm_enqueue_dma(substream); /* Get slave channel dma params for master channel startup */ davinci_get_dma_params(prtd->slave_lch, &temp); davinci_set_dma_params(prtd->master_lch, &temp); return 0; }
void DMA_dumpPARAM(const char *message, int channel) { struct paramentry_descriptor tempParamentry; davinci_get_dma_params(channel, &tempParamentry); printk( KERN_INFO "\n%s - PaRAM(%d)\n", message, channel); printk( KERN_INFO" OPT - 0x%.8X\n", tempParamentry.opt); printk( KERN_INFO" SRC - 0x%.8X\n", tempParamentry.src); printk( KERN_INFO" BCNT - 0x%.4X, ACNT - 0x%.4X\n", tempParamentry.a_b_cnt >> 16, tempParamentry.a_b_cnt & 0xffff); printk( KERN_INFO" DST - 0x%.8X\n", tempParamentry.dst); printk( KERN_INFO" DSTBIDX - 0x%.4X, SRCBIDX - 0x%.4X\n", tempParamentry.src_dst_bidx >> 16, tempParamentry.src_dst_bidx & 0xffff); printk( KERN_INFO" BCNTRLD - 0x%.4X, LINK - 0x%.4X\n", tempParamentry.link_bcntrld >> 16, tempParamentry.link_bcntrld & 0xffff); printk( KERN_INFO" DSTCIDX - 0x%.4X, SRCCIDX - 0x%.4X\n", tempParamentry.src_dst_cidx >> 16, tempParamentry.src_dst_cidx & 0xffff); printk( KERN_INFO" CCNT - 0x%.4X\n", tempParamentry.ccnt); }/* dump_param_entry() */
/*
 * DMA_demuxRun() - validate user parameters, program the chained demux
 * PaRAM sets, and run one demux DMA transfer to completion.
 * @pUserPrm: user-space pointer to the demux request
 *
 * Returns 0 on success, -EFAULT when a user copy faults, and -1 for an
 * invalid channel id / unopened channel / too many lines (preserving the
 * original error convention for those paths).
 *
 * BUG FIX: copy_from_user() returns the number of bytes that could NOT
 * be copied — it is always >= 0 and never a negative errno — so the
 * original "if (status < 0)" tests could never fire: a faulted copy was
 * silently accepted, and on the address-list copies a *positive* leftover
 * byte count would even have been returned as the "error" code. All three
 * copies now test for != 0 and return -EFAULT.
 */
int DMA_demuxRun(DMA_DemuxPrm *pUserPrm)
{
	DMA_Obj *pObj;
	DMA_DemuxObj *pDemuxObj;
	DRV_DmaDemux *prm;
	DMA_DemuxPrm kernelPrm;
	int i;

	if (copy_from_user(&kernelPrm, pUserPrm, sizeof(kernelPrm)) != 0) {
		FUNCERR("copy_from_user()\n");
		return -EFAULT;
	}

	/* Range-check the channel id before indexing the global table. */
	if (kernelPrm.chId >= DMA_DEV_MAX_CH) {
		FUNCERR("Illegal parameter (chId = %d)\n", kernelPrm.chId);
		return -1;
	}

	pObj = gDMA_dev.pObj[kernelPrm.chId];
	if (pObj == NULL) {
		FUNCERR("Illegal parameter (chId = %d)\n", kernelPrm.chId);
		return -1;
	}

	pDemuxObj = pObj->demuxObj;
	if (pDemuxObj == NULL) {
		FUNCERR("Illegal parameter (chId = %d)\n", kernelPrm.chId);
		return -1;
	}

	/* Adopt the user-supplied parameters, then overwrite the list
	 * pointers with the kernel-side buffers — user pointers are never
	 * dereferenced directly. */
	prm = &pDemuxObj->demuxPrm;
	memcpy(prm, &kernelPrm.prm, sizeof(pDemuxObj->demuxPrm));
	prm->srcPhysAddrList = pDemuxObj->srcPhysAddrList;
	prm->dstPhysAddrList = pDemuxObj->dstPhysAddrList;

	/* Bound numLines before sizing the copies below. */
	if (prm->numLines > pDemuxObj->maxLines) {
		FUNCERR("Illegal parameter (chId = %d)\n", kernelPrm.chId);
		return -1;
	}

	if (copy_from_user(prm->srcPhysAddrList, kernelPrm.prm.srcPhysAddrList,
			   sizeof(unsigned long) * prm->numLines) != 0) {
		FUNCERR("copy_from_user() (chId = %d)\n", kernelPrm.chId);
		return -EFAULT;
	}

	if (copy_from_user(prm->dstPhysAddrList, kernelPrm.prm.dstPhysAddrList,
			   sizeof(unsigned long) * prm->numLines) != 0) {
		FUNCERR("copy_from_user() (chId = %d)\n", kernelPrm.chId);
		return -EFAULT;
	}

	/* Every demux PaRAM set: BCNT = 1 frame, ACNT = copyWidth bytes. */
	pDemuxObj->curLine = 0;
	for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
		pDemuxObj->dmaParamDemux[i].a_b_cnt =
			(uint32_t)(1 << 16) | prm->copyWidth;

	/* One PaRAM entry per line; entries beyond numLines keep their
	 * previous src/dst (original behavior preserved). */
	for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++) {
		if (pDemuxObj->curLine < prm->numLines) {
			pDemuxObj->dmaParamDemux[i].src = (uint32_t)
				pDemuxObj->srcPhysAddrList[pDemuxObj->curLine];
			pDemuxObj->dmaParamDemux[i].dst = (uint32_t)
				pDemuxObj->dstPhysAddrList[pDemuxObj->curLine];
		}
		pDemuxObj->curLine++;
	}

	for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
		davinci_set_dma_params(pDemuxObj->channelDemux[i],
				       &pDemuxObj->dmaParamDemux[i]);

	/* Chain channel i-1 -> i, then re-read the PaRAM so the cached
	 * copy keeps the LINK field written by davinci_dma_link_lch(). */
	for (i = 1; i < DRV_DMA_MAX_DEMUX_PARAM; i++) {
		davinci_dma_link_lch(pDemuxObj->channelDemux[i - 1],
				     pDemuxObj->channelDemux[i]);
		davinci_get_dma_params(pDemuxObj->channelDemux[i - 1],
				       &pDemuxObj->dmaParamDemux[i - 1]);
	}

#ifdef DMA_DEBUG
	for (i = 0; i < DRV_DMA_MAX_DEMUX_PARAM; i++)
		DMA_dumpPARAM("Demux", pDemuxObj->channelDemux[i]);
#endif

	/* Kick the first channel in the chain and wait for the completion
	 * signalled by the transfer-complete interrupt handler. */
	davinci_start_dma(pDemuxObj->channelDemux[0]);
	wait_for_completion_interruptible(&pDemuxObj->dma_complete);
	INIT_COMPLETION(pDemuxObj->dma_complete);

	return 0;
}
/*
 * mmc_davinci_send_dma_request() - program the EDMA channel(s) for one
 * MMC data request and start the transfer.
 * @host: MMC controller state (holds data direction, DMA events, etc.)
 * @req:  the MMC request being serviced
 *
 * For the first scatterlist segment, the main sync channel is programmed
 * as an AB-synchronized transfer (ACNT=4, BCNT=8 when the byte count is a
 * multiple of 32, i.e. 32 bytes per frame). Additional segments (sg_len >
 * 1) are handled by programming per-segment PaRAM entries on the channels
 * in host->edma_ch_details and linking them, with TCINTEN kept only on
 * the last link so a single completion interrupt fires.
 *
 * Returns 0 (cannot fail).
 */
static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
					struct mmc_request *req)
{
	int sync_dev;
	unsigned char i, j;
	unsigned short acnt, bcnt, ccnt;
	unsigned int src_port, dst_port, temp_ccnt;
	enum address_mode mode_src, mode_dst;
	enum fifo_width fifo_width_src, fifo_width_dst;
	unsigned short src_bidx, dst_bidx;
	unsigned short src_cidx, dst_cidx;
	unsigned short bcntrld;
	enum sync_dimension sync_mode;
	edmacc_paramentry_regs temp;
	int edma_chan_num;
	struct mmc_data *data = host->data;
	struct scatterlist *sg = &data->sg[0];
	unsigned int count;
	int num_frames, frame;

	frame = data->blksz;
	count = sg_dma_len(sg);

	/* Single-block request: never transfer more than one block even if
	 * the mapped segment is larger. */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	/* 32-byte-aligned counts move 32 bytes per frame (ACNT=4, BCNT=8);
	 * anything else is sent as one frame of ACNT=count bytes. */
	if (count % 32 == 0) {
		acnt = 4;
		bcnt = 8;
		num_frames = count / 32;
	} else {
		acnt = count;
		bcnt = 1;
		num_frames = 1;
	}

	/* Clamp the frame count to the hardware CCNT limit. */
	if (num_frames > MAX_C_CNT) {
		temp_ccnt = MAX_C_CNT;
		ccnt = temp_ccnt;
	} else {
		ccnt = num_frames;
		temp_ccnt = ccnt;
	}

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		/* AB-sync write: memory -> MMC DXR register (fixed port,
		 * so dst indexes are 0 and dst FIFO width is irrelevant). */
		sync_dev = host->dma_tx_event;
		src_port = (unsigned int)sg_dma_address(sg);
		mode_src = INCR;
		fifo_width_src = W8BIT;	/* It's not cared as modeDsr is INCR */
		src_bidx = acnt;
		src_cidx = acnt * bcnt;
		dst_port = host->phys_base + DAVINCI_MMC_REG_DXR;
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* It's not cared as modeDsr is INCR */
		dst_bidx = 0;
		dst_cidx = 0;
		bcntrld = 8;
		sync_mode = ABSYNC;
	} else {
		/* AB-sync read: MMC DRR register -> memory (mirror of the
		 * write case with src/dst swapped). */
		sync_dev = host->dma_rx_event;
		src_port = host->phys_base + DAVINCI_MMC_REG_DRR;
		mode_src = INCR;
		fifo_width_src = W8BIT;
		src_bidx = 0;
		src_cidx = 0;
		dst_port = (unsigned int)sg_dma_address(sg);
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* It's not cared as modeDsr is INCR */
		dst_bidx = acnt;
		dst_cidx = acnt * bcnt;
		bcntrld = 8;
		sync_mode = ABSYNC;
	}

	davinci_set_dma_src_params(sync_dev, src_port, mode_src,
				   fifo_width_src);
	davinci_set_dma_dest_params(sync_dev, dst_port, mode_dst,
				    fifo_width_dst);
	davinci_set_dma_src_index(sync_dev, src_bidx, src_cidx);
	davinci_set_dma_dest_index(sync_dev, dst_bidx, dst_cidx);
	davinci_set_dma_transfer_params(sync_dev, acnt, bcnt, ccnt,
					bcntrld, sync_mode);

	davinci_get_dma_params(sync_dev, &temp);

	/* Cache the OPT word the first time each direction is programmed;
	 * on later requests restore the cached value so OPT survives any
	 * changes made by the EDMA layer in between. */
	if (sync_dev == host->dma_tx_event) {
		if (host->option_write == 0) {
			host->option_write = temp.opt;
		} else {
			temp.opt = host->option_write;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}
	if (sync_dev == host->dma_rx_event) {
		if (host->option_read == 0) {
			host->option_read = temp.opt;
		} else {
			temp.opt = host->option_read;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}

	if (host->sg_len > 1) {
		/* Multi-segment: drop the completion interrupt on the main
		 * channel; only the final linked entry keeps TCINTEN. */
		davinci_get_dma_params(sync_dev, &temp);
		temp.opt &= ~TCINTEN;
		davinci_set_dma_params(sync_dev, &temp);

		for (i = 0; i < host->sg_len - 1; i++) {
			sg = &data->sg[i + 1];

			/* Clear TCINTEN on the previously-programmed link
			 * entry — it is no longer the last in the chain. */
			if (i != 0) {
				j = i - 1;
				davinci_get_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
				temp.opt &= ~TCINTEN;
				davinci_set_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
			}

			/* NOTE(review): this always uses chanel_num[0], so
			 * every iteration reprograms the same link channel
			 * — looks like it was meant to be chanel_num[i];
			 * confirm against the working driver. */
			edma_chan_num = host->edma_ch_details.chanel_num[0];

			frame = data->blksz;
			count = sg_dma_len(sg);
			if ((data->blocks == 1) && (count > data->blksz))
				count = frame;

			/* NOTE(review): assumes count is a multiple of 32
			 * for linked segments (no remainder handling here,
			 * unlike the first segment) — verify. */
			ccnt = count / 32;

			/* Only the segment address changes per link; the
			 * rest of the PaRAM is inherited from `temp`. */
			if (sync_dev == host->dma_tx_event)
				temp.src = (unsigned int)sg_dma_address(sg);
			else
				temp.dst = (unsigned int)sg_dma_address(sg);

			/* This entry is currently the chain tail: enable its
			 * completion interrupt and patch the CCNT halfword. */
			temp.opt |= TCINTEN;
			temp.ccnt = (temp.ccnt & 0xFFFF0000) | (ccnt);
			davinci_set_dma_params(edma_chan_num, &temp);

			if (i != 0) {
				j = i - 1;
				davinci_dma_link_lch(host->edma_ch_details.
						     chanel_num[j],
						     edma_chan_num);
			}
		}

		/* Link the main sync channel to the first chained entry. */
		davinci_dma_link_lch(sync_dev,
				     host->edma_ch_details.chanel_num[0]);
	}

	davinci_start_dma(sync_dev);
	return 0;
}
static int ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args) { int ret; unsigned int __user *argp = (unsigned int __user *) args; unsigned long physp; unsigned long virtp; unsigned int type; //__D("ioctl %d received. \n", cmd); switch (cmd) { case IPERA_INIT_PMU_STATICS: init_pmu_asm(); __D("IPERA_INIT_PMU_STATICS : returning\n"); break; case IPERA_START_PMU_CACHES_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //set_pmu_event_asm(PMU_ICACHE_EXEC, EVENT_COUNTER0); //set_pmu_event_asm(PMU_ICACHE_MISS, EVENT_COUNTER1); //set_pmu_event_asm(PMU_DCACHE_ACCESS, EVENT_COUNTER2); //set_pmu_event_asm(PMU_DCACHE_MISS, EVENT_COUNTER3); //start_pmu_asm(); //__D("IPERA_START_PMU_CACHES_STATICS : returning\n"); break; case IPERA_END_PMU_CACHES_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //stop_pmu_asm(); //pmu_statics[type].pmu_count += 1; //pmu_statics[type].pmu_cycles += get_clock_counter_asm(); //pmu_statics[type].pmu_instr_exec += get_pmnx_counter_asm(EVENT_COUNTER0); //pmu_statics[type].pmu_icache_miss += get_pmnx_counter_asm(EVENT_COUNTER1); //pmu_statics[type].pmu_dcache_access += get_pmnx_counter_asm(EVENT_COUNTER2); //pmu_statics[type].pmu_dcache_miss += get_pmnx_counter_asm(EVENT_COUNTER3); //__D("IPERA_END_PMU_CACHES_STATICS : returning\n"); break; case IPERA_GET_STATICS: //memset(&type, 0, sizeof(type)); //ret = copy_from_user(&type, argp, sizeof(type)); //ret_get_cycles = pmu_statics[type].pmu_cycles; //__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_count); //__D("IPERA_GET_CYCLES : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_cycles); //__D("IPERA_GET_ICACHE_EXEC : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_instr_exec); //__D("IPERA_GET_ICACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_icache_miss); //__D("IPERA_GET_DCACHE_ACCESS : returning %#lx\n", (unsigned 
long)pmu_statics[type].pmu_dcache_access); //__D("IPERA_GET_DCACHE_MISS : returning %#lx\n", (unsigned long)pmu_statics[type].pmu_dcache_miss); //ret = copy_to_user(argp, &pmu_statics[type], sizeof(pmu_statics[type])); break; case IPERA_GET_PHYS: get_user(virtp, argp); physp = ipera_get_phys(virtp); put_user(physp, argp); //__D("IPERA_GET_PHYS : returning %#lx\n", physp); break; #if 0 case IPERA_GET_CYCLES: __D("IPERA_GET_CYCLES : received.\n"); cur_cycles = get_cycles(); copy_to_user(argp, &cur_cycles, sizeof(cur_cycles)); __D("IPERA_GET_CYCLES : returning %#lx\n", cur_cycles); break; case IPERA_GET_PHYS: __D("IPERA_GET_PHYS : received.\n"); get_user(virtp, argp); physp = get_phys(virtp); put_user(physp, argp); __D("IPERA_GET_PHYS : returning %#lx\n", physp); break; case IPERA_DMACPY: __D("IPERA_DMACPY : received.\n"); if (copy_from_user(&dma, argp, sizeof(dma))) { return -EFAULT; } err = davinci_request_dma(DM350_DMA_CHANNEL_ANY, "EDMA memcpy", memcpy_dma_irq_handler, NULL, &master_ch, &tcc, EVENTQ_1); if (err < 0) { __E("Error in requesting Master channel %d = 0x%x\n", master_ch, err); return err; } else if(master_ch != 25) __E("get channel %d \n", master_ch); davinci_stop_dma(master_ch); init_completion(&edmacompletion); davinci_set_dma_src_params(master_ch, (unsigned long) edmaparams.src, edmaparams.srcmode, edmaparams.srcfifowidth); davinci_set_dma_dest_params(master_ch, (unsigned long) edmaparams.dst, edmaparams.dstmode, edmaparams.dstfifowidth); davinci_set_dma_src_index(master_ch, edmaparams.srcbidx, edmaparams.srccidx); davinci_set_dma_dest_index(master_ch, edmaparams.dstbidx, edmaparams.dstcidx); davinci_set_dma_transfer_params(master_ch, edmaparams.acnt, edmaparams.bcnt, edmaparams.ccnt, edmaparams.bcntrld, edmaparams.syncmode); davinci_get_dma_params(master_ch, ¶mentry); davinci_set_dma_params(master_ch, ¶mentry); davinci_start_dma(master_ch); wait_for_completion(&edmacompletion); //printk("Dma completed... 
\n"); davinci_stop_dma(master_ch); davinci_free_dma(master_ch); break; #endif default: __E("Unknown ioctl received = %d.\n", cmd); return -EINVAL; } return 0; }