/*
 * rt_dma_prep_dma_memcpy - perform a memory-to-memory copy on behalf of the
 * dmaengine framework.
 *
 * Unlike a normal prep routine, this one executes the transfer synchronously
 * under the channel spinlock and returns an already-completed descriptor.
 *
 * HSDMA build: the copy is pushed through the global HSDMA tx/rx descriptor
 * rings (global ring indices, not per-channel state).
 * Non-HSDMA build: the lower half of the buffer is handed to the GDMA engine
 * while the CPU copies the upper half, then the code busy-waits for the
 * engine's done interrupt bit.
 *
 * NOTE(review): the busy-wait happens with the spinlock held and BHs
 * disabled; presumably transfers are short enough for this to be tolerable —
 * confirm against expected workload.
 */
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
	unsigned long mid_offset;
#ifdef CONFIG_RT_DMA_HSDMA
	unsigned long i;	/* NOTE(review): appears unused in this path */
#endif
	//printk("%x->%x len=%d ch=%d\n", src, dest, len, chan->chan_id);
	spin_lock_bh(&rt_chan->lock);
#ifdef CONFIG_RT_DMA_HSDMA
	if ((dest & 0x03)!=0){
		/* Destination not 4-byte aligned: fall back to a CPU copy. */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	}
	else{
		/* Claim the next rx slot ahead of the calc index. */
		hsdma_rx_dma_owner_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;
		/* Source goes into the tx ring, destination into the rx ring. */
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info1.SDP0 = (src & 0xFFFFFFFF);
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info1.PDP0 = (dest & 0xFFFFFFFF);
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.SDL0 = len;
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info2.PLEN0 = len;
		/* Single-segment transfer: mark last-segment, clear done bit
		 * so the engine takes ownership of the descriptor. */
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.LS0_bit = 1;
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.DDONE_bit = 0;
		/* Advance the global ring indices. */
		hsdma_tx_cpu_owner_idx0 = (hsdma_tx_cpu_owner_idx0+1) % NUM_HSDMA_TX_DESC;
		hsdma_rx_calc_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;
		/* Kick the engine by publishing the new tx CPU index. */
		sysRegWrite(HSDMA_TX_CTX_IDX0, cpu_to_le32((u32)hsdma_tx_cpu_owner_idx0));
		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	}
#else
	/* Split the buffer: engine copies [0, mid), CPU copies [mid, len). */
	mid_offset = len/2;
	RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
	RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
	/* NOTE(review): (3<<3)|(3<<0) presumably selects burst size/enable
	 * bits in the control register — confirm against the GDMA datasheet. */
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH), (mid_offset << 16) | (3 << 3) | (3 << 0));
	memcpy(phys_to_virt(dest)+mid_offset, phys_to_virt(src)+mid_offset, len-mid_offset);
	dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	/* Busy-wait for the engine's done bit, then acknowledge it. */
	while((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1<<MEMCPY_DMA_CH))==0);
	RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1<<MEMCPY_DMA_CH));
#endif
	spin_unlock_bh(&rt_chan->lock);
	return &rt_chan->txd;
}
/*
 * rt_dma_prep_dma_memcpy - perform a memory-to-memory copy on behalf of the
 * dmaengine framework.
 *
 * Executes the transfer synchronously under the channel spinlock and returns
 * an already-completed descriptor. Short copies are done entirely by the CPU;
 * longer copies are split: the GDMA engine copies the lower half while the
 * CPU copies the upper half, then the code busy-waits for the engine.
 *
 * Fix: the short-copy path previously returned &rt_chan->txd without ever
 * initializing it via dma_async_tx_descriptor_init(), unlike every other
 * path; a caller submitting that descriptor would operate on stale/garbage
 * state. Initialize it on both paths.
 */
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
	unsigned long mid_offset;

	// printk("%x->%x len=%d ch=%d\n", src, dest, len, chan->chan_id);
	spin_lock_bh(&rt_chan->lock);

	if (len < MIN_RTDMA_PKT_LEN) {
		/* Too small to be worth engine setup: plain CPU copy. */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
	} else {
		mid_offset = len/2;
		/* Lower parts are transferred by GDMA.
		 * Upper parts are transferred by CPU.
		 */
		RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
		RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
		RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH), (mid_offset << 16) | (3 << 3) | (3 << 0));

		memcpy(phys_to_virt(dest)+mid_offset, phys_to_virt(src)+mid_offset, len-mid_offset);

		/* Busy-wait (spinlock held) for the engine's done bit,
		 * then acknowledge it. */
		while((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1<<MEMCPY_DMA_CH))==0);
		RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1<<MEMCPY_DMA_CH));
	}

	/* Always hand back a freshly initialized descriptor. */
	dma_async_tx_descriptor_init(&rt_chan->txd, chan);

	spin_unlock_bh(&rt_chan->lock);

	return &rt_chan->txd;
}
static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); int ret; dma_async_tx_descriptor_init(&tdmac->desc, chan); tdmac->desc.tx_submit = mmp_tdma_tx_submit; if (tdmac->irq) { ret = devm_request_irq(tdmac->dev, tdmac->irq, mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); if (ret) return ret; } return 1; }
/*
 * at_xdmac_alloc_desc - allocate and initialize one transfer descriptor
 * from the controller's DMA pool.
 *
 * Returns a zeroed, list-initialized descriptor with its dmaengine tx
 * descriptor set up (submit callback and bus address recorded), or NULL
 * if the pool allocation fails.
 */
static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (!desc)
		return NULL;

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->descs_list);
	dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
	desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
	desc->tx_dma_desc.phys = phys;	/* bus address for chaining */

	return desc;
}
static int sdma_alloc_chan_resources(struct dma_chan *chan) { struct sdma_channel *sdmac = to_sdma_chan(chan); struct imx_dma_data *data = chan->private; int prio, ret; if (!data) return -EINVAL; switch (data->priority) { case DMA_PRIO_HIGH: prio = 3; break; case DMA_PRIO_MEDIUM: prio = 2; break; case DMA_PRIO_LOW: default: prio = 1; break; } sdmac->peripheral_type = data->peripheral_type; sdmac->event_id0 = data->dma_request; if (data->dma_request_p2p > 0) sdmac->event_id1 = data->dma_request_p2p; else sdmac->event_id1 = 0; ret = sdma_request_channel(sdmac); if (ret) return ret; ret = sdma_set_channel_priority(sdmac, prio); if (ret) return ret; dma_async_tx_descriptor_init(&sdmac->desc, chan); sdmac->desc.tx_submit = sdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ sdmac->desc.flags = DMA_CTRL_ACK; return 0; }