Example #1
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
	unsigned long mid_offset;


	spin_lock_bh(&rt_chan->lock);

	if (len < MIN_RTDMA_PKT_LEN) {
		/* Short copies are cheaper on the CPU than on the DMA
		 * engine. Still initialize the descriptor, since it is
		 * returned to the caller either way.
		 */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	} else {
		mid_offset = len / 2;

		/* The lower half is transferred by the GDMA engine while
		 * the CPU copies the upper half in parallel.
		 */
		RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
		RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
		/* Transfer length goes in the upper half of the control
		 * word; the low bits select hardware burst/mode settings.
		 */
		RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH),
				 (mid_offset << 16) | (3 << 3) | (3 << 0));

		memcpy(phys_to_virt(dest) + mid_offset,
		       phys_to_virt(src) + mid_offset, len - mid_offset);

		dma_async_tx_descriptor_init(&rt_chan->txd, chan);

		/* Busy-wait for the engine to finish, then acknowledge
		 * the done interrupt.
		 */
		while ((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1 << MEMCPY_DMA_CH)) == 0)
			cpu_relax();
		RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1 << MEMCPY_DMA_CH));
	}

	spin_unlock_bh(&rt_chan->lock);

	return &rt_chan->txd;
}
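
For context, a prep callback like this one is reached through the generic dmaengine framework, which means it has to be registered on a struct dma_device at probe time. Below is a minimal registration sketch: rt_dma_probe and the global rt_dma_device are assumed names for illustration, while dma_cap_set(), the device_prep_dma_memcpy field, and dma_async_device_register() are the real kernel API.

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Minimal sketch, not this driver's actual probe code. Channel-list
 * and tx_submit setup are elided.
 */
static struct dma_device rt_dma_device;	/* assumed name */

static int rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dd = &rt_dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);	/* advertise memcpy capability */
	dd->dev = &pdev->dev;
	dd->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;

	/* ... INIT_LIST_HEAD(&dd->channels) and per-channel setup ... */

	return dma_async_device_register(dd);
}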
Example #2
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
#ifndef CONFIG_RT_DMA_HSDMA
	unsigned long mid_offset;
#endif
 
	spin_lock_bh(&rt_chan->lock);

#ifdef CONFIG_RT_DMA_HSDMA
	if ((dest & 0x03) != 0) {
		/* HSDMA needs a word-aligned destination; fall back to a
		 * CPU copy for unaligned buffers.
		 */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	} else {
		hsdma_rx_dma_owner_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;

		/* Fill the next TX/RX descriptor pair: source buffer on
		 * the TX ring, destination buffer on the RX ring.
		 */
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info1.SDP0 = (src & 0xFFFFFFFF);
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info1.PDP0 = (dest & 0xFFFFFFFF);

		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.SDL0 = len;
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info2.PLEN0 = len;

		/* Single-segment transfer: set the last-segment bit and
		 * clear DDONE so the engine owns the descriptor.
		 */
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.LS0_bit = 1;
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.DDONE_bit = 0;

		/* Advance both ring indices and kick the engine by
		 * publishing the new TX CPU-owner index.
		 */
		hsdma_tx_cpu_owner_idx0 = (hsdma_tx_cpu_owner_idx0 + 1) % NUM_HSDMA_TX_DESC;
		hsdma_rx_calc_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;
		sysRegWrite(HSDMA_TX_CTX_IDX0, cpu_to_le32((u32)hsdma_tx_cpu_owner_idx0));

		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	}
#else
		mid_offset = len / 2;

		/* Lower half via GDMA, upper half via the CPU, in parallel. */
		RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
		RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
		RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH),
				 (mid_offset << 16) | (3 << 3) | (3 << 0));

		memcpy(phys_to_virt(dest) + mid_offset,
		       phys_to_virt(src) + mid_offset, len - mid_offset);

		dma_async_tx_descriptor_init(&rt_chan->txd, chan);

		/* Busy-wait for the engine to finish, then acknowledge
		 * the done interrupt.
		 */
		while ((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1 << MEMCPY_DMA_CH)) == 0)
			cpu_relax();
		RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1 << MEMCPY_DMA_CH));
#endif

	spin_unlock_bh(&rt_chan->lock);

	return &rt_chan->txd;
}
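
On the client side, both variants are reached through the same generic dmaengine flow: grab a DMA_MEMCPY-capable channel, call the prep hook, submit, and issue pending. A hedged usage sketch follows; do_dma_memcpy is an illustrative name, and it assumes the driver sets txd.tx_submit elsewhere, since dma_async_tx_descriptor_init() does not.

#include <linux/dmaengine.h>

/* Illustrative client-side flow, not taken from this driver. */
static int do_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy channel */
	if (!chan)
		return -ENODEV;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(txd);		/* invokes txd->tx_submit() */
	dma_async_issue_pending(chan);

	/* With the prep hooks above, the copy has already completed
	 * synchronously by the time prep returns.
	 */
	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}

Note that because both prep hooks block until the transfer is done (CPU memcpy, or busy-waiting on the done interrupt), the submit/issue steps are effectively no-ops here; a conventional dmaengine driver would instead return immediately from prep and complete the descriptor asynchronously from its interrupt handler.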