Example #1
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
	unsigned long mid_offset;

	spin_lock_bh(&rt_chan->lock);

	if (len < MIN_RTDMA_PKT_LEN) {
		/* Short transfers: programming the engine costs more than a CPU copy. */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
	} else {
		mid_offset = len / 2;

		/* The lower half is transferred by GDMA,
		 * the upper half by the CPU in parallel.
		 */
		RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
		RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
		RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH),
				 (mid_offset << 16) | (3 << 3) | (3 << 0));

		memcpy(phys_to_virt(dest) + mid_offset,
		       phys_to_virt(src) + mid_offset, len - mid_offset);

		/* Busy-wait for the engine, then ack the per-channel done bit. */
		while ((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1 << MEMCPY_DMA_CH)) == 0)
			cpu_relax();
		RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1 << MEMCPY_DMA_CH));
	}

	/* Initialize the descriptor on both paths before returning it. */
	dma_async_tx_descriptor_init(&rt_chan->txd, chan);

	spin_unlock_bh(&rt_chan->lock);

	return &rt_chan->txd;
}
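
For context, kernel clients do not call rt_dma_prep_dma_memcpy() directly; they reach it through the generic dmaengine API. The following is a minimal, hypothetical client sketch under the standard dmaengine contract: the function name example_memcpy() and the physical addresses passed in are assumptions, not part of the driver above. Note that because this driver performs the copy synchronously inside the prep hook, dma_async_issue_pending() is effectively a no-op here.

#include <linux/dmaengine.h>

/* Sketch only: how a kernel client would reach rt_dma_prep_dma_memcpy()
 * through the dmaengine core. dst_phys/src_phys/len are hypothetical.
 */
static int example_memcpy(dma_addr_t dst_phys, dma_addr_t src_phys, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy channel */
	if (!chan)
		return -ENODEV;

	/* This ends up in rt_dma_prep_dma_memcpy() above. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
						   len, 0);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = tx->tx_submit(tx);	/* rt_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* copy already done synchronously */

	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}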
Example #2
static struct dma_async_tx_descriptor *
rt_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct rt_dma_chan *rt_chan = to_rt_dma_chan(chan);
#ifndef CONFIG_RT_DMA_HSDMA
	unsigned long mid_offset;
#endif

	spin_lock_bh(&rt_chan->lock);

#ifdef CONFIG_RT_DMA_HSDMA
	if ((dest & 0x03) != 0) {
		/* HSDMA requires a word-aligned destination; fall back to a CPU copy. */
		memcpy(phys_to_virt(dest), phys_to_virt(src), len);
		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	} else {
		/* Fill the next TX/RX descriptor pair with source, destination
		 * and length.
		 */
		hsdma_rx_dma_owner_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info1.SDP0 = (src & 0xFFFFFFFF);
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info1.PDP0 = (dest & 0xFFFFFFFF);

		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.SDL0 = len;
		HSDMA_Entry.HSDMA_rx_ring0[hsdma_rx_dma_owner_idx0].hsdma_rxd_info2.PLEN0 = len;

		/* Single segment: mark it last and clear DDONE so the engine
		 * owns the descriptor.
		 */
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.LS0_bit = 1;
		HSDMA_Entry.HSDMA_tx_ring0[hsdma_tx_cpu_owner_idx0].hsdma_txd_info2.DDONE_bit = 0;

		/* Advance both ring indices and publish the TX index to kick
		 * the engine.
		 */
		hsdma_tx_cpu_owner_idx0 = (hsdma_tx_cpu_owner_idx0 + 1) % NUM_HSDMA_TX_DESC;
		hsdma_rx_calc_idx0 = (hsdma_rx_calc_idx0 + 1) % NUM_HSDMA_RX_DESC;
		sysRegWrite(HSDMA_TX_CTX_IDX0, cpu_to_le32((u32)hsdma_tx_cpu_owner_idx0));

		dma_async_tx_descriptor_init(&rt_chan->txd, chan);
	}
#else
	mid_offset = len / 2;

	/* As in example #1: lower half by GDMA, upper half by the CPU. */
	RT_DMA_WRITE_REG(RT_DMA_SRC_REG(MEMCPY_DMA_CH), src);
	RT_DMA_WRITE_REG(RT_DMA_DST_REG(MEMCPY_DMA_CH), dest);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG(MEMCPY_DMA_CH),
			 (mid_offset << 16) | (3 << 3) | (3 << 0));

	memcpy(phys_to_virt(dest) + mid_offset,
	       phys_to_virt(src) + mid_offset, len - mid_offset);

	dma_async_tx_descriptor_init(&rt_chan->txd, chan);

	/* Busy-wait for the engine, then ack the per-channel done bit. */
	while ((RT_DMA_READ_REG(RT_DMA_DONEINT) & (0x1 << MEMCPY_DMA_CH)) == 0)
		cpu_relax();
	RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1 << MEMCPY_DMA_CH));
#endif

	spin_unlock_bh(&rt_chan->lock);

	return &rt_chan->txd;
}
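
The HSDMA branch above uses the classic producer-ring handshake: fill the descriptor at the CPU-owner index, clear DDONE to pass ownership to the engine, then publish the advanced index through HSDMA_TX_CTX_IDX0. The toy model below (plain C, no kernel dependencies, simplified descriptor struct, no ownership checking on wrap) is a sketch of that bookkeeping only, not of the real HSDMA descriptor layout.

#include <stdint.h>
#include <stdio.h>

#define NUM_TX_DESC 4

struct toy_txd {
	uint32_t sdp0;		/* segment data pointer (SDP0) */
	uint32_t sdl0;		/* segment data length  (SDL0) */
	unsigned ls0:1;		/* last segment */
	unsigned ddone:1;	/* 1 = CPU owns, 0 = engine owns */
};

static struct toy_txd tx_ring[NUM_TX_DESC];
static unsigned tx_cpu_owner_idx;

static void toy_queue_copy(uint32_t src, uint32_t len)
{
	struct toy_txd *d = &tx_ring[tx_cpu_owner_idx];

	d->sdp0 = src;
	d->sdl0 = len;
	d->ls0 = 1;	/* single-segment transfer */
	d->ddone = 0;	/* hand ownership to the engine */

	/* Publish the advanced index; in the driver this is the
	 * sysRegWrite(HSDMA_TX_CTX_IDX0, ...) that kicks the hardware.
	 */
	tx_cpu_owner_idx = (tx_cpu_owner_idx + 1) % NUM_TX_DESC;
	printf("queued at slot %u, next slot %u\n",
	       (tx_cpu_owner_idx + NUM_TX_DESC - 1) % NUM_TX_DESC,
	       tx_cpu_owner_idx);
}

int main(void)
{
	toy_queue_copy(0x80001000u, 256);
	toy_queue_copy(0x80002000u, 512);
	return 0;
}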
Example #3
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
	int reg;

	pr_info("%s\n", __func__);

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan)
		return -ENOMEM;

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;

	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);
	
	err = dma_async_device_register(dma_dev);
	if (err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		return err;
	}

	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler, IRQF_DISABLED, "Ralink_DMA", NULL);
	if (ret) {
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		dma_async_device_unregister(dma_dev);
		return ret;
	}

	/* Set the GDMA control register in advance. */
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);

	return 0;
}
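
The excerpt does not show how rt_dma_probe() gets invoked. A conventional sketch of the missing glue follows; the platform_driver name "rt_dma" and the init/exit function names are assumptions, not taken from the source.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical registration glue: the kernel matches this driver against a
 * platform device named "rt_dma" and then calls rt_dma_probe().
 */
static struct platform_driver rt_dma_driver = {
	.probe	= rt_dma_probe,
	.driver	= {
		.name	= "rt_dma",
		.owner	= THIS_MODULE,
	},
};

static int __init rt_dma_init_module(void)
{
	return platform_driver_register(&rt_dma_driver);
}
module_init(rt_dma_init_module);

static void __exit rt_dma_exit_module(void)
{
	platform_driver_unregister(&rt_dma_driver);
}
module_exit(rt_dma_exit_module);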
Example #4
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
#ifdef CONFIG_RT_DMA_HSDMA
	unsigned long reg_int_mask = 0;
#else
	int reg;
#endif

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	//dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan)
		return -ENOMEM;

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;

	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

	err = dma_async_device_register(dma_dev);
	if (err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		return err;
	}

#ifdef CONFIG_RT_DMA_HSDMA
	ret = request_irq(SURFBOARDINT_HSGDMA, rt_dma_interrupt_handler,
			  IRQF_DISABLED, "HS_DMA", NULL);
	if (ret) {
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_HSGDMA);
		dma_async_device_unregister(dma_dev);
		return ret;
	}
#else
	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
			  IRQF_DISABLED, "GDMA", NULL);
	if (ret) {
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		dma_async_device_unregister(dma_dev);
		return ret;
	}
#endif

#ifdef CONFIG_RT_DMA_HSDMA
	/* Mask TX DONE and RX DONE in a single write so both bits stay
	 * cleared.
	 */
	sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_TX | HSDMA_FE_INT_RX));
	printk("reg_int_mask=%lu, INT_MASK=%x\n", reg_int_mask, sysRegRead(HSDMA_INT_MASK));
	HSDMA_init();
#else
	/* Set the GDMA control register in advance. */
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);
#endif

	return 0;
}
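
Both probe variants install rt_dma_interrupt_handler, whose body is not part of this excerpt. Purely as a sketch for the GDMA build, a minimal handler could acknowledge the per-channel done bit the same way the polling loop in rt_dma_prep_dma_memcpy() does; nothing below is confirmed by the source.

#include <linux/interrupt.h>

/* Sketch only: ack the per-channel DONE flag, mirroring the
 * RT_DMA_DONEINT handshake used in the polling path above.
 */
static irqreturn_t rt_dma_interrupt_handler(int irq, void *dev_id)
{
	u32 done = RT_DMA_READ_REG(RT_DMA_DONEINT);

	if (!(done & (0x1 << MEMCPY_DMA_CH)))
		return IRQ_NONE;	/* not our channel */

	/* Write-1-to-clear the done flag, as the prep path does. */
	RT_DMA_WRITE_REG(RT_DMA_DONEINT, (1 << MEMCPY_DMA_CH));

	return IRQ_HANDLED;
}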