/*
 * rt_dma_probe() - probe the Ralink GDMA engine and register it with the
 * dmaengine core as a memcpy/slave-capable provider.
 *
 * Allocates the dma_device descriptor and a single channel via devm_kzalloc()
 * (both are freed automatically when the driver detaches), wires up the
 * dmaengine callbacks, registers the device, claims the GDMA interrupt and
 * pre-programs the control register of the memcpy channel.
 *
 * @pdev: platform device handed in by the platform bus core.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
	int reg;

	/* Use a proper log level and the standard C99 __func__. */
	printk(KERN_INFO "%s\n", __func__);

	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Advertise memcpy and slave transfer capability. */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan)
		return -ENOMEM;

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;
	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

	err = dma_async_device_register(dma_dev);
	if (err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		/* Propagate the negative errno instead of returning 1. */
		return err;
	}

	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
			  IRQF_DISABLED, "Ralink_DMA", NULL);
	if (ret) {
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		/*
		 * Undo the dmaengine registration before bailing out;
		 * the original code leaked the registered device here.
		 */
		dma_async_device_unregister(dma_dev);
		return ret;
	}

	/*
	 * Pre-program the memcpy channel's control register.  The 32/32
	 * values presumably select source/destination burst sizes and the
	 * shifted channel id routes the request — TODO confirm the field
	 * layout against the GDMA datasheet.
	 */
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);

	return 0;
}
/*
 * rt_dma_probe() - probe the Ralink DMA engine (GDMA, or HSDMA when
 * CONFIG_RT_DMA_HSDMA is set) and register it with the dmaengine core
 * as a memcpy-capable provider.
 *
 * NOTE(review): the matching #if/#else for the #endif below (alternate
 * function signature, e.g. with/without __devinit for different kernel
 * versions) lies above this chunk — confirm against the full file.
 *
 * Return: 0 on success, -ENOMEM on allocation failure; other error paths
 * return 1 — NOTE(review): probe callbacks should return a negative errno,
 * not a positive value.
 */
static int __devinit rt_dma_probe(struct platform_device *pdev)
#endif
{
	struct dma_device *dma_dev;
	struct rt_dma_chan *rt_chan;
	int err;
	int ret;
#ifdef CONFIG_RT_DMA_HSDMA
	unsigned long reg_int_mask=0;
#else
	int reg;
#endif

	//printk("%s\n",__FUNCTION__);

	/* Device descriptor is devm-managed: freed automatically on detach. */
	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
	if (!dma_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Only DMA_MEMCPY is advertised; slave capability is disabled here. */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	//dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
	dma_dev->device_tx_status = rt_dma_status;
	dma_dev->device_issue_pending = rt_dma_issue_pending;
	dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
	dma_dev->dev = &pdev->dev;

	/* Single channel, also devm-managed. */
	rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
	if (!rt_chan) {
		return -ENOMEM;
	}

	spin_lock_init(&rt_chan->lock);
	INIT_LIST_HEAD(&rt_chan->chain);
	INIT_LIST_HEAD(&rt_chan->completed_slots);
	INIT_LIST_HEAD(&rt_chan->all_slots);
	rt_chan->common.device = dma_dev;
	rt_chan->txd.tx_submit = rt_dma_tx_submit;
	list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

	err = dma_async_device_register(dma_dev);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		/* NOTE(review): should return err (negative errno), not 1. */
		return 1;
	}

	/* Claim the engine's interrupt line; handler services both variants. */
#ifdef CONFIG_RT_DMA_HSDMA
	ret = request_irq(SURFBOARDINT_HSGDMA, rt_dma_interrupt_handler,
			IRQF_DISABLED, "HS_DMA", NULL);
#else
	ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
			IRQF_DISABLED, "GDMA", NULL);
#endif
	if(ret){
		/*
		 * NOTE(review): always prints SURFBOARDINT_DMA, which is the
		 * wrong IRQ number in the HSDMA build; also leaks the
		 * registered dma device (no dma_async_device_unregister)
		 * and returns 1 instead of a negative errno.
		 */
		pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
		return 1;
	}

#ifdef CONFIG_RT_DMA_HSDMA
	/*
	 * Mask the frame-engine TX/RX completion interrupts before starting.
	 * NOTE(review): reg_int_mask is a local zero, so each write clears
	 * every other mask bit too — presumably the intent was to
	 * read-modify-write HSDMA_INT_MASK; confirm against the HSDMA manual.
	 */
	sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_TX)); // disable int TX DONE
	sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_RX) ); // disable int RX DONE
	printk("reg_int_mask=%lu, INT_MASK= %x \n", reg_int_mask, sysRegRead(HSDMA_INT_MASK));
	/* Bring up the high-speed DMA descriptor rings. */
	HSDMA_init();
#else
	//set GDMA register in advance.
	/*
	 * Pre-program the memcpy channel control register: the 32/32 fields
	 * are presumably burst sizes and the shifted value the channel id —
	 * TODO confirm field layout against the GDMA datasheet.
	 */
	reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
	RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);
#endif

	return 0;
}