static int __devinit rt_dma_probe(struct platform_device *pdev)
{
        struct dma_device *dma_dev;
        struct rt_dma_chan *rt_chan;
        int err;
        int ret;
        int reg;

        pr_info("%s\n", __func__);

        dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
        if (!dma_dev)
                return -ENOMEM;

        INIT_LIST_HEAD(&dma_dev->channels);

        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
        dma_dev->device_tx_status = rt_dma_status;
        dma_dev->device_issue_pending = rt_dma_issue_pending;
        dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
        dma_dev->dev = &pdev->dev;

        rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
        if (!rt_chan)
                return -ENOMEM;

        spin_lock_init(&rt_chan->lock);
        INIT_LIST_HEAD(&rt_chan->chain);
        INIT_LIST_HEAD(&rt_chan->completed_slots);
        INIT_LIST_HEAD(&rt_chan->all_slots);
        rt_chan->common.device = dma_dev;
        rt_chan->txd.tx_submit = rt_dma_tx_submit;
        list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

        err = dma_async_device_register(dma_dev);
        if (err) {
                pr_err("ERR_MDMA: device_register failed: %d\n", err);
                return err;
        }

        ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
                          IRQF_DISABLED, "Ralink_DMA", NULL);
        if (ret) {
                pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
                dma_async_device_unregister(dma_dev);
                return ret;
        }

        /* Set the GDMA channel control register in advance:
         * burst sizes and the memcpy channel number.
         */
        reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
        RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);

        return 0;
}
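/*
 * A minimal companion sketch (not from the original driver) showing how a
 * probe routine like rt_dma_probe() is typically wired into a platform
 * driver. The driver name "rt-dma" and the rt_dma_remove() teardown are
 * assumptions for illustration; the sketch further assumes the probe stored
 * the dma_device with platform_set_drvdata(pdev, dma_dev), which the probe
 * above does not yet do.
 */
static int __devexit rt_dma_remove(struct platform_device *pdev)
{
        struct dma_device *dma_dev = platform_get_drvdata(pdev);

        /* Undo the probe in reverse order: irq first, then the dmaengine
         * device. dma_dev and the channel were devm-allocated, so there is
         * no explicit kfree() here.
         */
        free_irq(SURFBOARDINT_DMA, NULL);
        dma_async_device_unregister(dma_dev);
        return 0;
}

static struct platform_driver rt_dma_driver = {
        .probe          = rt_dma_probe,
        .remove         = __devexit_p(rt_dma_remove),
        .driver         = {
                .name   = "rt-dma",     /* assumed device name */
                .owner  = THIS_MODULE,
        },
};

module_platform_driver(rt_dma_driver);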
static int mmp_tdma_probe(struct platform_device *pdev)
{
        enum mmp_tdma_type type;
        const struct of_device_id *of_id;
        struct mmp_tdma_device *tdev;
        struct resource *iores;
        int i, ret;
        int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;

        of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
        if (of_id)
                type = (enum mmp_tdma_type) of_id->data;
        else
                type = platform_get_device_id(pdev)->driver_data;

        /* always have a couple of channels */
        tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
        if (!tdev)
                return -ENOMEM;

        tdev->dev = &pdev->dev;

        for (i = 0; i < chan_num; i++) {
                if (platform_get_irq(pdev, i) > 0)
                        irq_num++;
        }

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
        if (!tdev->base)
                return -EADDRNOTAVAIL;

        INIT_LIST_HEAD(&tdev->device.channels);

        if (irq_num != chan_num) {
                /* all channels share one irq */
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq, mmp_tdma_int_handler,
                                       IRQF_DISABLED, "tdma", tdev);
                if (ret)
                        return ret;
        }

        /* initialize channel parameters */
        for (i = 0; i < chan_num; i++) {
                irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
                ret = mmp_tdma_chan_init(tdev, i, irq, type);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
        dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
        tdev->device.dev = &pdev->dev;
        tdev->device.device_alloc_chan_resources = mmp_tdma_alloc_chan_resources;
        tdev->device.device_free_chan_resources = mmp_tdma_free_chan_resources;
        tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
        tdev->device.device_tx_status = mmp_tdma_tx_status;
        tdev->device.device_issue_pending = mmp_tdma_issue_pending;
        tdev->device.device_control = mmp_tdma_control;
        tdev->device.copy_align = TDMA_ALIGNMENT;

        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        platform_set_drvdata(pdev, tdev);

        ret = dma_async_device_register(&tdev->device);
        if (ret) {
                dev_err(tdev->device.dev, "unable to register\n");
                return ret;
        }

        dev_info(tdev->device.dev, "initialized\n");
        return 0;
}
static int mmp_tdma_probe(struct platform_device *pdev)
{
        enum mmp_tdma_type type;
        const struct of_device_id *of_id;
        struct mmp_tdma_device *tdev;
        struct resource *iores;
        int i, ret;
        int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;
        struct gen_pool *pool = NULL;

        of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
        if (of_id)
                type = (enum mmp_tdma_type) of_id->data;
        else
                type = platform_get_device_id(pdev)->driver_data;

        /* always have a couple of channels */
        tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
        if (!tdev)
                return -ENOMEM;

        tdev->dev = &pdev->dev;

        for (i = 0; i < chan_num; i++) {
                if (platform_get_irq(pdev, i) > 0)
                        irq_num++;
        }

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        tdev->base = devm_ioremap_resource(&pdev->dev, iores);
        if (IS_ERR(tdev->base))
                return PTR_ERR(tdev->base);

        INIT_LIST_HEAD(&tdev->device.channels);

        if (pdev->dev.of_node)
                pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
        else
                pool = sram_get_gpool("asram");
        if (!pool) {
                dev_err(&pdev->dev, "asram pool not available\n");
                return -ENOMEM;
        }

        if (irq_num != chan_num) {
                /* all channels share one irq */
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq, mmp_tdma_int_handler,
                                       0, "tdma", tdev);
                if (ret)
                        return ret;
        }

        /* initialize channel parameters */
        for (i = 0; i < chan_num; i++) {
                irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
                ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
        dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
        tdev->device.dev = &pdev->dev;
        tdev->device.device_alloc_chan_resources = mmp_tdma_alloc_chan_resources;
        tdev->device.device_free_chan_resources = mmp_tdma_free_chan_resources;
        tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
        tdev->device.device_tx_status = mmp_tdma_tx_status;
        tdev->device.device_issue_pending = mmp_tdma_issue_pending;
        tdev->device.device_config = mmp_tdma_config;
        tdev->device.device_pause = mmp_tdma_pause_chan;
        tdev->device.device_resume = mmp_tdma_resume_chan;
        tdev->device.device_terminate_all = mmp_tdma_terminate_all;
        tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        platform_set_drvdata(pdev, tdev);

        ret = dma_async_device_register(&tdev->device);
        if (ret) {
                dev_err(tdev->device.dev, "unable to register\n");
                return ret;
        }

        if (pdev->dev.of_node) {
                ret = of_dma_controller_register(pdev->dev.of_node,
                                                 mmp_tdma_xlate, tdev);
                if (ret) {
                        dev_err(tdev->device.dev,
                                "failed to register controller\n");
                        dma_async_device_unregister(&tdev->device);
                        return ret;
                }
        }

        dev_info(tdev->device.dev, "initialized\n");
        return 0;
}
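/*
 * Sketch of the of_dma translate callback registered above (an assumption,
 * not the driver's actual implementation). It assumes a one-cell devicetree
 * DMA specifier and simply hands out any free channel belonging to this
 * controller; the real mmp_tdma_xlate may instead select the specific
 * channel named by dma_spec->args[0].
 */
static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
                                       struct of_dma *ofdma)
{
        struct mmp_tdma_device *tdev = ofdma->of_dma_data;

        if (dma_spec->args_count != 1)
                return NULL;

        /* Ignores the requested channel index; illustrative only. */
        return dma_get_any_slave_channel(&tdev->device);
}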
static int __init sdma_probe(struct platform_device *pdev)
{
        int ret;
        int irq;
        struct resource *iores;
        struct sdma_platform_data *pdata = pdev->dev.platform_data;
        int i;
        struct sdma_engine *sdma;

        sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;

        sdma->dev = &pdev->dev;

        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!iores || irq < 0 || !pdata) {
                ret = -EINVAL;
                goto err_irq;
        }

        if (!request_mem_region(iores->start, resource_size(iores),
                                pdev->name)) {
                ret = -EBUSY;
                goto err_request_region;
        }

        sdma->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(sdma->clk)) {
                ret = PTR_ERR(sdma->clk);
                goto err_clk;
        }

        sdma->regs = ioremap(iores->start, resource_size(iores));
        if (!sdma->regs) {
                ret = -ENOMEM;
                goto err_ioremap;
        }

        ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
        if (ret)
                goto err_request_irq;

        sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
        if (!sdma->script_addrs) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        sdma->version = pdata->sdma_version;

        dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

        INIT_LIST_HEAD(&sdma->dma_device.channels);
        /* Initialize channel parameters */
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct sdma_channel *sdmac = &sdma->channel[i];

                sdmac->sdma = sdma;
                spin_lock_init(&sdmac->lock);

                sdmac->chan.device = &sdma->dma_device;
                sdmac->channel = i;

                /*
                 * Add the channel to the DMAC list. Do not add channel 0
                 * though, because we need it internally in the SDMA driver.
                 * This also means that channel 0 in dmaengine counting
                 * matches sdma channel 1.
                 */
                if (i)
                        list_add_tail(&sdmac->chan.device_node,
                                      &sdma->dma_device.channels);
        }

        ret = sdma_init(sdma);
        if (ret)
                goto err_init;

        if (pdata->script_addrs)
                sdma_add_scripts(sdma, pdata->script_addrs);

        sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);

        sdma->dma_device.dev = &pdev->dev;

        sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
        sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
        sdma->dma_device.device_tx_status = sdma_tx_status;
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_control = sdma_control;
        sdma->dma_device.device_issue_pending = sdma_issue_pending;
        sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
        dma_set_max_seg_size(sdma->dma_device.dev, 65535);

        ret = dma_async_device_register(&sdma->dma_device);
        if (ret) {
                dev_err(&pdev->dev, "unable to register\n");
                goto err_init;
        }

        dev_info(sdma->dev, "initialized\n");

        return 0;

err_init:
        kfree(sdma->script_addrs);
err_alloc:
        free_irq(irq, sdma);
err_request_irq:
        iounmap(sdma->regs);
err_ioremap:
        clk_put(sdma->clk);
err_clk:
        release_mem_region(iores->start, resource_size(iores));
err_request_region:
err_irq:
        kfree(sdma);
        return ret;
}
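/*
 * Because sdma_probe() is marked __init, its code is discarded after boot,
 * so the driver must be registered with platform_driver_probe() rather than
 * platform_driver_register() (which would keep a dangling .probe pointer
 * for late binding). A minimal sketch of that registration, assuming the
 * platform device is named "imx-sdma":
 */
static struct platform_driver sdma_driver = {
        .driver         = {
                .name   = "imx-sdma",   /* assumed platform device name */
        },
};

static int __init sdma_module_init(void)
{
        /* Binds sdma_probe() once, to already-registered devices only. */
        return platform_driver_probe(&sdma_driver, sdma_probe);
}
module_init(sdma_module_init);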
static int __devinit rt_dma_probe(struct platform_device *pdev)
{
        struct dma_device *dma_dev;
        struct rt_dma_chan *rt_chan;
        int err;
        int ret;
#ifdef CONFIG_RT_DMA_HSDMA
        unsigned long reg_int_mask = 0;
#else
        int reg;
#endif

        dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
        if (!dma_dev)
                return -ENOMEM;

        INIT_LIST_HEAD(&dma_dev->channels);

        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

        dma_dev->device_alloc_chan_resources = rt_dma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = rt_dma_free_chan_resources;
        dma_dev->device_tx_status = rt_dma_status;
        dma_dev->device_issue_pending = rt_dma_issue_pending;
        dma_dev->device_prep_dma_memcpy = rt_dma_prep_dma_memcpy;
        dma_dev->dev = &pdev->dev;

        rt_chan = devm_kzalloc(&pdev->dev, sizeof(*rt_chan), GFP_KERNEL);
        if (!rt_chan)
                return -ENOMEM;

        spin_lock_init(&rt_chan->lock);
        INIT_LIST_HEAD(&rt_chan->chain);
        INIT_LIST_HEAD(&rt_chan->completed_slots);
        INIT_LIST_HEAD(&rt_chan->all_slots);
        rt_chan->common.device = dma_dev;
        rt_chan->txd.tx_submit = rt_dma_tx_submit;
        list_add_tail(&rt_chan->common.device_node, &dma_dev->channels);

        err = dma_async_device_register(dma_dev);
        if (err) {
                pr_err("ERR_MDMA: device_register failed: %d\n", err);
                return err;
        }

#ifdef CONFIG_RT_DMA_HSDMA
        ret = request_irq(SURFBOARDINT_HSGDMA, rt_dma_interrupt_handler,
                          IRQF_DISABLED, "HS_DMA", NULL);
        if (ret) {
                pr_err("IRQ %d is not free.\n", SURFBOARDINT_HSGDMA);
                dma_async_device_unregister(dma_dev);
                return ret;
        }
#else
        ret = request_irq(SURFBOARDINT_DMA, rt_dma_interrupt_handler,
                          IRQF_DISABLED, "GDMA", NULL);
        if (ret) {
                pr_err("IRQ %d is not free.\n", SURFBOARDINT_DMA);
                dma_async_device_unregister(dma_dev);
                return ret;
        }
#endif

#ifdef CONFIG_RT_DMA_HSDMA
        /* disable the TX DONE and RX DONE interrupts */
        sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_TX));
        sysRegWrite(HSDMA_INT_MASK, reg_int_mask & ~(HSDMA_FE_INT_RX));
        printk("reg_int_mask=%lu, INT_MASK=%x\n",
               reg_int_mask, sysRegRead(HSDMA_INT_MASK));
        HSDMA_init();
#else
        /* set the GDMA channel control register in advance */
        reg = (32 << 16) | (32 << 8) | (MEMCPY_DMA_CH << 3);
        RT_DMA_WRITE_REG(RT_DMA_CTRL_REG1(MEMCPY_DMA_CH), reg);
#endif

        return 0;
}
static int tegra_adma_probe(struct platform_device *pdev)
{
        const struct tegra_adma_chip_data *cdata;
        struct tegra_adma *tdma;
        struct resource *res;
        struct clk *clk;
        int ret, i;

        cdata = of_device_get_match_data(&pdev->dev);
        if (!cdata) {
                dev_err(&pdev->dev, "device match data not found\n");
                return -ENODEV;
        }

        tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
                            sizeof(struct tegra_adma_chan), GFP_KERNEL);
        if (!tdma)
                return -ENOMEM;

        tdma->dev = &pdev->dev;
        tdma->nr_channels = cdata->nr_channels;
        platform_set_drvdata(pdev, tdma);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(tdma->base_addr))
                return PTR_ERR(tdma->base_addr);

        ret = pm_clk_create(&pdev->dev);
        if (ret)
                return ret;

        clk = clk_get(&pdev->dev, "d_audio");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "ADMA clock not found\n");
                ret = PTR_ERR(clk);
                goto clk_destroy;
        }

        ret = pm_clk_add_clk(&pdev->dev, clk);
        if (ret) {
                clk_put(clk);
                goto clk_destroy;
        }

        pm_runtime_enable(&pdev->dev);

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                goto rpm_disable;

        ret = tegra_adma_init(tdma);
        if (ret)
                goto rpm_put;

        INIT_LIST_HEAD(&tdma->dma_dev.channels);
        for (i = 0; i < tdma->nr_channels; i++) {
                struct tegra_adma_chan *tdc = &tdma->channels[i];

                tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);

                tdc->irq = of_irq_get(pdev->dev.of_node, i);
                if (tdc->irq < 0) {
                        ret = tdc->irq;
                        goto irq_dispose;
                }

                vchan_init(&tdc->vc, &tdma->dma_dev);
                tdc->vc.desc_free = tegra_adma_desc_free;
                tdc->tdma = tdma;
        }

        dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

        tdma->dma_dev.dev = &pdev->dev;
        tdma->dma_dev.device_alloc_chan_resources =
                                        tegra_adma_alloc_chan_resources;
        tdma->dma_dev.device_free_chan_resources =
                                        tegra_adma_free_chan_resources;
        tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
        tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
        tdma->dma_dev.device_config = tegra_adma_slave_config;
        tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
        tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
        tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

        ret = dma_async_device_register(&tdma->dma_dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
                goto irq_dispose;
        }

        ret = of_dma_controller_register(pdev->dev.of_node,
                                         tegra_dma_of_xlate, tdma);
        if (ret < 0) {
                dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
                goto dma_remove;
        }

        pm_runtime_put(&pdev->dev);

        dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
                 tdma->nr_channels);

        return 0;

dma_remove:
        dma_async_device_unregister(&tdma->dma_dev);
irq_dispose:
        while (--i >= 0)
                irq_dispose_mapping(tdma->channels[i].irq);
rpm_put:
        pm_runtime_put_sync(&pdev->dev);
rpm_disable:
        pm_runtime_disable(&pdev->dev);
clk_destroy:
        pm_clk_destroy(&pdev->dev);

        return ret;
}
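/*
 * Usage sketch (illustrative, not part of the driver): once the controller
 * is registered through of_dma_controller_register() above, a client device
 * whose DT node carries "dmas"/"dma-names" properties can request a channel
 * by name; the request is resolved via tegra_dma_of_xlate(). The channel
 * name "tx" and the helper below are assumptions for illustration.
 */
static int client_request_adma_chan(struct device *dev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /*
         * A real client would now call dmaengine_slave_config() and submit
         * cyclic transfers; this sketch just releases the channel again.
         */
        dma_release_channel(chan);
        return 0;
}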