/*
 * Hook up the PVR2 DMA interrupt, claim the cascade channel and register
 * the DMAC.  Returns 0 on success or a negative errno.
 *
 * Fix over the original: setup_irq() and request_dma() results were
 * ignored; their errors are now propagated to the caller.
 */
static int __init pvr2_dma_init(void)
{
	int ret;

	ret = setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
	if (unlikely(ret))
		return ret;

	ret = request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
	if (unlikely(ret))
		/* NOTE(review): no setup_irq() teardown is attempted here —
		 * confirm whether remove_irq() is available in this tree. */
		return ret;

	ret = register_dmac(&pvr2_dma_info);
	if (unlikely(ret))
		free_dma(PVR2_CASCADE_CHAN);

	return ret;
}
/*
 * Hook up the PVR2 DMA interrupt, claim the cascade channel, attach the
 * PVR2 ops to the channels that follow the on-chip DMAC, and register
 * the DMAC.  Returns 0 on success or a negative errno.
 *
 * Fix over the original: setup_irq() and request_dma() results were
 * ignored; their errors are now propagated to the caller.
 */
static int __init pvr2_dma_init(void)
{
	int i, base;
	int ret;

	ret = setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
	if (unlikely(ret))
		return ret;

	ret = request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
	if (unlikely(ret))
		/* NOTE(review): no setup_irq() teardown is attempted here —
		 * confirm whether remove_irq() is available in this tree. */
		return ret;

	/* PVR2 cascade comes after on-chip DMAC */
	base = ONCHIP_NR_DMA_CHANNELS;

	for (i = 0; i < PVR2_NR_DMA_CHANNELS; i++)
		dma_info[base + i].ops = &pvr2_dma_ops;

	ret = register_dmac(&pvr2_dma_ops);
	if (unlikely(ret))
		free_dma(PVR2_CASCADE_CHAN);

	return ret;
}
/*
 * Install the G2 DMA interrupt handler, program the controller's wait
 * state and magic word, and register the DMAC.  Returns 0 on success or
 * a negative errno.
 *
 * Fix over the original: a request_irq() failure returned a hard-coded
 * -EINVAL, discarding the real error code; it is now propagated.
 */
static int __init g2_dma_init(void)
{
	int ret;

	ret = request_irq(HW_EVENT_G2_DMA, g2_dma_interrupt, 0,
			  "g2 DMA handler", &g2_dma_info);
	if (unlikely(ret))
		return ret;

	/* Controller setup values taken from the original code; their
	 * hardware meaning is not documented here. */
	g2_dma->wait_state = 27;
	g2_dma->magic = 0x4659404f;

	ret = register_dmac(&g2_dma_info);
	if (unlikely(ret != 0))
		free_irq(HW_EVENT_G2_DMA, &g2_dma_info);

	return ret;
}
/*
 * Register the on-chip SH DMAC: claim the address-error interrupt (SH-4
 * only), reset DMAOR, and register the controller.  Returns 0 on
 * success or a negative errno.
 *
 * Fix over the original: the DMAE_IRQ handler was leaked when
 * dmaor_reset() or register_dmac() failed; it is now freed on those
 * error paths.
 */
static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	i = request_irq(DMAE_IRQ, dma_err, IRQF_DISABLED,
			"DMAC Address Error", 0);
	if (unlikely(i < 0))
		return i;
#endif

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	i = dmaor_reset();
	if (unlikely(i != 0))
		goto err_irq;

	i = register_dmac(info);
	if (unlikely(i != 0))
		goto err_irq;

	return 0;

err_irq:
#ifdef CONFIG_CPU_SH4
	free_irq(DMAE_IRQ, 0);
#endif
	return i;
}
/*
 * Register the on-chip SH DMAC: claim the address-error interrupt (SH-4
 * only), route every channel's DMTE interrupt through the IPR
 * controller, enable the DMAC via DMAOR, and register the controller.
 * Returns 0 on success or a negative errno.
 */
static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int chan;
#ifdef CONFIG_CPU_SH4
	int rc;

	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	rc = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT,
			 "DMAC Address Error", 0);
	if (rc < 0)
		return rc;
#endif

	/* Every channel's transfer-end interrupt goes through the IPR. */
	for (chan = 0; chan < info->nr_channels; chan++)
		make_ipr_irq(get_dmte_irq(chan), DMA_IPR_ADDR,
			     DMA_IPR_POS, DMA_PRIORITY);

	/* 0x8000 is a controller-specific DMAOR bit set alongside DME;
	 * its exact meaning is not visible from this file. */
	ctrl_outl(0x8000 | DMAOR_DME, DMAOR);

	return register_dmac(info);
}
/*
 * Register the on-chip SH DMAC: claim the address-error interrupt (SH-4
 * only), route each channel's DMTE interrupt through the IPR controller
 * and attach the DMAC ops to it, enable the DMAC via DMAOR, and
 * register the controller.  Returns 0 on success or a negative errno.
 */
static int __init sh_dmac_init(void)
{
	int chan;
#ifdef CONFIG_CPU_SH4
	int rc;

	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	rc = request_irq(DMAE_IRQ, dma_err, SA_INTERRUPT,
			 "DMAC Address Error", 0);
	if (rc < 0)
		return rc;
#endif

	for (chan = 0; chan < MAX_DMAC_CHANNELS; chan++) {
		make_ipr_irq(get_dmte_irq(chan), DMA_IPR_ADDR,
			     DMA_IPR_POS, DMA_PRIORITY);
		dma_info[chan].ops = &sh_dmac_ops;
		dma_info[chan].tei_capable = 1;
	}

	/* 0x8000 is a controller-specific DMAOR bit set alongside DME;
	 * its exact meaning is not visible from this file. */
	sh_dmac->dmaor |= 0x8000 | DMAOR_DME;

	return register_dmac(&sh_dmac_ops);
}
/*
 * Probe one ST FDMA controller instance: acquire clocks, map the
 * register window, register the DMAC and its channels with the SH DMA
 * API, and install the interrupt handler.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes over the original:
 *  - every failure path now releases the resources acquired so far (the
 *    fdma allocation, the clock references, the memory region and the
 *    ioremap mapping were all leaked on early error returns);
 *  - a register_dmac() failure is treated as fatal instead of being
 *    logged and ignored (the channel setup below cannot work without a
 *    registered DMAC);
 *  - the shift expressions no longer mix 64-bit and plain-int operands.
 */
static int __init fdma_driver_probe(struct platform_device *pdev)
{
	static __initdata char *fdma_clks_n[FDMA_CLKS_NR] = {
		[FDMA_SLIM_CLK] = "fdma_slim_clk",
		[FDMA_HI_CLK] = "fdma_hi_clk",
		[FDMA_LOW_CLK] = "fdma_low_clk",
		[FDMA_IC_CLK] = "fdma_ic_clk",
	};
	struct stm_plat_fdma_data *plat_data;
	struct fdma *fdma;
	struct resource *res;
	int chan_num, i;
	int err;

	plat_data = pdev->dev.platform_data;

	fdma = kzalloc(sizeof(struct fdma), GFP_KERNEL);
	if (fdma == NULL)
		return -ENOMEM;

	/* Missing clocks are reported but are not fatal. */
	for (i = 0; i < FDMA_CLKS_NR; ++i) {
		fdma->clks[i] = clk_get(&pdev->dev, fdma_clks_n[i]);
		if (!fdma->clks[i] || IS_ERR(fdma->clks[i]))
			pr_warning("%s: clk %s not found\n",
					dev_name(&pdev->dev), fdma_clks_n[i]);
	}

	stm_fdma_clk_enable(fdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_clks;
	}

	fdma->phys_mem = request_mem_region(res->start,
			res->end - res->start + 1, pdev->name);
	if (fdma->phys_mem == NULL) {
		err = -EBUSY;
		goto err_clks;
	}

	fdma->io_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (fdma->io_base == NULL) {
		err = -EINVAL;
		goto err_mem;
	}

	fdma->pdev = pdev;
	fdma->fdma_num = (pdev->id != -1 ? pdev->id : 0);

	fdma_channels_parse(fdma);

	/* Two status bits per channel; use 1ULL on both halves so both
	 * shifts are done in 64 bits (the original mixed 1ULL and int). */
	fdma->ch_status_mask = ((1ULL << ((fdma->ch_max + 1) * 2)) - 1ULL) ^
			((1ULL << (fdma->ch_min * 2)) - 1ULL);

	fdma->fw = plat_data->fw;
	fdma->hw = plat_data->hw;

	/* Flatten the per-variant register layout into one table. */
	fdma->regs.id = fdma->hw->slim_regs.id;
	fdma->regs.ver = fdma->hw->slim_regs.ver;
	fdma->regs.en = fdma->hw->slim_regs.en;
	fdma->regs.clk_gate = fdma->hw->slim_regs.clk_gate;
	fdma->regs.rev_id = fdma->fw->rev_id;
	fdma->regs.cmd_statn = fdma->fw->cmd_statn;
	fdma->regs.req_ctln = fdma->fw->req_ctln;
	fdma->regs.ptrn = fdma->fw->ptrn;
	fdma->regs.cntn = fdma->fw->cntn;
	fdma->regs.saddrn = fdma->fw->saddrn;
	fdma->regs.daddrn = fdma->fw->daddrn;
	fdma->regs.sync_reg = fdma->hw->periph_regs.sync_reg;
	fdma->regs.cmd_sta = fdma->hw->periph_regs.cmd_sta;
	fdma->regs.cmd_set = fdma->hw->periph_regs.cmd_set;
	fdma->regs.cmd_clr = fdma->hw->periph_regs.cmd_clr;
	fdma->regs.cmd_mask = fdma->hw->periph_regs.cmd_mask;
	fdma->regs.int_sta = fdma->hw->periph_regs.int_sta;
	fdma->regs.int_set = fdma->hw->periph_regs.int_set;
	fdma->regs.int_clr = fdma->hw->periph_regs.int_clr;
	fdma->regs.int_mask = fdma->hw->periph_regs.int_mask;

	/* Req lines 0 and 31 are basically unusable */
	fdma->reqs_used_mask = (1U << 31) | (1U << 0);
	spin_lock_init(&fdma->reqs_lock);

	spin_lock_init(&fdma->channels_lock);
	init_waitqueue_head(&fdma->fw_load_q);

	fdma->dma_info.nr_channels = fdma->ch_max - fdma->ch_min + 1;
	fdma->dma_info.ops = &fdma_ops;
	fdma->dma_info.flags = DMAC_CHANNELS_TEI_CAPABLE;
	strlcpy(fdma->name, STM_DMAC_ID, FDMA_NAME_LEN);
	if (pdev->id != -1) {
		int len = strlen(fdma->name);
		snprintf(fdma->name + len, FDMA_NAME_LEN - len, ".%d",
				pdev->id);
	}
	fdma->dma_info.name = fdma->name;

	err = register_dmac(&fdma->dma_info);
	if (err != 0) {
		printk(KERN_ERR "%s(): Error Registering DMAC\n", __func__);
		goto err_iounmap;
	}

	for (chan_num = fdma->ch_min; chan_num <= fdma->ch_max; chan_num++) {
		struct fdma_channel *channel = &fdma->channels[chan_num];
		struct dma_channel *dma_chan;

		channel->chan_num = chan_num;
		channel->fdma = fdma;
		dma_chan = get_dma_channel(chan_num - fdma->ch_min +
				fdma->dma_info.first_vchannel_nr);
		dma_chan->priv_data = channel;
		channel->dma_chan = dma_chan;
	}

	fdma->irq = platform_get_irq(pdev, 0);
	err = request_irq(fdma->irq, fdma_irq, IRQF_DISABLED | IRQF_SHARED,
			fdma->name, fdma);
	if (err < 0)
		/* NOTE(review): kept the original panic(); converting this
		 * to an error return would need unregister_dmac(), which is
		 * not visible from here — confirm before changing. */
		panic("Cant Register irq %d for FDMA engine err %d\n",
				fdma->irq, err);

	fdma_register_caps(fdma);
	fdma_check_firmware_state(fdma);

	platform_set_drvdata(pdev, fdma);

	return 0;

err_iounmap:
	iounmap(fdma->io_base);
err_mem:
	release_mem_region(res->start, res->end - res->start + 1);
err_clks:
	/* NOTE(review): no stm_fdma_clk_disable() counterpart is visible in
	 * this chunk, so the clocks stay enabled on this path — confirm. */
	for (i = 0; i < FDMA_CLKS_NR; ++i)
		if (fdma->clks[i] && !IS_ERR(fdma->clks[i]))
			clk_put(fdma->clks[i]);
	kfree(fdma);
	return err;
}