void set_dma_mde_base(dmach_t channel, const char *mde_vaddr)
{
	struct arch_dma_t *arch_dma;
	dma_t *dma;

	if (channel >= MAX_DMA_CHANNELS) {
		printk(KERN_ERR "Invalid dma channel %d\n", channel);
		return;
	}

	if (((unsigned long)mde_vaddr) & 0x03) {
		printk(KERN_ERR "set channel %d mde, invalid mde_base address %p (must be 4-byte aligned)\n",
		       channel, mde_vaddr);
		return;
	}

	arch_dma = &arch_dma_chan[channel];
	dma = arch_dma->dma;

	if (dma->active) {
		printk(KERN_ERR "dma%d: altering DMA mde while DMA active\n",
		       channel);
	}

	dma->using_sg = 1;
	dma->prolific_sg = 1;	/* using mde instead of sg */

	/* point the channel's descriptor table register at the new MDE */
	arch_dma->mde = (dma_mdt_t *)mde_vaddr;
	dma_writeb(0, DMA_IDX_REG, channel);
	dma_writel((unsigned int)virt_to_bus(&arch_dma->mde[0]),
		   DMA_MDT_REG, channel);
}

static void ar231x_reset_regs(struct eth_device *edev)
{
	struct ar231x_eth_priv *priv = edev->priv;
	struct ar231x_eth_platform_data *cfg = priv->cfg;
	u32 flags;

	/* pulse the MAC and PHY reset lines */
	ar231x_reset_bit_(priv, cfg->reset_mac, SET);
	mdelay(10);
	ar231x_reset_bit_(priv, cfg->reset_mac, REMOVE);
	mdelay(10);
	ar231x_reset_bit_(priv, cfg->reset_phy, SET);
	mdelay(10);
	ar231x_reset_bit_(priv, cfg->reset_phy, REMOVE);
	mdelay(10);

	/* software-reset the DMA engine, then program the bus mode */
	dma_writel(priv, DMA_BUS_MODE_SWR, AR231X_DMA_BUS_MODE);
	mdelay(10);
	dma_writel(priv, ((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE),
		   AR231X_DMA_BUS_MODE);

	/* FIXME: priv->{t,r}x_ring are virtual addresses,
	 * use virt-to-phys conversion */
	dma_writel(priv, (u32)priv->tx_ring, AR231X_DMA_TX_RING);
	dma_writel(priv, (u32)priv->rx_ring, AR231X_DMA_RX_RING);

	dma_writel(priv, (DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF),
		   AR231X_DMA_CONTROL);

	eth_writel(priv, FLOW_CONTROL_FCE, AR231X_ETH_FLOW_CONTROL);
	/* TODO: not sure if we need it here. */
	eth_writel(priv, 0x8100, AR231X_ETH_VLAN_TAG);

	/* Enable Ethernet Interface */
	flags = (MAC_CONTROL_TE |	/* transmit enable */
		 /* FIXME: MAC_CONTROL_PM - pass mcast.
		  * Seems like it makes no difference on some WiSoCs,
		  * for example ar2313.
		  * It should be tested on ar231[5,6,7] */
		 MAC_CONTROL_PM |
		 MAC_CONTROL_F |	/* full duplex */
		 MAC_CONTROL_HBD);	/* heart beat disabled */
	eth_writel(priv, flags, AR231X_ETH_MAC_CONTROL);
}

static int imapx200_dma_start(struct imapx200_dma_chan *chan)
{
	struct imapx200_dmac *dmac = chan->dmac;
	u32 bit = chan->bit;

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	dma_writel(dmac, CLEAR.XFER, bit);
	dma_writel(dmac, CLEAR.ERROR, bit);

	pr_debug("%s: starting channel\n", __func__);

	/* enable DMA channel */
	dma_set_bit(dmac, CH_EN, bit);

	return 0;
}

static void pl_enable_dma(dmach_t channel, dma_t *dma)
{
	unsigned char val;
	struct arch_dma_t *arch_dma;
	int count;
	int i;
	unsigned int dcnt = 0;

	if (channel >= MAX_DMA_CHANNELS) {
		printk(KERN_ERR "Invalid dma channel %d\n", channel);
		return;
	}

	arch_dma = &arch_dma_chan[channel];

	if (dma->using_sg == 0) {
		if (arch_dma->mde == NULL)
			arch_dma->mde = arch_dma->mde_default;

		arch_dma->mde[0].saddr = (unsigned int)virt_to_bus(dma->buf.address);
		count = dma->buf.length;
		if (count > MAX_DMA_MDT * MAX_DMA_MDT_COUNT) {
			printk(KERN_ERR "Transfer size %d exceeds maximum\n",
			       count);
			return;
		}

		/* Build the descriptor chain: each entry covers at most
		 * MAX_DMA_MDT_COUNT bytes and links to the next entry. */
		for (i = 0; i < MAX_DMA_MDT; i++) {
			if (i > 0)
				arch_dma->mde[i].saddr =
					arch_dma->mde[i-1].saddr + dcnt;

			if (count > MAX_DMA_MDT_COUNT)
				dcnt = MAX_DMA_MDT_COUNT;
			else
				dcnt = count;
			count -= dcnt;

			arch_dma->mde[i].flag = dcnt |
				((i + 1) << MDT_FLAG_NXT_SHIFT) | MDT_FLAG_V;
			if (count == 0)
				break;
		}
		/* mark the last descriptor as end of chain */
		arch_dma->mde[i].flag |= MDT_FLAG_E;

		dma_writeb(0, DMA_IDX_REG, channel);
		dma_writel((unsigned int)virt_to_bus(arch_dma->mde),
			   DMA_MDT_REG, channel);
		pci_map_single(NULL, arch_dma->mde,
			       MAX_DMA_MDT * sizeof(dma_mdt_t),
			       PCI_DMA_TODEVICE);
	} else {
		if (dma->prolific_sg) {
			/* do nothing */
		} else {
			/* TBD */
			printk(KERN_ERR "Not implemented yet\n");
			return;
		}
	}

	/* set transfer direction and kick off the channel */
	val = dma_readb(DMA_CMD_REG, channel);
	if (dma->dma_mode == DMA_MODE_READ)
		val |= DMA_MODE_UPSTREAM;
	else
		val &= ~DMA_MODE_UPSTREAM;
	dma_writeb(val | DMA_CHN_ENABLE, DMA_CMD_REG, channel);
}

static int imapx200_dma_init_xxx(int chno, int dma_ch, int irq,
				 unsigned int base)
{
	struct imapx200_dma_chan *chptr = &imapx200_chans[chno];
	struct imapx200_dmac *dmac;
	char clkname[16];
	void __iomem *regs;
	void __iomem *regptr;
	int err, ch;

	dmac = kzalloc(sizeof(struct imapx200_dmac), GFP_KERNEL);
	if (!dmac) {
		printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
		return -ENOMEM;
	}

	dmac->sysdev.id = chno / 8;
	dmac->sysdev.cls = &dma_sysclass;

	err = sysdev_register(&dmac->sysdev);
	if (err) {
		printk(KERN_ERR "%s: failed to register sysdevice\n", __func__);
		goto err_alloc;
	}

	regs = ioremap(base, DW_REGLEN);
	if (!regs) {
		printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
		err = -ENXIO;
		goto err_dev;
	}

	snprintf(clkname, sizeof(clkname), "dma%d", dmac->sysdev.id);

	/* Clock handling is currently disabled; restore the matching
	 * cleanup in the error path when this is brought back.
	 *
	 * dmac->clk = clk_get(NULL, clkname);
	 * if (IS_ERR(dmac->clk)) {
	 *	printk(KERN_ERR "%s: failed to get clock %s\n",
	 *	       __func__, clkname);
	 *	err = PTR_ERR(dmac->clk);
	 *	goto err_map;
	 * }
	 * clk_enable(dmac->clk);
	 */

	dmac->regs = regs;
	dmac->dma_ch = dma_ch;
	dmac->channels = chptr;

	err = request_irq(irq, imapx200_dma_irq, 0, "DMA", dmac);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to get irq\n", __func__);
		goto err_clk;
	}

	/* register the 8 channels of this controller, one register
	 * window of DW_CH_STRIDE bytes per channel */
	regptr = regs;
	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
		       __func__, chno, regptr);
		chptr->bit = 1 << ch;
		chptr->number = chno;
		chptr->regs = regptr;
		regptr += DW_CH_STRIDE;
		dma_clear_bit(dmac, CH_EN, chptr->bit);
	}

	/* for the moment, permanently enable the controller */
	dma_writel(dmac, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "DW: IRQ %d, at %p\n", irq, regs);
	return 0;

err_clk:
	/* no clock is acquired above, so nothing to release here;
	 * re-enable these when the clock code is restored:
	 * clk_disable(dmac->clk);
	 * clk_put(dmac->clk);
	 */
err_map:
	iounmap(regs);
err_dev:
	sysdev_unregister(&dmac->sysdev);
err_alloc:
	kfree(dmac);
	return err;
}