static void ag71xx_dma_reset(struct ag71xx *ag)
{
	u32 val;
	int i;

	ag71xx_dump_dma_regs(ag);

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/*
	 * give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	mdelay(1);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, 0);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		printk(KERN_ALERT "%s: unable to clear DMA Rx status: %08x\n",
			ag->dev->name, val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		printk(KERN_ALERT "%s: unable to clear DMA Tx status: %08x\n",
			ag->dev->name, val);

	ag71xx_dump_dma_regs(ag);
}
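/*
 * For reference, a minimal sketch of the register accessors used throughout
 * this file, assuming ag->mac_base holds the ioremap()'d MAC register window.
 * The real definitions live in ag71xx.h; the readback in ag71xx_wr flushes
 * the posted write before the caller proceeds.
 */
static inline void ag71xx_wr(struct ag71xx *ag, unsigned reg, u32 value)
{
	__raw_writel(value, ag->mac_base + reg);
	/* flush the posted write */
	(void) __raw_readl(ag->mac_base + reg);
}

static inline u32 ag71xx_rr(struct ag71xx *ag, unsigned reg)
{
	return __raw_readl(ag->mac_base + reg);
}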
static void ag71xx_tx_packets(struct ag71xx *ag)
{
#ifdef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
#endif
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int sent;

	DBG("%s: processing TX ring\n", ag->dev->name);

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
#endif

	sent = 0;
	while (ring->dirty != ring->curr) {
		unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb = ring->buf[i].skb;

		if (!ag71xx_desc_empty(desc))
			break;

		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);

		ag->dev->stats.tx_bytes += skb->len;
		ag->dev->stats.tx_packets++;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		ring->dirty++;
		sent++;
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
		netif_wake_queue(ag->dev);
}
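/*
 * The completion loop above keys off the DESC_EMPTY bit that the DMA engine
 * sets in the descriptor control word once it has finished with the slot.
 * A minimal sketch of the test, mirroring the helper in ag71xx.h:
 */
static inline int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}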
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int done;

	pdata->ddr_flush();
	ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, done=%d, limit=%d\n",
			dev->name, done, limit);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return done;
	}

more:
	DBG("%s: stay in polling mode, done=%d, limit=%d\n",
		dev->name, done, limit);
	return done;

oom:
	if (netif_msg_rx_err(ag))
		printk(KERN_DEBUG "%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	netif_rx_complete(dev, napi);
	return 0;
}
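/*
 * The oom path above re-arms ag->oom_timer instead of leaving RX dead when
 * buffer allocation fails. A sketch of a matching timer handler that simply
 * reschedules NAPI so the poll loop can retry the refill; this assumes the
 * timer was set up with the net_device as its data argument.
 */
static void ag71xx_oom_timer_handler(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct ag71xx *ag = netdev_priv(dev);

	netif_rx_schedule(dev, &ag->napi);
}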
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
		dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = ring->buf[i].desc;
		struct sk_buff *skb;
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
				 AG71XX_RX_PKT_SIZE, DMA_FROM_DEVICE);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		skb_put(skb, pktlen);
		if (ag71xx_has_ar8216(ag))
			err = ag71xx_remove_ar8216_header(ag, skb, pktlen);

		if (err) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_NONE;

			/* hand the frame to an attached PHY/switch driver
			 * first, if one registered a receive hook */
			if (ag->phy_dev) {
				ag->phy_dev->netif_receive_skb(skb);
			} else {
				skb->protocol = eth_type_trans(skb, dev);
				netif_receive_skb(skb);
			}
		}

		ring->buf[i].skb = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}
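/*
 * ag71xx_desc_pktlen() above extracts the received frame length from the
 * descriptor control word. A minimal sketch, assuming the low bits of
 * desc->ctrl carry the byte count (DESC_PKTLEN_M), as in ag71xx.h:
 */
static inline int ag71xx_desc_pktlen(struct ag71xx_desc *desc)
{
	return desc->ctrl & DESC_PKTLEN_M;
}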
void ag71xx_link_adjust(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 mii_speed;

	if (!ag->link) {
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			printk(KERN_INFO "%s: link down\n", ag->dev->name);
		return;
	}

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		mii_speed = MII_CTRL_SPEED_1000;
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		mii_speed = MII_CTRL_SPEED_100;
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		mii_speed = MII_CTRL_SPEED_10;
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		BUG();
		return;
	}

	if (pdata->is_ar91xx)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x00780fff);
	else if (pdata->is_ar724x)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, pdata->fifo_cfg3);
	else
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);

	if (pdata->set_pll)
		pdata->set_pll(ag->speed);

	ag71xx_mii_ctrl_set_speed(ag, mii_speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	netif_carrier_on(ag->dev);
	if (netif_msg_link(ag))
		printk(KERN_INFO "%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x, mii_ctrl=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_mii_ctrl_rr(ag));
}
static void ag71xx_phy_link_update(struct ag71xx *ag)
{
	u32 cfg2;
	u32 ifctl;
	u32 pll;
	u32 fifo5;
	u32 mii_speed;

	if (!ag->link) {
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			printk(KERN_INFO "%s: link down\n", ag->dev->name);
		return;
	}

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BYTE_PER_CLK;

	switch (ag->speed) {
	case SPEED_1000:
		mii_speed = MII_CTRL_SPEED_1000;
		cfg2 |= MAC_CFG2_IF_1000;
		pll = PLL_VAL_1000;
		fifo5 |= FIFO_CFG5_BYTE_PER_CLK;
		break;
	case SPEED_100:
		mii_speed = MII_CTRL_SPEED_100;
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		pll = PLL_VAL_100;
		break;
	case SPEED_10:
		mii_speed = MII_CTRL_SPEED_10;
		cfg2 |= MAC_CFG2_IF_10_100;
		pll = PLL_VAL_10;
		break;
	default:
		BUG();
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);

	ag71xx_set_pll(ag, pll);
	ag71xx_mii_ctrl_set_speed(ag, mii_speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	netif_carrier_on(ag->dev);
	if (netif_msg_link(ag))
		printk(KERN_INFO "%s: link up (%sMbps/%s duplex)\n",
			ag->dev->name,
			ag71xx_speed_str(ag),
			(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo1=%#x, fifo2=%#x, fifo3=%#x, fifo4=%#x, fifo5=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
		ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, ifctl=%#x, mii_ctrl=%#x\n",
		ag->dev->name,
		ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
		ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
		ag71xx_mii_ctrl_rr(ag));
}
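/*
 * ag71xx_speed_str() used in the link-up messages above just stringifies
 * ag->speed; a sketch of the obvious implementation:
 */
static const char *ag71xx_speed_str(struct ag71xx *ag)
{
	switch (ag->speed) {
	case SPEED_1000:
		return "1000";
	case SPEED_100:
		return "100";
	case SPEED_10:
		return "10";
	}

	return "?";
}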
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
#ifndef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned long flags;
#endif
	int done = 0;

#ifndef AG71XX_NAPI_TX
	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);
#endif

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
		dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb;
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		pktlen -= ETH_FCS_LEN;

		/* TODO: move it into the refill function */
		dma_cache_wback_inv((unsigned long)skb->data, pktlen);

		skb_put(skb, pktlen);

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		ring->buf[i].skb = NULL;
		done++;

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ring->curr++;

		if ((ring->curr - ring->dirty) > (AG71XX_RX_RING_SIZE / 4))
			ag71xx_ring_rx_refill(ag);
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}
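/*
 * Both rx handlers end with ag71xx_ring_rx_refill(), which walks the dirty
 * descriptors, reallocates any missing skb, and hands the slots back to the
 * DMA engine. A condensed sketch under the same assumptions as the code
 * above (AG71XX_RX_PKT_SIZE buffers, DESC_EMPTY returning ownership to
 * hardware); the real version also reserves headroom and remaps the buffer
 * for DMA.
 */
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int count = 0;

	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		unsigned int i = ring->dirty % AG71XX_RX_RING_SIZE;

		if (ring->buf[i].skb == NULL) {
			struct sk_buff *skb;

			skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE);
			if (skb == NULL)
				break;

			skb->dev = ag->dev;
			ring->buf[i].skb = skb;
		}

		/* hand the descriptor back to the DMA engine */
		ring->descs[i].data = virt_to_phys(ring->buf[i].skb->data);
		ring->descs[i].ctrl = DESC_EMPTY;
		count++;
	}

	/* make sure descriptor updates are visible before RX restarts */
	wmb();

	return count;
}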