/* Simplified (harness) variant of cpmac_end_xmit(): the net_device and
 * cpmac_priv context is replaced by the globals desc_ring, netdev and
 * cplock, and the debug/message paths are commented out. */
static void cpmac_end_xmit(int queue)
{
    // struct cpmac_desc *desc;
    // struct cpmac_priv *priv = netdev_priv(dev);
    // desc = desc_ring[queue];

    cpmac_write(CPMAC_TX_ACK(queue), (u32)desc_ring[queue].mapping);
    if (likely(desc_ring[queue].skb)) {
        spin_lock(cplock);
        netdev.stats.tx_packets++;
        netdev.stats.tx_bytes += desc_ring[queue].skb->len;
        spin_unlock(cplock);
        dma_unmap_single(desc_ring[queue].data_mapping,
                         desc_ring[queue].skb->len, DMA_TO_DEVICE);

        // if (unlikely(netif_msg_tx_done(priv)))
        //     netdev_dbg(dev, "sent 0x%p, len=%d\n",
        //                desc_ring[queue].skb, desc_ring[queue].skb->len);

        dev_kfree_skb_irq(desc_ring[queue].skb);
        desc_ring[queue].skb = NULL;
        // if (__netif_subqueue_stopped(dev, queue))
        netif_wake_subqueue();
    } else {
        // if (netif_msg_tx_err(priv) && net_ratelimit())
        //     netdev_warn(dev, "end_xmit: spurious interrupt\n");
        // if (__netif_subqueue_stopped(dev, queue))
        netif_wake_subqueue();
    }
}
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct cpmac_priv *priv;
    int queue;
    u32 status;

    priv = netdev_priv(dev);

    status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

    if (unlikely(netif_msg_intr(priv)))
        printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n",
               dev->name, status);

    if (status & MAC_INT_TX)
        cpmac_end_xmit(dev, (status & 7));

    if (status & MAC_INT_RX) {
        queue = (status >> 8) & 7;
        if (napi_schedule_prep(&priv->napi)) {
            cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
            __napi_schedule(&priv->napi);
        }
    }

    cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
        cpmac_check_status(dev);

    return IRQ_HANDLED;
}
static void cpmac_check_status(struct net_device *dev)
{
    struct cpmac_priv *priv = netdev_priv(dev);
    u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
    int rx_channel = (macstatus >> 8) & 7;
    int rx_code = (macstatus >> 12) & 15;
    int tx_channel = (macstatus >> 16) & 7;
    int tx_code = (macstatus >> 20) & 15;

    if (rx_code || tx_code) {
        if (netif_msg_drv(priv) && net_ratelimit()) {
            /* Can't find any documentation on what these
             * error codes actually are. So just log them and hope..
             */
            if (rx_code)
                printk(KERN_WARNING "%s: host error %d on rx "
                       "channel %d (macstatus %08x), resetting\n",
                       dev->name, rx_code, rx_channel, macstatus);
            if (tx_code)
                printk(KERN_WARNING "%s: host error %d on tx "
                       "channel %d (macstatus %08x), resetting\n",
                       dev->name, tx_code, tx_channel, macstatus);
        }

        netif_tx_stop_all_queues(dev);
        cpmac_hw_stop(dev);
        if (schedule_work(&priv->reset_work))
            atomic_inc(&priv->reset_pending);
        if (unlikely(netif_msg_hw(priv)))
            cpmac_dump_regs(dev);
    }
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
    struct cpmac_desc *desc;
    struct cpmac_priv *priv = netdev_priv(dev);

    desc = &priv->desc_ring[queue];
    cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
    if (likely(desc->skb)) {
        spin_lock(&priv->lock);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += desc->skb->len;
        spin_unlock(&priv->lock);
        dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                         DMA_TO_DEVICE);

        if (unlikely(netif_msg_tx_done(priv)))
            printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
                   desc->skb, desc->skb->len);

        dev_kfree_skb_irq(desc->skb);
        desc->skb = NULL;
        if (__netif_subqueue_stopped(dev, queue))
            netif_wake_subqueue(dev, queue);
    } else {
        if (netif_msg_tx_err(priv) && net_ratelimit())
            printk(KERN_WARNING
                   "%s: end_xmit: spurious interrupt\n", dev->name);
        if (__netif_subqueue_stopped(dev, queue))
            netif_wake_subqueue(dev, queue);
    }
}
static int cpmac_mdio_reset(struct mii_bus *bus)
{
    ar7_device_reset(AR7_RESET_BIT_MDIO);
    cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));

    return 0;
}
static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
        cpu_relax();
    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
                MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));

    return 0;
}
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
    u32 val;

    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
        cpu_relax();
    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
                MDIO_PHY(phy_id));
    while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
        cpu_relax();

    return MDIO_DATA(val);
}
static int cpmac_mdio_reset(struct mii_bus *bus)
{
    struct clk *cpmac_clk;

    cpmac_clk = clk_get(&bus->dev, "cpmac");
    if (IS_ERR(cpmac_clk)) {
        printk(KERN_ERR "unable to get cpmac clock\n");
        return -1;
    }
    ar7_device_reset(AR7_RESET_BIT_MDIO);
    cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));

    return 0;
}
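/*
 * Illustrative sketch, not part of the driver: both cpmac_mdio_reset()
 * variants above program MDIOC_CLKDIV with "bus rate / 2200000 - 1", i.e.
 * they divide the peripheral clock down to roughly 2.2 MHz for the MDIO
 * bus. The helper below only restates that arithmetic; its name and the
 * example rate in the comment are assumptions made for illustration.
 */
static inline u32 cpmac_mdio_clkdiv(unsigned long bus_rate_hz)
{
    /* e.g. a 120 MHz peripheral clock would yield a divider of 53 */
    return bus_rate_hz / 2200000 - 1;
}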
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                    struct cpmac_desc *desc)
{
    struct sk_buff *skb, *result = NULL;

    if (unlikely(netif_msg_hw(priv)))
        cpmac_dump_desc(priv->dev, desc);
    cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
    if (unlikely(!desc->datalen)) {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: rx: spurious interrupt\n",
                   priv->dev->name);
        return NULL;
    }

    skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
    if (likely(skb)) {
        skb_reserve(skb, 2);
        skb_put(desc->skb, desc->datalen);
        desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
        desc->skb->ip_summed = CHECKSUM_NONE;
        priv->dev->stats.rx_packets++;
        priv->dev->stats.rx_bytes += desc->datalen;
        result = desc->skb;
        dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                         CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
        desc->skb = skb;
        desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
                                            CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
        desc->hw_data = (u32)desc->data_mapping;
        if (unlikely(netif_msg_pktdata(priv))) {
            printk(KERN_DEBUG "%s: received packet:\n", priv->dev->name);
            cpmac_dump_skb(priv->dev, result);
        }
    } else {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: low on skbs, dropping packet\n",
                   priv->dev->name);
        priv->dev->stats.rx_dropped++;
    }

    desc->buflen = CPMAC_SKB_SIZE;
    desc->dataflags = CPMAC_OWN;

    return result;
}
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int queue, len;
    struct cpmac_desc *desc;
    struct cpmac_priv *priv = netdev_priv(dev);

    if (unlikely(atomic_read(&priv->reset_pending)))
        return NETDEV_TX_BUSY;

    if (unlikely(skb_padto(skb, ETH_ZLEN)))
        return NETDEV_TX_OK;

    len = max(skb->len, ETH_ZLEN);
    queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
    netif_stop_subqueue(dev, queue);
#else
    netif_stop_queue(dev);
#endif

    desc = &priv->desc_ring[queue];
    if (unlikely(desc->dataflags & CPMAC_OWN)) {
        if (netif_msg_tx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: tx dma ring full\n", dev->name);
        return NETDEV_TX_BUSY;
    }

    spin_lock(&priv->lock);
    dev->trans_start = jiffies;
    spin_unlock(&priv->lock);
    desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
    desc->skb = skb;
    desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
                                        DMA_TO_DEVICE);
    desc->hw_data = (u32)desc->data_mapping;
    desc->datalen = len;
    desc->buflen = len;
    if (unlikely(netif_msg_tx_queued(priv)))
        printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
               skb->len);
    if (unlikely(netif_msg_hw(priv)))
        cpmac_dump_desc(dev, desc);
    if (unlikely(netif_msg_pktdata(priv)))
        cpmac_dump_skb(dev, skb);
    cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

    return NETDEV_TX_OK;
}
//static int cpmac_open(struct net_device *dev);
//
//static void cpmac_dump_regs(struct net_device *dev)
//{
//    int i;
//    struct cpmac_priv *priv = netdev_priv(dev);
//
//    for (i = 0; i < CPMAC_REG_END; i += 4) {
//        if (i % 16 == 0) {
//            if (i)
//                printk("\n");
//            printk("%s: reg[%p]:", dev->name, priv->regs + i);
//        }
//        printk(" %08x", cpmac_read(priv->regs, i));
//    }
//    printk("\n");
//}
//
//static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
//{
//    int i;
//
//    printk("%s: desc[%p]:", dev->name, desc);
//    for (i = 0; i < sizeof(*desc) / 4; i++)
//        printk(" %08x", ((u32 *)desc)[i]);
//    printk("\n");
//}
//
//static void cpmac_dump_all_desc(struct net_device *dev)
//{
//    struct cpmac_priv *priv = netdev_priv(dev);
//    struct cpmac_desc *dump = priv->rx_head;
//
//    do {
//        cpmac_dump_desc(dev, dump);
//        dump = dump->next;
//    } while (dump != priv->rx_head);
//}
//
//static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
//{
//    int i;
//
//    printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
//    for (i = 0; i < skb->len; i++) {
//        if (i % 16 == 0) {
//            if (i)
//                printk("\n");
//            printk("%s: data[%p]:", dev->name, skb->data + i);
//        }
//        printk(" %02x", ((u8 *)skb->data)[i]);
//    }
//    printk("\n");
//}
//
//static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
//{
//    u32 val;
//
//    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
//        cpu_relax();
//    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
//                MDIO_PHY(phy_id));
//    while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
//        cpu_relax();
//
//    return MDIO_DATA(val);
//}
//
//static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
//                            int reg, u16 val)
//{
//    while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
//        cpu_relax();
//    cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
//                MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
//
//    return 0;
//}

/* Simplified (harness) variant of cpmac_mdio_reset(): the clock lookup is
 * commented out and the divider input is replaced by the nondeterministic
 * value "nondet". */
static int cpmac_mdio_reset()
{
    // struct clk *cpmac_clk;
    // cpmac_clk = clk_get("cpmac");
    // if (IS_ERR(cpmac_clk)) {
    //     pr_err("unable to get cpmac clock\n");
    //     return -1;
    // }
    ar7_device_reset(AR7_RESET_BIT_MDIO);
    cpmac_write(CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                MDIOC_CLKDIV(/*clk_get_rate(cpmac_clk)*/ nondet / 2200000 - 1));

    return 0;
}
static void cpmac_hw_error(struct work_struct *work)
{
    struct cpmac_priv *priv =
        container_of(work, struct cpmac_priv, reset_work);

    spin_lock(&priv->rx_lock);
    cpmac_clear_rx(priv->dev);
    spin_unlock(&priv->rx_lock);
    cpmac_clear_tx(priv->dev);
    cpmac_hw_start(priv->dev);
    barrier();
    atomic_dec(&priv->reset_pending);

    netif_tx_wake_all_queues(priv->dev);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
static void cpmac_hw_stop(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

    ar7_device_reset(pdata->reset_bit);
    cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
    cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
    for (i = 0; i < 8; i++) {
        cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
        cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
    }
    cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}
static void cpmac_hw_error(struct work_struct *work)
{
    int i;
    struct cpmac_priv *priv =
        container_of(work, struct cpmac_priv, reset_work);

    spin_lock(&priv->rx_lock);
    cpmac_clear_rx(priv->dev);
    spin_unlock(&priv->rx_lock);
    cpmac_clear_tx(priv->dev);
    cpmac_hw_start(priv->dev);
    barrier();
    atomic_dec(&priv->reset_pending);

    for (i = 0; i < CPMAC_QUEUES; i++)
        netif_wake_subqueue(priv->dev, i);
    netif_wake_queue(priv->dev);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
/* Simplified (harness) variant of cpmac_hw_stop(): the net_device and
 * platform-data lookups are commented out, the channel loop is commented
 * out, and the register helpers operate on globals. */
static void cpmac_hw_stop(/*struct net_device *dev*/)
{
    int i;
    //struct cpmac_priv *priv = netdev_priv(dev);
    //struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

    ar7_device_reset(pdata.reset_bit);
    cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) & ~1);
    cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) & ~1);
    //for (i = 0; i < 8; i++) {
    cpmac_write(CPMAC_TX_PTR(i), 0);
    cpmac_write_CPMAC_RX_PTR(i, 0);
    //}
    cpmac_write(CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) & ~MAC_MII);
}
static void cpmac_set_multicast_list(struct net_device *dev)
{
    struct dev_mc_list *iter;
    int i;
    u8 tmp;
    u32 mbp, bit, hash[2] = { 0, };
    struct cpmac_priv *priv = netdev_priv(dev);

    mbp = cpmac_read(priv->regs, CPMAC_MBP);
    if (dev->flags & IFF_PROMISC) {
        cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
                    MBP_RXPROMISC);
    } else {
        cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
        if (dev->flags & IFF_ALLMULTI) {
            /* enable all multicast mode */
            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
        } else {
            /*
             * cpmac uses some strange mac address hashing
             * (not crc32)
             */
            for (i = 0, iter = dev->mc_list; i < dev->mc_count;
                 i++, iter = iter->next) {
                bit = 0;
                tmp = iter->dmi_addr[0];
                bit ^= (tmp >> 2) ^ (tmp << 4);
                tmp = iter->dmi_addr[1];
                bit ^= (tmp >> 4) ^ (tmp << 2);
                tmp = iter->dmi_addr[2];
                bit ^= (tmp >> 6) ^ tmp;
                tmp = iter->dmi_addr[3];
                bit ^= (tmp >> 2) ^ (tmp << 4);
                tmp = iter->dmi_addr[4];
                bit ^= (tmp >> 4) ^ (tmp << 2);
                tmp = iter->dmi_addr[5];
                bit ^= (tmp >> 6) ^ tmp;
                bit &= 0x3f;
                hash[bit / 32] |= 1 << (bit % 32);
            }
            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
        }
    }
}
static void cpmac_set_multicast_list(struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    u8 tmp;
    u32 mbp, bit, hash[2] = { 0, };
    struct cpmac_priv *priv = netdev_priv(dev);

    mbp = cpmac_read(priv->regs, CPMAC_MBP);
    if (dev->flags & IFF_PROMISC) {
        cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
                    MBP_RXPROMISC);
    } else {
        cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
        if (dev->flags & IFF_ALLMULTI) {
            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
        } else {
            netdev_for_each_mc_addr(ha, dev) {
                bit = 0;
                tmp = ha->addr[0];
                bit ^= (tmp >> 2) ^ (tmp << 4);
                tmp = ha->addr[1];
                bit ^= (tmp >> 4) ^ (tmp << 2);
                tmp = ha->addr[2];
                bit ^= (tmp >> 6) ^ tmp;
                tmp = ha->addr[3];
                bit ^= (tmp >> 2) ^ (tmp << 4);
                tmp = ha->addr[4];
                bit ^= (tmp >> 4) ^ (tmp << 2);
                tmp = ha->addr[5];
                bit ^= (tmp >> 6) ^ tmp;
                bit &= 0x3f;
                hash[bit / 32] |= 1 << (bit % 32);
            }
            cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
            cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
        }
    }
}
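/*
 * Illustrative sketch, not part of the driver: the 6-bit multicast hash
 * computed by both cpmac_set_multicast_list() variants above, factored
 * into a standalone helper so the per-byte bit mixing is easier to
 * follow. The helper name is an assumption made for illustration.
 */
static u32 cpmac_mc_hash(const u8 addr[6])
{
    u32 bit = 0;

    bit ^= (addr[0] >> 2) ^ (addr[0] << 4);
    bit ^= (addr[1] >> 4) ^ (addr[1] << 2);
    bit ^= (addr[2] >> 6) ^ addr[2];
    bit ^= (addr[3] >> 2) ^ (addr[3] << 4);
    bit ^= (addr[4] >> 4) ^ (addr[4] << 2);
    bit ^= (addr[5] >> 6) ^ addr[5];

    return bit & 0x3f; /* selects one of the 64 bits in MAC_HASH_LO/HI */
}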
/* Simplified (harness) variant of cpmac_hw_start(): device/platform
 * context is replaced by the globals pdata, rx_head and netdev, the
 * channel loops are commented out, and the register helpers operate on
 * globals. */
static void cpmac_hw_start()
{
    int i;
    //struct cpmac_priv *priv = netdev_priv(dev);
    //struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

    ar7_device_reset(pdata.reset_bit);
    //for (i = 0; i < 8; i++) {
    cpmac_write(CPMAC_TX_PTR(i), 0);
    cpmac_write_CPMAC_RX_PTR(i, 0);
    //}
    cpmac_write_CPMAC_RX_PTR(0, rx_head->mapping);

    cpmac_write(CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST);
    cpmac_write(CPMAC_BUFFER_OFFSET, 0);
    //for (i = 0; i < 8; i++)
    cpmac_write(CPMAC_MAC_ADDR_LO(i), netdev.dev_addr[5]);
    cpmac_write(CPMAC_MAC_ADDR_MID, netdev.dev_addr[4]);
    cpmac_write(CPMAC_MAC_ADDR_HI, netdev.dev_addr[0] |
                (netdev.dev_addr[1] << 8) | (netdev.dev_addr[2] << 16) |
                (netdev.dev_addr[3] << 24));
    cpmac_write(CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
    cpmac_write(CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(CPMAC_UNICAST_ENABLE, 1);
    cpmac_write(CPMAC_RX_INT_ENABLE, 1);
    cpmac_write(CPMAC_TX_INT_ENABLE, 0xff);
    cpmac_write(CPMAC_MAC_INT_ENABLE, 3);
    cpmac_write(CPMAC_RX_CONTROL, cpmac_read(CPMAC_RX_CONTROL) | 1);
    cpmac_write(CPMAC_TX_CONTROL, cpmac_read(CPMAC_TX_CONTROL) | 1);
    cpmac_write(CPMAC_MAC_CONTROL, cpmac_read(CPMAC_MAC_CONTROL) |
                MAC_MII | MAC_FDX);
}
static void cpmac_hw_start(struct net_device *dev)
{
    int i;
    struct cpmac_priv *priv = netdev_priv(dev);
    struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

    ar7_device_reset(pdata->reset_bit);
    for (i = 0; i < 8; i++) {
        cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
        cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
    }
    cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

    cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
                MBP_RXMCAST);
    cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
    for (i = 0; i < 8; i++)
        cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
                (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
                (dev->dev_addr[3] << 24));
    cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
    cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
    cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
    cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
    cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

    cpmac_write(priv->regs, CPMAC_RX_CONTROL,
                cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
    cpmac_write(priv->regs, CPMAC_TX_CONTROL,
                cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
    cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
                cpmac_read(priv->regs, CPMAC_MAC_CONTROL) |
                MAC_MII | MAC_FDX);
}
static int cpmac_poll(struct napi_struct *napi, int budget)
{
    struct sk_buff *skb;
    struct cpmac_desc *desc, *restart;
    struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
    int received = 0, processed = 0;

    spin_lock(&priv->rx_lock);
    if (unlikely(!priv->rx_head)) {
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                   priv->dev->name);
        spin_unlock(&priv->rx_lock);
        netif_rx_complete(priv->dev, napi);
        return 0;
    }

    desc = priv->rx_head;
    restart = NULL;
    while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
        processed++;

        if ((desc->dataflags & CPMAC_EOQ) != 0) {
            /* The last update to eoq->hw_next didn't happen
             * soon enough, and the receiver stopped here.
             * Remember this descriptor so we can restart
             * the receiver after freeing some space.
             */
            if (unlikely(restart)) {
                if (netif_msg_rx_err(priv))
                    printk(KERN_ERR "%s: poll found a"
                           " duplicate EOQ: %p and %p\n",
                           priv->dev->name, restart, desc);
                goto fatal_error;
            }

            restart = desc->next;
        }

        skb = cpmac_rx_one(priv, desc);
        if (likely(skb)) {
            netif_receive_skb(skb);
            received++;
        }
        desc = desc->next;
    }

    if (desc != priv->rx_head) {
        /* We freed some buffers, but not the whole ring,
         * add what we did free to the rx list
         */
        desc->prev->hw_next = (u32)0;
        priv->rx_head->prev->hw_next = priv->rx_head->mapping;
    }

    /* Optimization: If we did not actually process an EOQ (perhaps because
     * of quota limits), check to see if the tail of the queue has EOQ set.
     * We should immediately restart in that case so that the receiver can
     * restart and run in parallel with more packet processing.
     * This lets us handle slightly larger bursts before running
     * out of ring space (assuming dev->weight < ring_size)
     */
    if (!restart &&
        (priv->rx_head->prev->dataflags & (CPMAC_OWN | CPMAC_EOQ))
            == CPMAC_EOQ &&
        (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
        /* reset EOQ so the poll loop (above) doesn't try to
         * restart this when it eventually gets to this descriptor.
         */
        priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
        restart = priv->rx_head;
    }

    if (restart) {
        priv->dev->stats.rx_errors++;
        priv->dev->stats.rx_fifo_errors++;
        if (netif_msg_rx_err(priv) && net_ratelimit())
            printk(KERN_WARNING "%s: rx dma ring overrun\n",
                   priv->dev->name);

        if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
            if (netif_msg_drv(priv))
                printk(KERN_ERR "%s: cpmac_poll is trying to "
                       "restart rx from a descriptor that's "
                       "not free: %p\n", priv->dev->name, restart);
            goto fatal_error;
        }

        cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
    }

    priv->rx_head = desc;
    spin_unlock(&priv->rx_lock);
    if (unlikely(netif_msg_rx_status(priv)))
        printk(KERN_DEBUG "%s: poll processed %d packets\n",
               priv->dev->name, received);

    if (processed == 0) {
        /* we ran out of packets to read,
         * revert to interrupt-driven mode
         */
        netif_rx_complete(priv->dev, napi);
        cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
        return 0;
    }

    return 1;

fatal_error:
    /* Something went horribly wrong.
     * Reset hardware to try to recover rather than wedging.
     */
    if (netif_msg_drv(priv)) {
        printk(KERN_ERR "%s: cpmac_poll is confused. "
               "Resetting hardware\n", priv->dev->name);
        cpmac_dump_all_desc(priv->dev);
        printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
               priv->dev->name,
               cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
               cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
    }

    spin_unlock(&priv->rx_lock);
    netif_rx_complete(priv->dev, napi);
    netif_tx_stop_all_queues(priv->dev);
    napi_disable(&priv->napi);

    atomic_inc(&priv->reset_pending);
    cpmac_hw_stop(priv->dev);
    if (!schedule_work(&priv->reset_work))
        atomic_dec(&priv->reset_pending);

    return 0;
}