/*
 * TX-done interrupt handler shared by all ethernet ports.
 *
 * Drains the common TXDONE_QUEUE: each entry is the physical address of a
 * completed TX descriptor with the owning NPE's id encoded in the low bits.
 * For each entry we account the packet, release its buffer, and return the
 * descriptor to that port's TX-ready queue, waking the netdev queue if it
 * had run dry.  Runs in IRQ context (uses free_buffer_irq()).
 */
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		/* low 2 bits of the queue entry identify the owning NPE */
		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);

		phys &= ~0x1F; /* mask out non-address bits */
		/* recover the descriptor index from its physical offset */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		/*
		 * Sample "was the TX-ready queue empty?" BEFORE putting the
		 * descriptor back; if it was, eth_xmit may have stopped the
		 * netdev queue, so wake it now that a descriptor is free.
		 */
		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}
/*
 * TX-done interrupt handler for one HSS HDLC port.
 *
 * Pops completed descriptors from this port's txdone queue, accounts the
 * transmitted packet, frees its buffer, and recycles the descriptor onto
 * the TX-ready queue — waking the netdev queue if TX-ready had been empty
 * (i.e. the xmit path may have stopped it).  Runs in IRQ context.
 */
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		/* check emptiness before the put — same race-avoidance
		 * pattern as eth_txdone_irq */
		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}
/*
 * NAPI receive poll for one HSS HDLC port.
 *
 * Consumes up to @budget descriptors from the port's RX queue.  On big-endian
 * ARM (__ARMEB__) buffers are handed to the stack zero-copy and a freshly
 * allocated+mapped skb replaces them; otherwise the data is byte-swapped out
 * of the DMA buffer into a new skb with memcpy_swab32() and the original
 * buffer is reused.  Error statuses only update statistics.  Every descriptor
 * is recycled onto the RX-free queue.  Returns the number of frames received
 * (== @budget when more work may remain).
 */
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif
	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			/*
			 * Close the race where an entry arrived between the
			 * empty queue_get_desc() and napi_complete(): if the
			 * queue is non-empty and we can re-arm NAPI, keep
			 * polling with the IRQ masked again.
			 */
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received;	/* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			/* allocate + map the replacement buffer up front so
			 * failure can be treated as a plain drop */
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* dropped or errored frame: just rearm the
			 * descriptor and keep polling */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		/* swap: deliver the DMA'd buffer, keep the new skb for RX */
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}
/*
 * Hard-start-xmit for an ethernet port.
 *
 * On big-endian ARM (__ARMEB__) the skb data is DMA-mapped in place and the
 * skb itself is stashed in tx_buff_tab until TX-done.  Otherwise the payload
 * is byte-swapped into a kmalloc'd bounce buffer (the skb is freed
 * immediately) and the bounce buffer is stashed instead.  The descriptor is
 * then queued to the NPE; if that exhausted the TX-ready queue the netdev
 * queue is stopped, with a re-check to avoid racing the TX-done IRQ.
 * Always returns NETDEV_TX_OK — oversized or unmappable frames are dropped
 * and counted, never requeued.
 */
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* copy from the aligned-down start so whole words are swapped;
	 * the descriptor will point at phys + offset */
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem); /* skb already freed above on this path */
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* the stack only calls us while TX-ready has a descriptor, so this
	 * must not fail */
	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb(); /* descriptor must be visible before the queue entry */
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
static int eth_poll(struct napi_struct *napi, int budget) { struct port *port = container_of(napi, struct port, napi); struct net_device *dev = port->netdev; unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); int received = 0; #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll\n", dev->name); #endif while (received < budget) { struct sk_buff *skb; struct desc *desc; int n; #ifdef __ARMEB__ struct sk_buff *temp; u32 phys; #endif if ((n = queue_get_desc(rxq, port, 0)) < 0) { #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll napi_complete\n", dev->name); #endif napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && napi_reschedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll" " napi_reschedule successed\n", dev->name); #endif qmgr_disable_irq(rxq); continue; } #if DEBUG_RX printk(KERN_DEBUG "%s: eth_poll all done\n", dev->name); #endif return received; /* all work done */ } desc = rx_desc_ptr(port, n); #ifdef __ARMEB__ if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { phys = dma_map_single(&dev->dev, skb->data, RX_BUFF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&dev->dev, phys)) { dev_kfree_skb(skb); skb = NULL; } } #else skb = netdev_alloc_skb(dev, ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); #endif if (!skb) { dev->stats.rx_dropped++; /* put the desc back on RX-ready queue */ desc->buf_len = MAX_MRU; desc->pkt_len = 0; queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); continue; } /* process received frame */ #ifdef __ARMEB__ temp = skb; skb = port->rx_buff_tab[n]; dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, RX_BUFF_SIZE, DMA_FROM_DEVICE); #else dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN, RX_BUFF_SIZE, DMA_FROM_DEVICE); memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); #endif skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, desc->pkt_len); debug_pkt(dev, "eth_poll", skb->data, skb->len); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; 
dev->stats.rx_bytes += skb->len; netif_receive_skb(skb); /* put the new buffer on RX-free queue */ #ifdef __ARMEB__ port->rx_buff_tab[n] = temp; desc->data = phys + NET_IP_ALIGN; #endif desc->buf_len = MAX_MRU; desc->pkt_len = 0; queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); received++; } #if DEBUG_RX printk(KERN_DEBUG "eth_poll(): end, not all work done\n"); #endif return received; /* not all work done */ }
/*
 * Bring an ethernet port down.
 *
 * Sequence: stop RX IRQ/NAPI/TX queue, then reclaim every buffer the NPE
 * still holds.  RX buffers are drained by enabling NPE loopback and, when
 * the TX queue is empty, injecting dummy 1-byte frames so looped traffic
 * keeps flushing descriptors back to us; bounded by MAX_CLOSE_WAIT
 * iterations.  TX buffers are reclaimed from the TX and TX-ready queues the
 * same way.  Finally loopback is disabled, the PHY stopped, the shared
 * TXDONE IRQ masked when the last port closes, and the port's queues and
 * descriptors freed.  Always returns 0.
 */
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	/* every descriptor recovered here is one fewer still in the NPE */
	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1; /* 1 = loopback on */
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n",
		       dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb(); /* descriptor before queue entry */
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	/* msg still holds NPE_SETLOOPBACK_MODE/eth_id from above */
	msg.byte3 = 0; /* 0 = loopback off */
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	/* TXDONE_QUEUE is shared by all ports — only mask its IRQ when the
	 * last open port closes */
	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}