static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
			    buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
			    buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}
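/*
 * Illustrative sketch only: unlike the Ethernet driver below, which keeps its
 * state in the netdev private area via netdev_priv(), the HSS/HDLC code above
 * reaches its struct port through the generic-HDLC private pointer.  The real
 * dev_to_port() is defined elsewhere in ixp4xx_hss.c; the hypothetical helper
 * below only shows the assumed shape, relying on dev_to_hdlc() from
 * <linux/hdlc.h> and the driver's own struct port.
 */
static inline struct port *example_dev_to_port(struct net_device *dev)
{
	/* dev_to_hdlc() returns the hdlc_device embedded in the netdev
	 * private area; its ->priv member points at the driver's state. */
	return dev_to_hdlc(dev)->priv;
}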
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) && napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X errors %u\n",
			       dev->name, desc->status, desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE, DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:
			netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
				   desc->status, desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received; /* not all work done */
}
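/*
 * Illustrative sketch only: the little-endian RX path above (and the one in
 * eth_poll() below) copies the frame out of the NPE buffer with
 * memcpy_swab32(), restoring byte order in the process.  The driver's real
 * helper is defined elsewhere in this file; example_memcpy_swab32 is a
 * hypothetical name showing the assumed semantics, built on swab32() from
 * <linux/swab.h>: copy whole 32-bit words, byte-swapping each one.
 */
static inline void example_memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	while (cnt--)
		*dest++ = swab32(*src++);
}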
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) && napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}
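/*
 * Note on eth_close() below: the RX queue cannot simply be polled until it is
 * empty, because the NPE only hands RX buffers back while it is processing
 * traffic.  The close path therefore switches the NPE MAC into loopback mode
 * and, whenever the TX queue runs dry, injects a dummy one-byte frame; the
 * looped-back traffic keeps the NPE cycling until all RX_DESCS buffers have
 * been returned or MAX_CLOSE_WAIT iterations have elapsed.
 */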
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);

			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s) left in NPE\n",
		       dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) left in NPE\n",
		       dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}