/* Queue one skb for transmission on the non-gbit GRETH MAC.
 *
 * The TX descriptor ring uses pre-allocated DMA buffers: the skb data is
 * copied into the buffer already bound to the next descriptor, so the skb
 * itself is always freed before returning (both success and error paths).
 *
 * Returns NETDEV_TX_OK (also for oversized frames, which are dropped and
 * counted in tx_errors) or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX Ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		/* Ring full: ask for a TX-complete interrupt to restart the
		 * queue. devlock protects the control register against
		 * concurrent read-modify-write from the poll/irq paths.
		 */
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		/* Frame too large for a single descriptor buffer: drop it. */
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	/* Copy payload into the descriptor's pre-mapped DMA buffer. */
	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	/* Enable descriptor, request per-descriptor IRQ, encode length. */
	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	/* Remember length for TX-complete byte accounting. */
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);

	/* devlock: greth_enable_tx touches the shared control register. */
	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	/* skb data was copied; the skb is never handed to hardware. */
	dev_kfree_skb(skb);
	return err;
}
/* NAPI poll handler: reclaim completed TX descriptors, then receive up to
 * @budget packets, rescheduling while more work is pending.
 *
 * Fix: the original called greth_enable_irqs() unconditionally on exit,
 * re-enabling device interrupts even when the budget was exhausted and the
 * NAPI context remained scheduled — causing spurious interrupts while
 * polling was still in progress. Interrupts are now only re-enabled once
 * NAPI has actually completed (work_done < budget and nothing pending).
 *
 * Returns the number of RX packets processed (<= budget).
 */
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;

	greth = container_of(napi, struct greth_private, napi);

	/* Free up transmitted descriptors first so xmit can make progress. */
	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
	} else {
		greth_clean_tx(greth->netdev);
	}

restart_poll:
	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* NOTE(review): packets arriving between the RX pass and
		 * napi_complete() would otherwise be stranded until the next
		 * interrupt — recheck and reschedule if any are pending.
		 */
		if (greth_pending_packets(greth)) {
			napi_reschedule(napi);
			goto restart_poll;
		}
		/* Only re-enable device IRQs once polling is truly done;
		 * when the budget is exhausted NAPI stays scheduled and
		 * interrupts must remain off.
		 */
		greth_enable_irqs(greth);
	}

	return work_done;
}
/* NAPI poll handler: reclaim TX descriptors (only while the queue is
 * stopped), receive up to @budget packets, and complete NAPI race-free.
 *
 * Completion protocol: under devlock, re-enable the relevant IRQ bits in
 * the control register, then recheck the status register for work that
 * raced in. If anything is pending, the control write is rolled back and
 * the whole TX+RX pass restarts; otherwise NAPI is completed while still
 * holding the lock, closing the lost-wakeup window.
 *
 * Returns the number of RX packets processed (<= budget).
 */
static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	unsigned long flags;
	u32 mask, ctrl;

	greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
	/* TX cleaning is only needed when xmit stopped the queue waiting
	 * for free descriptors; otherwise xmit cleans the ring itself.
	 */
	if (netif_queue_stopped(greth->netdev)) {
		if (greth->gbit_mac)
			greth_clean_tx_gbit(greth->netdev);
		else
			greth_clean_tx(greth->netdev);
	}

	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {
		/* devlock serializes control-register updates against the
		 * xmit and interrupt paths.
		 */
		spin_lock_irqsave(&greth->devlock, flags);

		ctrl = GRETH_REGLOAD(greth->regs->control);
		if (netif_queue_stopped(greth->netdev)) {
			/* Queue stopped: arm both TX and RX IRQs and watch
			 * for all four TX/RX status conditions.
			 */
			GRETH_REGSAVE(greth->regs->control,
					ctrl | GRETH_TXI | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE |
			       GRETH_INT_TX | GRETH_INT_TE;
		} else {
			/* Queue running: only RX IRQs are needed. */
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE;
		}

		if (GRETH_REGLOAD(greth->regs->status) & mask) {
			/* Work raced in after the poll pass: undo the IRQ
			 * enable and poll again instead of completing.
			 */
			GRETH_REGSAVE(greth->regs->control, ctrl);
			spin_unlock_irqrestore(&greth->devlock, flags);
			goto restart_txrx_poll;
		} else {
			/* __napi_complete: IRQs are off under the irqsave
			 * lock, so the lock-free variant is safe here.
			 */
			__napi_complete(napi);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
	}

	return work_done;
}
static netdev_tx_t greth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; int err = NETDEV_TX_OK; u32 status, dma_addr; bdp = greth->tx_bd_base + greth->tx_next; if (unlikely(greth->tx_free <= 0)) { netif_stop_queue(dev); return NETDEV_TX_BUSY; } if (netif_msg_pktdata(greth)) greth_print_tx_packet(skb); if (unlikely(skb->len > MAX_FRAME_SIZE)) { dev->stats.tx_errors++; goto out; } dma_addr = greth_read_bd(&bdp->addr); memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); /* Wrap around descriptor ring */ if (greth->tx_next == GRETH_TXBD_NUM_MASK) { status |= GRETH_BD_WR; } greth->tx_next = NEXT_TX(greth->tx_next); greth->tx_free--; /* No more descriptors */ if (unlikely(greth->tx_free == 0)) { /* Free transmitted descriptors */ greth_clean_tx(dev); /* If nothing was cleaned, stop queue & wait for irq */ if (unlikely(greth->tx_free == 0)) { status |= GRETH_BD_IE; netif_stop_queue(dev); } } /* Write descriptor control word and enable transmission */ greth_write_bd(&bdp->stat, status); greth_enable_tx(greth); out: dev_kfree_skb(skb); return err; }