/*
 * ndo_start_xmit hook for the 10/100 GRETH MAC (no scatter/gather).
 *
 * The frame is memcpy'd into the fixed DMA buffer already attached to the
 * next TX descriptor, the descriptor is armed, and the transmitter is
 * kicked under devlock (shared with the poll/IRQ paths).
 *
 * Returns NETDEV_TX_OK when the skb has been consumed (copied or dropped),
 * or NETDEV_TX_BUSY when the ring is full — in that case the skb is NOT
 * freed and the stack will requeue it.
 */
static netdev_tx_t greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX Ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		/* Ring still full after cleaning: arm the TX-completion IRQ
		 * so the stopped queue gets woken later, then back off. */
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	/* Oversized frames cannot be described by one descriptor: drop. */
	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	/* Copy the frame into the descriptor's preallocated buffer and flush
	 * it out so the DMA engine sees the data. */
	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	/* Frame was copied into the DMA buffer (or dropped): the skb is
	 * always consumed on this path. */
	dev_kfree_skb(skb);
	return err;
}
static int greth_open(struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); int err; err = greth_init_rings(greth); if (err) { if (netif_msg_ifup(greth)) dev_err(&dev->dev, "Could not allocate memory for DMA rings\n"); return err; } err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev); if (err) { if (netif_msg_ifup(greth)) dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq); greth_clean_rings(greth); return err; } if (netif_msg_ifup(greth)) dev_dbg(&dev->dev, " starting queue\n"); netif_start_queue(dev); GRETH_REGSAVE(greth->regs->status, 0xFF); napi_enable(&greth->napi); greth_enable_irqs(greth); greth_enable_tx(greth); greth_enable_rx(greth); return 0; }
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; u32 status = 0, dma_addr; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; nr_frags = skb_shinfo(skb)->nr_frags; if (greth->tx_free < nr_frags + 1) { netif_stop_queue(dev); err = NETDEV_TX_BUSY; goto out; } if (netif_msg_pktdata(greth)) greth_print_tx_packet(skb); if (unlikely(skb->len > MAX_FRAME_SIZE)) { dev->stats.tx_errors++; goto out; } /* Save skb pointer. */ greth->tx_skbuff[greth->tx_next] = skb; /* Linear buf */ if (nr_frags != 0) status = GRETH_TXBD_MORE; status |= GRETH_TXBD_CSALL; status |= skb_headlen(skb) & GRETH_BD_LEN; if (greth->tx_next == GRETH_TXBD_NUM_MASK) status |= GRETH_BD_WR; bdp = greth->tx_bd_base + greth->tx_next; greth_write_bd(&bdp->stat, status); dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(greth->dev, dma_addr))) goto map_error; greth_write_bd(&bdp->addr, dma_addr); curr_tx = NEXT_TX(greth->tx_next); /* Frags */ for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; greth->tx_skbuff[curr_tx] = NULL; bdp = greth->tx_bd_base + curr_tx; status = GRETH_TXBD_CSALL; status |= frag->size & GRETH_BD_LEN; /* Wrap around descriptor ring */ if (curr_tx == GRETH_TXBD_NUM_MASK) status |= GRETH_BD_WR; /* More fragments left */ if (i < nr_frags - 1) status |= GRETH_TXBD_MORE; /* ... last fragment, check if out of descriptors */ else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) { /* Enable interrupts and stop queue */ status |= GRETH_BD_IE; netif_stop_queue(dev); } greth_write_bd(&bdp->stat, status); dma_addr = dma_map_page(greth->dev, frag->page, frag->page_offset, frag->size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(greth->dev, dma_addr))) goto frag_map_error; greth_write_bd(&bdp->addr, dma_addr); curr_tx = NEXT_TX(curr_tx); } wmb(); /* Enable the descriptors that we configured ... 
*/ for (i = 0; i < nr_frags + 1; i++) { bdp = greth->tx_bd_base + greth->tx_next; greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); greth->tx_next = NEXT_TX(greth->tx_next); greth->tx_free--; } greth_enable_tx(greth); return NETDEV_TX_OK; frag_map_error: /* Unmap SKB mappings that succeeded */ for (i = 0; greth->tx_next + i != curr_tx; i++) { bdp = greth->tx_bd_base + greth->tx_next + i; dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), greth_read_bd(&bdp->stat) & GRETH_BD_LEN, DMA_TO_DEVICE); } map_error: if (net_ratelimit()) dev_warn(greth->dev, "Could not create TX DMA mapping\n"); dev_kfree_skb(skb); out: return err; }
/*
 * ndo_start_xmit hook for the gigabit GRETH MAC (scatter/gather capable,
 * locked variant).
 *
 * One descriptor carries the linear part of the skb and one more per page
 * fragment, chained with GRETH_TXBD_MORE.  Fragment descriptors are armed
 * (GRETH_BD_EN) during setup; the head descriptor is enabled last, after
 * wmb(), so the DMA engine never chases MORE into an unarmed descriptor.
 * Hardware checksumming (GRETH_TXBD_CSALL) is requested only for
 * CHECKSUM_PARTIAL skbs.
 *
 * Returns NETDEV_TX_OK when the skb is consumed, NETDEV_TX_BUSY (skb not
 * freed; the stack requeues it) when too few descriptors are free.
 */
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr, ctrl;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* Clean TX Ring */
	greth_clean_tx_gbit(dev);

	if (greth->tx_free < nr_frags + 1) {
		/* Not enough descriptors: arm the TX completion IRQ so the
		 * queue is woken later, then tell the stack to back off. */
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf — head of the chain, left disabled until the end. */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags: armed immediately; the engine ignores them until the head
	 * descriptor is enabled below. */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_BD_EN;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			status |= GRETH_TXBD_CSALL;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	/* Flush all descriptor writes before the head enable below. */
	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);

	greth->tx_next = curr_tx;
	greth->tx_free -= nr_frags + 1;

	/* Flush book-keeping before restarting the transmitter. */
	wmb();

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor.
	 * NOTE(review): tx_next + i is not masked by the ring size, so this
	 * walk looks wrong when the chain wraps past the end of the ring —
	 * confirm against the ring geometry. */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
/*
 * ndo_start_xmit hook for the 10/100 GRETH MAC (no scatter/gather,
 * unlocked variant).
 *
 * The frame is memcpy'd into the fixed DMA buffer already attached to the
 * next TX descriptor, then the descriptor is armed and the transmitter
 * kicked.
 *
 * NOTE(review): this variant takes no lock and cleans the ring from
 * process context while the IRQ path may also touch it — confirm the
 * completion path cannot run concurrently here (the other xmit variants
 * in this file serialize on greth->devlock).
 *
 * Returns NETDEV_TX_OK when the skb has been consumed (copied or
 * dropped), or NETDEV_TX_BUSY when the ring is full (skb not freed; the
 * stack requeues it).
 */
static netdev_tx_t greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr;

	bdp = greth->tx_bd_base + greth->tx_next;

	if (unlikely(greth->tx_free <= 0)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	/* Oversized frames cannot fit one descriptor: drop. */
	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	dma_addr = greth_read_bd(&bdp->addr);

	/* Copy the frame into the descriptor's preallocated buffer and
	 * flush it out for the DMA engine. */
	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* No more descriptors */
	if (unlikely(greth->tx_free == 0)) {
		/* Free transmitted descriptors */
		greth_clean_tx(dev);

		/* If nothing was cleaned, stop queue & wait for irq */
		if (unlikely(greth->tx_free == 0)) {
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}
	}

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);

	greth_enable_tx(greth);

out:
	/* Frame was copied (or dropped): skb is always consumed here. */
	dev_kfree_skb(skb);
	return err;
}