static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	unsigned char pkt_status;
	unsigned char *pkt_pointer = NULL;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	for_each_rx(rd, sp) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
		pkt_status = pkt_pointer[len + 2];

		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			skb = dev_alloc_skb(len + 2);

			if (skb) {
				skb->dev = dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);

				/* Copy out of kseg1 to avoid silly cache flush. */
				eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
				skb->protocol = eth_type_trans(skb, dev);

				/* We don't want to receive our own packets */
				if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
					netif_rx(skb);
					dev->last_rx = jiffies;
					sp->stats.rx_packets++;
					sp->stats.rx_bytes += len;
				} else {
					/* Silently drop my own packets */
					dev_kfree_skb_irq(skb);
				}
			} else {
				printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
				       dev->name);
				sp->stats.rx_dropped++;
			}
		} else {
			record_rx_errors(sp, pkt_status);
		}

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
	}

	/* Move the end-of-ring marker from the old tail to the new tail. */
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	rx_maybe_restart(sp, hregs, sregs);
}
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
		       qep->dev->name);
}
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry = sbus_readl(&rqa->head);
	int limit = sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum = sbus_readl(&rxdack->csum);
		int len = sbus_readl(&rxdack->myri_scatters[0].len);
		int index = sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb = mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE, DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* XXX */
		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)),
							      pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad Frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed Allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN))
			break;

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;
		} else {
			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - packet dropped\n");
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
					       dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current */
		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
		if (!bad && newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)),
							      pkt_len);

				skb_put(skb, pkt_len);

				if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		if (!skb)
			BUG();

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = cp->rx_skb[rx_tail].mapping;

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       cp->dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping =
		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn |
						  RingEnd | cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}