/* One-time netdev init for the QCA SPI ethernet driver: set MTU/link type,
 * copy module parameters into the private struct and allocate the RX
 * staging buffer plus the first RX skb.
 *
 * Returns 0 on success, -ENOBUFS if either allocation fails.
 */
static int qcaspi_netdev_init(struct net_device *dev)
{
	struct qcaspi *qca = netdev_priv(dev);

	dev->mtu = QCAFRM_MAX_MTU;
	dev->type = ARPHRD_ETHER;
	qca->clkspeed = qcaspi_clkspeed;
	qca->burst_len = qcaspi_burst_len;
	qca->spi_thread = NULL;
	/* Staging buffer holds up to four maximally framed packets
	 * (MTU + VLAN ethernet header + SPI frame header/footer + 4). */
	qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
		QCAFRM_FOOTER_LEN + 4) * 4;

	memset(&qca->stats, 0, sizeof(struct qcaspi_stats));

	qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL);
	if (!qca->rx_buffer)
		return -ENOBUFS;

	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
						VLAN_ETH_HLEN);
	if (!qca->rx_skb) {
		/* NOTE(review): rx_buffer is freed but not NULLed here —
		 * confirm no later teardown path frees it again. */
		kfree(qca->rx_buffer);
		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
		return -ENOBUFS;
	}

	return 0;
}
static int roq_alloc_rx_buffers(struct roq_eth_priv *priv) { struct device *dev = &priv->ndev->dev; int i; memset(&priv->rx_buf, 0, sizeof(struct roq_eth_rx_buffer)); /* alloc socket buffers */ for (i = 0; i < MAX_RX_SKBS; i++) { priv->rx_buf.post[i].skb = netdev_alloc_skb_ip_align(priv->ndev, DEFAULT_SKB_SIZE); if (!priv->rx_buf.post[i].skb) { pr_warn("Failed to alloc RX SKB Data Buffer %d\n", i); goto err; } priv->rx_buf.post[i].addr = dma_map_single(dev, priv->rx_buf.post[i].skb->data, DEFAULT_SKB_SIZE, DMA_FROM_DEVICE); if (!priv->rx_buf.post[i].addr) { dev_kfree_skb(priv->rx_buf.post[i].skb); pr_warn("Mapping of RX SKB Data Buffer %d failed\n", i); goto err; } } return 0; err: roq_free_rx_buffers(priv); return -ENOMEM; }
/* Refill the RX queue: while the MAC reports a free input slot and the
 * software ring has space, allocate and DMA-map a fresh skb and post its
 * address to the hardware input queue.  Stops early on any allocation or
 * mapping failure (next refill will retry).
 */
static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	u32 pos;
	u32 len = MAX_FRAME_SIZE;
	dma_addr_t addr;

	pos = rxq->head;
	while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
			break;
		if (unlikely(rxq->skb[pos])) {
			/* Slot should be empty; a stale skb indicates a
			 * ring-accounting bug. */
			netdev_err(priv->ndev, "err skb[%d]=%p\n",
				   pos, rxq->skb[pos]);
			break;
		}
		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
		if (unlikely(!skb))
			break;

		addr = dma_map_single(priv->dev, skb->data, len,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}
		rxq->dma_phys[pos] = addr;
		rxq->skb[pos] = skb;
		/* Hand the buffer to the MAC input queue. */
		writel(addr, priv->port_base + IQ_ADDR);
		pos = (pos + 1) % rxq->num;
	}
	rxq->head = pos;
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue_elem_info *elem_info) { size_t buf_len = MLXSW_PORT_MAX_MTU; char *wqe = elem_info->elem; struct sk_buff *skb; int err; elem_info->u.rdq.skb = NULL; skb = netdev_alloc_skb_ip_align(NULL, buf_len); if (!skb) return -ENOMEM; /* Assume that wqe was previously zeroed. */ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, buf_len, DMA_FROM_DEVICE); if (err) goto err_frag_map; elem_info->u.rdq.skb = skb; return 0; err_frag_map: dev_kfree_skb_any(skb); return err; }
static void ec_bhf_process_rx(struct ec_bhf_priv *priv) { struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; while (ec_bhf_pkt_received(desc)) { int pkt_size = (le16_to_cpu(desc->header.len) & RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; u8 *data = desc->data; struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); if (skb) { memcpy(skb_put(skb, pkt_size), data, pkt_size); skb->protocol = eth_type_trans(skb, priv->net_dev); priv->stat_rx_bytes += pkt_size; netif_rx(skb); } else { dev_err_ratelimited(PRIV_TO_DEV(priv), "Couldn't allocate a skb_buff for a packet of size %u\n", pkt_size); } desc->header.recv = 0; ec_bhf_add_rx_desc(priv, desc); priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; desc = &priv->rx_descs[priv->rx_dnext]; } }
/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 *
 * Copies the received data out of the hv_netvsc_packet (whose backing
 * memory cannot be kept) into a new skb, applies RX checksum / VLAN /
 * queue metadata, and passes it up the stack.  Always returns 0; failure
 * is reported to the host via packet->status = NVSP_STAT_FAIL.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		/* Device not (yet) usable — tell the host we failed. */
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb,
			    packet->channel->offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}
static void ks8842_rx_frame(struct net_device *netdev, struct ks8842_adapter *adapter) { u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); int len = (status >> 16) & 0x7ff; status &= 0xffff; dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n", __func__, status); /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", __func__, len); if (skb) { u32 *data; netdev->stats.rx_packets++; netdev->stats.rx_bytes += len; if (status & RXSR_MULTICAST) netdev->stats.multicast++; data = (u32 *)skb_put(skb, len); ks8842_select_bank(adapter, 17); while (len > 0) { *data++ = ioread32(adapter->hw_addr + REG_QMU_DATA_LO); len -= sizeof(u32); } skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); } else netdev->stats.rx_dropped++; } else { dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status); netdev->stats.rx_errors++; if (status & RXSR_TOO_LONG) netdev->stats.rx_length_errors++; if (status & RXSR_CRC_ERROR) netdev->stats.rx_crc_errors++; if (status & RXSR_RUNT) netdev->stats.rx_frame_errors++; } /* set high watermark to 3K */ ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); /* release the frame */ ks8842_write16(adapter, 17, 0x01, REG_RXQCR); /* set high watermark to 2K */ ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); }
/* allocate and initialize Tx and Rx descriptors */
static void alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	/* Standard frames fit PACKET_SIZE; jumbo MTUs get MTU + 32 slack. */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors (linked in a ring) */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			/* NOTE(review): on failure the ring is left only
			 * partially filled and no error is propagated —
			 * verify callers tolerate this. */
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		/* Upper 16 bits of fraginfo carry the buffer length. */
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}
/* Poll-style receive: drain up to @limit frames from the RX buffer
 * descriptor ring, copying each one out of device memory into a fresh
 * skb, then mark the descriptor empty for reuse.
 */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		/* RX descriptors live after the num_tx TX descriptors. */
		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);

			/* If packet (interrupt) came in between checking
			 * BD_EMTPY and clearing the interrupt source, then we
			 * risk missing the packet as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * BD_EMTPY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			/* Frame length lives in the upper 16 bits of stat. */
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");
				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}
	/* NOTE(review): the function body appears truncated here in the
	 * source ("return count;" and the closing brace are missing) —
	 * verify against the original file. */
/* Poll-style receive (duplicate of the variant above with its comments
 * stripped): drain up to @limit frames from the RX descriptor ring into
 * fresh skbs, then mark each descriptor empty for reuse.
 */
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);

			/* Re-check after acking the IRQ: a packet that
			 * arrived between the first read and the ack would
			 * otherwise be missed until the next interrupt. */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the trailing CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");
				dev->stats.rx_dropped++;
				break;
			}
		}

		/* Return the descriptor to the device for reuse. */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}
	/* NOTE(review): the function body appears truncated here in the
	 * source ("return count;" and the closing brace are missing) —
	 * verify against the original file. */
/* Allocate and initialize the Tx and Rx descriptor rings (comment-stripped
 * duplicate of the variant above): link both rings, pre-allocate one
 * DMA-mapped skb per RX slot, and point the hardware at the RX ring.
 */
static void alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	/* Standard frames fit PACKET_SIZE; jumbo MTUs get MTU + 32 slack. */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors; TFDListPtr is set in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors (linked in a ring). */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate one fixed-size RX buffer per descriptor. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			/* NOTE(review): failure leaves the ring partially
			 * filled and is not propagated to the caller. */
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* The chip supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		/* Upper 16 bits of fraginfo carry the buffer length. */
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Point the hardware at the RX ring (RFDListPtr). */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);
}
/* Drain the RX-done ring: for each completed descriptor, validate length
 * and channel, copy the frame into a new skb and deliver it to the
 * channel's netdev.  Returns 0 when the ring ran empty before the budget
 * was used, otherwise the number of descriptors processed.
 */
static int ixpdev_rx(struct net_device *dev, int processed, int budget)
{
	while (processed < budget) {
		struct ixpdev_rx_desc *desc;
		struct sk_buff *skb;
		void *buf;
		u32 _desc;

		_desc = ixp2000_reg_read(RING_RX_DONE);
		if (_desc == 0)
			/* Ring empty. */
			return 0;

		desc = rx_desc +
			((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
		buf = phys_to_virt(desc->buf_addr);

		if (desc->pkt_length < 4 ||
		    desc->pkt_length > PAGE_SIZE) {
			printk(KERN_ERR "ixp2000: rx err, length %d\n",
			       desc->pkt_length);
			goto err;
		}

		if (desc->channel < 0 || desc->channel >= nds_count) {
			printk(KERN_ERR "ixp2000: rx err, channel %d\n",
			       desc->channel);
			goto err;
		}

		/* @@@ Make FCS stripping configurable.  */
		desc->pkt_length -= 4;

		if (unlikely(!netif_running(nds[desc->channel])))
			goto err;

		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
		if (likely(skb != NULL)) {
			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
			skb_put(skb, desc->pkt_length);
			skb->protocol = eth_type_trans(skb, nds[desc->channel]);

			netif_receive_skb(skb);
		}

err:
		/* Recycle the descriptor whether or not the frame was used. */
		ixp2000_reg_write(RING_RX_PENDING, _desc);
		processed++;
	}

	return processed;
}
/* Complete one RX descriptor: if a replacement skb can be allocated, hand
 * the filled skb up (returned to the caller) and re-arm the descriptor
 * with the new buffer; otherwise drop the frame and re-arm with the old
 * buffer.  The descriptor is always returned to hardware ownership.
 *
 * Returns the received skb, or NULL on a spurious/dropped frame.
 */
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	/* Ack the descriptor to the MAC. */
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		/* Swap in the freshly mapped replacement buffer. */
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	/* Re-arm the descriptor and give it back to the MAC. */
	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
/* Refill @nbuf slots of the RX buffer pool with DMA-mapped skbs and push
 * the batch to the hardware via the ring write command.
 *
 * Returns 0 on success, -ENOMEM if an skb allocation fails or -EINVAL on
 * a DMA mapping error (in either case already-filled slots are NOT pushed
 * and buf_pool->tail is left unchanged).
 */
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;	/* ring size is a power of two */
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* Encode address, length code and coherency into m1. */
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	/* Tell the hardware how many buffers were added. */
	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
/* Wrap a raw MBIM IP datagram in a synthetic ethernet frame so the stack
 * can process it: classify the payload (IPv4/IPv6 for IPS sessions,
 * 802.3 otherwise), prepend a zero-source ethernet header addressed to
 * the netdev, and tag with the session's VLAN id when non-zero.
 *
 * Returns the new skb, or NULL on a malformed datagram or allocation
 * failure.
 */
static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
{
	__be16 proto = htons(ETH_P_802_3);
	struct sk_buff *skb = NULL;

	if (tci < 256) { /* IPS session? */
		if (len < sizeof(struct iphdr))
			goto err;

		/* Classify by the IP version nibble. */
		switch (*buf & 0xf0) {
		case 0x40:
			proto = htons(ETH_P_IP);
			break;
		case 0x60:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)
			if (is_neigh_solicit(buf, len))
				do_neigh_solicit(dev, buf, tci);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0) */
			proto = htons(ETH_P_IPV6);
			break;
		default:
			goto err;
		}
	}

	skb = netdev_alloc_skb_ip_align(dev->net, len + ETH_HLEN);
	if (!skb)
		goto err;

	/* add an ethernet header */
	skb_put(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	eth_hdr(skb)->h_proto = proto;
	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);

	/* add datagram */
	memcpy(skb_put(skb, len), buf, len);

	/* map MBIM session to VLAN */
	if (tci)
		vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
err:
	return skb;
}
/* Periodic (1 s) recovery timer: if the RX ring has been completely
 * exhausted (every descriptor consumed and none refilled), retry the skb
 * re-allocation that previously failed, then re-arm the timer.
 */
static void rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			/* Upper 16 bits of fraginfo carry the buffer length. */
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
/* allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	rio_reset_ring(np);
	/* Standard frames fit PACKET_SIZE; jumbo MTUs get MTU + 32 slack. */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
					      ((i + 1) % TX_RING_SIZE) *
					      sizeof(struct netdev_desc));
	}

	/* Initialize Rx descriptors & allocate buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (!skb) {
			/* Unlike the older variant, failure tears down
			 * everything and reports -ENOMEM to the caller. */
			free_list(dev);
			return -ENOMEM;
		}

		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof(struct netdev_desc));
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64(pci_map_single(
			 	  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		/* Upper 16 bits of fraginfo carry the buffer length. */
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	return 0;
}
/* Build an skb for a received netvsc frame: copy @buflen bytes from the
 * host-owned buffer (which cannot be retained), then apply L4 RX-checksum
 * offload status and the 802.1q tag when present.
 *
 * Returns the new skb, or NULL on allocation failure.
 */
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				const struct ndis_tcp_ip_checksum_info *csum_info,
				const struct ndis_pkt_8021q_info *vlan,
				void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, buflen), data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		/* Reassemble tag from VLAN id and priority bits. */
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}
static int roq_eth_renew_skb(struct roq_eth_priv *vdev, int skbno) { struct net_device *ndev = vdev->ndev; struct device *dev = &vdev->ndev->dev; struct sk_buff *skb = NULL; dma_addr_t addr = 0; int rv = 0; BUG_ON(!vdev->rx_buf.post[skbno].skb); BUG_ON(!vdev->rx_buf.post[skbno].addr); dma_unmap_single(dev, DEFAULT_SKB_SIZE, vdev->rx_buf.post[skbno].addr, DMA_FROM_DEVICE); skb = netdev_alloc_skb_ip_align(ndev, DEFAULT_SKB_SIZE); if (unlikely(!skb)) { pr_warn("roq_eth_renew_skb: Alloc failed"); rv = -ENOMEM; goto out; } addr = dma_map_single(dev, skb->data, DEFAULT_SKB_SIZE, DMA_FROM_DEVICE); if (unlikely(!addr)) { pr_warn("roq_eth_renew_skb: Mapping failed"); dev_kfree_skb(skb); skb = NULL; rv = -ENOMEM; goto out; } skb->dev = vdev->ndev; skb->ip_summed = CHECKSUM_UNNECESSARY; out: vdev->rx_buf.post[skbno].skb = skb; vdev->rx_buf.post[skbno].addr = addr; return rv; }
/* CPDMA RX completion handler.  @token is the skb that was submitted for
 * this buffer; on successful receive it is pushed up the stack and a new
 * skb is allocated and resubmitted to the DMA channel.  If the interface
 * is down the buffer is simply freed.
 */
void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret = 0;

	/* Interface gone down or link lost: free the buffer and stop. */
	if (unlikely(!netif_running(ndev)) ||
	    unlikely(!netif_carrier_ok(ndev))) {
		dev_kfree_skb_any(skb);
		return;
	}
	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;	/* ownership passed to the stack */
	}

	/* Re-check: the interface may have gone down while we delivered. */
	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

	/* Resubmit a buffer: a fresh one on success, the old one (skb still
	 * set) when status indicated an error. */
	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
	}
	WARN_ON(ret < 0);
}
/* Older variant: build an skb for a received netvsc frame by copying the
 * payload out of the host-owned buffer, then apply IP RX-checksum status
 * and the VLAN tag when present.
 *
 * Returns the new skb, or NULL on allocation failure.
 */
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info,
				void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}
/* Refill consumed RX descriptors between dirty_rx and cur_rx: reuse a
 * recycled skb when one is available, otherwise allocate a new one, map
 * it for DMA and hand the descriptor back to the hardware.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	struct dma_desc *p = priv->dma_rx;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			/* Prefer a recycled skb over a fresh allocation. */
			skb = __skb_dequeue(&priv->rx_recycle);
			if (skb == NULL)
				skb = netdev_alloc_skb_ip_align(priv->dev,
								bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
			if (unlikely(priv->plat->has_gmac)) {
				/* GMAC splits large buffers across des2/des3. */
				if (bfsize >= BUF_SIZE_8KiB)
					(p + entry)->des3 =
					    (p + entry)->des2 + BUF_SIZE_8KiB;
			}
			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		/* Ensure the buffer address is visible before transferring
		 * descriptor ownership to the DMA engine. */
		wmb();
		priv->hw->desc->set_rx_owner(p + entry);
	}
}
static int receive_packet (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int cnt = 30; /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ while (1) { struct netdev_desc *desc = &np->rx_ring[entry]; int pkt_len; u64 frame_status; if (!(desc->status & cpu_to_le64(RFDDone)) || !(desc->status & cpu_to_le64(FrameStart)) || !(desc->status & cpu_to_le64(FrameEnd))) break; /* Chip omits the CRC. */ frame_status = le64_to_cpu(desc->status); pkt_len = frame_status & 0xffff; if (--cnt < 0) break; /* Update rx error statistics, drop packet. */ if (frame_status & RFS_Errors) { np->stats.rx_errors++; if (frame_status & (RxRuntFrame | RxLengthError)) np->stats.rx_length_errors++; if (frame_status & RxFCSError) np->stats.rx_crc_errors++; if (frame_status & RxAlignmentError && np->speed != 1000) np->stats.rx_frame_errors++; if (frame_status & RxFIFOOverrun) np->stats.rx_fifo_errors++; } else { struct sk_buff *skb; /* Small skbuffs for short packets */ if (pkt_len > copy_thresh) { pci_unmap_single (np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { pci_dma_sync_single_for_cpu(np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_copy_to_linear_data (skb, np->rx_skbuff[entry]->data, pkt_len); skb_put (skb, pkt_len); pci_dma_sync_single_for_device(np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans (skb, dev); #if 0 /* Checksum done by hw, but csum value unavailable. 
*/ if (np->pdev->pci_rev_id >= 0x0c && !(frame_status & (TCPError | UDPError | IPError))) { skb->ip_summed = CHECKSUM_UNNECESSARY; } #endif netif_rx (skb); } entry = (entry + 1) % RX_RING_SIZE; } spin_lock(&np->rx_lock); np->cur_rx = entry; /* Re-allocate skbuffs to fill the descriptor ring */ entry = np->old_rx; while (entry != np->cur_rx) { struct sk_buff *skb; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO "%s: receive_packet: " "Unable to re-allocate Rx skbuff.#%d\n", dev->name, entry); break; } np->rx_skbuff[entry] = skb; np->rx_ring[entry].fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE)); } np->rx_ring[entry].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[entry].status = 0; entry = (entry + 1) % RX_RING_SIZE; } np->old_rx = entry; spin_unlock(&np->rx_lock); return 0; }
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 *
 * Allocates the TX/RX descriptor rings from coherent DMA memory, links
 * both rings, pre-allocates one DMA-mapped jumbo skb per RX descriptor,
 * programs the DMA channel control registers and descriptor pointers.
 *
 * Returns 0 on success, -ENOMEM on any failure (everything allocated so
 * far is released via temac_dma_bd_release()).
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb) {
		dev_err(&ndev->dev,
			"can't allocate memory for DMA RX buffer\n");
		goto out;
	}
	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA TX buffer descriptors");
		goto out;
	}
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA RX buffer descriptors");
		goto out;
	}

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		/* Link each TX descriptor to the next (circular ring). */
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
			sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (skb == 0) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}
		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		/* NOTE(review): dma_map_single() result is not checked with
		 * dma_mapping_error() — confirm intended. */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	/* Enable interrupts, delay timer and coalescing on both channels;
	 * the magic constants are the remaining control-register fields. */
	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	/* Point the hardware at the rings; the RX tail descriptor makes the
	 * whole RX ring available to the engine. */
	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	/* Link the TX descriptors into a circular ring. */
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	/* Link the RX ring and attach one DMA-mapped skb per descriptor. */
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		/* Stash the skb pointer in the descriptor for completion. */
		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		/* NOTE(review): dma_map_single() result is not checked with
		 * dma_mapping_error() — confirm intended. */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;

out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packages to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		/* Descriptor still owned by the EMAC: nothing more to do. */
		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previosly mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		/* Install the replacement buffer in the descriptor slot. */
		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
/** * arc_emac_open - Open the network device. * @ndev: Pointer to the network device. * * returns: 0, on success or non-zero error value on failure. * * This function sets the MAC address, requests and enables an IRQ * for the EMAC device and starts the Tx queue. * It also connects to the phy device. */ static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct phy_device *phy_dev = ndev->phydev; int i; phy_dev->autoneg = AUTONEG_ENABLE; phy_dev->speed = 0; phy_dev->duplex = 0; linkmode_and(phy_dev->advertising, phy_dev->advertising, phy_dev->supported); priv->last_rx_bd = 0; /* Allocate and set buffers for Rx BD's */ for (i = 0; i < RX_BD_NUM; i++) { dma_addr_t addr; unsigned int *last_rx_bd = &priv->last_rx_bd; struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; rx_buff->skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); if (unlikely(!rx_buff->skb)) return -ENOMEM; addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, addr)) { netdev_err(ndev, "cannot dma map\n"); dev_kfree_skb(rx_buff->skb); return -ENOMEM; } dma_unmap_addr_set(rx_buff, addr, addr); dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); rxbd->data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ wmb(); /* Return ownership to EMAC */ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; } priv->txbd_curr = 0; priv->txbd_dirty = 0; /* Clean Tx BD's */ memset(priv->txbd, 0, TX_RING_SZ); /* Initialize logical address filter */ arc_reg_set(priv, R_LAFL, 0); arc_reg_set(priv, R_LAFH, 0); /* Set BD ring pointers for device side */ arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); /* Enable interrupts */ arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); /* Set CONTROL */ 
arc_reg_set(priv, R_CTRL, (RX_BD_NUM << 24) | /* RX BD table length */ (TX_BD_NUM << 16) | /* TX BD table length */ TXRN_MASK | RXRN_MASK); napi_enable(&priv->napi); /* Enable EMAC */ arc_reg_or(priv, R_CTRL, EN_MASK); phy_start(ndev->phydev); netif_start_queue(ndev); return 0; }
static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) { struct net_device *netdev = priv->netdev; struct ftmac100_rxdes *rxdes; struct sk_buff *skb; struct page *page; dma_addr_t map; int length; rxdes = ftmac100_rx_locate_first_segment(priv); if (!rxdes) return false; if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) { ftmac100_rx_drop_packet(priv); return true; } /* * It is impossible to get multi-segment packets * because we always provide big enough receive buffers. */ if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) BUG(); /* start processing */ skb = netdev_alloc_skb_ip_align(netdev, 128); if (unlikely(!skb)) { if (net_ratelimit()) netdev_err(netdev, "rx skb alloc failed\n"); ftmac100_rx_drop_packet(priv); return true; } if (unlikely(ftmac100_rxdes_multicast(rxdes))) netdev->stats.multicast++; map = ftmac100_rxdes_get_dma_addr(rxdes); dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); length = ftmac100_rxdes_frame_length(rxdes); page = ftmac100_rxdes_get_page(rxdes); skb_fill_page_desc(skb, 0, page, 0, length); skb->len += length; skb->data_len += length; if (length > 128) { skb->truesize += PAGE_SIZE; /* We pull the minimum amount into linear part */ __pskb_pull_tail(skb, ETH_HLEN); } else { /* Small frames are copied into linear part to free one page */ __pskb_pull_tail(skb, length); } ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); ftmac100_rx_pointer_advance(priv); skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; netdev->stats.rx_bytes += skb->len; /* push packet to protocol stack */ netif_receive_skb(skb); (*processed)++; return true; }
/* Drain all completed RX descriptors.
 *
 * Copybreak policy: frames longer than rx_copybreak hand the ring skb to
 * the stack and allocate a fresh full-size replacement; short frames are
 * copied into a right-sized skb and the ring skb is reused in place.
 * Either way, a buffer is always remapped into the descriptor before it
 * is returned to the hardware. After the loop, the end-of-ring (EOR)
 * marker is moved from its old position to the descriptor just before
 * the new rx_new.
 */
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		/* NOTE(review): the -3 here presumably strips a trailing
		 * status byte plus padding; confirm against the HPC3 docs.
		 */
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		/* Hardware appends a status byte right after the payload. */
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					/* Big frame: give ring skb away,
					 * refill the slot with a new one.
					 */
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						/* No memory: keep the old skb
						 * in the ring, drop the frame.
						 */
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					/* Small frame: copy out, reuse the
					 * ring skb. skb == NULL on alloc
					 * failure means "drop" below.
					 */
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		/* Remap whichever skb now backs this slot. data - 2 undoes
		 * the 2-byte alignment reserve so the mapping covers the
		 * buffer from its original, HW-aligned start.
		 */
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	/* Move the end-of-ring marker: clear it at its old slot, set it on
	 * the descriptor just before the new rx_new.
	 */
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
/* * netvsc_recv_callback - Callback when we receive a packet from the * "wire" on the specified device. */ static int netvsc_recv_callback(struct hv_device *device_obj, struct hv_netvsc_packet *packet) { struct vm_device *device_ctx = to_vm_device(device_obj); struct net_device *net = dev_get_drvdata(&device_ctx->device); struct sk_buff *skb; void *data; int i; unsigned long flags; DPRINT_ENTER(NETVSC_DRV); if (!net) { DPRINT_ERR(NETVSC_DRV, "got receive callback but net device " "not initialized yet"); return 0; } /* Allocate a skb - TODO direct I/O to pages? */ skb = netdev_alloc_skb_ip_align(net, packet->TotalDataBufferLength); if (unlikely(!skb)) { ++net->stats.rx_dropped; return 0; } /* for kmap_atomic */ local_irq_save(flags); /* * Copy to skb. This copy is needed here since the memory pointed by * hv_netvsc_packet cannot be deallocated */ for (i = 0; i < packet->PageBufferCount; i++) { data = kmap_atomic(pfn_to_page(packet->PageBuffers[i].Pfn), KM_IRQ1); data = (void *)(unsigned long)data + packet->PageBuffers[i].Offset; memcpy(skb_put(skb, packet->PageBuffers[i].Length), data, packet->PageBuffers[i].Length); kunmap_atomic((void *)((unsigned long)data - packet->PageBuffers[i].Offset), KM_IRQ1); } local_irq_restore(flags); skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; net->stats.rx_packets++; net->stats.rx_bytes += skb->len; /* * Pass the skb back up. Network stack will deallocate the skb when it * is done. * TODO - use NAPI? */ netif_rx(skb); DPRINT_DBG(NETVSC_DRV, "# of recvs %lu total size %lu", net->stats.rx_packets, net->stats.rx_bytes); DPRINT_EXIT(NETVSC_DRV); return 0; }