static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
int mynet_netdev_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	// struct vnet_priv *priv = netdev_priv(dev);
	// priv->xmit++;

	/* free skb */
	dev_consume_skb_any(skb);

	printk(KERN_INFO "Enter mynet_netdev_start_xmit()\n");

	return NETDEV_TX_OK;
}
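For context around the minimal handler above, the following is a hedged sketch of how such a handler is typically installed through struct net_device_ops and register_netdev(). The names mynet_netdev_ops, mynet_dev and mynet_init are assumptions for illustration only and are not part of the snippet; note also that current kernels expect the .ndo_start_xmit callback to return netdev_tx_t rather than int.

/* Illustrative sketch only: mynet_netdev_ops, mynet_dev and mynet_init are
 * assumed names, not taken from the snippet above. Modern kernels expect
 * .ndo_start_xmit to return netdev_tx_t (NETDEV_TX_OK here).
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static const struct net_device_ops mynet_netdev_ops = {
	.ndo_start_xmit = mynet_netdev_start_xmit,	/* invoked for every outgoing skb */
};

static struct net_device *mynet_dev;

static int __init mynet_init(void)
{
	int err;

	/* allocate an Ethernet-style net_device with no private area */
	mynet_dev = alloc_etherdev(0);
	if (!mynet_dev)
		return -ENOMEM;

	mynet_dev->netdev_ops = &mynet_netdev_ops;

	err = register_netdev(mynet_dev);
	if (err)
		free_netdev(mynet_dev);
	return err;
}
module_init(mynet_init);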
/* Hardware start transmission.
 * Send a packet to media from the upper layer.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	unsigned long channel;
	unsigned long flags;

	channel = db->tx_fifo_stat & 3;
	if (channel == 3)
		return 1;

	channel = (channel == 1 ? 1 : 0);

	spin_lock_irqsave(&db->lock, flags);

	writel(channel, db->membase + EMAC_TX_INS_REG);

	emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG,
			  skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;
	db->tx_fifo_stat |= 1 << channel;
	/* TX control: First packet immediately send, second packet queue */
	if (channel == 0) {
		/* set TX len */
		writel(skb->len, db->membase + EMAC_TX_PL0_REG);
		/* start translate from fifo to phy */
		writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1,
		       db->membase + EMAC_TX_CTL0_REG);

		/* save the time stamp */
		netif_trans_update(dev);
	} else if (channel == 1) {
		/* set TX len */
		writel(skb->len, db->membase + EMAC_TX_PL1_REG);
		/* start translate from fifo to phy */
		writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1,
		       db->membase + EMAC_TX_CTL1_REG);

		/* save the time stamp */
		netif_trans_update(dev);
	}

	if ((db->tx_fifo_stat & 3) == 3) {
		/* Second packet */
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
static inline void _tx_reclaim_skb(void)
{
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_consume_skb_any(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	} while (tx_list_head->status.status_word != 0);
}
static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	int queue_depth;
	int packet_len;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	int iov_count;

	spin_lock(&qi->tail_lock);
	spin_lock(&qi->head_lock);
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);

	if (skb)
		packet_len = skb->len;

	if (queue_depth < qi->max_depth) {
		*(qi->skbuff_vector + qi->tail) = skb;
		mmsg_vector += qi->tail;
		iov_count = prep_msg(
			vp,
			skb,
			mmsg_vector->msg_hdr.msg_iov
		);
		if (iov_count < 1)
			goto drop;
		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
		queue_depth = vector_advancetail(qi, 1);
	} else
		goto drop;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
drop:
	qi->dev->stats.tx_dropped++;
	if (skb != NULL) {
		packet_len = skb->len;
		dev_consume_skb_any(skb);
		netdev_completed_queue(qi->dev, 1, packet_len);
	}
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
static int consume_vector_skbs(struct vector_queue *qi, int count)
{
	struct sk_buff *skb;
	int skb_index;
	int bytes_compl = 0;

	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
		skb = *(qi->skbuff_vector + skb_index);
		/* mark as empty to ensure correct destruction if
		 * needed
		 */
		bytes_compl += skb->len;
		*(qi->skbuff_vector + skb_index) = NULL;
		dev_consume_skb_any(skb);
	}
	qi->dev->stats.tx_bytes += bytes_compl;
	qi->dev->stats.tx_packets += count;
	netdev_completed_queue(qi->dev, count, bytes_compl);
	return vector_advancehead(qi, count);
}
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_cmsg_hdr *cmsg_hdr;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
				     cmsg_hdr->version);
		dev_kfree_skb_any(skb);
		return;
	}

	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
		/* We need to deal with stats updates from HW asap */
		nfp_flower_rx_flow_stats(app, skb);
		dev_consume_skb_any(skb);
	} else {
		skb_queue_tail(&priv->cmsg_skbs, skb);
		schedule_work(&priv->cmsg_work);
	}
}
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
		nfp_flower_cmsg_portreify_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
		/* Acks from the NFP that the route is added - ignore. */
		break;
	default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
		goto out;
	}

	dev_consume_skb_any(skb);
	return;
out:
	dev_kfree_skb_any(skb);
}
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {
		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
					skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
			     (skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	if (skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}
	force_bounce = 1;
	goto retry_bounce;
}
/*
 * chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	int qidx, left, credits;
	unsigned int flits = 0, ndesc, kctx_len;
	struct adapter *adap;
	struct sge_eth_txq *q;
	struct port_info *pi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kctx_len = sa_entry->kctx_len;

	if (skb->sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, kctx_len))
		immediate = true;

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static void mlxsw_sx_port_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}

static struct devlink_port *
mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
						mlxsw_sx_port->local_port);
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
	.ndo_get_devlink_port	= mlxsw_sx_port_get_devlink_port,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",