static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc, desc_tmp;
	u32 cfg;
	u8 blk_len, cmd_cfg_timeout;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();
	desc = &desc_tmp;
	memset(desc, 0, sizeof(struct sd_emmc_desc));

	desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
		CMD_CFG_CMD_INDEX_SHIFT;
	desc->cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */
	desc->cmd_arg = cmd->arg;

	/* Response */
	if (cmd->flags & MMC_RSP_PRESENT) {
		desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
		if (cmd->flags & MMC_RSP_136)
			desc->cmd_cfg |= CMD_CFG_RESP_128;
		desc->cmd_cfg |= CMD_CFG_RESP_NUM;
		desc->cmd_resp = 0;

		if (!(cmd->flags & MMC_RSP_CRC))
			desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			desc->cmd_cfg |= CMD_CFG_R1B;
	} else {
		desc->cmd_cfg |= CMD_CFG_NO_RESP;
	}

	/* data? */
	if (cmd->data) {
		desc->cmd_cfg |= CMD_CFG_DATA_IO;
		if (cmd->data->blocks > 1) {
			desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
			desc->cmd_cfg |= (cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
				CMD_CFG_LENGTH_SHIFT;

			/* check if block-size matches, if not update */
			cfg = readl(host->regs + SD_EMMC_CFG);
			blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
			blk_len >>= CFG_BLK_LEN_SHIFT;
			if (blk_len != ilog2(cmd->data->blksz)) {
				dev_dbg(host->dev, "%s: update blk_len %d -> %d\n",
					__func__, blk_len,
					ilog2(cmd->data->blksz));
				blk_len = ilog2(cmd->data->blksz);
				cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
				cfg |= blk_len << CFG_BLK_LEN_SHIFT;
				writel(cfg, host->regs + SD_EMMC_CFG);
			}
		} else {
/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB:aeqe:%#x Not handled", type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: op_event = 0x%x not handled", type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;

		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor. This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: The RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and subsequently the read pointer is updated, this may cause
 * the RX queue to underflow (if the firmware has not yet updated the
 * write pointer).  Therefore we use slightly ugly conditional code
 * below to handle the differences.  We may, in the future, update the
 * NFP-3200 firmware to behave the same as the firmware on the
 * NFP-6000.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	int avail = 0, pkts_polled = 0;
	struct sk_buff *skb, *new_skb;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	u32 qcp_wr_p;
	int idx;

	if (nn->is_nfp3200) {
		/* Work out how many packets arrived */
		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
		idx = rx_ring->rd_p % rx_ring->cnt;

		if (qcp_wr_p == idx)
			/* No new packets */
			return 0;

		if (qcp_wr_p > idx)
			avail = qcp_wr_p - idx;
		else
			avail = qcp_wr_p + rx_ring->cnt - idx;
	} else {
		avail = budget + 1;
	}

	while (avail > 0 && pkts_polled < budget) {
		idx = rx_ring->rd_p % rx_ring->cnt;

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
			if (nn->is_nfp3200)
				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
				       rx_ring->idx, idx,
				       rxd->vals[0], rxd->vals[1]);
			break;
		}
		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		rx_ring->rd_p++;
		pkts_polled++;
		avail--;

		skb = rx_ring->rxbufs[idx].skb;

		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
					       nn->fl_bufsz);
		if (!new_skb) {
			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
					    rx_ring->rxbufs[idx].dma_addr);
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->rx_drops++;
			u64_stats_update_end(&r_vec->rx_sync);
			continue;
		}

		dma_unmap_single(&nn->pdev->dev,
				 rx_ring->rxbufs[idx].dma_addr,
				 nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);

		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, meta_len);
		else
			skb_reserve(skb, nn->rx_offset);
		skb_put(skb, data_len - meta_len);

		nfp_net_set_hash(nn->netdev, skb, rxd);

		/* Pad small frames to minimum */
		if (skb_put_padto(skb, 60))
			break;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		u64_stats_update_end(&r_vec->rx_sync);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, nn->netdev);

		nfp_net_rx_csum(nn, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (nn->is_nfp3200)
		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);

	return pkts_polled;
}
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}
static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
			    struct mlx4_en_tx_ring *ring,
			    int index, u64 timestamp,
			    int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_rx_alloc frame = {
		.page = tx_info->page,
		.dma = tx_info->map0_dma,
	};

	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
		dma_unmap_page(priv->ddev, tx_info->map0_dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(tx_info->page);
	}

	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						0, 0 /* Non-NAPI caller */);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (ring->tx_queue)
		netdev_tx_reset_queue(ring->tx_queue);

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
			   struct mlx4_en_cq *cq, int napi_budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
	struct mlx4_cqe *cqe;
	u16 index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	int done = 0;
	int budget = priv->tx_work_limit;
	u32 last_nr_txbb;
	u32 ring_cons;

	if (unlikely(!priv->port_up))
		return true;

	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cons_index & size) && (done < budget)) {
		u16 new_index;

		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		dma_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err =
				(struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			u64 timestamp = 0;

			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;

			if (unlikely(ring->tx_info[ring_index].ts_requested))
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = ring->free_tx_desc(
					priv, ring, ring_index,
					timestamp, napi_budget);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
					     ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done < budget;

	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/* Wakeup Tx queue if this stopped, and ring is not full. */
	if (netif_tx_queue_stopped(ring->tx_queue) &&
	    !mlx4_en_is_tx_ring_full(ring)) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}

	return done < budget;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool clean_complete;

	clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
	if (!clean_complete)
		return budget;

	napi_complete(napi);
	mlx4_en_arm_cq(priv, cq);

	return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + (index << LOG_TXBB_SIZE);
}
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								  cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}
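/*
 * Illustrative sketch only, not taken from any of the drivers above: a
 * hypothetical descriptor-ring consumer that condenses the ordering
 * pattern every function in this section follows.  All identifiers
 * except dma_rmb(), le16_to_cpu() and BIT() (my_desc, my_ring,
 * MY_DESC_DD, my_process_buffer, my_ring_poll) are made up for the
 * example.
 *
 * The shared rule: first test the valid/OWN/DD bit that the device
 * writes last, then issue dma_rmb(), and only then read the remaining
 * descriptor fields or the DMA buffer.  Without the barrier the CPU may
 * observe the valid bit yet still read stale length/flag words that the
 * device had not finished writing.
 */
#define MY_DESC_DD	BIT(15)		/* "descriptor done", set by the device */

struct my_desc {
	__le16 flags;			/* MY_DESC_DD lives here */
	__le16 len;			/* payload length, valid only after DD */
};

struct my_ring {
	struct my_desc *descs;		/* coherent DMA descriptor array */
	void **bufs;			/* per-slot packet buffers */
	unsigned int cnt;		/* ring size, power of two */
	unsigned int rd_p;		/* software consumer index */
};

/* Stand-in for real per-packet processing; hypothetical. */
static inline void my_process_buffer(void *buf, unsigned int len)
{
}

static int my_ring_poll(struct my_ring *ring, int budget)
{
	int done = 0;

	while (done < budget) {
		unsigned int idx = ring->rd_p & (ring->cnt - 1);
		struct my_desc *desc = &ring->descs[idx];

		/* 1. Check the bit the device sets last. */
		if (!(le16_to_cpu(desc->flags) & MY_DESC_DD))
			break;

		/* 2. Order the valid-bit read before any further reads
		 *    of this descriptor or its buffer.
		 */
		dma_rmb();

		/* 3. The rest of the descriptor is now safe to read. */
		my_process_buffer(ring->bufs[idx], le16_to_cpu(desc->len));

		ring->rd_p++;
		done++;
	}

	return done;
}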