i40e_tx_free_bufs(struct i40e_tx_queue *txq)
{
	struct i40e_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							     (void *)free,
							     nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}
static inline void
tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet
	 */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set
	 */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->hw_ring[last_id].flags = flags;
	q->nb_free -= mb->nb_segs;

	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing.
	 */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	/* fill up the rings */
	for (; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
			rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}
}
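/*
 * Illustrative sketch only, not taken from the source above: a burst-send
 * wrapper showing how a caller might drive tx_xmit_pkt(). The helper
 * tx_free_descriptors() is assumed to reclaim completed descriptors and
 * update q->nb_free; the actual fm10k burst routine may differ.
 */
static uint16_t
example_fm10k_xmit_burst(struct fm10k_tx_queue *q, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf *mb;
	uint16_t count;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* reclaim completed descriptors if the packet does not fit */
		if (q->nb_free < mb->nb_segs)
			tx_free_descriptors(q);

		/* still not enough room: stop and report what was queued */
		if (q->nb_free < mb->nb_segs)
			break;

		tx_xmit_pkt(q, mb);
	}

	return count;
}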
static inline int
bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
		   struct bnxt_rx_ring_info *rxr,
		   uint16_t prod)
{
	struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	struct rte_mbuf *data;

	data = __bnxt_alloc_rx_data(rxq->mb_pool);
	if (!data)
		return -ENOMEM;

	rx_buf->mbuf = data;

	rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));

	return 0;
}
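/*
 * Illustrative sketch only, not taken from the source above: filling a fresh
 * RX ring by calling bnxt_alloc_rx_data() for every producer slot. Taking
 * the ring size as a parameter and recording it in rxr->rx_prod are
 * assumptions here; the real bnxt initialization path may differ.
 */
static int
example_bnxt_fill_rx_ring(struct bnxt_rx_queue *rxq,
			  struct bnxt_rx_ring_info *rxr, uint16_t ring_size)
{
	uint16_t prod;

	for (prod = 0; prod < ring_size; prod++) {
		/* stop on the first allocation failure; caller decides policy */
		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0)
			return -ENOMEM;
	}

	/* remember how far the producer side has been populated */
	rxr->rx_prod = prod;

	return 0;
}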
uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct i40e_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	/* crossing the tx_rs_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	if (nb_commit >= n) {
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* avoid reaching past the end of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
					 I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/* Scatter packets are not supported yet: hand them to
			 * the convert callback if one is set, otherwise drop them.
			 */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* cur_mtu does not include the Ethernet header length */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base +
						 txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *),
					   copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);

			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							     txq->cmd_ring.next2fill *
							     sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			nb_tx++;
		} else {