static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
	uint16_t last_id;
	uint8_t flags;

	/* always set the LAST flag on the last descriptor used to
	 * transmit the packet */
	flags = FM10K_TXD_FLAG_LAST;
	last_id = q->next_free + mb->nb_segs - 1;
	if (last_id >= q->nb_desc)
		last_id = last_id - q->nb_desc;

	/* but only set the RS flag on the last descriptor if rs_thresh
	 * descriptors will be used since the RS flag was last set */
	if ((q->nb_used + mb->nb_segs) >= q->rs_thresh) {
		flags |= FM10K_TXD_FLAG_RS;
		fifo_insert(&q->rs_tracker, last_id);
		q->nb_used = 0;
	} else {
		q->nb_used = q->nb_used + mb->nb_segs;
	}

	q->hw_ring[last_id].flags = flags;
	q->nb_free -= mb->nb_segs;

	/* set checksum flags on first descriptor of packet. SCTP checksum
	 * offload is not supported, but we do not explicitly check for this
	 * case in favor of greatly simplified processing. */
	if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;

	/* set vlan if requested */
	if (mb->ol_flags & PKT_TX_VLAN_PKT)
		q->hw_ring[q->next_free].vlan = mb->vlan_tci;

	/* fill up the rings */
	for (; mb != NULL; mb = mb->next) {
		q->sw_ring[q->next_free] = mb;
		q->hw_ring[q->next_free].buffer_addr =
				rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
		q->hw_ring[q->next_free].buflen =
				rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
		if (++q->next_free == q->nb_desc)
			q->next_free = 0;
	}
}
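/*
 * Hedged usage sketch (not the driver's actual burst function): it shows how
 * a caller would be expected to guard tx_xmit_pkt() with the q->nb_free
 * accounting that the helper decrements. The fm10k_tx_burst_sketch() name and
 * the tx_free_descriptors() reclaim helper are assumptions for illustration.
 */
static uint16_t
fm10k_tx_burst_sketch(struct fm10k_tx_queue *q, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	uint16_t count;
	struct rte_mbuf *mb;

	for (count = 0; count < nb_pkts; ++count) {
		mb = tx_pkts[count];

		/* try to reclaim completed descriptors before giving up */
		if (q->nb_free < mb->nb_segs)
			tx_free_descriptors(q);

		/* stop if the whole mbuf chain still does not fit in the ring */
		if (q->nb_free < mb->nb_segs)
			break;

		tx_xmit_pkt(q, mb);
	}

	return count;
}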
/**
 * Convert IPv6 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   next header fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv6(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv6 *spec = NULL;
	const struct rte_flow_item_ipv6 *mask = NULL;
	const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
	const struct rte_flow_item_ipv6 supp_mask = {
		.hdr = {
			.src_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.dst_addr = { 0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff,
				      0xff, 0xff, 0xff, 0xff },
			.proto = 0xff,
		}
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
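/*
 * Illustrative application-side sketch (not part of the driver): a pattern
 * that the parser above accepts via its full-mask branches, setting
 * EFX_FILTER_MATCH_REM_HOST and EFX_FILTER_MATCH_LOC_HOST. It assumes
 * <rte_flow.h>, <rte_ip.h> and <rte_byteorder.h>; the function name and the
 * addresses are examples only.
 */
static void
example_ipv4_exact_match_pattern(struct rte_flow_item pattern[3])
{
	static struct rte_flow_item_ipv4 ipv4_spec;
	static struct rte_flow_item_ipv4 ipv4_mask;

	/* Addresses are big-endian in the item, as the parser expects */
	ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
	/* All-ones masks select the exact-match branches above */
	ipv4_mask.hdr.src_addr = 0xffffffff;
	ipv4_mask.hdr.dst_addr = 0xffffffff;

	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &ipv4_spec,
		.mask = &ipv4_mask,
	};
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}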
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];

			/* Multi-segment packets are not supported yet: hand them
			 * to the convert callback if one is set, otherwise drop them.
			 */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* cur_mtu does not include the Ethernet header, so allow for it */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base +
						 txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *),
					   copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
						txq->cmd_ring.next2fill *
						sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			nb_tx++;
		} else {