/*
 * Exercise rte_eth_stats_reset() on the ring PMD: reset, verify every
 * counter reads zero, pass a single packet through the port, verify the
 * packet counters advanced, then reset again and re-verify all zeroes.
 * Returns 0 on success, -1 on the first mismatch or burst failure.
 */
static int
test_stats_reset(void)
{
	struct rte_eth_stats stats;
	struct rte_mbuf buf, *pbuf = &buf;

	printf("Testing ring PMD stats reset\n");

	rte_eth_stats_reset(RXTX_PORT);

	/* check stats of RXTX port, should all be zero */
	rte_eth_stats_get(RXTX_PORT, &stats);
	if ((stats.ipackets | stats.opackets | stats.ibytes | stats.obytes |
			stats.ierrors | stats.oerrors) != 0) {
		printf("Error: RXTX port stats are not zero\n");
		return -1;
	}

	/* send and receive 1 packet and check for stats update */
	if (rte_eth_tx_burst(RXTX_PORT, 0, &pbuf, 1) != 1) {
		printf("Error sending packet to RXTX port\n");
		return -1;
	}
	if (rte_eth_rx_burst(RXTX_PORT, 0, &pbuf, 1) != 1) {
		printf("Error receiving packet from RXTX port\n");
		return -1;
	}

	/* exactly one packet each way; the ring PMD does not count bytes */
	rte_eth_stats_get(RXTX_PORT, &stats);
	if (stats.ipackets != 1 || stats.opackets != 1 ||
			(stats.ibytes | stats.obytes |
			 stats.ierrors | stats.oerrors) != 0) {
		printf("Error: RXTX port stats are not as expected\n");
		return -1;
	}

	rte_eth_stats_reset(RXTX_PORT);

	/* after the second reset everything must be zero again */
	rte_eth_stats_get(RXTX_PORT, &stats);
	if ((stats.ipackets | stats.opackets | stats.ibytes | stats.obytes |
			stats.ierrors | stats.oerrors) != 0) {
		printf("Error: RXTX port stats are not zero\n");
		return -1;
	}

	return 0;
}
/**
 * @brief Retrieve device statistics data from HW
 *
 * Copies the ethdev counters of the given port into @p stat via
 * rte_eth_stats_get(). On an out-of-range id the buffer is left
 * untouched and an error is logged.
 *
 * @param devId uint8_t, ID of DPDK device
 * @param stat EthDevStatistics_t*, pointer to buffer where retrieved data will be stored
 */
void DPDKAdapter::getDevStatistics(uint8_t devId, EthDevStatistics_t* stat)
{
    /* Valid port ids are 0 .. RTE_MAX_ETHPORTS-1; the previous check used
     * '>' and therefore let devId == RTE_MAX_ETHPORTS through (off-by-one). */
    if(devId >= RTE_MAX_ETHPORTS)
    {
        qCritical("Device ID is out of range");
        return;
    }

    rte_eth_stats_get(devId, stat);
}
/*
 * Fetch the link-level counters of the device behind @api_handle into
 * @stats. Logs and returns without touching @stats on a bad handle or
 * NULL output pointer.
 */
void
rw_piot_get_link_stats(rw_piot_api_handle_t api_handle,
                       rw_piot_link_stats_t *stats)
{
  rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);

  /* Validate inputs BEFORE the ASSERT: previously the ASSERT fired first,
   * so debug builds aborted on a bad handle instead of taking the graceful
   * log-and-return path below. */
  if (NULL == rw_piot_dev || NULL == stats) {
    RW_PIOT_LOG(RTE_LOG_ERR, "PIOT Could not find device by handle or invalid input param\n");
    return;
  }
  ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));

  rte_eth_stats_get(rw_piot_dev->rte_port_id, stats);
  return;
}
static void stats_display(uint8_t port_id) { struct rte_eth_stats stats; rte_eth_stats_get(port_id, &stats); printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes); printf(" RX-errors: %-10"PRIu64" RX-nombuf: %-10"PRIu64"\n", stats.ierrors, stats.rx_nombuf); printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes); }
/* --------------------------Lagopus code start ----------------------------- */
/*
 * Build a freshly-allocated OpenFlow port-stats record for @port from the
 * DPDK ethdev counters, and mirror it into port->ofp_port_stats.
 * Returns NULL on an out-of-range ifindex or allocation failure.
 * Caller owns (and must free) the returned struct.
 */
static struct port_stats *
port_stats(struct port *port) {
  struct rte_eth_stats rte_stats;
  struct timespec ts;
  struct port_stats *stats;

  if (unlikely(port->ifindex >= RTE_MAX_ETHPORTS)) {
    return NULL;
  }
  stats = calloc(1, sizeof(struct port_stats));
  if (stats == NULL) {
    return NULL;
  }
  rte_eth_stats_get((uint8_t)port->ifindex, &rte_stats);
  /* if counter is not supported, set all ones value. */
  stats->ofp.port_no = port->ofp_port.port_no;
  stats->ofp.rx_packets = rte_stats.ipackets;
  stats->ofp.tx_packets = rte_stats.opackets;
  stats->ofp.rx_bytes = rte_stats.ibytes;
  stats->ofp.tx_bytes = rte_stats.obytes;
  /* NOTE(review): rx_nombuf counts mbuf-allocation failures, which is used
   * here as the closest available "rx_dropped" figure. */
  stats->ofp.rx_dropped = rte_stats.rx_nombuf;
  stats->ofp.tx_dropped = UINT64_MAX;
  stats->ofp.rx_errors = rte_stats.ierrors;
  stats->ofp.tx_errors = rte_stats.oerrors;
  stats->ofp.rx_frame_err = UINT64_MAX;
  stats->ofp.rx_over_err = UINT64_MAX;
  stats->ofp.rx_crc_err = UINT64_MAX;
  stats->ofp.collisions = UINT64_MAX;

  /* Port uptime = now - create_time, split into sec/nsec with a manual
   * borrow: if nsec underflows, pre-load one full second and let the
   * unsigned add/subtract below land on the correct remainder. */
  clock_gettime(CLOCK_MONOTONIC, &ts);
  stats->ofp.duration_sec = (uint32_t)(ts.tv_sec - port->create_time.tv_sec);
  if (ts.tv_nsec < port->create_time.tv_nsec) {
    stats->ofp.duration_sec--;
    stats->ofp.duration_nsec = 1 * 1000 * 1000 * 1000;
  } else {
    stats->ofp.duration_nsec = 0;
  }
  stats->ofp.duration_nsec += (uint32_t)ts.tv_nsec;
  stats->ofp.duration_nsec -= (uint32_t)port->create_time.tv_nsec;

  /* Keep the port's own cached copy in sync with what we return. */
  OS_MEMCPY(&port->ofp_port_stats, &stats->ofp, sizeof(stats->ofp));
  return stats;
}
/*
 * Print the application-level RX/worker/TX thread counters followed by
 * the hardware counters of every ethernet port.
 *
 * Fix: the rte_eth_stats_get() call contained mojibake ("ð_stats") —
 * an HTML-entity-corrupted "&eth_stats" — which did not compile; the
 * intended address-of expression is restored.
 */
static void
print_stats(void)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	unsigned i;
	struct rte_eth_stats eth_stats;

	printf("\nRX thread stats:\n");
	printf(" - Pkts rxd: %"PRIu64"\n", app_stats.rx.rx_pkts);
	printf(" - Pkts enqd to workers ring: %"PRIu64"\n", app_stats.rx.enqueue_pkts);

	printf("\nWorker thread stats:\n");
	printf(" - Pkts deqd from workers ring: %"PRIu64"\n", app_stats.wkr.dequeue_pkts);
	printf(" - Pkts enqd to tx ring: %"PRIu64"\n", app_stats.wkr.enqueue_pkts);
	printf(" - Pkts enq to tx failed: %"PRIu64"\n", app_stats.wkr.enqueue_failed_pkts);

	printf("\nTX stats:\n");
	printf(" - Pkts deqd from tx ring: %"PRIu64"\n", app_stats.tx.dequeue_pkts);
	printf(" - Ro Pkts transmitted: %"PRIu64"\n", app_stats.tx.ro_tx_pkts);
	printf(" - Ro Pkts tx failed: %"PRIu64"\n", app_stats.tx.ro_tx_failed_pkts);
	printf(" - Pkts transmitted w/o reorder: %"PRIu64"\n", app_stats.tx.early_pkts_txtd_woro);
	printf(" - Pkts tx failed w/o reorder: %"PRIu64"\n", app_stats.tx.early_pkts_tx_failed_woro);

	/* Per-port hardware counters. */
	for (i = 0; i < nb_ports; i++) {
		rte_eth_stats_get(i, &eth_stats);
		printf("\nPort %u stats:\n", i);
		printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
		printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
		printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
		printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
		printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
	}
}
/*
 * Reset all software statistics kept for @info, re-baseline the hardware
 * counters into init_stats, and refresh the on-screen display.
 */
void
pktgen_clear_stats(port_info_t *info)
{
	/* Snapshot the NIC counters so future deltas start from "now". */
	rte_eth_stats_get(info->pid, &info->init_stats);

	memset(&info->sizes, 0, sizeof(port_sizes_t));
	memset(&info->port_stats, 0, sizeof(eth_stats_t));
	memset(&info->rate_stats, 0, sizeof(eth_stats_t));
	memset(&pktgen.cumm_rate_totals, 0, sizeof(eth_stats_t));

	/* Protocol classification and failure counters. */
	info->stats.arp_pkts = 0;
	info->stats.echo_pkts = 0;
	info->stats.ip_pkts = 0;
	info->stats.ipv6_pkts = 0;
	info->stats.vlan_pkts = 0;
	info->stats.unknown_pkts = 0;
	info->stats.dropped_pkts = 0;
	info->stats.tx_failed = 0;

	pktgen_update_display();
}
/* Dump aggregate and per-queue NIC counters for @port_id to stdout. */
static void
nic_stats_display(uint8_t port_id)
{
	static const char *nic_stats_border = "########################";
	struct rte_eth_stats stats;
	uint8_t i;

	rte_eth_stats_get(port_id, &stats);

	printf("\n %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	/* Aggregate counters. */
	printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64
	       " RX-bytes: %-10"PRIu64"\n",
	       stats.ipackets, stats.ierrors, stats.ibytes);
	printf(" RX-nombuf: %-10"PRIu64"\n", stats.rx_nombuf);
	printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64
	       " TX-bytes: %-10"PRIu64"\n",
	       stats.opackets, stats.oerrors, stats.obytes);
	printf("\n");

	/* Per-queue RX counters. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		printf(" Stats reg %2d RX-packets: %-10"PRIu64
		       " RX-errors: %-10"PRIu64
		       " RX-bytes: %-10"PRIu64"\n",
		       i, stats.q_ipackets[i], stats.q_errors[i],
		       stats.q_ibytes[i]);
	}
	printf("\n");

	/* Per-queue TX counters. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		printf(" Stats reg %2d TX-packets: %-10"PRIu64
		       " TX-bytes: %-10"PRIu64"\n",
		       i, stats.q_opackets[i], stats.q_obytes[i]);
	}

	printf(" %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
void log_receiver(struct receiver_t *receiver) { RTE_LOG(INFO, RECEIVER, "------------- Receiver -------------\n"); RTE_LOG(INFO, RECEIVER, "| Core ID: %"PRIu32"\n", receiver->core_id); RTE_LOG(INFO, RECEIVER, "| In port: %"PRIu32"\n", receiver->in_port); RTE_LOG(INFO, RECEIVER, "| MAC: "FORMAT_MAC"\n", ARG_V_MAC(receiver->mac)); RTE_LOG(INFO, RECEIVER, "| Packets received: %"PRIu64"\n", receiver->pkts_received); if (receiver->nb_polls != 0) RTE_LOG(INFO, RECEIVER, "| Load: %f\n", receiver->nb_rec / (float) receiver->nb_polls); RTE_LOG(INFO, RECEIVER, "| sum Time (CPU Cycles): %f\n", receiver->time_a /(float) receiver->nb_measurements); RTE_LOG(INFO, RECEIVER, "| rec Time (CPU Cycles): %f\n", receiver->time_b /(float) receiver->nb_measurements); RTE_LOG(INFO, RECEIVER, "|***********************************\n"); receiver->nb_polls = 0; receiver->nb_rec = 0; struct rte_eth_stats stats; rte_eth_stats_get(receiver->in_port, &stats); RTE_LOG(INFO, RECEIVER, "| RX: %"PRIu64" TX: %"PRIu64" \n", stats.ipackets, stats.opackets); RTE_LOG(INFO, RECEIVER, "| RX dropped: %"PRIu64" RX error: %"PRIu64" TX error: %"PRIu64"\n", stats.imissed, stats.ierrors, stats.oerrors); RTE_LOG(INFO, RECEIVER, "------------------------------------\n"); }
void app_print(int signo) { int i; port_info_t *info; printf("\n\n\n"); APP_DISPLAY(INFO, "############ Statistics ###############\n"); for (i = 0; i < probe.nb_ports; i++) { info = &probe.info[i]; APP_DISPLAY(INFO, "+Processing(Port: %d)\n", i); APP_DISPLAY(INFO, " +---ARP : %" PRIu64 "\n", info->stats.arp_pkts); APP_DISPLAY(INFO, " +---IPv4: %" PRIu64 "\n", info->stats.ip_pkts); APP_DISPLAY(INFO, " +---IPv6: %" PRIu64 "\n", info->stats.ipv6_pkts); /* log ethernet stat */ rte_eth_stats_get(i, &info->port_stats); APP_DISPLAY(INFO, "+Ethernet Stats(Port: %d)\n", i); APP_DISPLAY(INFO, " +---ipackets: %" PRIu64 "\n", info->port_stats.ipackets - info->init_stats.ipackets); APP_DISPLAY(INFO, " +---ibytes : %" PRIu64 "\n", info->port_stats.ibytes - info->init_stats.ibytes); APP_DISPLAY(INFO, " +---imissed : %" PRIu64 "\n", info->port_stats.imissed - info->init_stats.imissed); } exit(1); }
/*
 * Return the TX-side packet/error counters for the capture handle @p.
 * On invalid input an all-zero stats struct is returned and an error
 * message is written to errbuf_g.
 */
dpdkpcap_stats_t txStatsGet(pcap_t *p)
{
    struct rte_eth_stats stats;
    dpdkpcap_stats_t dpdk_stats;

    memset(&dpdk_stats, 0, sizeof(dpdk_stats));

    /* Valid port ids are 0 .. RTE_MAX_ETHPORTS-1; the previous test used
     * '>' and therefore accepted deviceId == RTE_MAX_ETHPORTS (off-by-one). */
    if (p == NULL || p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return dpdk_stats;
    }

    rte_eth_stats_get(p->deviceId, &stats);

    /* NOTE(review): "%hu" expects unsigned short; deviceId appears to be a
     * signed int (it is compared against < 0) — verify the format matches
     * the field's declared type. */
    debug("\nTX port %hu: tx: %"PRIu64 " err: %"PRIu64 "\n",
          p->deviceId, stats.opackets, stats.oerrors);

    dpdk_stats.packets = stats.opackets;
    dpdk_stats.errors = stats.oerrors;
    return dpdk_stats;
}
/*
 * I/O-thread RX loop for the latency-measurement tool: drain each NIC RX
 * queue, record receive timestamps for marked (ID-matching) packets into
 * the global latencyStats[] table, and free every mbuf. When the global
 * continueRX flag drops to 0, print the full latency/bandwidth report and
 * exit the process.
 *
 * Relies on file-scope globals: continueRX, trainLen, latencyStats[],
 * hwTimeTest, fpgaConvRate, icmppkt, idoffset, tsoffset.
 */
static inline void
app_lcore_io_rx (struct app_lcore_params_io *lp, uint32_t bsz_rd)
{
  uint32_t i;
  /* Persists across calls: index of the next latencyStats[] slot. */
  static uint32_t counter = 0;

  for (i = 0; i < lp->rx.n_nic_queues; i++) {
    uint8_t port = lp->rx.nic_queues[i].port;
    uint8_t queue = lp->rx.nic_queues[i].queue;
    uint32_t n_mbufs, j;

    n_mbufs = rte_eth_rx_burst (port, queue, lp->rx.mbuf_in.array, (uint16_t)bsz_rd);

    /* Test finished: emit the final report and terminate. */
    if (unlikely (continueRX == 0)) {
      uint32_t k;
      uint64_t totalBytes = 0;
      hptl_t firstTime = 0, lastTime = 0;
      uint64_t ignored = 0;
      uint64_t sumLatency = 0;
      struct timespec hwRelation = {0, 0};
      struct timespec hwDelta;

      for (k = 0; k < trainLen; k++) {
        if (latencyStats[k].recved) {
          uint64_t currentLatency = latencyStats[k].recvTime - latencyStats[k].sentTime;
          printf ("%d: Latency %lu ns", k + 1, currentLatency);
          sumLatency += currentLatency;
          if (hwTimeTest) {
            // fpga time conversion (fields arrive big-endian, in FPGA ticks)
            uint64_t fpgatime = ntohl (latencyStats[k].hwTime.tv_sec) * 1000000000 +
                                ntohl (latencyStats[k].hwTime.tv_nsec);
            fpgatime /= fpgaConvRate;
            latencyStats[k].hwTime.tv_sec = fpgatime / 1000000000;
            latencyStats[k].hwTime.tv_nsec = fpgatime % 1000000000;
            printf (" hwTime %lu.%lu", latencyStats[k].hwTime.tv_sec,
                    latencyStats[k].hwTime.tv_nsec);
            if (lastTime != 0) {
              printf (" hwDeltaLatency %lu.%lu",
                      latencyStats[k].hwTime.tv_sec - hwDelta.tv_sec,
                      latencyStats[k].hwTime.tv_nsec - hwDelta.tv_nsec);
            }
            hwDelta = latencyStats[k].hwTime;
          }
          if (lastTime != 0) {
            /* Instantaneous rate between consecutive received packets. */
            printf (" insta-BandWidth %lf Gbps",
                    (latencyStats[k].pktLen * 8 / 1000000000.)
                        / (((double)latencyStats[k].recvTime - lastTime) / 1000000000.));
          } else {
            /* First received packet: anchor the hw/host clock relation. */
            if (hwTimeTest) {
              hwRelation = hptl_timespec (latencyStats[k].recvTime);
              hwRelation.tv_sec -= latencyStats[k].hwTime.tv_sec;
              hwRelation.tv_nsec -= latencyStats[k].hwTime.tv_nsec;
            }
            firstTime = latencyStats[k].recvTime;
          }
          lastTime = latencyStats[k].recvTime;
          totalBytes += latencyStats[k].pktLen;
          printf ("\n");
        } else {
          printf ("%d: Recved but ignored\n", k + 1);
          ignored++;
        }
      }
      printf ("Mean-BandWidth %lf Gbps\n",
              (totalBytes * 8 / 1000000000.)
                  / (((double)lastTime - firstTime) / 1000000000.));
      printf ("Mean-Latency %lf ns\n", sumLatency / (((double)trainLen - ignored)));
      // Ignored / Dropped stats
      if (ignored > 0) {
        printf ("%ld Packets ignored\n", ignored);
      }
      struct rte_eth_stats stats;
      rte_eth_stats_get (port, &stats);
      if (stats.ierrors > 0) {
        printf ("%ld Packets errored/dropped\n", stats.ierrors);
      }
      exit (0);
    }

    if (unlikely (n_mbufs == 0)) {
      continue;
    }

    /* Only the LAST packet of a burst is inspected; a burst whose tail
     * carries the expected marker ID at idoffset is timestamped. */
    if (trainLen &&
        (*(uint16_t *)(rte_ctrlmbuf_data (lp->rx.mbuf_in.array[n_mbufs - 1]) + idoffset)) ==
            (*(uint16_t *)(icmppkt + idoffset))) {
      // Add counter #recvPkts-1, so the data is saved in the structure as "the
      // last packet of the bulk, instead of the first one"
      counter += n_mbufs - 1;
      // Latency counters
      latencyStats[counter].recvTime = hptl_get ();
      latencyStats[counter].sentTime =
          *(hptl_t *)(rte_ctrlmbuf_data (lp->rx.mbuf_in.array[n_mbufs - 1]) + tsoffset);
      latencyStats[counter].pktLen = rte_ctrlmbuf_len (lp->rx.mbuf_in.array[n_mbufs - 1]);
      latencyStats[counter].recved = 1;
      if (hwTimeTest) {
        /* NOTE(review): hardware timestamp assumed at fixed payload
         * offsets 50/54 — confirm against the sender's packet layout. */
        latencyStats[counter].hwTime.tv_sec =
            *(uint32_t *)(rte_ctrlmbuf_data (lp->rx.mbuf_in.array[n_mbufs - 1]) + 50);
        latencyStats[counter].hwTime.tv_nsec =
            *(uint32_t *)(rte_ctrlmbuf_data (lp->rx.mbuf_in.array[n_mbufs - 1]) + 54);
      }
      // end if all packets have been recved
      counter++;
      if (counter == trainLen)
        continueRX = 0;
    }

    // Drop all packets
    for (j = 0; j < n_mbufs; j++) {
      struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
      rte_pktmbuf_free (pkt);
    }
  }
}
static int test_pmd_ring_pair_create_attach(void) { struct rte_eth_stats stats, stats2; struct rte_mbuf buf, *pbuf = &buf; struct rte_eth_conf null_conf; if ((RXTX_PORT2 >= RTE_MAX_ETHPORTS) || (RXTX_PORT3 >= RTE_MAX_ETHPORTS)) { printf(" TX/RX port exceed max eth ports\n"); return -1; } if ((rte_eth_dev_configure(RXTX_PORT2, 1, 1, &null_conf) < 0) || (rte_eth_dev_configure(RXTX_PORT3, 1, 1, &null_conf) < 0)) { printf("Configure failed for RXTX port\n"); return -1; } if ((rte_eth_tx_queue_setup(RXTX_PORT2, 0, RING_SIZE, SOCKET0, NULL) < 0) || (rte_eth_tx_queue_setup(RXTX_PORT3, 0, RING_SIZE, SOCKET0, NULL) < 0)) { printf("TX queue setup failed\n"); return -1; } if ((rte_eth_rx_queue_setup(RXTX_PORT2, 0, RING_SIZE, SOCKET0, NULL, mp) < 0) || (rte_eth_rx_queue_setup(RXTX_PORT3, 0, RING_SIZE, SOCKET0, NULL, mp) < 0)) { printf("RX queue setup failed\n"); return -1; } if ((rte_eth_dev_start(RXTX_PORT2) < 0) || (rte_eth_dev_start(RXTX_PORT3) < 0)) { printf("Error starting RXTX port\n"); return -1; } /* * send and receive 1 packet (RXTX_PORT2 -> RXTX_PORT3) * and check for stats update */ if (rte_eth_tx_burst(RXTX_PORT2, 0, &pbuf, 1) != 1) { printf("Error sending packet to RXTX port\n"); return -1; } if (rte_eth_rx_burst(RXTX_PORT3, 0, &pbuf, 1) != 1) { printf("Error receiving packet from RXTX port\n"); return -1; } rte_eth_stats_get(RXTX_PORT2, &stats); rte_eth_stats_get(RXTX_PORT3, &stats2); if (stats.ipackets != 0 || stats.opackets != 1 || stats.ibytes != 0 || stats.obytes != 0 || stats.ierrors != 0 || stats.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } if (stats2.ipackets != 1 || stats2.opackets != 0 || stats2.ibytes != 0 || stats2.obytes != 0 || stats2.ierrors != 0 || stats2.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } /* * send and receive 1 packet (RXTX_PORT3 -> RXTX_PORT2) * and check for stats update */ if (rte_eth_tx_burst(RXTX_PORT3, 0, &pbuf, 1) != 1) { printf("Error sending packet 
to RXTX port\n"); return -1; } if (rte_eth_rx_burst(RXTX_PORT2, 0, &pbuf, 1) != 1) { printf("Error receiving packet from RXTX port\n"); return -1; } rte_eth_stats_get(RXTX_PORT2, &stats); rte_eth_stats_get(RXTX_PORT3, &stats2); if (stats.ipackets != 1 || stats.opackets != 1 || stats.ibytes != 0 || stats.obytes != 0 || stats.ierrors != 0 || stats.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } if (stats2.ipackets != 1 || stats2.opackets != 1 || stats2.ibytes != 0 || stats2.obytes != 0 || stats2.ierrors != 0 || stats2.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } /* * send and receive 1 packet (RXTX_PORT2 -> RXTX_PORT2) * and check for stats update */ if (rte_eth_tx_burst(RXTX_PORT2, 0, &pbuf, 1) != 1) { printf("Error sending packet to RXTX port\n"); return -1; } if (rte_eth_rx_burst(RXTX_PORT2, 0, &pbuf, 1) != 1) { printf("Error receiving packet from RXTX port\n"); return -1; } rte_eth_stats_get(RXTX_PORT2, &stats); rte_eth_stats_get(RXTX_PORT3, &stats2); if (stats.ipackets != 2 || stats.opackets != 2 || stats.ibytes != 0 || stats.obytes != 0 || stats.ierrors != 0 || stats.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } if (stats2.ipackets != 1 || stats2.opackets != 1 || stats2.ibytes != 0 || stats2.obytes != 0 || stats2.ierrors != 0 || stats2.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } /* * send and receive 1 packet (RXTX_PORT3 -> RXTX_PORT3) * and check for stats update */ if (rte_eth_tx_burst(RXTX_PORT3, 0, &pbuf, 1) != 1) { printf("Error sending packet to RXTX port\n"); return -1; } if (rte_eth_rx_burst(RXTX_PORT3, 0, &pbuf, 1) != 1) { printf("Error receiving packet from RXTX port\n"); return -1; } rte_eth_stats_get(RXTX_PORT2, &stats); rte_eth_stats_get(RXTX_PORT3, &stats2); if (stats.ipackets != 2 || stats.opackets != 2 || stats.ibytes != 0 || stats.obytes != 0 || stats.ierrors != 0 || stats.oerrors != 0) { 
printf("Error: RXTX port stats are not as expected\n"); return -1; } if (stats2.ipackets != 2 || stats2.opackets != 2 || stats2.ibytes != 0 || stats2.obytes != 0 || stats2.ierrors != 0 || stats2.oerrors != 0) { printf("Error: RXTX port stats are not as expected\n"); return -1; } rte_eth_dev_stop(RXTX_PORT2); rte_eth_dev_stop(RXTX_PORT3); return 0; }
/**
 * Receive packets from the ethernet driver and enqueue them into the
 * worker queues. This function is called from the I/O (input) thread.
 *
 * Worker selection policy comes from app.fifoness:
 *   FIFONESS_FLOW - hash the first sizeof(ETHER_HDR)+2 bytes of the frame
 *                   (CRC32 with SSE4.2, CityHash64 otherwise);
 *   FIFONESS_PORT - all packets of a port go to the same worker;
 *   FIFONESS_NONE - round-robin by burst position.
 */
static inline void
app_lcore_io_rx(
  struct app_lcore_params_io *lp,
  uint32_t n_workers,
  uint32_t bsz_rd,
  uint32_t bsz_wr) {
  struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
  uint32_t i, fifoness;

  fifoness = app.fifoness;
  for (i = 0; i < lp->rx.n_nic_queues; i++) {
    uint8_t portid = lp->rx.nic_queues[i].port;
    uint8_t queue = lp->rx.nic_queues[i].queue;
    uint32_t n_mbufs, j;

    if (unlikely(lp->rx.nic_queues[i].enabled != true)) {
      continue;
    }
    n_mbufs = rte_eth_rx_burst(portid, queue,
                               lp->rx.mbuf_in.array,
                               (uint16_t) bsz_rd);
    if (unlikely(n_mbufs == 0)) {
      continue;
    }

#if APP_STATS
    /* Every APP_STATS iterations, report NIC drop ratio and burst size. */
    lp->rx.nic_queues_iters[i] ++;
    lp->rx.nic_queues_count[i] += n_mbufs;
    if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
      struct rte_eth_stats stats;
      unsigned lcore = rte_lcore_id();

      rte_eth_stats_get(portid, &stats);
      printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
             lcore,
             (unsigned) portid,
             (double) stats.ierrors / (double) (stats.ierrors + stats.ipackets),
             ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
      lp->rx.nic_queues_iters[i] = 0;
      lp->rx.nic_queues_count[i] = 0;
    }
#endif

#if APP_IO_RX_DROP_ALL_PACKETS
    /* Benchmark mode: free everything without dispatching. */
    for (j = 0; j < n_mbufs; j ++) {
      struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
      rte_pktmbuf_free(pkt);
    }
    continue;
#endif

    /*
     * Software pipeline: process packet j while prefetching mbufs j+4/j+5
     * two stages ahead. NOTE(review): array slots [1..3] are read before
     * n_mbufs is known to be >= 4; the prefetches of potentially stale
     * pointers are harmless, and the main loop body only runs when
     * j+3 < n_mbufs — verify the tail loop covers the short-burst case.
     */
    mbuf_1_0 = lp->rx.mbuf_in.array[0];
    mbuf_1_1 = lp->rx.mbuf_in.array[1];
    mbuf_2_0 = lp->rx.mbuf_in.array[2];
    mbuf_2_1 = lp->rx.mbuf_in.array[3];
    APP_IO_RX_PREFETCH0(mbuf_2_0);
    APP_IO_RX_PREFETCH0(mbuf_2_1);

    for (j = 0; j + 3 < n_mbufs; j += 2) {
      struct rte_mbuf *mbuf_0_0, *mbuf_0_1;
      uint32_t worker_0, worker_1;

      /* Shift the 3-deep pipeline registers and load the next pair. */
      mbuf_0_0 = mbuf_1_0;
      mbuf_0_1 = mbuf_1_1;
      mbuf_1_0 = mbuf_2_0;
      mbuf_1_1 = mbuf_2_1;
      mbuf_2_0 = lp->rx.mbuf_in.array[j+4];
      mbuf_2_1 = lp->rx.mbuf_in.array[j+5];
      APP_IO_RX_PREFETCH0(mbuf_2_0);
      APP_IO_RX_PREFETCH0(mbuf_2_1);

      switch (fifoness) {
        case FIFONESS_FLOW:
#ifdef __SSE4_2__
          worker_0 = rte_hash_crc(rte_pktmbuf_mtod(mbuf_0_0, void *),
                                  sizeof(ETHER_HDR) + 2, portid) % n_workers;
          worker_1 = rte_hash_crc(rte_pktmbuf_mtod(mbuf_0_1, void *),
                                  sizeof(ETHER_HDR) + 2, portid) % n_workers;
#else
          worker_0 = CityHash64WithSeed(rte_pktmbuf_mtod(mbuf_0_0, void *),
                                        sizeof(ETHER_HDR) + 2, portid) % n_workers;
          worker_1 = CityHash64WithSeed(rte_pktmbuf_mtod(mbuf_0_1, void *),
                                        sizeof(ETHER_HDR) + 2, portid) % n_workers;
#endif /* __SSE4_2__ */
          break;
        case FIFONESS_PORT:
          worker_0 = worker_1 = portid % n_workers;
          break;
        case FIFONESS_NONE:
        default:
          worker_0 = j % n_workers;
          worker_1 = (j + 1) % n_workers;
          break;
      }
      app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr);
      app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr);
    }

    /*
     * Handle the last 1, 2 (when n_mbufs is even) or
     * 3 (when n_mbufs is odd) packets
     */
    for ( ; j < n_mbufs; j += 1) {
      struct rte_mbuf *mbuf;
      uint32_t worker;

      mbuf = mbuf_1_0;
      mbuf_1_0 = mbuf_1_1;
      mbuf_1_1 = mbuf_2_0;
      mbuf_2_0 = mbuf_2_1;
      APP_IO_RX_PREFETCH0(mbuf_1_0);

      switch (fifoness) {
        case FIFONESS_FLOW:
#ifdef __SSE4_2__
          worker = rte_hash_crc(rte_pktmbuf_mtod(mbuf, void *),
                                sizeof(ETHER_HDR) + 2, portid) % n_workers;
#else
          worker = CityHash64WithSeed(rte_pktmbuf_mtod(mbuf, void *),
                                      sizeof(ETHER_HDR) + 2, portid) % n_workers;
#endif /* __SSE4_2__ */
          break;
        case FIFONESS_PORT:
          worker = portid % n_workers;
          break;
        case FIFONESS_NONE:
        default:
          worker = j % n_workers;
          break;
      }
      app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr);
    }
  }
}
/*
 * I/O-thread RX loop (load_balancer variant): drain each NIC RX queue and
 * dispatch every packet to a worker chosen from the payload byte at offset
 * @pos_lb.
 *
 * NOTE(review): worker = data[pos_lb] & (n_workers - 1) only distributes
 * correctly when n_workers is a power of two — confirm the app enforces
 * that at configuration time.
 */
static inline void
app_lcore_io_rx(
  struct app_lcore_params_io *lp,
  uint32_t n_workers,
  uint32_t bsz_rd,
  uint32_t bsz_wr,
  uint8_t pos_lb) {
  struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
  uint8_t *data_1_0, *data_1_1 = NULL;
  uint32_t i;

  for (i = 0; i < lp->rx.n_nic_queues; i ++) {
    uint8_t port = lp->rx.nic_queues[i].port;
    uint8_t queue = lp->rx.nic_queues[i].queue;
    uint32_t n_mbufs, j;

    n_mbufs = rte_eth_rx_burst(
                port,
                queue,
                lp->rx.mbuf_in.array,
                (uint16_t) bsz_rd);
    if (unlikely(n_mbufs == 0)) {
      continue;
    }

#if APP_STATS
    /* Every APP_STATS iterations, report NIC drop ratio and burst size. */
    lp->rx.nic_queues_iters[i] ++;
    lp->rx.nic_queues_count[i] += n_mbufs;
    if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
      struct rte_eth_stats stats;
      unsigned lcore = rte_lcore_id();

      rte_eth_stats_get(port, &stats);
      printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
             lcore,
             (unsigned) port,
             (double) stats.imissed / (double) (stats.imissed + stats.ipackets),
             ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
      lp->rx.nic_queues_iters[i] = 0;
      lp->rx.nic_queues_count[i] = 0;
    }
#endif

#if APP_IO_RX_DROP_ALL_PACKETS
    /* Benchmark mode: free everything without dispatching. */
    for (j = 0; j < n_mbufs; j ++) {
      struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
      rte_pktmbuf_free(pkt);
    }
    continue;
#endif

    /*
     * Software pipeline: stage 0 dispatches, stage 1 has data prefetched,
     * stage 2 has the mbuf header prefetched.
     * NOTE(review): array slots [2]/[3] are read before n_mbufs is known
     * to be >= 4 — the values only feed prefetches / later guarded reads,
     * but verify against the upstream load_balancer example.
     */
    mbuf_1_0 = lp->rx.mbuf_in.array[0];
    mbuf_1_1 = lp->rx.mbuf_in.array[1];
    data_1_0 = rte_pktmbuf_mtod(mbuf_1_0, uint8_t *);
    if (likely(n_mbufs > 1)) {
      data_1_1 = rte_pktmbuf_mtod(mbuf_1_1, uint8_t *);
    }
    mbuf_2_0 = lp->rx.mbuf_in.array[2];
    mbuf_2_1 = lp->rx.mbuf_in.array[3];
    APP_IO_RX_PREFETCH0(mbuf_2_0);
    APP_IO_RX_PREFETCH0(mbuf_2_1);

    for (j = 0; j + 3 < n_mbufs; j += 2) {
      struct rte_mbuf *mbuf_0_0, *mbuf_0_1;
      uint8_t *data_0_0, *data_0_1;
      uint32_t worker_0, worker_1;

      /* Shift pipeline registers and load/prefetch the next pair. */
      mbuf_0_0 = mbuf_1_0;
      mbuf_0_1 = mbuf_1_1;
      data_0_0 = data_1_0;
      data_0_1 = data_1_1;

      mbuf_1_0 = mbuf_2_0;
      mbuf_1_1 = mbuf_2_1;
      data_1_0 = rte_pktmbuf_mtod(mbuf_2_0, uint8_t *);
      data_1_1 = rte_pktmbuf_mtod(mbuf_2_1, uint8_t *);
      APP_IO_RX_PREFETCH0(data_1_0);
      APP_IO_RX_PREFETCH0(data_1_1);

      mbuf_2_0 = lp->rx.mbuf_in.array[j+4];
      mbuf_2_1 = lp->rx.mbuf_in.array[j+5];
      APP_IO_RX_PREFETCH0(mbuf_2_0);
      APP_IO_RX_PREFETCH0(mbuf_2_1);

      /* Worker = payload byte at pos_lb, masked (power-of-2 n_workers). */
      worker_0 = data_0_0[pos_lb] & (n_workers - 1);
      worker_1 = data_0_1[pos_lb] & (n_workers - 1);

      app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr);
      app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr);
    }

    /* Handle the last 1, 2 (when n_mbufs is even) or 3 (when n_mbufs is odd) packets */
    for ( ; j < n_mbufs; j += 1) {
      struct rte_mbuf *mbuf;
      uint8_t *data;
      uint32_t worker;

      mbuf = mbuf_1_0;
      mbuf_1_0 = mbuf_1_1;
      mbuf_1_1 = mbuf_2_0;
      mbuf_2_0 = mbuf_2_1;

      data = rte_pktmbuf_mtod(mbuf, uint8_t *);

      APP_IO_RX_PREFETCH0(mbuf_1_0);

      worker = data[pos_lb] & (n_workers - 1);

      app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr);
    }
  }
}
/*
 * Print NIC statistics for @port_id on the given cmdline session.
 * When @option is non-zero, emit a JSON-like object (port, rx/tx
 * aggregates, per-queue array terminated by an empty object);
 * otherwise emit the classic human-readable table.
 */
void
nic_stats_display(struct cmdline *cl, portid_t port_id, int option) {
  struct rte_eth_stats stats;
  uint8_t i;
  static const char *nic_stats_border = "=======================";

  rte_eth_stats_get(port_id, &stats);

  if (option) {
    /* JSON-style output. */
    cmdline_printf(
        cl,
        "{\"portid\": %d, "
        "\"rx\": {\"packets\": %" PRIu64 ", \"errors\": %" PRIu64 ", \"bytes\": %" PRIu64 ", "
        "\"badcrc\": %" PRIu64 ", \"badlen\": %" PRIu64 ", \"nombuf\": %" PRIu64 ", "
        "\"xon\": %" PRIu64 ", \"xoff\": %" PRIu64 "}, "
        "\"tx\": {\"packets\": %" PRIu64 ", \"errors\": %" PRIu64 ", \"bytes\": %" PRIu64 ", "
        "\"xon\": %" PRIu64 ", \"xoff\": %" PRIu64 "}, ",
        port_id, stats.ipackets, stats.ierrors, stats.ibytes, stats.ibadcrc,
        stats.ibadlen, stats.rx_nombuf, stats.rx_pause_xon, stats.rx_pause_xoff,
        stats.opackets, stats.oerrors, stats.obytes, stats.tx_pause_xon,
        stats.tx_pause_xoff);
    cmdline_printf(cl, "\"queues\": [");
    for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
      cmdline_printf(
          cl,
          "{\"queueid\": %d, "
          "\"rx\": {\"packets\": %" PRIu64 ", \"errors\": %" PRIu64 ", \"bytes\": %" PRIu64 "}, "
          "\"tx\": {\"packets\": %" PRIu64 ", \"bytes\": %" PRIu64 "}}, ",
          i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i],
          stats.q_opackets[i], stats.q_obytes[i]);
    }
    // add a null object to end the array
    cmdline_printf(cl, "{}");
    cmdline_printf(cl, "]}\n");
  } else {
    /* Human-readable table. */
    cmdline_printf(cl, "\n %s NIC statistics for port %-2d %s\n",
                   nic_stats_border, port_id, nic_stats_border);
    cmdline_printf(cl,
                   " RX-packets: %10" PRIu64 " RX-errors: %10" PRIu64
                   " RX-bytes: %10" PRIu64 "\n",
                   stats.ipackets, stats.ierrors, stats.ibytes);
    cmdline_printf(cl,
                   " RX-badcrc: %10" PRIu64 " RX-badlen: %10" PRIu64
                   " RX-errors: %10" PRIu64 "\n",
                   stats.ibadcrc, stats.ibadlen, stats.ierrors);
    cmdline_printf(cl, " RX-nombuf: %10" PRIu64 "\n", stats.rx_nombuf);
    cmdline_printf(cl,
                   " TX-packets: %10" PRIu64 " TX-errors: %10" PRIu64
                   " TX-bytes: %10" PRIu64 "\n",
                   stats.opackets, stats.oerrors, stats.obytes);
    cmdline_printf(cl, "\n");
    for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
      cmdline_printf(cl,
                     " Stats reg %2d RX-packets: %10" PRIu64
                     " RX-errors: %10" PRIu64 " RX-bytes: %10" PRIu64 "\n",
                     i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
    }
    cmdline_printf(cl, "\n");
    for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
      cmdline_printf(
          cl,
          " Stats reg %2d TX-packets: %10" PRIu64
          " TX-bytes: %10" PRIu64 "\n",
          i, stats.q_opackets[i], stats.q_obytes[i]);
    }
    /* Display statistics of XON/XOFF pause frames, if any. */
    if ((stats.tx_pause_xon | stats.rx_pause_xon |
         stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
      cmdline_printf(cl, " RX-XOFF: %-10" PRIu64 " RX-XON: %-10" PRIu64 "\n",
                     stats.rx_pause_xoff, stats.rx_pause_xon);
      cmdline_printf(cl, " TX-XOFF: %-10" PRIu64 " TX-XON: %-10" PRIu64 "\n",
                     stats.tx_pause_xoff, stats.tx_pause_xon);
    }
    cmdline_printf(cl, " %s=======================%s\n",
                   nic_stats_border, nic_stats_border);
  }
}
/*
 * I/O-thread TX bandwidth generator: for each NIC TX queue, allocate a
 * burst of bsz_wr mbufs from pool 0, stamp each with the prepared ICMP
 * template (icmppkt / sndpktlen), and transmit. Unsent mbufs are freed.
 * With APP_STATS enabled, queue 0 periodically prints drop ratio and
 * measured throughput, then resets the port counters and the timing
 * window.
 */
static inline void
app_lcore_io_tx_bw (struct app_lcore_params_io *lp, uint32_t bsz_wr)
{
  uint32_t i;
  uint32_t k;

  for (i = 0; i < lp->tx.n_nic_queues; i++) {
    uint8_t port = lp->tx.nic_queues[i].port;
    uint8_t queue = lp->tx.nic_queues[i].queue;
    uint32_t n_mbufs, n_pkts;

    /* Build the burst; on allocation failure shrink it to what we got. */
    n_mbufs = bsz_wr;
    for (k = 0; k < n_mbufs; k++) {
      lp->tx.mbuf_out[port].array[k] = rte_ctrlmbuf_alloc (app.pools[0]);
      if (lp->tx.mbuf_out[port].array[k] == NULL) {
        n_mbufs = k;
        break;
      }
      lp->tx.mbuf_out[port].array[k]->pkt_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->data_len = sndpktlen;
      lp->tx.mbuf_out[port].array[k]->port = port;
      memcpy (rte_ctrlmbuf_data (lp->tx.mbuf_out[port].array[k]), icmppkt, icmppktlen);
    }
    n_pkts = rte_eth_tx_burst (port, queue, lp->tx.mbuf_out[port].array, n_mbufs);

#if APP_STATS
    lp->tx.nic_queues_iters[i]++;
    lp->tx.nic_queues_count[i] += n_mbufs;
    if (unlikely (lp->tx.nic_queues_iters[i] == APP_STATS)) {
      struct rte_eth_stats stats;
      struct timeval start_ewr, end_ewr;

      rte_eth_stats_get (port, &stats);
      gettimeofday (&lp->tx.end_ewr, NULL);
      start_ewr = lp->tx.start_ewr;
      end_ewr = lp->tx.end_ewr;
      if (queue == 0) {
        /* Useful speed counts payload bytes only; link speed adds the
         * 8-byte preamble + 12-byte IFG per frame (CRC noted in the
         * inline comment but not added here). */
        printf (
            "NIC TX port %u: drop ratio = %.2f (%lu/%lu) usefull-speed: %lf Gbps, "
            "link-speed: %lf Gbps (%.1lf pkts/s)\n",
            (unsigned)port,
            (double)stats.oerrors / (double)(stats.oerrors + stats.opackets),
            (uint64_t)stats.opackets, (uint64_t)stats.oerrors,
            (stats.obytes /
             (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
               (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
              1000000.)) /
                (1000 * 1000 * 1000. / 8.),
            (((stats.obytes) + stats.opackets * (/*4crc+8prelud+12ifg*/ (8 + 12))) /
             (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
               (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
              1000000.)) /
                (1000 * 1000 * 1000. / 8.),
            stats.opackets /
                (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
                  (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
                 1000000.));
        rte_eth_stats_reset (port);
        lp->tx.start_ewr = end_ewr;  // Updating start
      }
      lp->tx.nic_queues_iters[i] = 0;
      lp->tx.nic_queues_count[i] = 0;
    }
#endif

    /* Free the tail of the burst that the NIC did not accept. */
    if (unlikely (n_pkts < n_mbufs)) {
      for (k = n_pkts; k < n_mbufs; k++) {
        struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
        rte_ctrlmbuf_free (pkt_to_free);
      }
    }
  }
}
/*
 * I/O-thread RX loop for the sampled-timestamp (1-in-@stsw) latency test:
 * drain each NIC RX queue, account every received byte, and when a packet
 * carries the timestamp marker (TSIDTYPE at idoffset) record its latency
 * sample into latencyStats[]. When continueRX drops to 0, print the full
 * report (including per-port ethdev counters) and exit.
 *
 * NOTE(review): the inner declaration "uint32_t n_mbufs, i, j;" SHADOWS
 * the outer queue index i — the inner mbuf loop reuses the name. It works
 * because the shadowed copy is never read afterwards, but it is exactly
 * the kind of hazard -Wshadow exists for; consider renaming.
 */
static inline void
app_lcore_io_rx_sts (struct app_lcore_params_io *lp, uint32_t bsz_rd, uint32_t stsw)
{
  uint32_t i;
  /* Persists across calls: index of the next latencyStats[] slot. */
  static uint32_t counter = 0;

  for (i = 0; i < lp->rx.n_nic_queues; i++) {
    uint8_t port = lp->rx.nic_queues[i].port;
    uint8_t queue = lp->rx.nic_queues[i].queue;
    uint32_t n_mbufs, i, j;  /* inner i shadows the outer queue index */

    n_mbufs = rte_eth_rx_burst (port, queue, lp->rx.mbuf_in.array, (uint16_t)bsz_rd);

    /* Test finished: emit the final report and terminate. */
    if (unlikely (continueRX == 0)) {
      uint32_t k;
      uint64_t totalBytes = 0;
      hptl_t firstTime = 0, lastTime = 0;
      uint64_t ignored = 0;
      uint64_t sumLatency = 0;
      struct timespec hwRelation = {0, 0};
      struct timespec hwDelta;

      for (k = 0; k < trainLen; k++) {
        if (latencyStats[k].recved) {
          uint64_t currentLatency = latencyStats[k].recvTime - latencyStats[k].sentTime;
          /* Estimate removes the serialization time of the (stsw-1)
           * unsampled packets between two samples (wire bits / 10 Gbps). */
          uint64_t fixedLatency =
              currentLatency - (stsw - 1) * (latencyStats[k].pktLen * 8 + 24) / 10.;
          printf ("%d: Latency %lu ns", k + 1, currentLatency);
          printf (" Estimated %lu ns", fixedLatency);
          sumLatency += currentLatency;
          if (hwTimeTest) {
            // fpga time conversion (fields arrive big-endian, in FPGA ticks)
            uint64_t fpgatime = ntohl (latencyStats[k].hwTime.tv_sec) * 1000000000 +
                                ntohl (latencyStats[k].hwTime.tv_nsec);
            fpgatime /= fpgaConvRate;
            latencyStats[k].hwTime.tv_sec = fpgatime / 1000000000;
            latencyStats[k].hwTime.tv_nsec = fpgatime % 1000000000;
            printf (" hwTime %lu.%lu", latencyStats[k].hwTime.tv_sec,
                    latencyStats[k].hwTime.tv_nsec);
            if (lastTime != 0) {
              printf (" hwDeltaLatency %lu.%lu",
                      latencyStats[k].hwTime.tv_sec - hwDelta.tv_sec,
                      latencyStats[k].hwTime.tv_nsec - hwDelta.tv_nsec);
            }
            hwDelta = latencyStats[k].hwTime;
          }
          if (lastTime != 0) {
            /* Instantaneous rate over the bytes since the last sample. */
            printf (" insta-BandWidth %lf Gbps",
                    (latencyStats[k].totalBytes * 8 / 1000000000.)
                        / (((double)latencyStats[k].recvTime - lastTime) / 1000000000.));
          } else {
            /* First sample: anchor the hw/host clock relation. */
            if (hwTimeTest) {
              hwRelation = hptl_timespec (latencyStats[k].recvTime);
              hwRelation.tv_sec -= latencyStats[k].hwTime.tv_sec;
              hwRelation.tv_nsec -= latencyStats[k].hwTime.tv_nsec;
            }
            firstTime = latencyStats[k].recvTime;
          }
          lastTime = latencyStats[k].recvTime;
          totalBytes += latencyStats[k].totalBytes;
          printf ("\n");
        } else {
          printf ("%d: Pkt has not been seen\n", k + 1);
          ignored++;
        }
      }
      printf ("Mean-BandWidth %lf Gbps\n",
              (totalBytes * 8 / 1000000000.)
                  / (((double)lastTime - firstTime) / 1000000000.));
      printf ("Mean-Latency %lf ns\n", sumLatency / (((double)trainLen - ignored)));
      // Ignored / Dropped stats
      if (ignored > 0) {
        printf ("%ld TS-Packets lost\n", ignored);
      }
      struct rte_eth_stats stats;
      rte_eth_stats_get (port, &stats);
      if (stats.ierrors > 0) {
        printf ("%ld Packets errored/dropped\n", stats.ierrors);
      }
      if (stats.oerrors > 0) {
        printf ("%ld Packets errored in TX\n", stats.oerrors);
      }
      printf ("Port %d stats: %lu/%lu/%lu/%lu Pkts sent/recv/ierror/imissed\n",
              port, stats.opackets, stats.ipackets, stats.ierrors, stats.imissed);
      printf ("              %lu/%lu Bytes sent/recv\n", stats.obytes, stats.ibytes);
      printf ("              %lu/%lu Queue error sent/recv\n",
              stats.q_errors[0], stats.rx_nombuf);
      // DEBUG, MUST REMOVE IN RELEASE
      /* Assumes a 2-port setup: also dump the peer port's counters. */
      port = 1 - port;
      rte_eth_stats_get (port, &stats);
      printf ("Port %d stats: %lu/%lu/%lu Pkts sent/recv/oerrors\n",
              port, stats.opackets, stats.ipackets, stats.oerrors);
      printf ("              %lu/%lu Bytes sent/recv\n", stats.obytes, stats.ibytes);
      printf ("              %lu/%lu Queue error sent/recv\n",
              stats.q_errors[0], stats.rx_nombuf);
      exit (0);
    }

    if (unlikely (n_mbufs == 0)) {
      continue;
    }

    /* Scan the burst for timestamp-marked packets. */
    for (i = 0; i < n_mbufs; i++) {
      uint8_t *data = (uint8_t *)rte_ctrlmbuf_data (lp->rx.mbuf_in.array[i]);
      uint32_t len = rte_ctrlmbuf_len (lp->rx.mbuf_in.array[i]);
      bytecounter += len;
      if (*(uint16_t *)(data + idoffset) == (TSIDTYPE)tspacketId) {  // marked packet
        if (autoIncNum) {
          /* Sender embeds the sample index; out-of-range ends the test. */
          counter = *(uint16_t *)(data + cntroffset);
          if (counter >= trainLen) {
            continueRX = 0;
            continue;
          }
        }
        // Latency counters
        latencyStats[counter].recvTime = hptl_get ();
        latencyStats[counter].sentTime = (*(hptl_t *)(data + tsoffset));
        latencyStats[counter].pktLen = len;
        latencyStats[counter].totalBytes = bytecounter;
        latencyStats[counter].recved = 1;
        bytecounter = 0;  // reset counter
        if (hwTimeTest) {
          /* NOTE(review): hardware timestamp assumed at fixed payload
           * offsets 50/54 — confirm against the sender's packet layout. */
          latencyStats[counter].hwTime.tv_sec = *(uint32_t *)(data + 50);
          latencyStats[counter].hwTime.tv_nsec = *(uint32_t *)(data + 54);
        }
        // end if all packets have been recved
        counter++;
        if (counter == trainLen)
          continueRX = 0;
      }
    }

    /* Free the whole burst. */
    for (j = 0; j < n_mbufs; j++) {
      struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
      rte_pktmbuf_free (pkt);
    }
  }
}
/**
 * @brief RX worker loop body: drain each NIC RX queue owned by this I/O lcore,
 *        periodically print bandwidth/drop statistics, and free all mbufs.
 *
 * Packets are received in bursts of up to @p bsz_rd and immediately freed
 * (pure measurement path — nothing is forwarded). When compiled with
 * APP_STATS, every APP_STATS*10 non-empty bursts the per-port HW counters
 * are read, a throughput/drop-ratio line is printed, and the counters and
 * the wall-clock window are reset.
 *
 * @param lp     per-lcore I/O parameters (RX queue list, mbuf array, stats windows)
 * @param bsz_rd maximum burst size passed to rte_eth_rx_burst()
 */
static inline void app_lcore_io_rx_bw (struct app_lcore_params_io *lp, uint32_t bsz_rd)
{
	uint32_t i;
	for (i = 0; i < lp->rx.n_nic_queues; i++) {
		uint8_t port = lp->rx.nic_queues[i].port;
		uint8_t queue = lp->rx.nic_queues[i].queue;
		uint32_t n_mbufs, j;
		n_mbufs = rte_eth_rx_burst (port, queue, lp->rx.mbuf_in.array, (uint16_t)bsz_rd);
		if (unlikely (n_mbufs == 0)) {
			continue;
		}
#if APP_STATS
		/* Only non-empty bursts count toward the stats interval. */
		lp->rx.nic_queues_iters[i]++;
		lp->rx.nic_queues_count[i] += n_mbufs;
		if (unlikely (lp->rx.nic_queues_iters[i] == APP_STATS * 10)) {
			struct rte_eth_stats stats;
			struct timeval start_ewr, end_ewr;
			/* Snapshot HW counters and close the measurement window. */
			rte_eth_stats_get (port, &stats);
			gettimeofday (&lp->rx.end_ewr, NULL);
			start_ewr = lp->rx.start_ewr;
			end_ewr = lp->rx.end_ewr;
#ifdef QUEUE_STATS
			/* With QUEUE_STATS, the per-port line is printed only by queue 0;
			 * note the matching '}' for this 'if' lives in the #ifdef arm below. */
			if (queue == 0) {
#endif
				/* NOTE(review): %lu is used for uint64_t values — correct only on
				 * LP64 platforms; PRIu64 from <inttypes.h> would be portable.
				 * Also: if no packets and no errors were seen, the ratio is 0/0
				 * (prints NaN); confirm that is acceptable here. */
				printf ("NIC port %u: drop ratio = %.2f (%lu/%lu) speed: %lf Gbps (%.1lf pkts/s)\n",
				        (unsigned)port,
				        /* dropped / (dropped + received) */
				        (double)(stats.ierrors + stats.imissed) /
				            (double)((stats.ierrors + stats.imissed) + stats.ipackets),
				        (uint64_t)stats.ipackets,
				        (uint64_t)(stats.ierrors + stats.imissed),
				        /* Wire-rate estimate: per-packet overhead of 8B preamble +
				         * 12B inter-frame gap is added to the byte count (the 4B CRC
				         * is presumably already in ibytes — TODO confirm per driver);
				         * bytes / seconds, then scaled to Gbps. */
				        (((stats.ibytes) + stats.ipackets * (/* 8 preamble + 12 IFG */ (8 + 12))) /
				         (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
				           (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
				          1000000.)) /
				            (1000 * 1000 * 1000. / 8.),
				        /* packets per second over the same window */
				        stats.ipackets /
				            (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
				              (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
				             1000000.));
#ifdef QUEUE_STATS
			}
			/* Per-queue line: uses the SW burst counter for this queue, since
			 * rte_eth_stats counters are per-port, not per-queue. */
			printf (
			    "NIC port %u:%u: drop ratio = %.2f (%u/%u) speed %.1lf pkts/s\n",
			    (unsigned)port, queue,
			    (double)stats.ierrors / (double)(stats.ierrors + lp->rx.nic_queues_count[i]),
			    (uint32_t)lp->rx.nic_queues_count[i], (uint32_t)stats.ierrors,
			    lp->rx.nic_queues_count[i] /
			        (((end_ewr.tv_sec * 1000000. + end_ewr.tv_usec) -
			          (start_ewr.tv_sec * 1000000. + start_ewr.tv_usec)) /
			         1000000.));
#endif
			/* Restart the interval for this queue. */
			lp->rx.nic_queues_iters[i] = 0;
			lp->rx.nic_queues_count[i] = 0;
#ifdef QUEUE_STATS
			/* Port-wide HW counters are reset once, by queue 0 only. */
			if (queue == 0)
#endif
				rte_eth_stats_reset (port);
#ifdef QUEUE_STATS
			if (queue == 0)
#endif
				lp->rx.start_ewr = end_ewr; /* new window starts where this one ended */
		}
#endif
		/* Measurement-only path: every received mbuf is freed immediately. */
		for (j = 0; j < n_mbufs; j++) {
			struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
			rte_pktmbuf_free (pkt);
		}
	}
}
/**
 * @brief Discover, configure, and start every Ethernet port used by pktgen.
 *
 * Walks the lcore-to-port map (l2p), then for each mapped port: allocates the
 * packet-sequence headers, configures the device and its RX/TX queues with
 * dedicated mbuf pools (default/range/sequence/special), optionally loads a
 * PCAP file, starts the device, reports link status, applies per-port packet
 * defaults, and snapshots the initial HW statistics. Any failure is fatal via
 * pktgen_log_panic(), so this function either fully succeeds or aborts.
 */
void pktgen_config_ports(void)
{
	uint32_t lid, pid, i, s, q, sid;
	rxtx_t rt;
	pkt_seq_t *pkt;
	port_info_t *info;
	char buff[RTE_MEMZONE_NAMESIZE];
	int32_t ret, cache_size;
	char output_buff[256] = { 0 };

	/* Find out the total number of ports in the system. */
	/* We have already blacklisted the ones we needed to in main routine. */
	pktgen.nb_ports = rte_eth_dev_count();
	if (pktgen.nb_ports > RTE_MAX_ETHPORTS)
		pktgen.nb_ports = RTE_MAX_ETHPORTS;

	if (pktgen.nb_ports == 0)
		pktgen_log_panic("*** Did not find any ports to use ***");

	pktgen.starting_port = 0;

	/* Setup the number of ports to display at a time */
	if (pktgen.nb_ports > pktgen.nb_ports_per_page)
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports_per_page;
	else
		pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports;

	wr_port_matrix_dump(pktgen.l2p);

	pktgen_log_info("Configuring %d ports, MBUF Size %d, MBUF Cache Size %d",
			pktgen.nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE);

	/* For each lcore setup each port that is handled by that lcore. */
	for (lid = 0; lid < RTE_MAX_LCORE; lid++) {
		if (wr_get_map(pktgen.l2p, RTE_MAX_ETHPORTS, lid) == 0)
			continue;

		/* For each port attached or handled by the lcore */
		for (pid = 0; pid < pktgen.nb_ports; pid++) {
			/* If non-zero then this port is handled by this lcore. */
			if (wr_get_map(pktgen.l2p, pid, lid) == 0)
				continue;
			/* Attach the per-port state struct to the l2p map entry. */
			wr_set_port_private(pktgen.l2p, pid, &pktgen.info[pid]);
			pktgen.info[pid].pid = pid;
		}
	}
	wr_dump_l2p(pktgen.l2p);

	pktgen.total_mem_used = 0;

	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		/* Skip if we do not have any lcores attached to a port.
		 * rt also receives the port's RX/TX queue counts here. */
		if ( (rt.rxtx = wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE)) == 0)
			continue;

		pktgen.port_cnt++;
		snprintf(output_buff, sizeof(output_buff),
			 "Initialize Port %d -- TxQ %d, RxQ %d", pid, rt.tx, rt.rx);

		info = wr_get_port_private(pktgen.l2p, pid);

		info->fill_pattern_type = ABC_FILL_PATTERN;
		/* NOTE(review): if USER_PATTERN_SIZE == 16 this strncpy leaves
		 * user_pattern without a NUL terminator — confirm downstream users
		 * treat it as a fixed-size pattern, not a C string. */
		strncpy(info->user_pattern, "0123456789abcdef", USER_PATTERN_SIZE);

		rte_spinlock_init(&info->port_lock);

		/* Create the pkt header structures for transmitting sequence of packets. */
		snprintf(buff, sizeof(buff), "seq_hdr_%d", pid);
		info->seq_pkt = (pkt_seq_t *)rte_zmalloc_socket(buff,
								(sizeof(pkt_seq_t) * NUM_TOTAL_PKTS),
								RTE_CACHE_LINE_SIZE,
								rte_socket_id());
		if (info->seq_pkt == NULL)
			pktgen_log_panic("Unable to allocate %d pkt_seq_t headers",
					 NUM_TOTAL_PKTS);

		info->seqIdx = 0;
		info->seqCnt = 0;

		info->nb_mbufs = MAX_MBUFS_PER_PORT;
		/* Mempool cache is capped at the DPDK per-lcore maximum. */
		cache_size = (info->nb_mbufs > RTE_MEMPOOL_CACHE_MAX_SIZE) ?
			RTE_MEMPOOL_CACHE_MAX_SIZE : info->nb_mbufs;

		pktgen_port_conf_setup(pid, &rt, &default_port_conf);

		if ( (ret = rte_eth_dev_configure(pid, rt.rx, rt.tx, &info->port_conf)) < 0)
			pktgen_log_panic("Cannot configure device: port=%d, Num queues %d,%d (%d)%s",
					 pid, rt.rx, rt.tx, errno, rte_strerror(-ret));

		pkt = &info->seq_pkt[SINGLE_PKT];

		/* Grab the source MAC address of the port. */
		rte_eth_macaddr_get(pid, &pkt->eth_src_addr);
		pktgen_log_info("%s, Src MAC %02x:%02x:%02x:%02x:%02x:%02x",
				output_buff,
				pkt->eth_src_addr.addr_bytes[0],
				pkt->eth_src_addr.addr_bytes[1],
				pkt->eth_src_addr.addr_bytes[2],
				pkt->eth_src_addr.addr_bytes[3],
				pkt->eth_src_addr.addr_bytes[4],
				pkt->eth_src_addr.addr_bytes[5]);

		/* Copy the first Src MAC address in SINGLE_PKT to the rest of the sequence packets. */
		for (i = 0; i < NUM_SEQ_PKTS; i++)
			ethAddrCopy(&info->seq_pkt[i].eth_src_addr, &pkt->eth_src_addr);

		pktgen.mem_used = 0;

		for (q = 0; q < rt.rx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Receive buffers. */
			info->q[q].rx_mp = pktgen_mbuf_pool_create("Default RX", pid, q,
								   info->nb_mbufs, sid, cache_size);
			if (info->q[q].rx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default RX mbufs", pid);

			ret = rte_eth_rx_queue_setup(pid, q, pktgen.nb_rxd, sid,
						     &info->rx_conf, pktgen.info[pid].q[q].rx_mp);
			if (ret < 0)
				pktgen_log_panic("rte_eth_rx_queue_setup: err=%d, port=%d, %s",
						 ret, pid, rte_strerror(-ret));
		}
		pktgen_log_info("");

		for (q = 0; q < rt.tx; q++) {
			/* grab the socket id value based on the lcore being used. */
			sid = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q));

			/* Create and initialize the default Transmit buffers. */
			info->q[q].tx_mp = pktgen_mbuf_pool_create("Default TX", pid, q,
								   MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].tx_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Default TX mbufs", pid);

			/* Create and initialize the range Transmit buffers (no cache). */
			info->q[q].range_mp = pktgen_mbuf_pool_create("Range TX", pid, q,
								      MAX_MBUFS_PER_PORT, sid, 0);
			if (info->q[q].range_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Range TX mbufs", pid);

			/* Create and initialize the sequence Transmit buffers. */
			info->q[q].seq_mp = pktgen_mbuf_pool_create("Sequence TX", pid, q,
								    MAX_MBUFS_PER_PORT, sid, cache_size);
			if (info->q[q].seq_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Sequence TX mbufs", pid);

			/* Used for sending special packets like ARP requests */
			info->q[q].special_mp = pktgen_mbuf_pool_create("Special TX", pid, q,
									MAX_SPECIAL_MBUFS, sid, 0);
			if (info->q[q].special_mp == NULL)
				pktgen_log_panic("Cannot init port %d for Special TX mbufs", pid);

			/* Setup the PCAP file for each port */
			if (pktgen.info[pid].pcap != NULL)
				if (pktgen_pcap_parse(pktgen.info[pid].pcap, info, q) == -1)
					pktgen_log_panic("Cannot load PCAP file for port %d", pid);

			/* Find out the link speed to program the WTHRESH value correctly. */
			pktgen_get_link_status(info, pid, 0);

			ret = rte_eth_tx_queue_setup(pid, q, pktgen.nb_txd, sid, &info->tx_conf);
			if (ret < 0)
				pktgen_log_panic("rte_eth_tx_queue_setup: err=%d, port=%d, %s",
						 ret, pid, rte_strerror(-ret));
			pktgen_log_info("");
		}
		pktgen_log_info("%*sPort memory used = %6lu KB", 71, " ",
				(pktgen.mem_used + 1023) / 1024);
	}
	pktgen_log_info("%*sTotal memory used = %6lu KB", 70, " ",
			(pktgen.total_mem_used + 1023) / 1024);

	/* Start up the ports and display the port Link status */
	for (pid = 0; pid < pktgen.nb_ports; pid++) {
		if (wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE) == 0)
			continue;

		info = wr_get_port_private(pktgen.l2p, pid);

		/* Start device */
		if ( (ret = rte_eth_dev_start(pid)) < 0)
			pktgen_log_panic("rte_eth_dev_start: port=%d, %s",
					 pid, rte_strerror(-ret));

		/* Blocking link-status query (wait flag set). */
		pktgen_get_link_status(info, pid, 1);

		if (info->link.link_status)
			snprintf(output_buff, sizeof(output_buff),
				 "Port %2d: Link Up - speed %u Mbps - %s", pid,
				 (uint32_t)info->link.link_speed,
				 (info->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				 ("full-duplex") : ("half-duplex"));
		else
			snprintf(output_buff, sizeof(output_buff),
				 "Port %2d: Link Down", pid);

		/* If enabled, put device in promiscuous mode. */
		if (pktgen.flags & PROMISCUOUS_ON_FLAG) {
			strncatf(output_buff, " <Enable promiscuous mode>");
			rte_eth_promiscuous_enable(pid);
		}
		pktgen_log_info("%s", output_buff);

		pktgen.info[pid].seq_pkt[SINGLE_PKT].pktSize = MIN_PKT_SIZE;

		/* Setup the port and packet defaults. (must be after link speed is found) */
		for (s = 0; s < NUM_TOTAL_PKTS; s++)
			pktgen_port_defaults(pid, s);

		pktgen_range_setup(info);

		/* Baseline HW counters, so later displays can show deltas. */
		rte_eth_stats_get(pid, &info->init_stats);

		pktgen_rnd_bits_init(&pktgen.info[pid].rnd_bitfields);
	}

	/* Clear the log information by putting a blank line */
	pktgen_log_info("");

	/* Setup the packet capture per port if needed. */
	for (sid = 0; sid < wr_coremap_cnt(pktgen.core_info, pktgen.core_cnt, 0); sid++)
		pktgen_packet_capture_init(&pktgen.capture[sid], sid);
}