/* * Flush packets scheduled for transmit on ports */ static void flush_pkts(unsigned action) { unsigned i = 0; uint16_t deq_count = PKT_BURST_SIZE; struct rte_mbuf *pkts[PKT_BURST_SIZE] = {0}; struct port_queue *pq = &port_queues[action & PORT_MASK]; struct statistics *s = &vport_stats[action]; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; uint64_t diff_tsc = 0; static uint64_t prev_tsc[MAX_PHYPORTS] = {0}; uint64_t cur_tsc = rte_rdtsc(); unsigned num_pkts; diff_tsc = cur_tsc - prev_tsc[action & PORT_MASK]; if (unlikely(rte_ring_count(pq->tx_q) >= PKT_BURST_SIZE)) { num_pkts = PKT_BURST_SIZE; } else { /* If queue idles with less than PKT_BURST packets, drain it*/ if(unlikely(diff_tsc > drain_tsc)) { num_pkts = rte_ring_count(pq->tx_q); } else { return; } } if (unlikely(rte_ring_dequeue_bulk( pq->tx_q, (void **)pkts, num_pkts) != 0)) return; const uint16_t sent = rte_eth_tx_burst( ports->id[action & PORT_MASK], 0, pkts, num_pkts); prev_tsc[action & PORT_MASK] = cur_tsc; if (unlikely(sent < num_pkts)) { for (i = sent; i < num_pkts; i++) rte_pktmbuf_free(pkts[i]); s->tx_drop += (num_pkts - sent); } else { s->tx += sent; } }
void onvm_nf_check_status(void) { int i; void *msgs[MAX_NFS]; struct onvm_nf_msg *msg; struct onvm_nf_info *nf; int num_msgs = rte_ring_count(incoming_msg_queue); if (num_msgs == 0) return; if (rte_ring_dequeue_bulk(incoming_msg_queue, msgs, num_msgs, NULL) == 0) return; for (i = 0; i < num_msgs; i++) { msg = (struct onvm_nf_msg*) msgs[i]; switch (msg->msg_type) { case MSG_NF_STARTING: nf = (struct onvm_nf_info*)msg->msg_data; if (onvm_nf_start(nf) == 0) { onvm_stats_add_event("NF Starting", nf); } break; case MSG_NF_READY: nf = (struct onvm_nf_info*)msg->msg_data; if (onvm_nf_ready(nf) == 0) { onvm_stats_add_event("NF Ready", nf); } break; case MSG_NF_STOPPING: nf = (struct onvm_nf_info*)msg->msg_data; if (onvm_nf_stop(nf) == 0) { onvm_stats_add_event("NF Stopping", nf); num_nfs--; } break; } rte_mempool_put(nf_msg_pool, (void*)msg); } }
/**
 * CALLED BY NF:
 * Application main function - loops through
 * receiving and processing packets. Never returns
 *
 * Dequeues bursts from this NF's rx_ring, hands each packet to the
 * caller-supplied handler, and forwards packets the handler returns (0)
 * onto tx_ring; a handler return of 1 means "buffered by the NF" and is
 * only counted. On loop exit (keep_running cleared by SIGINT) the NF's
 * info struct is enqueued back to the manager to acknowledge shutdown.
 *
 * @param info    this NF's info struct (instance id used for stats indexing)
 * @param handler per-packet callback; returns 0 to forward, 1 to buffer
 * @return 0 on clean shutdown; exits the process on ring/enqueue failure
 */
int onvm_nf_run(struct onvm_nf_info* info, int(*handler)(struct rte_mbuf* pkt, struct onvm_pkt_meta* meta)) {
	void *pkts[PKT_READ_SIZE];
	struct onvm_pkt_meta* meta;

	printf("\nClient process %d handling packets\n", info->instance_id);
	printf("[Press Ctrl-C to quit ...]\n");

	/* Listen for ^C so we can exit gracefully */
	signal(SIGINT, handle_signal);

	for (; keep_running;) {
		uint16_t i, j, nb_pkts = PKT_READ_SIZE;
		void *pktsTX[PKT_READ_SIZE];
		int tx_batch_size = 0;
		int ret_act;

		/* try dequeuing max possible packets first, if that fails, get the
		 * most we can. Loop body should only execute once, maximum
		 * (bulk dequeue is all-or-nothing: non-zero return means failure) */
		while (nb_pkts > 0 && unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, nb_pkts) != 0))
			nb_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

		if(nb_pkts == 0) {
			continue;	/* ring empty: busy-poll again */
		}

		/* Give each packet to the user processing function */
		for (i = 0; i < nb_pkts; i++) {
			meta = onvm_get_pkt_meta((struct rte_mbuf*)pkts[i]);
			ret_act = (*handler)((struct rte_mbuf*)pkts[i], meta);
			/* NF returns 0 to return packets or 1 to buffer */
			if(likely(ret_act == 0)) {
				pktsTX[tx_batch_size++] = pkts[i];
			}
			else {
				tx_stats->tx_buffer[info->instance_id]++;
			}
		}

		/* Forward the batch; if the TX ring is full, drop and free it all. */
		if (unlikely(tx_batch_size > 0 && rte_ring_enqueue_bulk(tx_ring, pktsTX, tx_batch_size) == -ENOBUFS)) {
			tx_stats->tx_drop[info->instance_id] += tx_batch_size;
			for (j = 0; j < tx_batch_size; j++) {
				rte_pktmbuf_free(pktsTX[j]);
			}
		} else {
			tx_stats->tx[info->instance_id] += tx_batch_size;
		}
	}

	nf_info->status = NF_STOPPED;

	/* Put this NF's info struct back into queue for manager to ack shutdown */
	nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
	if (nf_info_ring == NULL) {
		rte_mempool_put(nf_info_mp, nf_info); // give back memory
		rte_exit(EXIT_FAILURE, "Cannot get nf_info ring for shutdown");
	}

	if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
		rte_mempool_put(nf_info_mp, nf_info); // give back memory
		rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager for shutdown");
	}
	return 0;
}
/* * Application main function - loops through * receiving and processing packets. Never returns */ int main(int argc, char *argv[]) { struct rte_ring *rx_ring = NULL; struct rte_ring *tx_ring = NULL; int retval = 0; void *pkts[PKT_READ_SIZE]; int rslt = 0; if ((retval = rte_eal_init(argc, argv)) < 0) { return -1; } argc -= retval; argv += retval; if (parse_app_args(argc, argv) < 0) { rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n"); } rx_ring = rte_ring_lookup(get_rx_queue_name(client_id)); if (rx_ring == NULL) { rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n"); } tx_ring = rte_ring_lookup(get_tx_queue_name(client_id)); if (tx_ring == NULL) { rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n"); } RTE_LOG(INFO, APP, "Finished Process Init.\n"); printf("\nClient process %d handling packets\n", client_id); printf("[Press Ctrl-C to quit ...]\n"); for (;;) { unsigned rx_pkts = PKT_READ_SIZE; /* Try dequeuing max possible packets first, if that fails, get the * most we can. Loop body should only execute once, maximum. */ while (unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0) && rx_pkts > 0) { rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE); } if (rx_pkts > 0) { pkt++; /* blocking enqueue */ do { rslt = rte_ring_enqueue_bulk(tx_ring, pkts, rx_pkts); } while (rslt == -ENOBUFS); } else { no_pkt++; } if (!(pkt % 100000)) { printf("pkt %d %d\n", pkt, no_pkt); pkt = no_pkt = 0; } } }
/*
 * Application main function - loops through
 * receiving and processing packets. Never returns
 *
 * Client that looks up the shared RX ring, mbuf pool, and port-info
 * memzone published by the server, then busy-polls its RX ring: each
 * dequeued packet goes through handle_packet() (which is expected to
 * stage packets into per-port tx_buffer[] — NOTE(review): defined
 * elsewhere; confirm), and whenever the ring goes empty any staged
 * packets are flushed to the NICs.
 */
int main(int argc, char *argv[]) {
	const struct rte_memzone *mz;
	struct rte_ring *rx_ring;
	struct rte_mempool *mp;
	struct port_info *ports;
	int need_flush = 0; /* indicates whether we have unsent packets */
	int retval;
	void *pkts[PKT_READ_SIZE];
	uint16_t sent;

	/* EAL consumes its own args; the remainder belongs to the app. */
	if ((retval = rte_eal_init(argc, argv)) < 0)
		return -1;
	argc -= retval;
	argv += retval;

	if (parse_app_args(argc, argv) < 0)
		rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

	/* Look up the shared objects created by the server process. */
	rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
	if (rx_ring == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");

	mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
	if (mp == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");

	mz = rte_memzone_lookup(MZ_PORT_INFO);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
	ports = mz->addr;
	tx_stats = &(ports->tx_stats[client_id]);

	configure_output_ports(ports);

	RTE_LOG(INFO, APP, "Finished Process Init.\n");

	printf("\nClient process %d handling packets\n", client_id);
	printf("[Press Ctrl-C to quit ...]\n");

	for (;;) {
		uint16_t i, rx_pkts = PKT_READ_SIZE;
		uint8_t port;

		/* try dequeuing max possible packets first, if that fails, get the
		 * most we can. Loop body should only execute once, maximum
		 * (bulk dequeue is all-or-nothing: non-zero return means failure) */
		while (rx_pkts > 0 &&
				unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
			rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

		if (unlikely(rx_pkts == 0)) {
			/* Ring empty: push any staged packets out to the NICs. */
			if (need_flush)
				for (port = 0; port < ports->num_ports; port++) {
					sent = rte_eth_tx_buffer_flush(ports->id[port], client_id,
							tx_buffer[port]);
					if (unlikely(sent))
						tx_stats->tx[port] += sent;
				}
			need_flush = 0;
			continue;
		}

		for (i = 0; i < rx_pkts; i++)
			handle_packet(pkts[i]);

		/* Packets were staged this pass; flush on the next idle cycle. */
		need_flush = 1;
	}
}