/* KNI Module Interface */ int odp_kni_sendpkt_burst(struct rte_mbuf ** mbufs, unsigned nb_mbufs, unsigned port_id) { if(unlikely(kni_port_params_array[port_id] == NULL)) return -ENOENT; struct rte_ring * ring = kni_port_params_array[port_id]->ring; if(unlikely(ring == NULL)) return -ENOENT; return rte_ring_enqueue_bulk(ring,(void **)mbufs,nb_mbufs); }
/* * Application main function - loops through * receiving and processing packets. Never returns */ int main(int argc, char *argv[]) { struct rte_ring *rx_ring = NULL; struct rte_ring *tx_ring = NULL; int retval = 0; void *pkts[PKT_READ_SIZE]; int rslt = 0; if ((retval = rte_eal_init(argc, argv)) < 0) { return -1; } argc -= retval; argv += retval; if (parse_app_args(argc, argv) < 0) { rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n"); } rx_ring = rte_ring_lookup(get_rx_queue_name(client_id)); if (rx_ring == NULL) { rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n"); } tx_ring = rte_ring_lookup(get_tx_queue_name(client_id)); if (tx_ring == NULL) { rte_exit(EXIT_FAILURE, "Cannot get TX ring - is server process running?\n"); } RTE_LOG(INFO, APP, "Finished Process Init.\n"); printf("\nClient process %d handling packets\n", client_id); printf("[Press Ctrl-C to quit ...]\n"); for (;;) { unsigned rx_pkts = PKT_READ_SIZE; /* Try dequeuing max possible packets first, if that fails, get the * most we can. Loop body should only execute once, maximum. */ while (unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0) && rx_pkts > 0) { rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE); } if (rx_pkts > 0) { pkt++; /* blocking enqueue */ do { rslt = rte_ring_enqueue_bulk(tx_ring, pkts, rx_pkts); } while (rslt == -ENOBUFS); } else { no_pkt++; } if (!(pkt % 100000)) { printf("pkt %d %d\n", pkt, no_pkt); pkt = no_pkt = 0; } } }
/**
 * CALLED BY NF:
 * Application main function - loops through
 * receiving and processing packets. Never returns
 * (until keep_running is cleared by the SIGINT handler).
 *
 * Repeatedly dequeues batches from the shared rx_ring, hands each packet to
 * the NF-supplied handler, and forwards the accepted packets on tx_ring.
 * If the TX ring is full the whole batch is dropped and freed (non-blocking
 * policy). On shutdown, this NF's info struct is enqueued on the
 * _NF_QUEUE_NAME ring so the manager can acknowledge the exit.
 *
 * @param info     Descriptor for this NF instance; instance_id indexes the
 *                 shared tx_stats arrays.
 * @param handler  Per-packet callback. Returns 0 to forward the packet on
 *                 tx_ring, nonzero to buffer it — in which case this loop
 *                 neither enqueues nor frees the mbuf (the NF keeps it).
 *
 * @return 0 after a clean shutdown handshake; calls rte_exit() on failure.
 */
int onvm_nf_run(struct onvm_nf_info* info, int(*handler)(struct rte_mbuf* pkt, struct onvm_pkt_meta* meta)) {
        void *pkts[PKT_READ_SIZE];
        struct onvm_pkt_meta* meta;

        printf("\nClient process %d handling packets\n", info->instance_id);
        printf("[Press Ctrl-C to quit ...]\n");

        /* Listen for ^C so we can exit gracefully */
        signal(SIGINT, handle_signal);

        for (; keep_running;) {
                uint16_t i, j, nb_pkts = PKT_READ_SIZE;
                void *pktsTX[PKT_READ_SIZE];
                int tx_batch_size = 0;
                int ret_act;

                /* try dequeuing max possible packets first, if that fails, get the
                 * most we can. Loop body should only execute once, maximum */
                while (nb_pkts > 0 && unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, nb_pkts) != 0))
                        nb_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);

                if(nb_pkts == 0) {
                        /* Ring empty: busy-poll until packets arrive or shutdown. */
                        continue;
                }

                /* Give each packet to the user processing function */
                for (i = 0; i < nb_pkts; i++) {
                        meta = onvm_get_pkt_meta((struct rte_mbuf*)pkts[i]);
                        ret_act = (*handler)((struct rte_mbuf*)pkts[i], meta);
                        /* NF returns 0 to return packets or 1 to buffer */
                        if(likely(ret_act == 0)) {
                                pktsTX[tx_batch_size++] = pkts[i];
                        }
                        else {
                                /* Packet retained by the NF; just count it. */
                                tx_stats->tx_buffer[info->instance_id]++;
                        }
                }

                /* All-or-nothing bulk enqueue: if the TX ring cannot take the
                 * whole batch, drop and free every packet rather than block. */
                if (unlikely(tx_batch_size > 0 && rte_ring_enqueue_bulk(tx_ring, pktsTX, tx_batch_size) == -ENOBUFS)) {
                        tx_stats->tx_drop[info->instance_id] += tx_batch_size;
                        for (j = 0; j < tx_batch_size; j++) {
                                rte_pktmbuf_free(pktsTX[j]);
                        }
                } else {
                        tx_stats->tx[info->instance_id] += tx_batch_size;
                }
        }

        nf_info->status = NF_STOPPED;

        /* Put this NF's info struct back into queue for manager to ack shutdown */
        nf_info_ring = rte_ring_lookup(_NF_QUEUE_NAME);
        if (nf_info_ring == NULL) {
                rte_mempool_put(nf_info_mp, nf_info); /* give back memory */
                rte_exit(EXIT_FAILURE, "Cannot get nf_info ring for shutdown");
        }

        if (rte_ring_enqueue(nf_info_ring, nf_info) < 0) {
                rte_mempool_put(nf_info_mp, nf_info); /* give back memory */
                rte_exit(EXIT_FAILURE, "Cannot send nf_info to manager for shutdown");
        }
        return 0;
}