/*
 * Parse the command line arguments passed to the application.
 */
int parse_args(int argc, char **argv, app_params *p)
{
    // initialize the environment
    int ret = rte_eal_init(argc, argv);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to initialize EAL: %i\n", ret);
    }

    // advance past the environmental settings
    argc -= ret;
    argv += ret;

    // parse arguments to the application
    ret = parse_app_args(argc, argv, p);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "\n");
    }

    p->nb_ports = rte_eth_dev_count();
    p->nb_rx_workers = p->nb_rx_queue;
    p->nb_tx_workers = (rte_lcore_count() - 1) - p->nb_rx_workers;

    // validate the number of workers: we need one lcore for the master,
    // nb_rx_queue RX workers, and at least as many TX workers, i.e.
    // (2 * nb_rx_queue) + 1 lcores in total
    if (p->nb_tx_workers < p->nb_rx_workers) {
        rte_exit(EXIT_FAILURE,
                 "Additional lcore(s) required; found=%u, required=%u\n",
                 rte_lcore_count(), (p->nb_rx_queue * 2) + 1);
    }

    return 0;
}
static int test_error_distributor_create_name(void)
{
    struct rte_distributor *d = NULL;
    struct rte_distributor *db = NULL;
    char *name = NULL;

    d = rte_distributor_create(name, rte_socket_id(),
                               rte_lcore_count() - 1, RTE_DIST_ALG_SINGLE);
    if (d != NULL || rte_errno != EINVAL) {
        printf("ERROR: No error on create() with NULL name param\n");
        return -1;
    }

    db = rte_distributor_create(name, rte_socket_id(),
                                rte_lcore_count() - 1, RTE_DIST_ALG_BURST);
    if (db != NULL || rte_errno != EINVAL) {
        printf("ERROR: No error on create() with NULL param\n");
        return -1;
    }

    return 0;
}
/*****************************************************************************
 * trace_init_component()
 ****************************************************************************/
static int trace_init_component(uint32_t trace_id)
{
    trace_comp_t *tc;
    uint32_t i;

    if (trace_id >= TRACE_MAX)
        return -EINVAL;

    tc = &trace_components[trace_id];
    tc->tc_comp_id = trace_id;

    /* To be set later if needed (through an API). */
    tc->tc_fmt = NULL;

    tc->tc_buffers = rte_zmalloc("trace_buffer",
                                 rte_lcore_count() * sizeof(*tc->tc_buffers),
                                 0);
    if (!tc->tc_buffers)
        return -ENOMEM;

    for (i = 0; i < rte_lcore_count(); i++)
        TRACE_BUF_SET_LEVEL(&tc->tc_buffers[i], TRACE_LVL_LOG);

    return 0;
}
static int test_distributor_perf(void)
{
    static struct rte_distributor *d;
    static struct rte_mempool *p;

    if (rte_lcore_count() < 2) {
        printf("ERROR: not enough cores to test distributor\n");
        return -1;
    }

    /* first, time how long it takes to round-trip a cache line */
    time_cache_line_switch();

    if (d == NULL) {
        d = rte_distributor_create("Test_perf", rte_socket_id(),
                                   rte_lcore_count() - 1);
        if (d == NULL) {
            printf("Error creating distributor\n");
            return -1;
        }
    } else {
        rte_distributor_flush(d);
        rte_distributor_clear_returns(d);
    }

    const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
            (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());

    if (p == NULL) {
        p = rte_mempool_create("DPT_MBUF_POOL", nb_bufs, MBUF_SIZE, BURST,
                               sizeof(struct rte_pktmbuf_pool_private),
                               rte_pktmbuf_pool_init, NULL,
                               rte_pktmbuf_init, NULL,
                               rte_socket_id(), 0);
        if (p == NULL) {
            printf("Error creating mempool\n");
            return -1;
        }
    }

    rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
    if (perf_test(d, p) < 0)
        return -1;
    quit_workers(d, p);

    return 0;
}
/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> ram
 */
static int
calc_num_pages_per_socket(uint64_t *memory,
                          struct hugepage_info *hp_info,
                          struct hugepage_info *hp_used,
                          unsigned num_hp_info)
{
    unsigned socket, j, i = 0;
    unsigned requested, available;
    int total_num_pages = 0;
    uint64_t remaining_mem, cur_mem;
    uint64_t total_mem = internal_config.memory;

    if (num_hp_info == 0)
        return -1;

    /* if specific memory amounts per socket weren't requested */
    if (internal_config.force_sockets == 0) {
        int cpu_per_socket[RTE_MAX_NUMA_NODES];
        size_t default_size, total_size;
        unsigned lcore_id;

        /* Compute number of cores per socket */
        memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
        RTE_LCORE_FOREACH(lcore_id) {
            cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
        }

        /*
         * Automatically spread requested memory amongst detected sockets
         * according to number of cores from cpu mask present on each socket
         */
        total_size = internal_config.memory;
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
                socket++) {
            /* Set memory amount per socket */
            default_size = (internal_config.memory *
                            cpu_per_socket[socket]) / rte_lcore_count();

            /* Limit to maximum available memory on socket */
            default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

            /* Update sizes */
            memory[socket] = default_size;
            total_size -= default_size;
        }

        /*
         * If some memory is remaining, try to allocate it by getting all
         * available memory from sockets, one after the other
         */
        for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
                socket++) {
            /* take whatever is available */
            default_size = RTE_MIN(get_socket_mem_size(socket) -
                                   memory[socket],
                                   total_size);

            /* Update sizes */
            memory[socket] += default_size;
            total_size -= default_size;
        }
    }
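The first pass above is plain integer arithmetic: each socket gets a share of the requested memory proportional to how many enabled lcores it hosts, capped by what the socket can actually provide, and the second pass hands any remainder to whichever socket still has room. A minimal standalone sketch of that arithmetic follows; the socket counts, request, and per-socket caps are illustrative assumptions, not values taken from the EAL.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SOCKETS 2

int main(void)
{
    /* assumed example inputs: 6 lcores on socket 0, 2 on socket 1 */
    unsigned cpu_per_socket[NUM_SOCKETS] = { 6, 2 };
    unsigned lcore_count = 8;
    uint64_t requested = 1024;                 /* MB requested in total */
    uint64_t cap[NUM_SOCKETS] = { 512, 512 };  /* assumed per-socket maximum */
    uint64_t memory[NUM_SOCKETS] = { 0 };
    uint64_t remaining = requested;
    int s;

    /* first pass: proportional to core count, capped per socket */
    for (s = 0; s < NUM_SOCKETS && remaining != 0; s++) {
        uint64_t share = requested * cpu_per_socket[s] / lcore_count;
        if (share > cap[s])
            share = cap[s];
        memory[s] = share;
        remaining -= share;
    }

    /* second pass: hand the leftover to sockets that still have room */
    for (s = 0; s < NUM_SOCKETS && remaining != 0; s++) {
        uint64_t room = cap[s] - memory[s];
        uint64_t take = room < remaining ? room : remaining;
        memory[s] += take;
        remaining -= take;
    }

    for (s = 0; s < NUM_SOCKETS; s++)
        printf("socket %d: %" PRIu64 " MB\n", s, memory[s]);  /* 512, 512 */
    return 0;
}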
/* Main function */
int main(int argc, char **argv)
{
    int ret;
    int i;

    /* Install handlers: SIGINT for CTRL+C closing, SIGALRM to print stats */
    signal(SIGINT, sig_handler);
    signal(SIGALRM, alarm_routine);

    /* Initialize the DPDK environment with args, then shift argc and argv
     * to get the application parameters */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        FATAL_ERROR("Cannot init EAL\n");
    argc -= ret;
    argv += ret;

    /* Check that this application has exactly 2 cores available */
    ret = rte_lcore_count();
    if (ret != 2)
        FATAL_ERROR("This application needs exactly 2 cores.");

    /* Parse arguments */
    ret = parse_args(argc, argv);
    if (ret < 0)
        FATAL_ERROR("Wrong arguments\n");

    /* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */
#if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8
    ret = rte_eal_pci_probe();
    if (ret < 0)
        FATAL_ERROR("Cannot probe PCI\n");
#endif

    /* Get the number of ethernet devices */
    nb_sys_ports = rte_eth_dev_count();
    if (nb_sys_ports <= 0)
        FATAL_ERROR("Cannot find ETH devices\n");

    /* Create a mempool with a per-core cache, initializing every element
     * to be used as an mbuf, allocated on the current NUMA node */
    pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size - 1,
                                      MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ,
                                      sizeof(struct rte_pktmbuf_pool_private),
                                      rte_pktmbuf_pool_init, NULL,
                                      rte_pktmbuf_init, NULL,
                                      rte_socket_id(), 0);
    if (pktmbuf_pool == NULL)
        FATAL_ERROR("Cannot create cluster_mem_pool. Errno: %d "
                    "[ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, "
                    "E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, "
                    "EINVAL: %d, EEXIST: %d]\n",
                    rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ,
                    E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST);

    /* Create a single-producer/single-consumer ring for exchanging packets
     * between the two cores, allocated on the current NUMA node */
    intermediate_ring = rte_ring_create(RING_NAME, buffer_size,
                                        rte_socket_id(),
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (intermediate_ring == NULL)
        FATAL_ERROR("Cannot create ring");

    /* Operations needed for each ethernet device */
    for (i = 0; i < nb_sys_ports; i++)
        init_port(i);

    /* Start the producer routine on the other core... */
    ret = rte_eal_mp_remote_launch(main_loop_producer, NULL, SKIP_MASTER);
    if (ret != 0)
        FATAL_ERROR("Cannot start producer thread\n");

    /* ... and then loop in the consumer on this core */
    main_loop_consumer(NULL);

    return 0;
}
/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
static int
sanity_test_with_worker_shutdown(struct rte_distributor *d,
                                 struct rte_mempool *p)
{
    struct rte_mbuf *bufs[BURST];
    unsigned i;

    printf("=== Sanity test of worker shutdown ===\n");

    clear_packet_count();

    if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
        printf("line %d: Error getting mbufs from pool\n", __LINE__);
        return -1;
    }

    /* now set all hash values in all buffers to zero, so all pkts go to the
     * one worker thread */
    for (i = 0; i < BURST; i++)
        bufs[i]->pkt.hash.rss = 0;

    rte_distributor_process(d, bufs, BURST);
    /* at this point, we will have processed some packets and have a full
     * backlog for the other ones at worker 0. */

    /* get more buffers to queue up, again setting them to the same flow */
    if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
        printf("line %d: Error getting mbufs from pool\n", __LINE__);
        return -1;
    }
    for (i = 0; i < BURST; i++)
        bufs[i]->pkt.hash.rss = 0;

    /* get worker zero to quit */
    zero_quit = 1;
    rte_distributor_process(d, bufs, BURST);

    /* flush the distributor */
    rte_distributor_flush(d);
    if (total_packet_count() != BURST * 2) {
        printf("Line %d: Error, not all packets flushed. "
               "Expected %u, got %u\n",
               __LINE__, BURST * 2, total_packet_count());
        return -1;
    }

    for (i = 0; i < rte_lcore_count() - 1; i++)
        printf("Worker %u handled %u packets\n", i,
               worker_stats[i].handled_packets);

    printf("Sanity test with worker shutdown passed\n\n");
    return 0;
}
int initDpdk(char *progname)
{
    int ret;
    static char *eal_args[] = {progname, "-c0xf", "-n1", "-m128",
                               "--file-prefix=drone"};
    // TODO: read env var DRONE_RTE_EAL_ARGS to override defaults

    ret = rte_eal_init(sizeof(eal_args) / sizeof(char *), eal_args);
    if (ret < 0)
        rte_panic("Cannot init EAL\n");

    mbufPool_ = rte_mempool_create("DpktPktMbuf",
            16 * 1024,              // # of mbufs
            2048,                   // sz of mbuf
            32,                     // per-lcore cache sz
            sizeof(struct rte_pktmbuf_pool_private),
            rte_pktmbuf_pool_init,  // pool ctor
            NULL,                   // pool ctor arg
            rte_pktmbuf_init,       // mbuf ctor
            NULL,                   // mbuf ctor arg
            SOCKET_ID_ANY,
            0);                     // flags
    if (!mbufPool_)
        rte_exit(EXIT_FAILURE, "cannot init mbuf pool\n");

    if (rte_pmd_init_all() < 0)
        rte_exit(EXIT_FAILURE, "cannot init pmd\n");

    if (rte_eal_pci_probe() < 0)
        rte_exit(EXIT_FAILURE, "cannot probe PCI\n");

    // init lcore information
    lcoreCount_ = rte_lcore_count();
    lcoreFreeMask_ = 0;
    for (int i = 0; i < lcoreCount_; i++) {
        if (rte_lcore_is_enabled(i) &&
            (unsigned(i) != rte_get_master_lcore()))
            lcoreFreeMask_ |= (1 << i);
    }
    qDebug("lcore_count = %d, lcore_free_mask = 0x%llx",
           lcoreCount_, lcoreFreeMask_);

    // assign a lcore for Rx polling
    rxLcoreId_ = getFreeLcore();
    if (rxLcoreId_ < 0)
        rte_exit(EXIT_FAILURE, "not enough cores for Rx polling");

    stopRxPoll_ = false;

    return 0;
}
static inline uint8_t
efd_get_all_sockets_bitmask(void)
{
    uint8_t all_cpu_sockets_bitmask = 0;
    unsigned int i;
    unsigned int next_lcore = rte_get_master_lcore();
    const int val_true = 1, val_false = 0;

    for (i = 0; i < rte_lcore_count(); i++) {
        all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
        next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true);
    }

    return all_cpu_sockets_bitmask;
}
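Each bit of the returned mask stands for one CPU socket, so a caller can test for a socket's presence with a plain bit test. A hypothetical usage sketch (the print_active_sockets name is an illustration, and the loop bound of 8 simply matches the width of the uint8_t mask):

#include <stdint.h>
#include <stdio.h>

/* hypothetical caller: report which sockets host enabled lcores */
static void print_active_sockets(void)
{
    uint8_t mask = efd_get_all_sockets_bitmask();
    unsigned int s;

    for (s = 0; s < 8; s++) {  /* 8 bits in the uint8_t mask */
        if (mask & (1 << s))
            printf("socket %u hosts at least one enabled lcore\n", s);
    }
}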
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
    struct rte_eth_conf port_conf;
    const uint16_t rxRings = ETH_VMDQ_DCB_NUM_QUEUES,
                   txRings = (uint16_t)rte_lcore_count();
    const uint16_t rxRingSize = 128, txRingSize = 512;
    int retval;
    uint16_t q;

    retval = get_eth_conf(&port_conf, num_pools);
    if (retval < 0)
        return retval;

    if (port >= rte_eth_dev_count())
        return -1;

    retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
    if (retval != 0)
        return retval;

    for (q = 0; q < rxRings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
                                        rte_eth_dev_socket_id(port),
                                        NULL, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    for (q = 0; q < txRings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, txRingSize,
                                        rte_eth_dev_socket_id(port), NULL);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
           " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
           (unsigned)port,
           addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2],
           addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]);

    return 0;
}
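Sizing txRings to rte_lcore_count() follows the common DPDK pattern of one TX queue per lcore, which keeps transmission lock-free because no two cores ever touch the same queue. A hypothetical helper illustrating that convention; it assumes lcore ids are contiguous from zero (which rte_lcore_id() does not guarantee in general), and the function name is an illustration, not part of the example above:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* hypothetical helper: each lcore transmits only on the queue matching
 * its own lcore id, so no locking is required between cores */
static inline uint16_t
send_burst_on_own_queue(uint8_t port, struct rte_mbuf **bufs, uint16_t n)
{
    return rte_eth_tx_burst(port, (uint16_t)rte_lcore_id(), bufs, n);
}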
/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
                                struct rte_mempool *p)
{
    struct rte_distributor *d = wp->dist;
    struct rte_mbuf *bufs[BURST];
    unsigned i;

    printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);

    clear_packet_count();
    if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
        printf("line %d: Error getting mbufs from pool\n", __LINE__);
        return -1;
    }

    /* now set all hash values in all buffers to zero, so all pkts go to the
     * one worker thread */
    for (i = 0; i < BURST; i++)
        bufs[i]->hash.usr = 0;

    rte_distributor_process(d, bufs, BURST);
    /* at this point, we will have processed some packets and have a full
     * backlog for the other ones at worker 0. */

    /* get worker zero to quit */
    zero_quit = 1;

    /* flush the distributor */
    rte_distributor_flush(d);

    rte_delay_us(10000);

    zero_quit = 0;
    for (i = 0; i < rte_lcore_count() - 1; i++)
        printf("Worker %u handled %u packets\n", i,
               worker_stats[i].handled_packets);

    if (total_packet_count() != BURST) {
        printf("Line %d: Error, not all packets flushed. "
               "Expected %u, got %u\n",
               __LINE__, BURST, total_packet_count());
        return -1;
    }

    printf("Flush test with worker shutdown passed\n\n");
    return 0;
}
/*
 * The main function, which does initialization and calls the per-lcore
 * functions.
 */
int main(int argc, char *argv[])
{
    struct rte_mempool *mbuf_pool;
    unsigned nb_ports;
    uint8_t portid;

    /* Initialize the Environment Abstraction Layer (EAL). */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    /* Check that there is an even number of ports to send/receive on. */
    nb_ports = rte_eth_dev_count();
    if (nb_ports < 2 || (nb_ports & 1))
        rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

    /* Creates a new mempool in memory to hold the mbufs. */
    mbuf_pool = rte_mempool_create("MBUF_POOL",
                                   NUM_MBUFS * nb_ports,
                                   MBUF_SIZE, MBUF_CACHE_SIZE,
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, NULL,
                                   rte_pktmbuf_init, NULL,
                                   rte_socket_id(), 0);
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* Initialize all ports. */
    for (portid = 0; portid < nb_ports; portid++)
        if (port_init(portid, mbuf_pool) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);

    if (rte_lcore_count() > 1)
        printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

    /* Call lcore_main on the master core only. */
    lcore_main();

    return 0;
}
static int setup_and_bond_ports(struct rte_mempool *mp)
{
    int portid, queueid;
    int ret;
    int pl_idx;
    int nb_queue;

    nb_queue = rte_lcore_count();
    nb_port = rte_eth_dev_count();
    memset(lcore_args, 0, sizeof(struct lcore_arg_t) * RTE_MAX_LCORE);

    for (portid = 0; portid < nb_port; portid++) {
        ret = rte_eth_dev_configure(portid, nb_queue, nb_queue, &port_conf);
        if (unlikely(ret < 0)) {
            rte_exit(EINVAL, "port %d configure failed!\n", portid);
        }

        for (queueid = 0; queueid < nb_queue; queueid++) {
            ret = rte_eth_rx_queue_setup(portid, queueid, NB_RXD,
                                         rte_socket_id(), NULL, mp);
            if (unlikely(ret < 0)) {
                rte_exit(EINVAL, "port %d rx queue %d setup failed!\n",
                         portid, queueid);
            }

            ret = rte_eth_tx_queue_setup(portid, queueid, NB_TXD,
                                         rte_socket_id(), NULL);
            if (unlikely(ret < 0)) {
                rte_exit(EINVAL, "port %d tx queue %d setup failed!\n",
                         portid, queueid);
            }

            pl_idx = lcore_args[queueid].pl_len;
            lcore_args[queueid].pl[pl_idx].portid = portid;
            lcore_args[queueid].pl[pl_idx].queueid = queueid;
            lcore_args[queueid].mp = mp;
            lcore_args[queueid].pl_len = pl_idx + 1;
        }

        ret = rte_eth_dev_start(portid);
        if (unlikely(ret < 0)) {
            rte_exit(EINVAL, "port %d start failed!\n", portid);
        }
        rte_eth_promiscuous_enable(portid);
    }

    return 0;
}
/* Main function, does initialisation and calls the per-lcore functions */
int main(int argc, char *argv[])
{
    struct rte_mempool *mbuf_pool;
    uint8_t portid = 0;

    /* init EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS,
                                        MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE,
                                        rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* initialize the port */
    if (port_init(portid, mbuf_pool) != 0)
        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);

    stats_mapping_setup(portid);

    fdir_filter_add(portid, FDIR_DROP_ADDR, RTE_ETH_FDIR_REJECT, 0);
    fdir_filter_add(portid, FDIR_ACCEPT_ADDR, RTE_ETH_FDIR_ACCEPT, 1);
    ntuple_filter_add(portid, NTUPLE_DROP_ADDR, PKT_DROP_QUEUE);
    ntuple_filter_add(portid, NTUPLE_ACCEPT_ADDR, PKT_ACCEPT_QUEUE);
    fdir_get_infos(portid);

    if (rte_lcore_count() > 1)
        printf("\nWARNING: Too many lcores enabled - "
               "app uses only 1 lcore\n");

    lcore_stats();

    /* call lcore_main on master core only */
    //lcore_main();

    return 0;
}
/*
 * This basic performance test repeatedly sends in 32 packets at a time to
 * the distributor, verifies at the end that the workers handled them all,
 * and reports how long the processing took per packet.
 */
static inline int
perf_test(struct rte_distributor *d, struct rte_mempool *p)
{
    unsigned int i;
    uint64_t start, end;
    struct rte_mbuf *bufs[BURST];

    clear_packet_count();
    if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
        printf("Error getting mbufs from pool\n");
        return -1;
    }

    /* ensure we have different hash value for each pkt */
    for (i = 0; i < BURST; i++)
        bufs[i]->hash.usr = i;

    start = rte_rdtsc();
    for (i = 0; i < (1 << ITER_POWER); i++)
        rte_distributor_process(d, bufs, BURST);
    end = rte_rdtsc();

    do {
        usleep(100);
        rte_distributor_process(d, NULL, 0);
    } while (total_packet_count() < (BURST << ITER_POWER));

    rte_distributor_clear_returns(d);

    printf("Time per burst: %"PRIu64"\n", (end - start) >> ITER_POWER);
    printf("Time per packet: %"PRIu64"\n\n",
           ((end - start) >> ITER_POWER) / BURST);

    rte_mempool_put_bulk(p, (void *)bufs, BURST);

    for (i = 0; i < rte_lcore_count() - 1; i++)
        printf("Worker %u handled %u packets\n", i,
               worker_stats[i].handled_packets);
    printf("Total packets: %u (%x)\n", total_packet_count(),
           total_packet_count());
    printf("=== Perf test done ===\n\n");

    return 0;
}
/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
    const unsigned num_workers = rte_lcore_count() - 1;
    unsigned i;
    struct rte_mbuf *bufs[RTE_MAX_LCORE];

    rte_mempool_get_bulk(p, (void *)bufs, num_workers);

    quit = 1;
    for (i = 0; i < num_workers; i++)
        bufs[i]->hash.usr = i << 1;
    rte_distributor_process(d, bufs, num_workers);

    rte_mempool_put_bulk(p, (void *)bufs, num_workers);

    rte_distributor_process(d, NULL, 0);
    rte_eal_mp_wait_lcore();
    quit = 0;
    worker_idx = 0;
}
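quit_workers() only terminates cleanly if every worker re-enters the distributor after the quit flag is raised, picks up one of the tagged mbufs, returns its packet, and exits. A minimal sketch of a matching worker loop under that assumption, written against the legacy single-packet distributor API; the handle_work name and the quit/worker_idx/worker_stats globals mirror the test-harness conventions seen above but are declared here only as stand-ins:

#include <rte_distributor.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* stand-ins for the test harness globals referenced by the sketch */
static volatile int quit;
static volatile unsigned worker_idx;
static struct { unsigned handled_packets; } worker_stats[RTE_MAX_LCORE];

/* minimal worker sketch, assuming the global `quit` flag used above */
static int handle_work(void *arg)
{
    struct rte_distributor *d = arg;
    const unsigned id = __sync_fetch_and_add(&worker_idx, 1);
    struct rte_mbuf *buf = NULL;

    while (!quit) {
        /* hand back the previous packet and block for the next one */
        buf = rte_distributor_get_pkt(d, id, buf);
        worker_stats[id].handled_packets++;
    }
    /* return the final packet so the distributor's accounting stays clean */
    rte_distributor_return_pkt(d, id, buf);
    return 0;
}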
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static __attribute__((noreturn)) int
lcore_main(void *arg)
{
    const uintptr_t core_num = (uintptr_t)arg;
    const unsigned num_cores = rte_lcore_count();
    uint16_t startQueue = (uint16_t)(core_num * (NUM_QUEUES / num_cores));
    uint16_t endQueue = (uint16_t)(startQueue + (NUM_QUEUES / num_cores));
    uint16_t q, i, p;

    printf("Core %u(lcore %u) reading queues %i-%i\n",
           (unsigned)core_num, rte_lcore_id(), startQueue, endQueue - 1);

    for (;;) {
        struct rte_mbuf *buf[32];
        const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);

        for (p = 0; p < num_ports; p++) {
            const uint8_t src = ports[p];
            const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */

            if ((src == INVALID_PORT_ID) || (dst == INVALID_PORT_ID))
                continue;

            for (q = startQueue; q < endQueue; q++) {
                const uint16_t rxCount = rte_eth_rx_burst(src, q,
                                                          buf, buf_size);
                if (rxCount == 0)
                    continue;
                rxPackets[q] += rxCount;

                const uint16_t txCount = rte_eth_tx_burst(dst,
                                                          (uint16_t)core_num,
                                                          buf, rxCount);
                if (txCount != rxCount) {
                    for (i = txCount; i < rxCount; i++)
                        rte_pktmbuf_free(buf[i]);
                }
            }
        }
    }
}
struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count, size_t ele_size,
                    size_t cache_size)
{
    struct rte_mempool *mp;
    size_t tmp;

    /* No more than half of all elements can be in cache */
    tmp = (count / 2) / rte_lcore_count();
    if (cache_size > tmp) {
        cache_size = tmp;
    }

    if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
        cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
    }

    mp = rte_mempool_create(name, count, ele_size, cache_size,
                            0, NULL, NULL, NULL, NULL,
                            SOCKET_ID_ANY, 0);

    return (struct spdk_mempool *)mp;
}
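The two clamps above first keep the sum of all per-lcore caches at no more than half the pool, so at least half the elements stay globally allocatable even when every cache is full, and then apply DPDK's hard per-cache ceiling. A worked sketch of the same arithmetic with assumed inputs (the pool size, lcore count, and request are illustrative numbers only):

#include <stddef.h>
#include <stdio.h>

#define RTE_MEMPOOL_CACHE_MAX_SIZE 512  /* DPDK's per-cache ceiling */

int main(void)
{
    size_t count = 8192;       /* assumed pool size */
    unsigned lcores = 4;       /* assumed rte_lcore_count() */
    size_t cache_size = 2048;  /* caller's requested cache size */

    /* half the pool, split evenly across all lcore caches -> 1024 */
    size_t cap = (count / 2) / lcores;
    if (cache_size > cap)
        cache_size = cap;
    if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE)
        cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;

    printf("effective cache_size = %zu\n", cache_size);  /* prints 512 */
    return 0;
}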
/**********************************************************************
 *@description:
 * Initialize all enabled ethernet ports: configure each port's RX/TX
 * queues, set up one TX queue per (lcore, port) pair, size and create
 * the mbuf pools, and set up the per-lcore RX queues.
 *
 *@parameters:
 * [in]: nb_ports, number of ethernet devices to initialize
 * [in]: user_conf, user configuration (port mask, NUMA flag, ...)
 * [in]: lcore_conf, per-lcore queue configuration to fill in
 *
 *@return values:
 * 0 on success; exits the process on failure
 **********************************************************************/
static int odp_init_ports(unsigned short nb_ports,
                          struct odp_user_config *user_conf,
                          struct odp_lcore_config *lcore_conf)
{
    int ret;
    uint8_t portid;
    uint16_t queueid;
    unsigned lcore_id;
    uint8_t nb_rx_queue = 0;
    uint8_t max_rx_queue = 0;
    uint8_t queue, socketid;
    uint32_t n_tx_queue, nb_lcores, nb_mbuf;
    struct ether_addr eth_addr;
    struct rte_eth_dev_info dev_info;

    nb_lcores = rte_lcore_count();
    n_tx_queue = nb_lcores;
    if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
        n_tx_queue = MAX_TX_QUEUE_PER_PORT;

    printf("\nStart to Init port\n");

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((user_conf->port_mask & (1 << portid)) == 0) {
            printf("\nSkipping disabled port %d\n", portid);
            continue;
        }

        /* init port */
        printf("\t port %d:\n", portid);

        nb_rx_queue = odp_get_port_rx_queues_nb(portid, user_conf);
        if (max_rx_queue < nb_rx_queue)
            max_rx_queue = nb_rx_queue;

        printf("\t Creating queues: rx queue number=%d tx queue number=%u...\n",
               nb_rx_queue, (unsigned)n_tx_queue);

        ret = rte_eth_dev_configure(portid, nb_rx_queue,
                                    (uint16_t)n_tx_queue, &odp_port_conf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE,
                     "Cannot configure device: err=%d, port=%d\n",
                     ret, portid);

        rte_eth_macaddr_get(portid, &eth_addr);
        printf("\t MAC Address:%02X:%02X:%02X:%02X:%02X:%02X\n",
               eth_addr.addr_bytes[0], eth_addr.addr_bytes[1],
               eth_addr.addr_bytes[2], eth_addr.addr_bytes[3],
               eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]);

        /* init one TX queue per couple (lcore, port) */
        queueid = 0;
        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
            if (rte_lcore_is_enabled(lcore_id) == 0)
                continue;

            if (user_conf->numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("\t lcore id:%u, tx queue id:%d, socket id:%d\n",
                   lcore_id, queueid, socketid);

            ret = rte_eth_tx_queue_setup(portid, queueid,
                                         ODP_TX_DESC_DEFAULT,
                                         socketid, &odp_tx_conf);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_tx_queue_setup: err=%d, port=%d\n",
                         ret, portid);

            lcore_conf[lcore_id].tx_queue_id[portid] = queueid;
            queueid++;
        }
        printf("\n");
    }

    /* size the pool for the largest per-port RX queue count seen above */
    nb_mbuf = RTE_MAX((nb_ports * max_rx_queue * ODP_RX_DESC_DEFAULT +
                       nb_ports * nb_lcores * MAX_PKT_BURST +
                       nb_ports * n_tx_queue * ODP_TX_DESC_DEFAULT +
                       nb_lcores * MEMPOOL_CACHE_SIZE),
                      (unsigned)8192);

    /* init memory */
    ret = odp_init_mbuf_pool(nb_mbuf, user_conf);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "init_mem failed\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        if (rte_lcore_is_enabled(lcore_id) == 0)
            continue;

        printf("\nInitializing rx queues on lcore %u ...\n", lcore_id);

        /* init RX queues */
        for (queue = 0; queue < lcore_conf[lcore_id].n_rx_queue; ++queue) {
            portid = lcore_conf[lcore_id].rx_queue_list[queue].port_id;
            queueid = lcore_conf[lcore_id].rx_queue_list[queue].queue_id;

            if (user_conf->numa_on)
                socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
            else
                socketid = 0;

            printf("port id:%d, rx queue id: %d, socket id:%d\n",
                   portid, queueid, socketid);

            ret = rte_eth_rx_queue_setup(portid, queueid,
                                         ODP_RX_DESC_DEFAULT,
                                         socketid, &odp_rx_conf,
                                         odp_pktmbuf_pool[socketid]);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_rx_queue_setup: err=%d, port=%d\n",
                         ret, portid);
        }
    }

    return 0;
}
int32_t populateNodeInfo(void)
{
    int32_t i = 0, socketId = -1, lcoreIndex = 0, enable = 0;
    uint8_t coreCount, portCount;
    struct rte_eth_dev_info devInfo;

    /* fetch the total lcore count under DPDK */
    coreCount = rte_lcore_count();
    for (i = 0; i < coreCount; i++) {
        socketId = rte_lcore_to_socket_id(i);
        lcoreIndex = rte_lcore_index(i);
        enable = rte_lcore_is_enabled(i);
        //printf("\n Logical %d Physical %d Socket %d Enabled %d\n",
        //       i, lcoreIndex, socketId, enable);

        if (likely(enable)) {
            /* classify the lcore info per NUMA node */
            numaNodeInfo[socketId].lcoreAvail =
                numaNodeInfo[socketId].lcoreAvail | (1 << lcoreIndex);
            numaNodeInfo[socketId].lcoreTotal += 1;
        } else {
            rte_panic("ERROR: Lcore %d Socket %d not enabled\n",
                      lcoreIndex, socketId);
        }
    }

    /* Create a mempool per NUMA node based on the interfaces available */
    portCount = rte_eth_dev_count();
    for (i = 0; i < portCount; i++) {
        rte_eth_dev_info_get(i, &devInfo);
        printf("\n Interface %d", i);
        printf("\n - driver: %s", devInfo.driver_name);
        printf("\n - if_index: %d", devInfo.if_index);
        if (devInfo.pci_dev) {
            printf("\n - PCI INFO");
            printf("\n -- ADDR - domain:bus:devid:function %x:%x:%x:%x",
                   devInfo.pci_dev->addr.domain,
                   devInfo.pci_dev->addr.bus,
                   devInfo.pci_dev->addr.devid,
                   devInfo.pci_dev->addr.function);
            printf("\n -- PCI ID - vendor:device:sub-vendor:sub-device %x:%x:%x:%x",
                   devInfo.pci_dev->id.vendor_id,
                   devInfo.pci_dev->id.device_id,
                   devInfo.pci_dev->id.subsystem_vendor_id,
                   devInfo.pci_dev->id.subsystem_device_id);
            printf("\n -- numa node: %d", devInfo.pci_dev->numa_node);
        }

        /* fall back to node 0 if the device reports no NUMA affinity */
        socketId = (devInfo.pci_dev == NULL ||
                    devInfo.pci_dev->numa_node == -1) ?
                        0 : devInfo.pci_dev->numa_node;
        numaNodeInfo[socketId].intfAvail =
            numaNodeInfo[socketId].intfAvail | (1 << i);
        numaNodeInfo[socketId].intfTotal += 1;
    }

    /* allocate mempools for each NUMA node which has NIC interfaces */
    for (i = 0; i < MAX_NUMANODE; i++) {
        if (likely(numaNodeInfo[i].intfAvail)) {
            /* ToDo: per interface */
            uint8_t portIndex = 0;
            char mempoolName[25];

            /* create mempool for TX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-tx", i, portIndex);
            numaNodeInfo[i].tx[portIndex] = rte_mempool_create(
                    mempoolName, NB_MBUF, MBUF_SIZE, 64,
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init, NULL,
                    rte_pktmbuf_init, NULL,
                    i /*SOCKET_ID_ANY*/,
                    0 /*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].tx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for tx on node %d intf %d\n",
                          i, portIndex);
            }

            /* create mempool for RX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-rx", i, portIndex);
            numaNodeInfo[i].rx[portIndex] = rte_mempool_create(
                    mempoolName, NB_MBUF, MBUF_SIZE, 64,
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init, NULL,
                    rte_pktmbuf_init, NULL,
                    i /*SOCKET_ID_ANY*/,
                    0 /*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].rx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for rx on node %d intf %d\n",
                          i, portIndex);
            }
        }
    }

    return 0;
}
int main(int argc, char **argv)
{
    int ret;
    unsigned nb_ports;
    unsigned int lcore_id, last_lcore_id, master_lcore_id;
    uint8_t port_id;
    uint8_t nb_ports_available;
    struct worker_thread_args worker_args = {NULL, NULL};
    struct send_thread_args send_args = {NULL, NULL};
    struct rte_ring *rx_to_workers;
    struct rte_ring *workers_to_tx;

    /* catch ctrl-c so we can print on exit */
    signal(SIGINT, int_handler);

    /* Initialize EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        return -1;
    argc -= ret;
    argv += ret;

    /* Parse the application-specific arguments */
    ret = parse_args(argc, argv);
    if (ret < 0)
        return -1;

    /* Check if we have enough cores */
    if (rte_lcore_count() < 3)
        rte_exit(EXIT_FAILURE, "Error, This application needs at "
                 "least 3 logical cores to run:\n"
                 "1 lcore for packet RX\n"
                 "1 lcore for packet TX\n"
                 "and at least 1 lcore for worker threads\n");

    nb_ports = rte_eth_dev_count();
    if (nb_ports == 0)
        rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
    if (nb_ports != 1 && (nb_ports & 1))
        rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
                 "when using a single port\n");

    mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
                                        MBUF_POOL_CACHE_SIZE, 0,
                                        MBUF_DATA_SIZE, rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

    nb_ports_available = nb_ports;

    /* initialize all ports */
    for (port_id = 0; port_id < nb_ports; port_id++) {
        /* skip ports that are not enabled */
        if ((portmask & (1 << port_id)) == 0) {
            printf("\nSkipping disabled port %d\n", port_id);
            nb_ports_available--;
            continue;
        }
        /* init port */
        printf("Initializing port %u... done\n", (unsigned)port_id);

        if (configure_eth_port(port_id) != 0)
            rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
                     port_id);
    }

    if (!nb_ports_available) {
        rte_exit(EXIT_FAILURE,
                 "All available ports are disabled. Please set portmask.\n");
    }

    /* Create rings for inter-core communication */
    rx_to_workers = rte_ring_create("rx_to_workers", RING_SIZE,
                                    rte_socket_id(), RING_F_SP_ENQ);
    if (rx_to_workers == NULL)
        rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

    workers_to_tx = rte_ring_create("workers_to_tx", RING_SIZE,
                                    rte_socket_id(), RING_F_SC_DEQ);
    if (workers_to_tx == NULL)
        rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

    if (!disable_reorder) {
        send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
                                              REORDER_BUFFER_SIZE);
        if (send_args.buffer == NULL)
            rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
    }

    last_lcore_id = get_last_lcore_id();
    master_lcore_id = rte_get_master_lcore();

    worker_args.ring_in = rx_to_workers;
    worker_args.ring_out = workers_to_tx;

    /* Start worker_thread() on all the available slave cores but the last 1 */
    for (lcore_id = 0; lcore_id <= get_previous_lcore_id(last_lcore_id);
            lcore_id++)
        if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id)
            rte_eal_remote_launch(worker_thread, (void *)&worker_args,
                                  lcore_id);

    if (disable_reorder) {
        /* Start tx_thread() on the last slave core */
        rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
                              last_lcore_id);
    } else {
        send_args.ring_in = workers_to_tx;
        /* Start send_thread() on the last slave core */
        rte_eal_remote_launch((lcore_function_t *)send_thread,
                              (void *)&send_args, last_lcore_id);
    }

    /* Start rx_thread() on the master core */
    rx_thread(rx_to_workers);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    print_stats();
    return 0;
}
// Called by Go code in the main function
unsigned GetCoreCount(void)
{
    return rte_lcore_count();
}
static int dpdk_main(int port_id, int argc, char *argv[])
{
    struct rte_eth_dev_info dev_info;
    unsigned nb_queues;
    FILE *lfile;
    uint8_t core_id;
    int ret;

    printf("In dpdk_main\n");

    // Open the log file
    lfile = fopen("./vrouter.log", "w");

    // Program the rte log
    rte_openlog_stream(lfile);

    ret = rte_eal_init(argc, argv);
    if (ret < 0) {
        log_crit("Invalid EAL parameters\n");
        return -1;
    }

    log_info("Programming cmd rings now!\n");
    rx_event_fd = (int *)malloc(sizeof(int) * rte_lcore_count());
    if (!rx_event_fd) {
        log_crit("Failed to allocate memory for rx event fd arrays\n");
        return -ENOMEM;
    }

    rte_eth_macaddr_get(port_id, &port_eth_addr);
    log_info("Port%d: MAC Address: ", port_id);
    print_ethaddr(&port_eth_addr);

    /* Determine the number of RX/TX pairs supported by NIC */
    rte_eth_dev_info_get(port_id, &dev_info);

    dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
    dev_info.pci_dev->intr_handle.max_intr =
            dev_info.max_rx_queues + dev_info.max_tx_queues;
    ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle,
                              dev_info.max_rx_queues);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n");
    }

    ret = rte_intr_enable(&dev_info.pci_dev->intr_handle);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n");
    }

    ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
                                dev_info.max_tx_queues, &port_conf);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n");
    }

    /* For each RX/TX pair */
    nb_queues = dev_info.max_tx_queues;
    for (core_id = 0; core_id < nb_queues; core_id++) {
        char s[64];

        if (rte_lcore_is_enabled(core_id) == 0)
            continue;

        /* NUMA socket number */
        unsigned socketid = rte_lcore_to_socket_id(core_id);
        if (socketid >= NB_SOCKETS) {
            log_crit("Socket %d of lcore %u is out of range %d\n",
                     socketid, core_id, NB_SOCKETS);
            return -EBADF;
        }

        /* Create memory pool */
        if (pktmbuf_pool[socketid] == NULL) {
            log_info("Creating mempool on %d of ~%lx bytes\n",
                     socketid, NB_MBUF * MBUF_SIZE);
            printf("Creating mempool on %d of ~%lx bytes\n",
                   socketid, NB_MBUF * MBUF_SIZE);
            snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
            pktmbuf_pool[socketid] = rte_mempool_create(s,
                    NB_MBUF, MBUF_SIZE,
                    MEMPOOL_CACHE_SIZE, PKTMBUF_PRIV_SZ,
                    rte_pktmbuf_pool_init, NULL,
                    rte_pktmbuf_init, NULL,
                    socketid, 0);
            if (!pktmbuf_pool[socketid]) {
                log_crit("Cannot init mbuf pool on socket %d\n", socketid);
                return -ENOMEM;
            }
        }

        /* Setup the TX queue */
        ret = rte_eth_tx_queue_setup(port_id, core_id,
                                     RTE_TX_DESC_DEFAULT,
                                     socketid, &tx_conf);
        if (ret < 0) {
            log_crit("Cannot initialize TX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Setup the RX queue */
        ret = rte_eth_rx_queue_setup(port_id, core_id,
                                     RTE_RX_DESC_DEFAULT,
                                     socketid, &rx_conf,
                                     pktmbuf_pool[socketid]);
        if (ret < 0) {
            log_crit("Cannot initialize RX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Create the event fds for event notification */
        lcore_cmd_event_fd[core_id] = eventfd(0, 0);
    }

    // Start the eth device
    ret = rte_eth_dev_start(port_id);
    if (ret < 0) {
        log_crit("rte_eth_dev_start: err=%d, port=%d\n", ret, port_id);
        return -ENODEV;
    }

    // Put the device in promiscuous mode
    rte_eth_promiscuous_enable(port_id);

    // Wait for link up
    //check_all_ports_link_status(1, 1u << port_id);

    log_info("Starting engines on every core\n");
    rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER);

    return 0;
}
/*****************************************************************************
 * mem_init()
 ****************************************************************************/
bool mem_init(void)
{
    global_config_t *cfg;
    uint32_t core;
    uint32_t core_divider;

    /*
     * Add Memory module CLI commands
     */
    if (!cli_add_main_ctx(cli_ctx)) {
        RTE_LOG(ERR, USER1,
                "ERROR: Can't add mem module specific CLI commands!\n");
        return false;
    }

    cfg = cfg_get_config();
    if (cfg == NULL)
        return false;

    mem_init_sockets();

    core_divider = (rte_lcore_count() -
                    TPG_NR_OF_NON_PACKET_PROCESSING_CORES);

    RTE_LCORE_FOREACH_SLAVE(core) {
        if (!cfg_is_pkt_core(core))
            continue;

        mbuf_pool[core] = mem_create_local_pool(GCFG_MBUF_POOL_NAME,
                core,
                cfg->gcfg_mbuf_poolsize / core_divider,
                cfg->gcfg_mbuf_size,
                cfg->gcfg_mbuf_cache_size,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init,
                rte_pktmbuf_init,
                MEM_MBUF_POOL_FLAGS);
        if (mbuf_pool[core] == NULL)
            return false;

        mbuf_pool_tx_hdr[core] = mem_create_local_pool(GCFG_MBUF_POOL_HDR_NAME,
                core,
                cfg->gcfg_mbuf_hdr_poolsize / core_divider,
                cfg->gcfg_mbuf_hdr_size,
                cfg->gcfg_mbuf_hdr_cache_size,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init,
                rte_pktmbuf_init,
                MEM_MBUF_POOL_FLAGS);
        if (mbuf_pool_tx_hdr[core] == NULL)
            return false;

        mbuf_pool_clone[core] = mem_create_local_pool(GCFG_MBUF_POOL_CLONE_NAME,
                core,
                cfg->gcfg_mbuf_poolsize / core_divider,
                cfg->gcfg_mbuf_clone_size,
                cfg->gcfg_mbuf_cache_size,
                sizeof(struct rte_pktmbuf_pool_private),
                rte_pktmbuf_pool_init,
                rte_pktmbuf_init,
                MEM_MBUF_POOL_FLAGS);
        if (mbuf_pool_clone[core] == NULL)
            return false;

        tcb_pool[core] = mem_create_local_pool(GCFG_TCB_POOL_NAME,
                core,
                cfg->gcfg_tcb_pool_size / core_divider,
                sizeof(tcp_control_block_t),
                0, 0, NULL, NULL,
                MEM_TCB_POOL_FLAGS);
        if (tcb_pool[core] == NULL)
            return false;

        ucb_pool[core] = mem_create_local_pool(GCFG_UCB_POOL_NAME,
                core,
                cfg->gcfg_ucb_pool_size / core_divider,
                sizeof(udp_control_block_t),
                0, 0, NULL, NULL,
                MEM_UCB_POOL_FLAGS);
        if (ucb_pool[core] == NULL)
            return false;
    }

    return true;
}
int main(int argc, char **argv)
{
    struct lcore_env **envs;
    int ret;
    uint8_t n_ports;
    unsigned lcore_count;

    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    argc -= ret;
    argv += ret;

    ret = l2sw_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid MARIO arguments\n");

    lcore_count = rte_lcore_count();
    //RTE_LOG(INFO, MARIO, "Find %u logical cores\n", lcore_count);

    mbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE, 32,
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, NULL,
                                   rte_pktmbuf_init, NULL,
                                   rte_socket_id(), 0);
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

    // init route_table
    route_table = create_route_table(ROUTE_ENTRY_SIZE);
    add_staticroute(route_table);

    // init arp_table
    arp_table = create_arp_table(ARP_ENTRY_SIZE);

    n_ports = rte_eth_dev_count();
    if (n_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
    //RTE_LOG(INFO, MARIO, "Find %u ethernet ports\n", n_ports);
    if (n_ports > RTE_MAX_ETHPORTS)
        n_ports = RTE_MAX_ETHPORTS;

    /* Initialize lcore_env; each logical core gets a dedicated TX queue
     * on each port. */
    envs = (struct lcore_env **)
        rte_malloc(NULL, sizeof(struct lcore_env *) * lcore_count, 0);
    if (envs == NULL)
        rte_exit(EXIT_FAILURE, "Cannot allocate memory for core envs\n");

    uint8_t lcore_id;
    for (lcore_id = 0; lcore_id < lcore_count; lcore_id++) {
        struct lcore_env *env;
        env = (struct lcore_env *)
            rte_malloc(NULL, sizeof(struct lcore_env) +
                       sizeof(struct mbuf_table) * n_ports, 0);
        if (env == NULL)
            rte_exit(EXIT_FAILURE,
                     "Cannot allocate memory for %u core env\n", lcore_id);
        env->n_port = n_ports;
        env->lcore_id = lcore_id;
        memset(env->tx_mbufs, 0, sizeof(struct mbuf_table) * n_ports);
        envs[lcore_id] = env;
    }

    /* Initialise each port */
    uint8_t port_id;
    for (port_id = 0; port_id < n_ports; port_id++) {
        //RTE_LOG(INFO, MARIO, "Initializing port %u...", port_id);
        ret = rte_eth_dev_configure(port_id, lcore_count, lcore_count,
                                    &port_conf);
        if (ret < 0)
            rte_exit(EXIT_FAILURE,
                     "Cannot configure device: err=%d, port=%u\n",
                     ret, (unsigned)port_id);

        rte_eth_macaddr_get(port_id, &port2eth[port_id]);

        /* init one RX queue per lcore */
        uint8_t core_id;
        for (core_id = 0; core_id < lcore_count; core_id++) {
            ret = rte_eth_rx_queue_setup(port_id, core_id, nb_rxd,
                                         rte_eth_dev_socket_id(port_id),
                                         NULL, mbuf_pool);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_rx_queue_setup:err=%d, port=%u queue=%u\n",
                         ret, (unsigned)port_id, (unsigned)core_id);
        }

        /* init one TX queue per lcore */
        for (core_id = 0; core_id < lcore_count; core_id++) {
            ret = rte_eth_tx_queue_setup(port_id, core_id, nb_txd,
                                         rte_eth_dev_socket_id(port_id),
                                         NULL);
            if (ret < 0)
                rte_exit(EXIT_FAILURE,
                         "rte_eth_tx_queue_setup:err=%d, port=%u queue=%u\n",
                         ret, (unsigned)port_id, (unsigned)core_id);
        }

        /* Start device */
        ret = rte_eth_dev_start(port_id);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
                     ret, (unsigned)port_id);

        rte_eth_promiscuous_enable(port_id);

        /*RTE_LOG(INFO, MARIO,
                "Port %u, MAC address %02x:%02x:%02x:%02x:%02x:%02x\n\n",
                port_id,
                port2eth[port_id].addr_bytes[0],
                port2eth[port_id].addr_bytes[1],
                port2eth[port_id].addr_bytes[2],
                port2eth[port_id].addr_bytes[3],
                port2eth[port_id].addr_bytes[4],
                port2eth[port_id].addr_bytes[5]); */

        memset(&port_statistics, 0, sizeof(port_statistics));
    }

    check_all_ports_link_status(n_ports);

    /* launch per-lcore init on every lcore */
    rte_eal_mp_remote_launch(l2sw_launch_one_lcore, envs, CALL_MASTER);
    {
        uint8_t lcore_id;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
    }

    rte_free(arp_table);
    rte_free(route_table);
    return 0;
}
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf;
    uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
    const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT,
                   tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    int retval;
    uint16_t q;

    /* The max pool number from dev_info will be used to validate the pool
     * number specified in the cmd line */
    rte_eth_dev_info_get(port, &dev_info);

    /* Configure the number of supported virtio devices based on VMDQ limits */
    num_devices = dev_info.max_vmdq_pools;
    num_queues = dev_info.max_rx_queues;

    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;

    /* Get port configuration. */
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;

    if (port >= rte_eth_dev_count())
        return -1;

    rx_rings = (uint16_t)num_queues;

    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    /* Setup the queues. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                        rte_eth_dev_socket_id(port),
                                        &rx_conf_default, mbuf_pool);
        if (retval < 0)
            return retval;
    }
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                        rte_eth_dev_socket_id(port),
                                        &tx_conf_default);
        if (retval < 0)
            return retval;
    }

    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);

    RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n",
            num_devices);
    RTE_LOG(INFO, VHOST_PORT,
            "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
            " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
            (unsigned)port,
            vmdq_ports_eth_addr[port].addr_bytes[0],
            vmdq_ports_eth_addr[port].addr_bytes[1],
            vmdq_ports_eth_addr[port].addr_bytes[2],
            vmdq_ports_eth_addr[port].addr_bytes[3],
            vmdq_ports_eth_addr[port].addr_bytes[4],
            vmdq_ports_eth_addr[port].addr_bytes[5]);

    return 0;
}
/*
 * This function is called by each data core. It handles all RX/TX registered
 * with the core. For TX the specific lcore linked list is used. For RX,
 * MAC addresses are compared with all devices in the main linked list.
 */
static int switch_worker(void *arg)
{
    struct rte_mempool *mbuf_pool = arg;
    struct virtio_net *dev = NULL;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    struct virtio_net_data_ll *dev_ll;
    struct mbuf_table *tx_q;
    volatile struct lcore_ll_info *lcore_ll;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
                               / US_PER_S * BURST_TX_DRAIN_US;
    uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
    unsigned ret, i;
    const uint16_t lcore_id = rte_lcore_id();
    const uint16_t num_cores = (uint16_t)rte_lcore_count();
    uint16_t rx_count = 0;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
    lcore_ll = lcore_info[lcore_id].lcore_ll;
    prev_tsc = 0;

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < num_cores; i++) {
        if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }

    while (1) {
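The drain_tsc expression converts a microsecond budget into TSC cycles: it rounds the cycles-per-microsecond figure up, so the resulting interval is never shorter than the requested number of microseconds. A standalone sketch of the same conversion; the TSC frequency and drain interval are assumed values for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define US_PER_S 1000000
#define BURST_TX_DRAIN_US 100  /* assumed: drain the TX queue every 100 us */

int main(void)
{
    uint64_t tsc_hz = 2400000000ULL;  /* assumed 2.4 GHz TSC */

    /* round up cycles-per-us, then scale to the drain interval */
    uint64_t drain_tsc = (tsc_hz + US_PER_S - 1) / US_PER_S
                         * BURST_TX_DRAIN_US;

    printf("drain every %" PRIu64 " cycles\n", drain_tsc);  /* 240000 */
    return 0;
}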
/**
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
int vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
    int retval;
    uint16_t q;
    struct rte_eth_dev_info dev_info;
    uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
    const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    struct rte_eth_udp_tunnel tunnel_udp;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_txconf *txconf;
    struct vxlan_conf *pconf = &vxdev;

    pconf->dst_port = udp_port;

    rte_eth_dev_info_get(port, &dev_info);

    if (dev_info.max_rx_queues > MAX_QUEUES) {
        rte_exit(EXIT_FAILURE,
                 "please define MAX_QUEUES no less than %u in %s\n",
                 dev_info.max_rx_queues, __FILE__);
    }

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;
    txconf->txq_flags = 0;

    if (port >= rte_eth_dev_count())
        return -1;

    rx_rings = nb_devices;

    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    /* Setup the queues. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                        rte_eth_dev_socket_id(port),
                                        rxconf, mbuf_pool);
        if (retval < 0)
            return retval;
    }
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                        rte_eth_dev_socket_id(port),
                                        txconf);
        if (retval < 0)
            return retval;
    }

    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    /* Configure UDP port for UDP tunneling */
    tunnel_udp.udp_port = udp_port;
    tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
    retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
    if (retval < 0)
        return retval;

    rte_eth_macaddr_get(port, &ports_eth_addr[port]);
    RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
            " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
            (unsigned)port,
            ports_eth_addr[port].addr_bytes[0],
            ports_eth_addr[port].addr_bytes[1],
            ports_eth_addr[port].addr_bytes[2],
            ports_eth_addr[port].addr_bytes[3],
            ports_eth_addr[port].addr_bytes[4],
            ports_eth_addr[port].addr_bytes[5]);

    if (tso_segsz != 0) {
        struct rte_eth_dev_info dev_info;
        rte_eth_dev_info_get(port, &dev_info);
        if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
            RTE_LOG(WARNING, PORT,
                    "hardware TSO offload is not supported\n");
    }

    return 0;
}
static int test_distributor_perf(void)
{
    static struct rte_distributor *ds;
    static struct rte_distributor *db;
    static struct rte_mempool *p;

    if (rte_lcore_count() < 2) {
        printf("ERROR: not enough cores to test distributor\n");
        return -1;
    }

    /* first, time how long it takes to round-trip a cache line */
    time_cache_line_switch();

    if (ds == NULL) {
        ds = rte_distributor_create("Test_perf", rte_socket_id(),
                                    rte_lcore_count() - 1,
                                    RTE_DIST_ALG_SINGLE);
        if (ds == NULL) {
            printf("Error creating distributor\n");
            return -1;
        }
    } else {
        rte_distributor_clear_returns(ds);
    }

    if (db == NULL) {
        db = rte_distributor_create("Test_burst", rte_socket_id(),
                                    rte_lcore_count() - 1,
                                    RTE_DIST_ALG_BURST);
        if (db == NULL) {
            printf("Error creating burst distributor\n");
            return -1;
        }
    } else {
        rte_distributor_clear_returns(db);
    }

    const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
            (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());

    if (p == NULL) {
        p = rte_pktmbuf_pool_create("DPT_MBUF_POOL", nb_bufs, BURST,
                                    0, RTE_MBUF_DEFAULT_BUF_SIZE,
                                    rte_socket_id());
        if (p == NULL) {
            printf("Error creating mempool\n");
            return -1;
        }
    }

    printf("=== Performance test of distributor (single mode) ===\n");
    rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
    if (perf_test(ds, p) < 0)
        return -1;
    quit_workers(ds, p);

    printf("=== Performance test of distributor (burst mode) ===\n");
    rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
    if (perf_test(db, p) < 0)
        return -1;
    quit_workers(db, p);

    return 0;
}
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
    unsigned int lcores;

    /* N producer + N worker + 1 master when producer cores are used.
     * Else N worker + 1 master when Rx adapter is used. */
    lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

    if (rte_lcore_count() < lcores) {
        evt_err("test needs minimum %d lcores", lcores);
        return -1;
    }

    /* Validate worker lcores */
    if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
        evt_err("worker lcores overlaps with master lcore");
        return -1;
    }
    if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
        evt_err("worker lcores overlaps producer lcores");
        return -1;
    }
    if (evt_has_disabled_lcore(opt->wlcores)) {
        evt_err("one or more workers lcores are not enabled");
        return -1;
    }
    if (!evt_has_active_lcore(opt->wlcores)) {
        evt_err("minimum one worker is required");
        return -1;
    }

    if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
        /* Validate producer lcores */
        if (evt_lcores_has_overlap(opt->plcores,
                                   rte_get_master_lcore())) {
            evt_err("producer lcores overlaps with master lcore");
            return -1;
        }
        if (evt_has_disabled_lcore(opt->plcores)) {
            evt_err("one or more producer lcores are not enabled");
            return -1;
        }
        if (!evt_has_active_lcore(opt->plcores)) {
            evt_err("minimum one producer is required");
            return -1;
        }
    }

    if (evt_has_invalid_stage(opt))
        return -1;

    if (evt_has_invalid_sched_type(opt))
        return -1;

    if (nb_queues > EVT_MAX_QUEUES) {
        evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
        return -1;
    }
    if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
        evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
        return -1;
    }

    /* Fixups */
    if (opt->nb_stages == 1 && opt->fwd_latency) {
        evt_info("fwd_latency is valid when nb_stages > 1, disabling");
        opt->fwd_latency = 0;
    }
    if (opt->fwd_latency && !opt->q_priority) {
        evt_info("enabled queue priority for latency measurement");
        opt->q_priority = 1;
    }
    if (opt->nb_pkts == 0)
        opt->nb_pkts = INT64_MAX / evt_nr_active_lcores(opt->plcores);

    return 0;
}