int32_t main(int32_t argc, char **argv) { int32_t ret = -1; ret = rte_eal_init(argc, argv); if(0 > ret) { rte_exit(EXIT_FAILURE, "rte_eal_init failed!\n"); } argc -= ret; argv += ret; ret = udpi_parse_args(argc, argv); if(0 > ret) { udpi_print_usage(argv[0]); rte_exit(EXIT_FAILURE, "invalid user args!\n"); } udpi_init(); rte_eal_mp_remote_launch(udpi_lcore_main_loop, NULL, CALL_MASTER); return 0; }
int MAIN(int argc, char **argv) { uint32_t lcore; int ret; /* Init EAL */ ret = rte_eal_init(argc, argv); if (ret < 0) return -1; argc -= ret; argv += ret; /* Parse application arguments (after the EAL ones) */ ret = app_parse_args(argc, argv); if (ret < 0) { app_print_usage(); return -1; } /* Init */ app_init(); app_print_params(); /* Launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER); RTE_LCORE_FOREACH_SLAVE(lcore) { if (rte_eal_wait_lcore(lcore) < 0) { return -1; } } return 0; }
/*
 * Entry point: route RTE logs to stderr, build and validate the
 * application configuration, initialize it, and run app_thread on
 * every lcore (master included).
 */
int main(int argc, char **argv)
{
    /* Send DPDK log output to stderr. */
    rte_openlog_stream(stderr);

    /* Configuration: defaults, command line, config file, validation. */
    app_config_init(&app);
    app_config_args(&app, argc, argv);
    app_config_parse(&app, app.config_file);
    app_config_check(&app);

    /* Initialization. */
    app_init(&app);

    /* Run-time: one application thread per lcore. */
    rte_eal_mp_remote_launch(app_thread, (void *)&app, CALL_MASTER);

    return 0;
}
/* Main function */ int main(int argc, char **argv) { int ret; int i; /* Create handler for SIGINT for CTRL + C closing and SIGALRM to print stats*/ signal(SIGINT, sig_handler); signal(SIGALRM, alarm_routine); /* Initialize DPDK enviroment with args, then shift argc and argv to get application parameters */ ret = rte_eal_init(argc, argv); if (ret < 0) FATAL_ERROR("Cannot init EAL\n"); argc -= ret; argv += ret; /* Check if this application can use 1 core*/ ret = rte_lcore_count (); if (ret != 2) FATAL_ERROR("This application needs exactly 2 cores."); /* Parse arguments */ parse_args(argc, argv); if (ret < 0) FATAL_ERROR("Wrong arguments\n"); /* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */ #if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8 ret = rte_eal_pci_probe(); if (ret < 0) FATAL_ERROR("Cannot probe PCI\n"); #endif /* Get number of ethernet devices */ nb_sys_ports = rte_eth_dev_count(); if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n"); /* Create a mempool with per-core cache, initializing every element for be used as mbuf, and allocating on the current NUMA node */ pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0); if (pktmbuf_pool == NULL) FATAL_ERROR("Cannot create cluster_mem_pool. 
Errno: %d [ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n", rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST ); /* Create a ring for exchanging packets between cores, and allocating on the current NUMA node */ intermediate_ring = rte_ring_create (RING_NAME, buffer_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ ); if (intermediate_ring == NULL ) FATAL_ERROR("Cannot create ring"); /* Operations needed for each ethernet device */ for(i=0; i < nb_sys_ports; i++) init_port(i); /* Start consumer and producer routine on 2 different cores: producer launched first... */ ret = rte_eal_mp_remote_launch (main_loop_producer, NULL, SKIP_MASTER); if (ret != 0) FATAL_ERROR("Cannot start consumer thread\n"); /* ... and then loop in consumer */ main_loop_consumer ( NULL ); return 0; }
static int test_distributor_perf(void) { static struct rte_distributor *d; static struct rte_mempool *p; if (rte_lcore_count() < 2) { printf("ERROR: not enough cores to test distributor\n"); return -1; } /* first time how long it takes to round-trip a cache line */ time_cache_line_switch(); if (d == NULL) { d = rte_distributor_create("Test_perf", rte_socket_id(), rte_lcore_count() - 1); if (d == NULL) { printf("Error creating distributor\n"); return -1; } } else { rte_distributor_flush(d); rte_distributor_clear_returns(d); } const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ? (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count()); if (p == NULL) { p = rte_mempool_create("DPT_MBUF_POOL", nb_bufs, MBUF_SIZE, BURST, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0); if (p == NULL) { printf("Error creating mempool\n"); return -1; } } rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER); if (perf_test(d, p) < 0) return -1; quit_workers(d, p); return 0; }
/** * @brief Initialize DPDK related functionality * * @param argc Number of command line arguments * @param argv Command line arguments array * @param ret Number of processed command line arguments * * @return true if success and false otherwice */ bool DPDKAdapter::init(int& argc, char**& argv) { qDebug("Initializing DPDK"); long unsigned int nr_hugepages = 0; parse_sysfs_value("/proc/sys/vm/nr_hugepages", &nr_hugepages); if(nr_hugepages == 0) { qCritical("No huge pages found"); return false; } int ret = rte_eal_init(argc, argv); if(ret < 0) { qCritical("Invalid EAL arguments"); return false; } argc -= ret; argv += ret; if(!initializeDevs()) { qCritical("Could not initialize devices"); return false; } portTxCoreMap(0, 1); portTxCoreMap(1, 2); portRxCoreMap(0, 1); portRxCoreMap(1, 2); startTx(); startRx(); rte_eal_mp_remote_launch(lcoreMainRoutine, this, SKIP_MASTER); initialized = true; return true; }
int MAIN(int argc, char *argv[]) { //Check for root privileges if(geteuid() != 0) { fprintf(stderr,"[%s] Root permissions are required to run %s\n",NAME,argv[0]); exit(EXIT_FAILURE); } uint32_t lcore; int ret; /* Init EAL */ ret = rte_eal_init(argc, argv); if (ret < 0) return -1; argc -= ret; argv += ret; if(init(argc, argv) < 0) return -1; fprintf(logFile,"[%s] Network function started!\n",NAME); fflush(logFile); rte_eal_mp_remote_launch(do_nf, NULL, CALL_MASTER); //In this version, the NF uses just one lcores.. Other potential lcores are no //used RTE_LCORE_FOREACH_SLAVE(lcore) { if (rte_eal_wait_lcore(lcore)/*Wait until an lcore finishes its job.*/ < 0) { return -1; } } return 0; }
/*
 * Start the dataplane threads.
 *
 * With DPDK compiled in, the per-lcore DPDK main loop is launched on every
 * slave lcore first — but only if it is not already running (dpdk_run) and
 * the process is not in raw-socket-only mode.  The rawsocket thread is then
 * started, followed by the timer thread, and finally the DPDK management
 * thread; each later start is skipped as soon as one fails.
 *
 * Returns LAGOPUS_RESULT_OK on success, or the error code of the first
 * dp_thread_start() call that failed.
 */
lagopus_result_t dataplane_start(void)
{
  lagopus_result_t rv;

#ifdef HAVE_DPDK
  /* launch per-lcore init on every lcore */
  if (dpdk_run == false && rawsocket_only_mode != true) {
    rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, SKIP_MASTER);
  }
#endif /* HAVE_DPDK */
  /* Raw-socket thread first, then the timer thread. */
  rv = dp_thread_start(&sock_thread, &sock_lock, &sock_run);
  if (rv == LAGOPUS_RESULT_OK) {
    rv = dp_thread_start(&timer_thread, &timer_lock, &timer_run);
  }
#ifdef HAVE_DPDK
  /* DPDK management thread last, only if everything else came up. */
  if (rv == LAGOPUS_RESULT_OK && rawsocket_only_mode != true) {
    rv = dp_thread_start(&dpdk_thread, &dpdk_lock, &dpdk_run);
  }
#endif /* HAVE_DPDK */
  return rv;
}
static int dpdk_main(int port_id, int argc, char* argv[]) { struct rte_eth_dev_info dev_info; unsigned nb_queues; FILE* lfile; uint8_t core_id; int ret; printf("In dpdk_main\n"); // Open the log file lfile = fopen("./vrouter.log", "w"); // Program the rte log rte_openlog_stream(lfile); ret = rte_eal_init(argc, argv); if (ret < 0) { log_crit( "Invalid EAL parameters\n"); return -1; } log_info( "Programming cmd rings now!\n"); rx_event_fd = (int *) malloc(sizeof(int *) * rte_lcore_count()); if (!rx_event_fd) { log_crit("Failed to allocate memory for rx event fd arrays\n"); return -ENOMEM; } rte_eth_macaddr_get(port_id, &port_eth_addr); log_info("Port%d: MAC Address: ", port_id); print_ethaddr(&port_eth_addr); /* Determine the number of RX/TX pairs supported by NIC */ rte_eth_dev_info_get(port_id, &dev_info); dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX; dev_info.pci_dev->intr_handle.max_intr = dev_info.max_rx_queues + dev_info.max_tx_queues; ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle, dev_info.max_rx_queues); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n"); } ret = rte_intr_enable(&dev_info.pci_dev->intr_handle); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n"); } ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues, dev_info.max_tx_queues, &port_conf); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n"); } /* For each RX/TX pair */ nb_queues = dev_info.max_tx_queues; for (core_id = 0; core_id < nb_queues; core_id++) { char s[64]; if (rte_lcore_is_enabled(core_id) == 0) continue; /* NUMA socket number */ unsigned socketid = rte_lcore_to_socket_id(core_id); if (socketid >= NB_SOCKETS) { log_crit( "Socket %d of lcore %u is out of range %d\n", socketid, core_id, NB_SOCKETS); return -EBADF; } /* Create memory pool */ if (pktmbuf_pool[socketid] == NULL) { log_info("Creating mempool on %d of ~%lx bytes\n", socketid, NB_MBUF * MBUF_SIZE); printf("Creating 
mempool on %d of ~%lx bytes\n", socketid, NB_MBUF * MBUF_SIZE); snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); pktmbuf_pool[socketid] = rte_mempool_create(s, NB_MBUF, MBUF_SIZE, MEMPOOL_CACHE_SIZE, PKTMBUF_PRIV_SZ, rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, socketid, 0); if (!pktmbuf_pool[socketid]) { log_crit( "Cannot init mbuf pool on socket %d\n", socketid); return -ENOMEM; } } /* Setup the TX queue */ ret = rte_eth_tx_queue_setup(port_id, core_id, RTE_TX_DESC_DEFAULT, socketid, &tx_conf); if (ret < 0) { log_crit( "Cannot initialize TX queue (%d)\n", core_id); return -ENODEV; } /* Setup the RX queue */ ret = rte_eth_rx_queue_setup(port_id, core_id, RTE_RX_DESC_DEFAULT, socketid, &rx_conf, pktmbuf_pool[socketid]); if (ret < 0) { log_crit( "Cannot initialize RX queue (%d)\n", core_id); return -ENODEV; } /* Create the event fds for event notification */ lcore_cmd_event_fd[core_id] = eventfd(0, 0); } // Start the eth device ret = rte_eth_dev_start(port_id); if (ret < 0) { log_crit( "rte_eth_dev_start: err=%d, port=%d\n", ret, core_id); return -ENODEV; } // Put the device in promiscuous mode rte_eth_promiscuous_enable(port_id); // Wait for link up //check_all_ports_link_status(1, 1u << port_id); log_info( "Starting engines on every core\n"); rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER); return 0; }
int main(int argc, char **argv) { //struct lcore_queue_conf *qconf = NULL; //struct rte_eth_dev_info dev_info; struct lcore_env** envs; int ret; uint8_t n_ports; unsigned lcore_count; ret = rte_eal_init(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); argc -= ret; argv += ret; ret = l2sw_parse_args(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid MARIO arguments\n"); lcore_count = rte_lcore_count(); n_ports = rte_eth_dev_count(); //RTE_LOG(INFO, MARIO, "Find %u logical cores\n" , lcore_count); mbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0); if (mbuf_pool == NULL) rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); // init route_table route_table = create_route_table(ROUTE_ENTRY_SIZE); add_staticroute(route_table); // init arp_table arp_table = create_arp_table(ARP_ENTRY_SIZE); n_ports = rte_eth_dev_count(); if (n_ports == 0) rte_exit(EXIT_FAILURE, "No Ethernet ports - byte\n"); //RTE_LOG(INFO, MARIO, "Find %u ethernet ports\n", n_ports); if (n_ports > RTE_MAX_ETHPORTS) n_ports = RTE_MAX_ETHPORTS; /* Each logical core is assigned a dedicated TX queue on each port. 
*/ /* for(uint8_t port_id = 0; port_id < n_ports; port_id++) { rte_eth_dev_info_get(port_id, &dev_info); } */ /* Initialize the port/queue configuration of each logical core */ /* for(uint8_t port_id = 0; port_id < n_ports; port_id++) { ; } */ /* Initialize lcore_env */ envs = (struct lcore_env**) rte_malloc(NULL,sizeof(struct lcore_env*),0); if (envs == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for core envs\n"); uint8_t lcore_id; for (lcore_id = 0; lcore_id < lcore_count; lcore_id++) { struct lcore_env* env; env = (struct lcore_env*) rte_malloc(NULL,sizeof(struct lcore_env) + sizeof(struct mbuf_table) *n_ports,0); if (env == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate memory for %u core env\n", lcore_id); env->n_port = n_ports; env->lcore_id = lcore_id; memset(env->tx_mbufs, 0, sizeof(struct mbuf_table) * n_ports); envs[lcore_id] = env; } /* Initialise each port */ uint8_t port_id; for(port_id = 0; port_id < n_ports; port_id++) { //RTE_LOG(INFO, MARIO, "Initializing port %u...", port_id); fflush(stdout); ret = rte_eth_dev_configure(port_id, lcore_count, lcore_count, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", ret, (unsigned)port_id); //RTE_LOG(INFO, MARIO, "done\n"); rte_eth_macaddr_get(port_id, &port2eth[port_id]); /* init one RX queue */ uint8_t core_id; for (core_id = 0; core_id < lcore_count; core_id++) { ret = rte_eth_rx_queue_setup(port_id, core_id, nb_rxd, rte_eth_dev_socket_id(port_id), NULL, mbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u queue=%u\n", ret, (unsigned) port_id, (unsigned) core_id); } /* init one TX queue */ for (core_id = 0; core_id < lcore_count; core_id++) { ret = rte_eth_tx_queue_setup(port_id, core_id, nb_txd, rte_eth_dev_socket_id(port_id), NULL); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u queue=%u\n", ret, (unsigned) port_id, (unsigned) core_id); } /* Start device */ ret = 
rte_eth_dev_start(port_id); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n", ret, (unsigned) port_id); rte_eth_promiscuous_enable(port_id); /*RTE_LOG(INFO, MARIO, "Port %u, MAC address %02x:%02x:%02x:%02x:%02x:%02x\n\n", port_id, port2eth[port_id].addr_bytes[0], port2eth[port_id].addr_bytes[1], port2eth[port_id].addr_bytes[2], port2eth[port_id].addr_bytes[3], port2eth[port_id].addr_bytes[4], port2eth[port_id].addr_bytes[5]); */ memset(&port_statistics, 0, sizeof(port_statistics)); } check_all_ports_link_status(n_ports); /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(l2sw_launch_one_lcore, envs, CALL_MASTER); { uint8_t lcore_id; RTE_LCORE_FOREACH_SLAVE(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) return -1; } } rte_free(arp_table); rte_free(route_table); return 0; }
static int test_distributor_perf(void) { static struct rte_distributor *ds; static struct rte_distributor *db; static struct rte_mempool *p; if (rte_lcore_count() < 2) { printf("ERROR: not enough cores to test distributor\n"); return -1; } /* first time how long it takes to round-trip a cache line */ time_cache_line_switch(); if (ds == NULL) { ds = rte_distributor_create("Test_perf", rte_socket_id(), rte_lcore_count() - 1, RTE_DIST_ALG_SINGLE); if (ds == NULL) { printf("Error creating distributor\n"); return -1; } } else { rte_distributor_clear_returns(ds); } if (db == NULL) { db = rte_distributor_create("Test_burst", rte_socket_id(), rte_lcore_count() - 1, RTE_DIST_ALG_BURST); if (db == NULL) { printf("Error creating burst distributor\n"); return -1; } } else { rte_distributor_clear_returns(db); } const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ? (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count()); if (p == NULL) { p = rte_pktmbuf_pool_create("DPT_MBUF_POOL", nb_bufs, BURST, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); if (p == NULL) { printf("Error creating mempool\n"); return -1; } } printf("=== Performance test of distributor (single mode) ===\n"); rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER); if (perf_test(ds, p) < 0) return -1; quit_workers(ds, p); printf("=== Performance test of distributor (burst mode) ===\n"); rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER); if (perf_test(db, p) < 0) return -1; quit_workers(db, p); return 0; }
int test_distributor(void) { static struct rte_distributor *d; static struct rte_mempool *p; if (rte_lcore_count() < 2) { printf("ERROR: not enough cores to test distributor\n"); return -1; } if (d == NULL) { d = rte_distributor_create("Test_distributor", rte_socket_id(), rte_lcore_count() - 1); if (d == NULL) { printf("Error creating distributor\n"); return -1; } } else { rte_distributor_flush(d); rte_distributor_clear_returns(d); } const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ? (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count()); if (p == NULL) { p = rte_mempool_create("DT_MBUF_POOL", nb_bufs, MBUF_SIZE, BURST, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0); if (p == NULL) { printf("Error creating mempool\n"); return -1; } } rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER); if (sanity_test(d, p) < 0) goto err; quit_workers(d, p); rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER); if (sanity_test_with_mbuf_alloc(d, p) < 0) goto err; quit_workers(d, p); if (rte_lcore_count() > 2) { rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d, SKIP_MASTER); if (sanity_test_with_worker_shutdown(d, p) < 0) goto err; quit_workers(d, p); rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d, SKIP_MASTER); if (test_flush_with_worker_shutdown(d, p) < 0) goto err; quit_workers(d, p); } else { printf("Not enough cores to run tests for worker shutdown\n"); } if (test_error_distributor_create_numworkers() == -1 || test_error_distributor_create_name() == -1) { printf("rte_distributor_create parameter check tests failed"); return -1; } return 0; err: quit_workers(d, p); return -1; }
/*
 * Distributor functional test: runs the sanity, mbuf-alloc, worker-shutdown
 * and flush-on-shutdown tests against BOTH the single-packet and the burst
 * distributor algorithms, then checks rte_distributor_create()'s parameter
 * validation.  Returns 0 on success, -1 on any failure.
 */
static int test_distributor(void)
{
    /* Distributors and pool persist across invocations: created on the
     * first call, flushed and drained on later ones. */
    static struct rte_distributor *ds;
    static struct rte_distributor *db;
    static struct rte_distributor *dist[2];
    static struct rte_mempool *p;
    int i;

    /* Need the master plus at least one worker lcore. */
    if (rte_lcore_count() < 2) {
        printf("ERROR: not enough cores to test distributor\n");
        return -1;
    }

    if (db == NULL) {
        db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
                rte_lcore_count() - 1, RTE_DIST_ALG_BURST);
        if (db == NULL) {
            printf("Error creating burst distributor\n");
            return -1;
        }
    } else {
        rte_distributor_flush(db);
        rte_distributor_clear_returns(db);
    }

    if (ds == NULL) {
        ds = rte_distributor_create("Test_dist_single", rte_socket_id(),
                rte_lcore_count() - 1, RTE_DIST_ALG_SINGLE);
        if (ds == NULL) {
            printf("Error creating single distributor\n");
            return -1;
        }
    } else {
        rte_distributor_flush(ds);
        rte_distributor_clear_returns(ds);
    }

    /* 511 mbufs per lcore, but at least enough for the big-batch test. */
    const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
            (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
    if (p == NULL) {
        p = rte_pktmbuf_pool_create("DT_MBUF_POOL", nb_bufs, BURST, 0,
                RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (p == NULL) {
            printf("Error creating mempool\n");
            return -1;
        }
    }

    dist[0] = ds;
    dist[1] = db;

    /* Run the whole suite once per algorithm: i==0 single, i==1 burst. */
    for (i = 0; i < 2; i++) {
        worker_params.dist = dist[i];
        if (i)
            sprintf(worker_params.name, "burst");
        else
            sprintf(worker_params.name, "single");

        /* Basic distribute/process/return round trip. */
        rte_eal_mp_remote_launch(handle_work, &worker_params, SKIP_MASTER);
        if (sanity_test(&worker_params, p) < 0)
            goto err;
        quit_workers(&worker_params, p);

        /* Same, but workers free and re-allocate the mbufs. */
        rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
                &worker_params, SKIP_MASTER);
        if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
            goto err;
        quit_workers(&worker_params, p);

        /* Shutdown tests need a surviving worker to take over. */
        if (rte_lcore_count() > 2) {
            rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
                    &worker_params, SKIP_MASTER);
            if (sanity_test_with_worker_shutdown(&worker_params, p) < 0)
                goto err;
            quit_workers(&worker_params, p);

            rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
                    &worker_params, SKIP_MASTER);
            if (test_flush_with_worker_shutdown(&worker_params, p) < 0)
                goto err;
            quit_workers(&worker_params, p);
        } else {
            printf("Too few cores to run worker shutdown test\n");
        }
    }

    if (test_error_distributor_create_numworkers() == -1 ||
            test_error_distributor_create_name() == -1) {
        printf("rte_distributor_create parameter check tests failed");
        return -1;
    }

    return 0;

err:
    /* Tear the workers down before reporting failure. */
    quit_workers(&worker_params, p);
    return -1;
}
int main(int argc, char ** argv) { int ret, socket; unsigned pid, nb_ports, lcore_id, rx_lcore_id; struct sock_parameter sk_param; struct sock *sk; struct txrx_queue *rxq; struct port_queue_conf *port_q; struct lcore_queue_conf *lcore_q; ret = rte_eal_init(argc, argv); if (ret < 0) return -1; argc -= ret; argv += ret; /*parse gw ip and mac from cmdline*/ if (argc > 1) { default_host_addr = argv[1]; if (argc == 3) default_gw_addr = argv[2]; else if (argc == 4) default_gw_mac = argv[3]; else rte_exit(EXIT_FAILURE, "invalid arguments\n"); } /*config nic*/ nb_ports = rte_eth_dev_count(); if (nb_ports == 0) rte_exit(EXIT_FAILURE, "No available NIC\n"); for (pid = 0; pid < nb_ports; pid++) { ret = net_device_init(pid); if (ret) { RTE_LOG(WARNING, LDNS, "fail to initialize port %u\n", pid); goto release_net_device; } } pkt_rx_pool = rte_pktmbuf_pool_create("ldns rx pkt pool", PKT_RX_NB, 32, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id()); if (pkt_rx_pool == NULL) rte_exit(EXIT_FAILURE, "cannot alloc rx_mbuf_pool"); /*sock create*/ sk_param.mode = SOCK_MODE_COMPLETE; sk_param.func = dns_process; sk = create_sock(0, SOCK_PTOTO_IPPROTO_UDP, &sk_param); if (sk == NULL) rte_exit(EXIT_FAILURE, "cannot create sock\n"); if (sock_bind(sk, inet_network(default_host_addr), DNS_PORT)) rte_exit(EXIT_FAILURE, "cannot bind addr:%s port:%u", default_host_addr, DNS_PORT); /*init ethdev*/ lcore_id = 0; lcore_q = lcore_q_conf_get(lcore_id); for (pid = 0; pid < nb_ports; pid++) { port_q = port_q_conf_get(pid); ret = rte_eth_dev_configure(pid, rx_rings, tx_rings, &default_rte_eth_conf); if (ret != 0) rte_exit(EXIT_FAILURE, "port %u configure error\n", pid); while (rx_lcore_id == rte_get_master_lcore() || !rte_lcore_is_enabled(rx_lcore_id) || lcore_q->nb_rxq == nb_rx_queue_per_core) { rx_lcore_id++; if (rx_lcore_id == RTE_MAX_LCORE) rte_exit(EXIT_FAILURE, "not enough core for port %u\n", pid); lcore_q = lcore_q_conf_get(lcore_id); } rxq = &lcore_q->rxq[lcore_q->nb_rxq]; rxq->port = pid; 
rxq->lcore = rx_lcore_id; rxq->qid = port_q->nb_rxq; lcore_q->nb_rxq++; port_q->nb_rxq++; socket = rte_lcore_to_socket_id(rx_lcore_id); if (socket == SOCKET_ID_ANY) socket = 0; ret = rte_eth_tx_queue_setup(pid, rxq->qid, nb_txd, socket, NULL); if (ret < 0) rte_exit(EXIT_FAILURE, "fail to setup txq %u on port %u", rxq->qid, pid); ret = rte_eth_rx_queue_setup(pid, rxq->qid, nb_rxd, socket, NULL, pkt_rx_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "failt to setup rxq %u on port %u", rxq->qid, pid); ret = rte_eth_dev_start(pid); if (ret < 0) rte_exit(EXIT_FAILURE, "fail to start port %u\n", pid); } if (dns_set_cfg(&default_dns_cfg)) rte_exit(EXIT_FAILURE, "fail to set dns configuration%u\n", pid); rte_eal_mp_remote_launch(packet_launch_one_lcore, NULL, SKIP_MASTER); RTE_LCORE_FOREACH_SLAVE(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) return -1; } return 0; release_net_device: for (pid; pid != 0; pid--) { net_device_release(pid - 1); } return -1; }