static int globalinit(struct virtif_user *viu) { int rv; if ((rv = rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]), /*UNCONST*/(void *)(uintptr_t)ealargs)) < 0) OUT("eal init\n"); /* disable mempool cache due to DPDK bug, not thread safe */ if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, 0/*MBCACHE*/, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, 0, 0)) == NULL) { rv = -EINVAL; OUT("mbuf pool\n"); } if ((rv = PMD_INIT()) < 0) OUT("pmd init\n"); if ((rv = rte_eal_pci_probe()) < 0) OUT("PCI probe\n"); if (rte_eth_dev_count() == 0) { rv = -1; OUT("no ports\n"); } rv = 0; out: return rv; }
/** * @brief Initialize all DPDK devices * * @return true on success */ bool DPDKAdapter::initializeDevs() { if(rte_eal_pci_probe() < 0) { qCritical("Cannot probe PCI"); return false; } nPortCount = rte_eth_dev_count(); if(nPortCount < 1) { qCritical("No ports found"); return false; } //Limit devices count if(nPortCount > RTE_MAX_ETHPORTS) nPortCount = RTE_MAX_ETHPORTS; for(u_int8_t portId; portId < nPortCount; ++portId) { if(!initDevRxTxMPool(portId)) { qCritical("TX/RX pools could not been allocated : port %u", portId); return false; } } return true; }
static int globalinit(void) { int rv; if (rte_eal_init(sizeof(ealargs)/sizeof(ealargs[0]), /*UNCONST*/(void *)(uintptr_t)ealargs) < 0) OUT("eal init\n"); if ((mbpool = rte_mempool_create("mbuf_pool", NMBUF, MBSIZE, MBALIGN, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, 0, 0)) == NULL) OUT("mbuf pool\n"); if (PMD_INIT() < 0) OUT("wm driver\n"); if (rte_eal_pci_probe() < 0) OUT("PCI probe\n"); if (rte_eth_dev_count() == 0) OUT("no ports\n"); rv = 0; out: return rv; }
static void app_init_ports(void) { uint32_t i; /* Init driver */ RTE_LOG(INFO, USER1, "Initializing the PMD driver ...\n"); if (rte_eal_pci_probe() < 0) rte_panic("Cannot probe PCI\n"); /* Init NIC ports, then start the ports */ for (i = 0; i < app.n_ports; i++) { uint8_t port; int ret; port = (uint8_t) app.ports[i]; RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port); /* Init port */ ret = rte_eth_dev_configure( port, 1, 1, &port_conf); if (ret < 0) rte_panic("Cannot init NIC port %u (%d)\n", port, ret); rte_eth_promiscuous_enable(port); /* Init RX queues */ ret = rte_eth_rx_queue_setup( port, 0, app.port_rx_ring_size, rte_eth_dev_socket_id(port), &rx_conf, app.pool); if (ret < 0) rte_panic("Cannot init RX for port %u (%d)\n", (uint32_t) port, ret); /* Init TX queues */ ret = rte_eth_tx_queue_setup( port, 0, app.port_tx_ring_size, rte_eth_dev_socket_id(port), &tx_conf); if (ret < 0) rte_panic("Cannot init TX for port %u (%d)\n", (uint32_t) port, ret); /* Start port */ ret = rte_eth_dev_start(port); if (ret < 0) rte_panic("Cannot start port %u (%d)\n", port, ret); } app_ports_check_link(); }
/* Main function */ int main(int argc, char **argv) { int ret; int i; /* Create handler for SIGINT for CTRL + C closing and SIGALRM to print stats*/ signal(SIGINT, sig_handler); signal(SIGALRM, alarm_routine); /* Initialize DPDK enviroment with args, then shift argc and argv to get application parameters */ ret = rte_eal_init(argc, argv); if (ret < 0) FATAL_ERROR("Cannot init EAL\n"); argc -= ret; argv += ret; /* Check if this application can use 1 core*/ ret = rte_lcore_count (); if (ret != 2) FATAL_ERROR("This application needs exactly 2 cores."); /* Parse arguments */ parse_args(argc, argv); if (ret < 0) FATAL_ERROR("Wrong arguments\n"); /* Probe PCI bus for ethernet devices, mandatory only in DPDK < 1.8.0 */ #if RTE_VER_MAJOR == 1 && RTE_VER_MINOR < 8 ret = rte_eal_pci_probe(); if (ret < 0) FATAL_ERROR("Cannot probe PCI\n"); #endif /* Get number of ethernet devices */ nb_sys_ports = rte_eth_dev_count(); if (nb_sys_ports <= 0) FATAL_ERROR("Cannot find ETH devices\n"); /* Create a mempool with per-core cache, initializing every element for be used as mbuf, and allocating on the current NUMA node */ pktmbuf_pool = rte_mempool_create(MEMPOOL_NAME, buffer_size-1, MEMPOOL_ELEM_SZ, MEMPOOL_CACHE_SZ, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,rte_socket_id(), 0); if (pktmbuf_pool == NULL) FATAL_ERROR("Cannot create cluster_mem_pool. 
Errno: %d [ENOMEM: %d, ENOSPC: %d, E_RTE_NO_TAILQ: %d, E_RTE_NO_CONFIG: %d, E_RTE_SECONDARY: %d, EINVAL: %d, EEXIST: %d]\n", rte_errno, ENOMEM, ENOSPC, E_RTE_NO_TAILQ, E_RTE_NO_CONFIG, E_RTE_SECONDARY, EINVAL, EEXIST ); /* Create a ring for exchanging packets between cores, and allocating on the current NUMA node */ intermediate_ring = rte_ring_create (RING_NAME, buffer_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ ); if (intermediate_ring == NULL ) FATAL_ERROR("Cannot create ring"); /* Operations needed for each ethernet device */ for(i=0; i < nb_sys_ports; i++) init_port(i); /* Start consumer and producer routine on 2 different cores: producer launched first... */ ret = rte_eal_mp_remote_launch (main_loop_producer, NULL, SKIP_MASTER); if (ret != 0) FATAL_ERROR("Cannot start consumer thread\n"); /* ... and then loop in consumer */ main_loop_consumer ( NULL ); return 0; }
int initDpdk(char* progname) { int ret; static char *eal_args[] = {progname, "-c0xf", "-n1", "-m128", "--file-prefix=drone"}; // TODO: read env var DRONE_RTE_EAL_ARGS to override defaults ret = rte_eal_init(sizeof(eal_args)/sizeof(char*), eal_args); if (ret < 0) rte_panic("Cannot init EAL\n"); mbufPool_ = rte_mempool_create("DpktPktMbuf", 16*1024, // # of mbufs 2048, // sz of mbuf 32, // per-lcore cache sz sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, // pool ctor NULL, // pool ctor arg rte_pktmbuf_init, // mbuf ctor NULL, // mbuf ctor arg SOCKET_ID_ANY, 0 // flags ); if (!mbufPool_) rte_exit(EXIT_FAILURE, "cannot init mbuf pool\n"); if (rte_pmd_init_all() < 0) rte_exit(EXIT_FAILURE, "cannot init pmd\n"); if (rte_eal_pci_probe() < 0) rte_exit(EXIT_FAILURE, "cannot probe PCI\n"); // init lcore information lcoreCount_ = rte_lcore_count(); lcoreFreeMask_ = 0; for (int i = 0; i < lcoreCount_; i++) { if (rte_lcore_is_enabled(i) && (unsigned(i) != rte_get_master_lcore())) lcoreFreeMask_ |= (1 << i); } qDebug("lcore_count = %d, lcore_free_mask = 0x%llx", lcoreCount_, lcoreFreeMask_); // assign a lcore for Rx polling rxLcoreId_ = getFreeLcore(); if (rxLcoreId_ < 0) rte_exit(EXIT_FAILURE, "not enough cores for Rx polling"); stopRxPoll_ = false; return 0; }
/*
 * Version-dependent NIC bring-up shim.
 *
 * On very old DPDK releases (no RTE_VERSION_NUM macro at all) the poll
 * mode drivers must be initialized explicitly; on releases older than
 * 1.8.0 the PCI bus must still be probed by hand.  On 1.8.0 and newer
 * both steps happen inside rte_eal_init(), so this function compiles
 * to an empty body there.
 */
void app_init_nics(void)
{
#ifndef RTE_VERSION_NUM
	/* Init driver */
	printf("Initializing the PMD driver ...\n");
	if (rte_pmd_init_all() < 0) {
		rte_panic("Cannot init PMD\n");
	}
#elif RTE_VERSION < RTE_VERSION_NUM(1, 8, 0, 0)
	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}
#endif /* RTE_VERSION_NUM */
}
/*
 * Initialize rte devices and check the number of available ports.
 *
 * Initializes the ixgbe PMD (and the ixgbe VF PMD when TGSF_USE_VF is
 * set), probes the PCI bus, counts the Ethernet ports (clamped to
 * TGEN_MAX_PORTS), validates tgen_used_port_mask against that count,
 * and records per-port max TX queue count and driver type.
 *
 * Returns the (possibly clamped) number of ports; panics on any error.
 */
static uint8_t init_rte_dev(void)
{
	uint8_t nb_ports;
	struct rte_eth_dev_info dev_info;

	/* initialize driver(s) */
	TGEN_PANIC(rte_ixgbe_pmd_init() < 0, "\tError: Cannot init ixgbe pmd\n");
	if (tgen_cfg.flags & TGSF_USE_VF) {
		TGEN_PANIC(rte_ixgbevf_pmd_init() < 0, "\tError: cannot init ixgbevf pmd\n");
	}
	TGEN_PANIC(rte_eal_pci_probe() < 0, "\tError: Cannot probe PCI\n");

	/* get available ports configuration */
	nb_ports = rte_eth_dev_count();
	TGEN_PANIC(nb_ports == 0, "\tError: DPDK could not find any port\n");
	mprintf("\tDPDK has found %u ports\n", nb_ports);

	/* clamp to the compile-time port limit */
	if (nb_ports > TGEN_MAX_PORTS) {
		mprintf("\tWarning: I can deal with at most %u ports."
		        " Please update TGEN_MAX_PORTS and recompile.\n",
		        TGEN_MAX_PORTS);
		nb_ports = TGEN_MAX_PORTS;
	}

	/* reject mask bits that refer to ports beyond nb_ports */
	TGEN_PANIC(tgen_used_port_mask & ~((1U << nb_ports) - 1),
	           "\tError: invalid port(s) specified, used port mask is %#10x\n",
	           tgen_used_port_mask);

	/* read max TX queues per port */
	for (uint8_t port_id = 0; port_id < nb_ports; ++port_id) {
		/* skip ports that are not enabled */
		if ((tgen_used_port_mask & (1U << port_id)) == 0) {
			continue;
		}
		rte_eth_dev_info_get(port_id, &dev_info);
		tgen_port_conf[port_id].max_tx_queue = dev_info.max_tx_queues;
		mprintf("\tPort %u, Max TX queue = %u\n",
		        port_id, dev_info.max_tx_queues);
		/* classify by driver name: anything not ixgbe is treated as igb */
		if (strcmp(dev_info.driver_name, "rte_ixgbe_pmd") == 0) {
			tgen_port_conf[port_id].type = PORT_IXGBE;
		} else {
			tgen_port_conf[port_id].type = PORT_IGB;
		}
	}
	return nb_ports;
}
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 *
 * Stages: EAL init (consuming EAL argv), PCI probe, NIC driver init,
 * port count discovery, application-argument parsing, mbuf pool
 * creation, then the flow table / datapath / vport / stats / packet
 * scheduler subsystems.
 *
 * Returns 0 on success, -1 on EAL-init or argument-parse failure;
 * panics or exits on the other failures.
 */
int init(int argc, char *argv[])
{
	int retval;
	uint8_t total_ports = 0;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	/* skip past the EAL arguments so app args can be parsed below */
	argc -= retval;
	argv += retval;

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	/* initialise the nic drivers */
	retval = init_drivers();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n");

	/* get total number of ports */
	total_ports = rte_eth_dev_count();
	printf("total_ports = %d\n", total_ports);

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* bring up the switching subsystems */
	flow_table_init();
	datapath_init();
	vport_init();
	stats_init();

	// Added by Y.Born
	RTE_LOG(INFO, QoS, "QOS system initializing\n");
	pktsched_init();

	return 0;
}
/*
 * Bring up the DPDK data path: initialize the poll mode drivers, bind
 * them to the probed PCI devices, and require at least two Ethernet
 * ports.  Exits the process on any failure.
 */
void init_dpdk(void)
{
	int rc;

	/* Initialize the PMD */
	rc = rte_pmd_init_all();
	if (rc < 0)
		rte_exit(EXIT_FAILURE,
		    "Failed to initialize poll mode drivers (error %d)\n", rc);

	/* Bind the drivers to usable devices */
	rc = rte_eal_pci_probe();
	if (rc < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_pci_probe(): error %d\n", rc);

	/* This application forwards between a pair of ports */
	if (rte_eth_dev_count() < 2)
		rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
/*
 * One-time pcap-over-DPDK global initialization: EAL, optional explicit
 * PMD init (old DPDK only, gated by VER_16), PCI probe, and the shared
 * RX and TX mbuf pools.
 *
 * errbuf - caller buffer of at least PCAP_ERRBUF_SIZE bytes that
 *          receives an error description on failure.
 *
 * Returns DPDKPCAP_OK on success (or if already initialized),
 * DPDKPCAP_FAILURE otherwise.
 *
 * NOTE(review): initFinished is a plain flag, so concurrent first calls
 * could both run the initialization — confirm callers serialize this.
 */
DpdkPcapResultCode_t globalInit(char *errbuf)
{
    /* fixed EAL arguments: 2 cores, 2 mem channels, 128 MB */
    char *args[] = {"dpdkpcap_test", "-c 0x03", "-n 2", "-m 128", "--file-prefix=dpdkpcap_test"};

    /* idempotent: only the first call does any work */
    if (initFinished == 1)
    {
        return DPDKPCAP_OK;
    }

    if (rte_eal_init(sizeof(args)/sizeof(char*), args) < 0)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not initialize DPDK");
        return DPDKPCAP_FAILURE;
    }

#ifdef VER_16
    /* explicit PMD init is only needed on old (1.6-era) DPDK */
    if (rte_pmd_init_all() < 0)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not init driver");
        return DPDKPCAP_FAILURE;
    }
#endif

    if (rte_eal_pci_probe() < 0)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not probe devices");
        return DPDKPCAP_FAILURE;
    }

    /* single-producer / single-consumer pools: one for RX, one for TX */
    rxPool = rte_mempool_create(DPDKPCAP_RX_POOL_NAME,
                                DPDKPCAP_NB_MBUF,
                                DPDKPCAP_MBUF_SIZE,
                                DPDKPCAP_CACHE_SIZE,
                                sizeof(struct rte_pktmbuf_pool_private),
                                rte_pktmbuf_pool_init,
                                NULL,
                                rte_pktmbuf_init,
                                NULL,
                                SOCKET_ID_ANY,
                                MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
    if(rxPool == NULL)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not allocate RX memory pool");
        return DPDKPCAP_FAILURE;
    }

    txPool = rte_mempool_create(DPDKPCAP_TX_POOL_NAME,
                                DPDKPCAP_NB_MBUF,
                                DPDKPCAP_MBUF_SIZE,
                                DPDKPCAP_CACHE_SIZE,
                                sizeof(struct rte_pktmbuf_pool_private),
                                rte_pktmbuf_pool_init,
                                NULL,
                                rte_pktmbuf_init,
                                NULL,
                                SOCKET_ID_ANY,
                                MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
    if(txPool == NULL)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not allocate TX memory pool");
        return DPDKPCAP_FAILURE;
    }

    initFinished = 1;
    return DPDKPCAP_OK;
}
/*
 * QoS-scheduler application init.
 *
 * Initializes the PMDs and PCI bus, loads the configuration profile,
 * then for each active flow (pipe configuration) creates or looks up
 * the RX->worker and worker->TX rings, creates a per-flow mbuf pool on
 * the RX port's NUMA node, initializes the RX and TX ports, and builds
 * the hierarchical scheduler port.  Finally logs the timer frequency
 * and the configured ring/burst/threshold sizes.
 *
 * Returns 0 on success; exits the process on any failure.
 */
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	/* init driver(s) */
	if (rte_pmd_init_all() < 0)
		rte_exit(EXIT_FAILURE, "Cannot init PMD\n");

	if (rte_eal_pci_probe() < 0)
		rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");

	/* Initialize each active flow */
	for(i = 0; i < nb_pfc; i++) {
		/* allocate on the NUMA node of the flow's RX core */
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		/* RX ring: reuse an existing ring of the same name if present
		 * (several flows may share an RX core) */
		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
			    socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;

		/* TX ring: same lookup-or-create pattern, keyed on the TX core */
		rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
			    socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;

		/* create the mbuf pools for each RX Port */
		rte_snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, mp_size, MBUF_SIZE,
		    burst_conf.rx_burst * 4,
		    sizeof(struct rte_pktmbuf_pool_private),
		    rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
		    rte_eth_dev_socket_id(qos_conf[i].rx_port), 0);
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);

		/* both ports draw from the same per-flow pool */
		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);

		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}

	/* summarize the effective configuration */
	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
	    rte_get_timer_hz());

	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
	    "NIC TX = %u\n",
	    ring_conf.rx_size, mp_size, ring_conf.ring_size, ring_conf.tx_size);

	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
	    "             Worker read/QoS enqueue = %hu,\n"
	    "             QoS dequeue = %hu, Worker write = %hu\n",
	    burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
	    burst_conf.qos_dequeue, burst_conf.tx_burst);

	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
	    "TX (p = %hhu, h = %hhu, w = %hhu)\n",
	    rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
	    tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}
/*
 * Bring up every NIC port that has at least one RX queue or a TX queue
 * assigned in the application masks: configure the port, enable
 * promiscuous mode, set up each enabled RX queue with the mbuf pool of
 * the lcore that will poll it (NUMA-local), set up TX queue 0 on the
 * NUMA node of the TX lcore, start the port, and finally wait for link
 * status on all ports.  Panics on any failure.
 */
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint8_t port, queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init driver */
	printf("Initializing the PMD driver ...\n");
	if (rte_pmd_init_all() < 0) {
		rte_panic("Cannot init PMD\n");
	}

	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_mempool *pool;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		/* skip ports not used by the application at all */
		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", (unsigned) port);
		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
		}
		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			/* place the queue on the socket of its polling lcore and
			 * feed it from that lcore's pool */
			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				(unsigned) port,
				(unsigned) queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rx_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					(unsigned) queue,
					(unsigned) port,
					ret);
			}
		}

		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				(unsigned) port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&tx_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port,
					ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	/* wait for link-up on every port (all-ones mask) */
	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
/*
 * Netmap-compat bridge entry point.
 *
 * Initializes the EAL, probes the PCI bus, creates the shared mbuf
 * pool, initializes the librte_compat_netmap layer and registers each
 * configured port with it, then runs a poll loop that forwards packets
 * between port 0 and the last port (pmsk) until SIGINT sets `stop`.
 * On shutdown, every port is unregistered from Netmap mode and closed.
 *
 * NOTE(review): `req` is passed to the NIOCUNREGIF ioctl without being
 * initialized — presumably the compat layer ignores its contents for
 * this request; confirm against librte_compat_netmap.
 * NOTE(review): only pollfd[0] and pollfd[1] have revents cleared each
 * iteration, so the loop looks written for at most 2 ports — confirm.
 */
int main(int argc, char *argv[])
{
	int err, ret;
	uint32_t i, pmsk;
	struct nmreq req;
	struct pollfd pollfd[MAX_PORT_NUM];
	struct rte_mempool *pool;
	struct netmap_ring *rx_ring, *tx_ring;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
	/* skip past EAL arguments */
	argc -= ret;
	argv += ret;

	parse_args(argc, argv);

	if (ports.num == 0)
		rte_exit(EXIT_FAILURE, "no ports specified\n");

	err = rte_eal_pci_probe();
	if (err < 0)
		rte_exit(EXIT_FAILURE, "rte_eal_pci_probe(): error %d\n", err);

	if (rte_eth_dev_count() < 1)
		rte_exit(EXIT_FAILURE, "Not enough ethernet ports available\n");

	/* shared mbuf pool on the local NUMA node */
	pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE, 32,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "Couldn't create mempool\n");

	netmap_conf.socket_id = rte_socket_id();
	err = rte_netmap_init(&netmap_conf);
	if (err < 0)
		rte_exit(EXIT_FAILURE, "Couldn't initialize librte_compat_netmap\n");
	else
		printf("librte_compat_netmap initialized\n");

	port_conf.pool = pool;
	port_conf.socket_id = rte_socket_id();

	/* register each port with the netmap compat layer */
	for (i = 0; i != ports.num; i++) {
		err = rte_netmap_init_port(ports.p[i].id, &port_conf);
		if (err < 0)
			rte_exit(EXIT_FAILURE, "Couldn't setup port %hhu\n", ports.p[i].id);
		rte_eth_promiscuous_enable(ports.p[i].id);
	}

	/* put each port under NETMAP control (obtains an fd per port) */
	for (i = 0; i != ports.num; i++) {
		err = netmap_port_open(i);
		if (err) {
			rte_exit(EXIT_FAILURE, "Couldn't set port %hhu "
				"under NETMAP control\n", ports.p[i].id);
		}
		else
			printf("Port %hhu now in Netmap mode\n", ports.p[i].id);
	}

	/* poll both directions on every port fd */
	memset(pollfd, 0, sizeof(pollfd));
	for (i = 0; i != ports.num; i++) {
		pollfd[i].fd = ports.p[i].fd;
		pollfd[i].events = POLLIN | POLLOUT;
	}

	signal(SIGINT, sigint_handler);

	/* index of the "other" port; 0 when bridging a single port to itself */
	pmsk = ports.num - 1;

	printf("Bridge up and running!\n");

	while (!stop) {
		uint32_t n_pkts;

		pollfd[0].revents = 0;
		pollfd[1].revents = 0;

		ret = rte_netmap_poll(pollfd, ports.num, 0);
		if (ret < 0) {
			stop = 1;
			printf("[E] poll returned with error %d\n", ret);
		}

		if (((pollfd[0].revents | pollfd[1].revents) & POLLERR) != 0) {
			printf("POLLERR!\n");
		}

		/* port 0 -> last port */
		if ((pollfd[0].revents & POLLIN) != 0 &&
				(pollfd[pmsk].revents & POLLOUT) != 0) {
			rx_ring = ports.p[0].rx_ring;
			tx_ring = ports.p[pmsk].tx_ring;

			/* move as many packets as both rings can handle */
			n_pkts = RTE_MIN(rx_ring->avail, tx_ring->avail);
			move(n_pkts, rx_ring, tx_ring);
		}

		/* last port -> port 0 (skipped in single-port mode) */
		if (pmsk != 0 && (pollfd[pmsk].revents & POLLIN) != 0 &&
				(pollfd[0].revents & POLLOUT) != 0) {
			rx_ring = ports.p[pmsk].rx_ring;
			tx_ring = ports.p[0].tx_ring;

			n_pkts = RTE_MIN(rx_ring->avail, tx_ring->avail);
			move(n_pkts, rx_ring, tx_ring);
		}
	}

	printf("Bridge stopped!\n");

	/* release every port from Netmap mode and close its fd */
	for (i = 0; i != ports.num; i++) {
		err = rte_netmap_ioctl(ports.p[i].fd, NIOCUNREGIF, &req);
		if (err) {
			printf("[E] NIOCUNREGIF ioctl failed (error %d)\n", err);
		}
		else
			printf("Port %hhu unregistered from Netmap mode\n", ports.p[i].id);
		rte_netmap_close(ports.p[i].fd);
	}
	return 0;
}
/* Main function, does initialisation and calls the per-lcore functions */ int MAIN(int argc, char *argv[]) { unsigned cores; struct rte_mempool *mbuf_pool; unsigned lcore_id; uintptr_t i; int ret; unsigned nb_ports, valid_num_ports; uint8_t portid; #ifndef RTE_EXEC_ENV_BAREMETAL signal(SIGHUP, sighup_handler); #endif /* init EAL */ ret = rte_eal_init(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); argc -= ret; argv += ret; /* parse app arguments */ ret = vmdq_parse_args(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Invalid VMDQ argument\n"); if (rte_ixgbe_pmd_init() != 0 || rte_eal_pci_probe() != 0) rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n"); cores = rte_lcore_count(); if ((cores & (cores - 1)) != 0 || cores > 128) { rte_exit(EXIT_FAILURE,"This program can only run on an even" "number of cores(1-128)\n\n"); } nb_ports = rte_eth_dev_count(); if (nb_ports > RTE_MAX_ETHPORTS) nb_ports = RTE_MAX_ETHPORTS; /* * Update the global var NUM_PORTS and global array PORTS * and get value of var VALID_NUM_PORTS according to system ports number */ valid_num_ports = check_ports_num(nb_ports); if (valid_num_ports < 2 || valid_num_ports % 2) { printf("Current valid ports number is %u\n", valid_num_ports); rte_exit(EXIT_FAILURE, "Error with valid ports number is not even or less than 2\n"); } mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS * nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0); if (mbuf_pool == NULL) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("\nSkipping disabled port %d\n", portid); continue; } if (port_init(portid, mbuf_pool) != 0) rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); } /* call lcore_main() on every 
slave lcore */ i = 0; RTE_LCORE_FOREACH_SLAVE(lcore_id) { rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id); } /* call on master too */ (void) lcore_main((void*)i); return 0; }
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 *
 * Stages: EAL init, PCI probe, NIC driver init, then shared memzones
 * for port data, per-vport statistics and the flow table (so secondary
 * processes can attach to them), app-argument parsing, mbuf pools, the
 * configured ports, the client queues/rings, and finally the KNI
 * queues.
 *
 * Returns 0 on success, -1 on EAL-init or argument-parse failure;
 * panics or exits on the other failures.
 *
 * NOTE(review): the failure message for init_port() prints the loop
 * index i, not ports->id[i] — presumably it should report the actual
 * port id; confirm before relying on that log line.
 * NOTE(review): "%lx" for mz->phys_addr assumes a 64-bit long —
 * confirm or switch to PRIx64 when touching this code.
 */
int init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	uint8_t i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	/* skip past EAL arguments so app args can be parsed below */
	argc -= retval;
	argv += retval;

	if (rte_eal_pci_probe())
		rte_panic("Cannot probe PCI\n");

	/* initialise the nic drivers */
	retval = init_drivers();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n");

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data (shared with secondary processes) */
	mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
				rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot reserve memory zone for port information\n");
	memset(mz->addr, 0, sizeof(*ports));
	ports = mz->addr;
	RTE_LOG(INFO, APP, "memzone address is %lx\n", mz->phys_addr);

	/* set up array for statistics */
	mz = rte_memzone_reserve(MZ_STATS_INFO, VPORT_STATS_SIZE,
				rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot reserve memory zone for statistics\n");
	memset(mz->addr, 0, VPORT_STATS_SIZE);
	vport_stats = mz->addr;

	/* set up array for flow table data */
	mz = rte_memzone_reserve(MZ_FLOW_TABLE, sizeof(*flow_table),
				rte_socket_id(), NO_FLAGS);
	if (mz == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot reserve memory zone for port information\n");
	memset(mz->addr, 0, sizeof(*flow_table));
	flow_table = mz->addr;

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use */
	for (i = 0; i < ports->num_ports; i++) {
		retval = init_port(ports->id[i]);
		if (retval != 0)
			rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
					(unsigned)i);
	}

	/* initialise the client queues/rings for inter process comms */
	init_shm_rings();

	/* initalise kni queues */
	init_kni();

	return 0;
}
/*
 * Bring up every NIC port with at least one RX or TX queue assigned in
 * the application masks: init the IGB/IXGBE PMDs (as compiled in),
 * probe the PCI bus, configure each used port, set up its RX queues on
 * the NUMA node of the polling lcore (with that lcore's mbuf pool) and
 * TX queue 0 on the TX lcore's node, start the port, and record its MAC
 * address, link status and speed into the global port_stat array.
 * Panics on any failure.
 */
static void
app_init_nics(void)
{
	uint32_t socket, lcore;
	uint8_t port, queue;
	struct ether_addr mac_addr;
	int ret;

	/* Init driver */
	printf("Initializing the PMD driver ...\n");
#ifdef RTE_LIBRTE_IGB_PMD
	if (rte_igb_pmd_init() < 0) {
		rte_panic("Cannot init IGB PMD\n");
	}
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
	if (rte_ixgbe_pmd_init() < 0) {
		rte_panic("Cannot init IXGBE PMD\n");
	}
#endif
	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}

	/* clear the per-port statistics table before filling it below */
	memset(port_stat,0,sizeof(struct port_stat)*MAX_PORT_NUM);

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_eth_link link;
		struct rte_mempool *pool;
		uint32_t n_rx_queues, n_tx_queues;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		/* skip ports not used by the application at all */
		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", (uint32_t) port);
		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n",
				(uint32_t) port, ret);
		}
		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			/* NUMA-local placement: queue lives on the socket of its
			 * polling lcore and uses that lcore's pool */
			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				(uint32_t) port, (uint32_t) queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rx_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					(uint32_t) queue, (uint32_t) port, ret);
			}
		}

		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				(uint32_t) port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&tx_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port, ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}

		/* Get link status */
		rte_eth_link_get(port, &link);

		/* print the 6-byte MAC (5 colon-separated bytes + final byte;
		 * i == 5 after the loop, so the last printf emits byte 5) */
		rte_eth_macaddr_get(port,&mac_addr);
		int i=0;
		for(i=0;i<5;i++)
			printf("%02x:",mac_addr.addr_bytes[i]);
		printf("%02x\n",mac_addr.addr_bytes[i]);

		/* cache the MAC in the stats table and echo it back */
		memcpy(port_stat[port].mac_addr,mac_addr.addr_bytes,6);
		for(i=0;i<5;i++)
			printf("%02x:",port_stat[port].mac_addr[i]);
		printf("%02x\n",port_stat[port].mac_addr[i]);

		/* record link state and speed */
		if (link.link_status) {
			printf("Port %u is UP (%u Mbps)\n",
				(uint32_t) port, (unsigned) link.link_speed);
			port_stat[port].port_status=1;
			port_stat[port].port_speed=link.link_speed;
		} else {
			printf("Port %u is DOWN\n", (uint32_t) port);
			port_stat[port].port_status=0;
		}
	}
}