int main(void)
{
	int ret;
	struct cmdline *cl;
	int param_num = 8;
	char *param[] = {"anscli", "-c", "1", "-n", "1", "--no-pci",
		"--socket-mem=1", "--proc-type=secondary", NULL};

	rte_set_log_level(RTE_LOG_ERR);

	ret = rte_eal_init(param_num, param);
	if (ret < 0)
		rte_panic("Cannot init EAL\n");

	ret = anscli_ring_init();
	if (ret != 0)
		rte_panic("Cannot init ring\n");

	cl = cmdline_stdin_new(ip_main_ctx, "ans> ");
	if (cl == NULL)
		rte_panic("Cannot create ans cmdline instance\n");
	cmdline_interact(cl);
	cmdline_stdin_exit(cl);

	return 0;
}
static void udpi_init_rings(void)
{
	uint32_t n_swq, i;

	n_swq = udpi.n_workers;
	RTE_LOG(INFO, USER1, "Initializing %u SW rings for ctrlmsg\n", n_swq);

	udpi.msg_rings = (struct rte_ring **)rte_malloc_socket(NULL,
		n_swq * sizeof(struct rte_ring *),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (udpi.msg_rings == NULL)
		rte_panic("Cannot allocate memory to store ring pointers\n");

	for (i = 0; i < n_swq; i++) {
		struct rte_ring *ring;
		char name[32];

		snprintf(name, sizeof(name), "udpi_ctrlmsg_%u", i);
		ring = rte_ring_create(name, 16, rte_socket_id(),
			RING_F_SC_DEQ | RING_F_SP_ENQ);
		if (ring == NULL)
			rte_panic("Cannot create ctrlmsg ring %u\n", i);

		udpi.msg_rings[i] = ring;
	}
}
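/*
 * A minimal usage sketch (assumption, not part of the original source):
 * once the single-producer/single-consumer rings above exist, a control
 * message can be passed to a worker with rte_ring_enqueue() and picked up
 * with rte_ring_dequeue(). The helper names are hypothetical.
 */
static int udpi_send_ctrlmsg(uint32_t worker_id, void *msg)
{
	/* single producer: only the master thread enqueues on this ring */
	if (rte_ring_enqueue(udpi.msg_rings[worker_id], msg) != 0)
		return -1; /* ring full, caller may retry */
	return 0;
}

static void *udpi_recv_ctrlmsg(uint32_t worker_id)
{
	void *msg = NULL;

	/* single consumer: only worker 'worker_id' dequeues from its ring */
	if (rte_ring_dequeue(udpi.msg_rings[worker_id], &msg) != 0)
		return NULL; /* no message pending */
	return msg;
}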
/*
 * Send a message to a slave lcore identified by slave_id to call a
 * function f with argument arg. Once the execution is done, the
 * remote lcore switches to the FINISHED state.
 */
int
rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
{
	int n;
	char c = 0;
	int m2s = lcore_config[slave_id].pipe_master2slave[1];
	int s2m = lcore_config[slave_id].pipe_slave2master[0];

	if (lcore_config[slave_id].state != WAIT)
		return -EBUSY;

	lcore_config[slave_id].f = f;
	lcore_config[slave_id].arg = arg;

	/* send message */
	n = 0;
	while (n == 0 || (n < 0 && errno == EINTR))
		n = write(m2s, &c, 1);
	if (n < 0)
		rte_panic("cannot write on configuration pipe\n");

	/* wait ack */
	do {
		n = read(s2m, &c, 1);
	} while (n < 0 && errno == EINTR);

	if (n <= 0)
		rte_panic("cannot read on configuration pipe\n");

	return 0;
}
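/*
 * A minimal caller-side sketch (assumption, not from the original source):
 * launching a worker function on every slave lcore with
 * rte_eal_remote_launch() and collecting the return values with
 * rte_eal_wait_lcore(). The worker body is a placeholder.
 */
static int
lcore_worker(void *arg)
{
	RTE_LOG(INFO, USER1, "hello from lcore %u (arg=%p)\n",
		rte_lcore_id(), arg);
	return 0;
}

static void
launch_workers(void)
{
	unsigned lcore_id;

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(lcore_worker, NULL, lcore_id);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			rte_panic("worker on lcore %u failed\n", lcore_id);
	}
}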
static void
app_init_lpm_tables(void)
{
	unsigned socket, lcore;

	/* Init the LPM tables */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket++) {
		char name[32];
		uint32_t rule;

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		struct rte_lpm_config lpm_config;

		lpm_config.max_rules = APP_MAX_LPM_RULES;
		lpm_config.number_tbl8s = 256;
		lpm_config.flags = 0;
		snprintf(name, sizeof(name), "lpm_table_%u", socket);
		printf("Creating the LPM table for socket %u ...\n", socket);
		app.lpm_tables[socket] = rte_lpm_create(name, socket,
			&lpm_config);
		if (app.lpm_tables[socket] == NULL) {
			rte_panic("Unable to create LPM table on socket %u\n",
				socket);
		}

		for (rule = 0; rule < app.n_lpm_rules; rule++) {
			int ret;

			ret = rte_lpm_add(app.lpm_tables[socket],
				app.lpm_rules[rule].ip,
				app.lpm_rules[rule].depth,
				app.lpm_rules[rule].if_out);
			if (ret < 0) {
				rte_panic("Unable to add entry %u (%x/%u => %u) to "
					"the LPM table on socket %u (%d)\n",
					(unsigned) rule,
					(unsigned) app.lpm_rules[rule].ip,
					(unsigned) app.lpm_rules[rule].depth,
					(unsigned) app.lpm_rules[rule].if_out,
					socket, ret);
			}
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
	}
}
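/*
 * A minimal lookup sketch (assumption, not part of the original source):
 * once the per-socket LPM tables are populated, a worker can resolve the
 * output interface for a destination address with rte_lpm_lookup(). The
 * helper name and the "drop" marker are hypothetical.
 */
static inline uint32_t
app_lpm_get_if_out(struct rte_lpm *lpm_table, uint32_t dst_ip)
{
	uint32_t if_out = 0;

	/* rte_lpm_lookup() returns 0 on a hit; anything else means no rule */
	if (rte_lpm_lookup(lpm_table, dst_ip, &if_out) != 0)
		return UINT32_MAX; /* hypothetical "drop" marker */
	return if_out;
}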
static void
app_init_ports(void)
{
	uint32_t i;

	/* Init NIC ports, then start the ports */
	for (i = 0; i < app.n_ports; i++) {
		uint16_t port;
		int ret;

		port = app.ports[i];
		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);

		/* Init port */
		ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
		if (ret < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);

		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		ret = rte_eth_rx_queue_setup(port, 0, app.port_rx_ring_size,
			rte_eth_dev_socket_id(port), &rx_conf, app.pool);
		if (ret < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
				(uint32_t) port, ret);

		/* Init TX queues */
		ret = rte_eth_tx_queue_setup(port, 0, app.port_tx_ring_size,
			rte_eth_dev_socket_id(port), &tx_conf);
		if (ret < 0)
			rte_panic("Cannot init TX for port %u (%d)\n",
				(uint32_t) port, ret);

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0)
			rte_panic("Cannot start port %u (%d)\n", port, ret);
	}

	app_ports_check_link();
}
void
app_ping(void)
{
	unsigned i;
	uint64_t timestamp, diff_tsc;
	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
		    (p->core_type != APP_CORE_FW) &&
		    (p->core_type != APP_CORE_RT) &&
		    (p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
/* create memory configuration in shared/mmap memory. Take out
 * a write lock on the memsegs, so we can auto-detect primary/secondary.
 * This means we never close the file while running (auto-close on exit).
 * We also don't lock the whole file, so that in future we can use read-locks
 * on other parts, e.g. memzones, to detect if there are running secondary
 * processes. */
static void
rte_eal_config_create(void)
{
	void *rte_mem_cfg_addr;
	int retval;

	const char *pathname = eal_runtime_config_path();

	if (internal_config.no_shconf)
		return;

	/* map the config before hugepage address so that we don't waste a page */
	if (internal_config.base_virtaddr != 0)
		rte_mem_cfg_addr = (void *)
			RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
			sizeof(struct rte_mem_config), sysconf(_SC_PAGE_SIZE));
	else
		rte_mem_cfg_addr = NULL;

	if (mem_cfg_fd < 0) {
		mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
		if (mem_cfg_fd < 0)
			rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
	}

	retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
	if (retval < 0) {
		close(mem_cfg_fd);
		rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
	}

	retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
	if (retval < 0) {
		close(mem_cfg_fd);
		rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
			"process running?\n", pathname);
	}

	rte_mem_cfg_addr = mmap(rte_mem_cfg_addr, sizeof(*rte_config.mem_config),
		PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);

	if (rte_mem_cfg_addr == MAP_FAILED) {
		rte_panic("Cannot mmap memory for rte_config\n");
	}

	memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
	rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;

	/* store address of the config in the config itself so that secondary
	 * processes could later map the config into this exact location */
	rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
}
void
app_init_nics(void)
{
#ifndef RTE_VERSION_NUM
	/* Init driver */
	printf("Initializing the PMD driver ...\n");
	if (rte_pmd_init_all() < 0) {
		rte_panic("Cannot init PMD\n");
	}
#elif RTE_VERSION < RTE_VERSION_NUM(1, 8, 0, 0)
	if (rte_eal_pci_probe() < 0) {
		rte_panic("Cannot probe PCI\n");
	}
#endif /* RTE_VERSION_NUM */
}
int32_t
interfaceSetup(void)
{
	uint8_t portIndex = 0, portCount = rte_eth_dev_count();
	int32_t ret = 0, socket_id = -1;
	struct rte_eth_link link;

	for (portIndex = 0; portIndex < portCount; portIndex++) {
		/* fetch the socket id to which the port is mapped */
		for (ret = 0; ret < GTP_MAX_NUMANODE; ret++) {
			if (numaNodeInfo[ret].intfTotal) {
				if (numaNodeInfo[ret].intfAvail & (1 << portIndex)) {
					socket_id = ret;
					break;
				}
			}
		}

		memset(&link, 0x00, sizeof(struct rte_eth_link));

		ret = rte_eth_dev_configure(portIndex, 1, 1, &portConf);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Dev Configure\n");
			return -1;
		}

		ret = rte_eth_rx_queue_setup(portIndex, 0, RTE_TEST_RX_DESC_DEFAULT,
			0, NULL, numaNodeInfo[socket_id].rx[0]);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Rx Queue Setup\n");
			return -2;
		}

		ret = rte_eth_tx_queue_setup(portIndex, 0, RTE_TEST_TX_DESC_DEFAULT,
			0, NULL);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Tx Queue Setup\n");
			return -3;
		}

		rte_eth_promiscuous_enable(portIndex);
		rte_eth_dev_start(portIndex);
	}

	return 0;
}
static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create("mempool", app.pool_size,
		app.pool_buffer_size, app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	app.indirect_pool = rte_mempool_create("indirect mempool",
		app.pool_size,
		sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
		app.pool_cache_size, 0,
		NULL, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (app.indirect_pool == NULL)
		rte_panic("Cannot create indirect mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create("mempool msg", app.msg_pool_size,
		app.msg_pool_buffer_size, app.msg_pool_cache_size, 0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(), 0);
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
static void
app_init_kni(struct app_params *app)
{
	if (app->n_pktq_kni == 0)
		return;

	rte_panic("Can not init KNI without librte_kni support.\n");
}
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
		/* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
		VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
		mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
		if (unlikely(mbuf == NULL))
			rte_panic("EOP desc does not point to a valid mbuf");
		else
			rte_pktmbuf_free(mbuf);

		txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;

		/* Mark the txd for which tcd was generated as completed */
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
			comp_ring->next2proc);
		completed++;
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
static int
app_install_coremask(uint64_t core_mask)
{
	uint32_t n_cores, i;

	for (n_cores = 0, i = 0; i < RTE_MAX_LCORE; i++)
		if (app.cores[i].core_type != APP_CORE_NONE)
			n_cores++;

	if (n_cores != app.n_cores) {
		rte_panic("Number of cores in COREMASK should be %u instead "
			"of %u\n", n_cores, app.n_cores);
		return -1;
	}

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		uint32_t core_id;

		if (app.cores[i].core_type == APP_CORE_NONE)
			continue;

		core_id = __builtin_ctzll(core_mask);
		core_mask &= ~(1LLU << core_id);

		app.cores[i].core_id = core_id;
	}

	return 0;
}
/*
 * Resize allocated memory.
 */
void *
rte_realloc(void *ptr, size_t size, unsigned align)
{
	if (ptr == NULL)
		return rte_malloc(NULL, size, align);

	struct malloc_elem *elem = malloc_elem_from_data(ptr);
	if (elem == NULL)
		rte_panic("Fatal error: memory corruption detected\n");

	size = CACHE_LINE_ROUNDUP(size), align = CACHE_LINE_ROUNDUP(align);
	/* check alignment matches first, and if ok, see if we can resize block */
	if (RTE_PTR_ALIGN(ptr, align) == ptr &&
			malloc_elem_resize(elem, size) == 0)
		return ptr;

	/* either alignment is off, or we have no room to expand,
	 * so move data. */
	void *new_ptr = rte_malloc(NULL, size, align);
	if (new_ptr == NULL)
		return NULL;
	const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD;
	rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
	rte_free(ptr);

	return new_ptr;
}
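/*
 * A minimal usage sketch (assumption, not from the original source):
 * growing a cache-line-aligned array with rte_realloc() and handling the
 * failure path, where the original block is left untouched. The helper
 * name is hypothetical.
 */
static uint32_t *
grow_table(uint32_t *table, size_t new_entries)
{
	uint32_t *p = rte_realloc(table, new_entries * sizeof(*table),
		RTE_CACHE_LINE_SIZE);

	if (p == NULL) {
		/* no room anywhere: release the old block ourselves
		 * (or keep using it, depending on the caller's policy) */
		rte_free(table);
		return NULL;
	}
	return p;
}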
void
app_main_loop_worker(void)
{
	struct app_mbuf_array *worker_mbuf;
	uint32_t i;

	RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
		rte_lcore_id());

	worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (worker_mbuf == NULL)
		rte_panic("Worker thread: cannot allocate buffer space\n");

	for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
		int ret;

		ret = rte_ring_sc_dequeue_bulk(app.rings_rx[i],
			(void **) worker_mbuf->array,
			app.burst_size_worker_read);

		if (ret == -ENOENT)
			continue;

		do {
			ret = rte_ring_sp_enqueue_bulk(app.rings_tx[i ^ 1],
				(void **) worker_mbuf->array,
				app.burst_size_worker_write);
		} while (ret < 0);
	}
}
int
control_callback_setup(const char *cb, uint8_t nb_ports)
{
	char cmd[CTRL_CBK_MAX_SIZE];
	int len;
	char ether1[ETHER_ADDR_FMT_SIZE];
	uint8_t port;
	const char *argv[4];

	len = snprintf(cmd, CTRL_CBK_MAX_SIZE, "%s", cb);

	for (port = 0; port < nb_ports; port++) {
		ether_format_addr(ether1, ETHER_ADDR_FMT_SIZE,
			&ports_eth_addr[port]);

		len += snprintf(&cmd[len], CTRL_CBK_MAX_SIZE - len, " dpdk%d %s",
			port, ether1);
		if (len >= CTRL_CBK_MAX_SIZE) {
			rte_panic("control callback too long");
		}
	}

	argv[0] = "/bin/sh";
	argv[1] = "-c";
	argv[2] = cmd;
	argv[3] = NULL;

	RTE_LOG(INFO, PKTJ_CTRL1, "executing command `%s`\n", cmd);

	return posix_spawn(NULL, "/bin/sh", NULL, NULL,
		__DECONST(char **, argv), environ);
}
/* reattach the shared config at the exact memory location the primary
 * process has mapped it */
static void
rte_eal_config_reattach(void)
{
	struct rte_mem_config *mem_config;
	void *rte_mem_cfg_addr;

	if (internal_config.no_shconf)
		return;

	/* save the address primary process has mapped shared config to */
	rte_mem_cfg_addr = (void *) (uintptr_t) rte_config.mem_config->mem_cfg_addr;

	/* unmap original config */
	munmap(rte_config.mem_config, sizeof(struct rte_mem_config));

	/* remap the config at proper address */
	mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
		sizeof(*mem_config), PROT_READ | PROT_WRITE, MAP_SHARED,
		mem_cfg_fd, 0);
	close(mem_cfg_fd);
	if (mem_config == MAP_FAILED || mem_config != rte_mem_cfg_addr)
		rte_panic("Cannot mmap memory for rte_config\n");

	rte_config.mem_config = mem_config;
}
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

	addr = mcfg->memzone[idx].addr;
	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
static void
app_init_mbuf_pools(void)
{
	unsigned socket, lcore;

	/* Init the buffer pools */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket++) {
		char name[32];

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
		printf("Creating the mbuf pool for socket %u ...\n", socket);
		app.pools[socket] = rte_pktmbuf_pool_create(name,
			APP_DEFAULT_MEMPOOL_BUFFERS,
			APP_DEFAULT_MEMPOOL_CACHE_SIZE,
			0, APP_DEFAULT_MBUF_DATA_SIZE, socket);
		if (app.pools[socket] == NULL) {
			rte_panic("Cannot create mbuf pool on socket %u\n", socket);
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore++) {
		if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].pool = app.pools[socket];
	}
}
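/*
 * A minimal usage sketch (assumption, not from the original source):
 * each lcore allocates packet buffers from the pool assigned to its NUMA
 * socket above and returns them with rte_pktmbuf_free(). The helper name
 * is hypothetical.
 */
static struct rte_mbuf *
app_alloc_pkt(unsigned lcore)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(app.lcore_params[lcore].pool);

	if (m == NULL)
		RTE_LOG(WARNING, USER1, "mbuf pool exhausted on lcore %u\n",
			lcore);
	return m;
}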
static void
app_ports_check_link(void)
{
	uint32_t all_ports_up, i;

	all_ports_up = 1;

	for (i = 0; i < app.n_ports; i++) {
		struct rte_eth_link link;
		uint8_t port;

		port = (uint8_t) app.ports[i];
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port, &link);
		RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
			port, link.link_speed / 1000,
			link.link_status ? "UP" : "DOWN");

		if (link.link_status == 0)
			all_ports_up = 0;
	}

	if (all_ports_up == 0)
		rte_panic("Some NIC ports are DOWN\n");
}
int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_kni_device_info dev_info;
	uint32_t slot_id;

	if (!kni || !kni->in_use)
		return -1;

	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
		return -1;
	}

	/* mbufs in all fifo should be released, except request/response */
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->rx_q);
	kni_free_fifo(kni->alloc_q);
	kni_free_fifo(kni->free_q);

	slot_id = kni->slot_id;

	/* Memset the KNI struct */
	memset(kni, 0, sizeof(struct rte_kni));

	/* Release memzone */
	if (slot_id > kni_memzone_pool.max_ifaces) {
		rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
			slot_id, kni_memzone_pool.max_ifaces);
	}
	kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);

	return 0;
}
static void
set_mempool(struct rte_mempool *mempool)
{
#if (!PER_CORE)
	int initialized[RTE_MAX_NUMA_NODES];
	for (int i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		initialized[i] = 0;
	}
#endif
	if (mempool == NULL) {
		rte_panic("Got a NULL mempool");
	}

	/* Loop through all cores, to see if any of them belong to this
	 * socket. */
	for (int i = 0; i < RTE_MAX_LCORE; i++) {
		int sid = rte_lcore_to_socket_id(i);
#if (!PER_CORE)
		if (!initialized[sid]) {
#endif
			struct rte_mbuf *mbuf = NULL;
#if (PER_CORE)
			pframe_pool[i] = mempool;
#else
			pframe_pool[sid] = mempool;
#endif
			/* Initialize mbuf template */
#if PER_CORE
			mbuf = rte_pktmbuf_alloc(pframe_pool[i]);
			if (mbuf == NULL) {
				rte_panic("Bad mbuf");
			}
			mbuf_template[i] = *mbuf;
			rte_pktmbuf_free(mbuf);
#else
			mbuf = rte_pktmbuf_alloc(pframe_pool[sid]);
			if (mbuf == NULL || mbuf->next != NULL ||
			    mbuf->pool == NULL) {
				rte_panic("Bad mbuf");
			}
			mbuf_template[sid] = *mbuf;
			rte_pktmbuf_free(mbuf);
#endif
#if (!PER_CORE)
			initialized[sid] = 1;
		}
#endif
	}
}
static void
udpi_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, MEMPOOL, "Creating the mbuf pool ...\n");
	udpi.pool = rte_mempool_create("mempool", udpi.pool_size,
		udpi.pool_buffer_size, udpi.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (NULL == udpi.pool) {
		rte_panic("Cannot create mbuf pool\n");
	}

	/* Init the indirect buffer pool */
	/*RTE_LOG(INFO, MEMPOOL, "Creating the indirect mbuf pool ...\n");
	udpi.indirect_pool = rte_mempool_create("indirect mempool",
		udpi.pool_size, sizeof(struct rte_mbuf), udpi.pool_cache_size,
		0, NULL, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
	if (NULL == udpi.indirect_pool) {
		rte_panic("Cannot create indirect mbuf pool\n");
	}*/

	/* Init the message buffer pool */
	RTE_LOG(INFO, MEMPOOL, "Creating the message mbuf pool ...\n");
	udpi.msg_pool = rte_mempool_create("msg mempool ", udpi.msg_pool_size,
		udpi.msg_pool_buffer_size, udpi.msg_pool_cache_size, 0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(), 0);
	if (NULL == udpi.msg_pool) {
		rte_panic("Cannot create message mbuf pool\n");
	}

	return;
}
static void
app_init_pipelines(struct app_params *app)
{
	uint32_t p_id;

	for (p_id = 0; p_id < app->n_pipelines; p_id++) {
		struct app_pipeline_params *params =
			&app->pipeline_params[p_id];
		struct app_pipeline_data *data = &app->pipeline_data[p_id];
		struct pipeline_type *ptype;
		struct pipeline_params pp;

		APP_LOG(app, HIGH, "Initializing %s ...", params->name);

		ptype = app_pipeline_type_find(app, params->type);
		if (ptype == NULL)
			rte_panic("Init error: Unknown pipeline type \"%s\"\n",
				params->type);

		app_pipeline_params_get(app, params, &pp);

		/* Back-end */
		data->be = NULL;
		if (ptype->be_ops->f_init) {
			data->be = ptype->be_ops->f_init(&pp, (void *) app);
			if (data->be == NULL)
				rte_panic("Pipeline instance \"%s\" back-end "
					"init error\n", params->name);
		}

		/* Front-end */
		data->fe = NULL;
		if (ptype->fe_ops->f_init) {
			data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
			if (data->fe == NULL)
				rte_panic("Pipeline instance \"%s\" front-end "
					"init error\n", params->name);
		}

		data->ptype = ptype;

		data->timer_period = (rte_get_tsc_hz() *
			params->timer_period) / 100;
	}
}
int
main(int argc, char **argv)
{
	int ret;
	struct cmdline *cl;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_panic("Cannot init EAL\n");

	cl = cmdline_stdin_new(main_ctx, "example> ");
	if (cl == NULL)
		rte_panic("Cannot create cmdline instance\n");
	cmdline_interact(cl);
	cmdline_stdin_exit(cl);

	return 0;
}
void *
control_init(int32_t socket_id, unsigned events)
{
	struct netl_handle *netl_h;
	struct handle_res *res;

	netl_h = netl_create(events);
	if (netl_h == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize netlink socket");
		goto err;
	}

	neighbor4_struct[socket_id] = nei_create(socket_id);
	if (neighbor4_struct[socket_id] == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize neighbor4 struct");
		goto err;
	}

	neighbor6_struct[socket_id] = nei_create(socket_id);
	if (neighbor6_struct[socket_id] == NULL) {
		RTE_LOG(ERR, PKTJ_CTRL1, "Couldn't initialize neighbor6 struct");
		goto err;
	}

	netl_h->cb.addr4 = addr4;
	netl_h->cb.addr6 = addr6;
	netl_h->cb.neighbor4 = neighbor4;
	netl_h->cb.neighbor6 = neighbor6;
	netl_h->cb.route4 = route4;
	netl_h->cb.route6 = route6;
	netl_h->cb.link = eth_link;

	struct in_addr invalid_ip = {INADDR_ANY};
	struct in6_addr invalid_ip6 = IN6ADDR_ANY_INIT;

	if (add_invalid_neighbor4(neighbor4_struct[socket_id], &invalid_ip,
				  BAD_PORT) < 0) {
		RTE_LOG(ERR, PKTJ_CTRL1,
			"Couldn't add drop target in neighbor4 table");
		goto err;
	}

	if (add_invalid_neighbor6(neighbor6_struct[socket_id], &invalid_ip6,
				  BAD_PORT) < 0) {
		RTE_LOG(ERR, PKTJ_CTRL1,
			"Couldn't add drop target in neighbor6 table");
		goto err;
	}

	res = rte_malloc("handle-res", sizeof(*res), socket_id);
	res->socket_id = socket_id;
	res->netl_h = netl_h;
	return res;

err:
	rte_panic("failed to init control_main");
}
/*
 * Parse /sys/devices/system/cpu to get the number of physical and logical
 * processors on the machine. The function will fill the cpu_info
 * structure.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* in 1:1 mapping, record related cpu detected state */
		lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
		if (lcore_config[lcore_id].detected == 0) {
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, lcore 1:1 map to cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
		if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
			lcore_config[lcore_id].socket_id = 0;
#else
			rte_panic("Socket ID (%u) is greater than "
				"RTE_MAX_NUMA_NODES (%d)\n",
				lcore_config[lcore_id].socket_id,
				RTE_MAX_NUMA_NODES);
#endif
		RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
				"core %u on socket %u\n",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
		"Support maximum %u logical core(s) by configuration.\n",
		RTE_MAX_LCORE);
	RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);

	return 0;
}
int
tcpreplay_netport_init(struct arguments *args)
{
	int ret;
	uint8_t i = 0;	/* port index; a single port is assumed in this snippet */
	uint8_t rss_key[40];
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf;
	struct rte_eth_fdir fdir_conf;

	/* Retrieve and print device info */
	rte_eth_dev_info_get(i, &dev_info);
	printf("Name:%s\n\tDriver name: %s\n\tMax rx queues: %d\n\tMax tx queues: %d\n",
		dev_info.pci_dev->driver->name, dev_info.driver_name,
		dev_info.max_rx_queues, dev_info.max_tx_queues);
	printf("\tPCI Address: %04d:%02d:%02x:%01d\n",
		dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus,
		dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);

	/* Configure device with 1 RX queue and 1 TX queue */
	ret = rte_eth_dev_configure(i, 1, 1, &port_conf);
	if (ret < 0)
		rte_panic("Error configuring the port\n");

	/* Configure RX queue 0 of the device on the current NUMA socket;
	 * it takes its buffers from the mempool */
	ret = rte_eth_rx_queue_setup(i, 0, RX_QUEUE_SZ, rte_socket_id(),
		&rx_conf, pktmbuf_pool);
	if (ret < 0)
		FATAL_ERROR("Error configuring receiving queue\n");

	/* Map the RX queue to an element in the stats array */
	ret = rte_eth_dev_set_rx_queue_stats_mapping(i, 0, 0);
	if (ret < 0)
		FATAL_ERROR("Error configuring receiving queue stats\n");

	/* Configure the TX queue of the device on the current NUMA socket.
	 * Mandatory even if only RX is used */
	ret = rte_eth_tx_queue_setup(i, 0, TX_QUEUE_SZ, rte_socket_id(), &tx_conf);
	if (ret < 0)
		FATAL_ERROR("Error configuring transmitting queue. Errno: %d (%d bad arg, %d no mem)\n",
			-ret, EINVAL, ENOMEM);

	/* Start device */
	ret = rte_eth_dev_start(i);
	if (ret < 0)
		FATAL_ERROR("Cannot start port\n");

	/* Enable receipt in promiscuous mode for an Ethernet device */
	rte_eth_promiscuous_enable(i);

	/* Print link status */
	rte_eth_link_get_nowait(i, &link);
	if (link.link_status)
		printf("\tPort %d Link Up - speed %u Mbps - %s\n", (uint8_t)i,
			(unsigned)link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			"full-duplex" : "half-duplex");
	else
		printf("\tPort %d Link Down\n", (uint8_t)i);

	/* Print RSS support; not reliable, because a NIC may accept an RSS
	 * configuration in rte_eth_dev_configure without supporting
	 * rte_eth_dev_rss_hash_conf_get */
	rss_conf.rss_key = rss_key;
	ret = rte_eth_dev_rss_hash_conf_get(i, &rss_conf);
	if (ret == 0)
		printf("\tDevice supports RSS\n");
	else
		printf("\tDevice DOES NOT support RSS\n");

	/* Print Flow Director support */
	ret = rte_eth_dev_fdir_get_infos(i, &fdir_conf);
	if (ret == 0)
		printf("\tDevice supports Flow Director\n");
	else
		printf("\tDevice DOES NOT support Flow Director\n");

	if (args)
		return 1;
	return 1;
}
int
main(int argc, char **argv)
{
	int c;
	int ret;
	int sp_sc;
	unsigned socket_io;

	/* initialize EAL first */
	ret = rte_eal_init(argc, argv);
	argc -= ret;
	argv += ret;

	sp_sc = 1;
	bulk_size = 1;
	while ((c = getopt(argc, argv, "sm:b:w:")) != -1) {
		switch (c) {
		case 's':
			sp_sc = 1;
			break;
		case 'm':
			sp_sc = 0;
			nb_producers = atoi(optarg);
			break;
		case 'b':
			bulk_size = atoi(optarg);
			break;
		case 'w':
			work_cycles = atoi(optarg);
			break;
		case '?':
			break;
		}
	}

	setlocale(LC_NUMERIC, "");

	socket_io = rte_lcore_to_socket_id(rte_get_master_lcore());

	ring = rte_ring_create(ring_name, ring_size, socket_io,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (ring == NULL) {
		rte_panic("Cannot create ring");
	}

	if (sp_sc) {
		printf("[MASTER] Single Producer/Consumer\n");
		printf("[MASTER] Bulk size: %d\n", bulk_size);
		driver_sp_sc();
	} else {
		printf("[MASTER] Number of Producers/Consumers: %d\n", nb_producers);
		printf("[MASTER] Bulk size: %d\n", bulk_size);
		driver_mp_mc();
	}

	rte_eal_mp_wait_lcore();

	return 0;
}
void
eal_thread_init_master(unsigned lcore_id)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* set CPU affinity */
	if (eal_thread_set_affinity() < 0)
		rte_panic("cannot set affinity\n");
}