int
main(int argc, char **argv)
{
        int32_t ret;
        uint8_t lcore_id;

        /* Install the signal handler before anything else starts. */
        signal(SIGINT, (void *)app_print);

        clrscr();

        /* Must be called before rte_eal_init(). */
        (void)rte_set_application_usage_hook(app_usage);

        init_probe(&probe);

        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Failed in rte_eal_init\n");
        argc -= ret;
        argv += ret;

        ret = app_parse_args(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid arguments\n");

        app_init(&probe);

        /* Launch the probe loop on every worker lcore. */
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                rte_eal_remote_launch(launch_probe, NULL, lcore_id);
        }

        rte_delay_ms(5000);     /* wait for the lcores to start up */

        clrscr();
        app_logo(8, 0, APP_NAME);
        //process_hashtable();

        /* Wait for all of the cores to stop running and exit. */
        rte_eal_mp_wait_lcore();

        return 0;
}
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and NF processes.
 * Each NF needs one RX queue.
 */
static int
init_shm_rings(void)
{
        unsigned i;
        unsigned socket_id;
        const char *rq_name;
        const char *tq_name;
        const char *msg_q_name;
        const unsigned ringsize = NF_QUEUE_RINGSIZE;
        const unsigned msgringsize = NF_MSG_QUEUE_SIZE;

        /* The nfs array covers all possible NFs and is allocated with
         * calloc semantics, so every field starts zeroed and nothing
         * reads garbage.
         * TODO plopreiato, move to creation when a NF starts */
        for (i = 0; i < MAX_NFS; i++) {
                /* Create an RX queue for each NF */
                socket_id = rte_socket_id();
                rq_name = get_rx_queue_name(i);
                tq_name = get_tx_queue_name(i);
                msg_q_name = get_msg_queue_name(i);
                nfs[i].instance_id = i;
                nfs[i].rx_q = rte_ring_create(rq_name, ringsize, socket_id,
                                RING_F_SC_DEQ); /* multi prod, single cons */
                nfs[i].tx_q = rte_ring_create(tq_name, ringsize, socket_id,
                                RING_F_SC_DEQ); /* multi prod, single cons */
                nfs[i].msg_q = rte_ring_create(msg_q_name, msgringsize,
                                socket_id,
                                RING_F_SC_DEQ); /* multi prod, single cons */

                if (nfs[i].rx_q == NULL)
                        rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for NF %u\n", i);
                if (nfs[i].tx_q == NULL)
                        rte_exit(EXIT_FAILURE, "Cannot create tx ring queue for NF %u\n", i);
                if (nfs[i].msg_q == NULL)
                        rte_exit(EXIT_FAILURE, "Cannot create msg queue for NF %u\n", i);
        }
        return 0;
}
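/*
 * A hedged sketch of the name helpers assumed above (get_tx_queue_name()
 * and get_msg_queue_name() would follow the same pattern). The "MProc_..."
 * template is an assumption; the only hard requirement is that NF processes
 * build exactly the same string when they look their rings up.
 */
static const char *
get_rx_queue_name(unsigned id)
{
        static char buffer[RTE_RING_NAMESIZE];

        snprintf(buffer, sizeof(buffer), "MProc_NF_%u_RX", id);
        return buffer;
}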
static void
configure_tx_buffer(uint8_t port_id, uint16_t size)
{
        int ret;

        /* Initialize TX buffers */
        tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
                        RTE_ETH_TX_BUFFER_SIZE(size), 0,
                        rte_eth_dev_socket_id(port_id));
        if (tx_buffer[port_id] == NULL)
                rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
                                (unsigned) port_id);

        rte_eth_tx_buffer_init(tx_buffer[port_id], size);

        ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
                        flush_tx_error_callback, (void *)(intptr_t)port_id);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Cannot set error callback for "
                                "tx buffer on port %u\n", (unsigned) port_id);
}
static void
setup_shared_variables(void)
{
        const struct rte_memzone *qw_memzone;

        qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
        if (qw_memzone == NULL)
                rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");

        quota = qw_memzone->addr;
        /* The watermark is the second int in the memzone. "+ 1" on an
         * unsigned int pointer advances by sizeof(int) bytes; the original
         * "+ sizeof(int)" scaled the offset twice and overshot. */
        low_watermark = (unsigned int *) qw_memzone->addr + 1;
}
/*
 * Create flow distributor table which will contain all the flows
 * that will be distributed among the nodes
 */
static void
create_flow_distributor_table(void)
{
        uint8_t socket_id = rte_socket_id();

        /* create table */
        efd_table = rte_efd_create("flow table", num_flows * 2,
                        sizeof(uint32_t), 1 << socket_id, socket_id);
        if (efd_table == NULL)
                rte_exit(EXIT_FAILURE, "Problem creating the flow table\n");
}
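/*
 * A hedged sketch of how the table above might be populated: keys are the
 * uint32_t flow IDs declared at creation time, values are target node IDs.
 * num_flows and num_nodes are assumed to be defined elsewhere.
 */
static void
populate_flow_distributor_table(void)
{
        unsigned int i;
        uint8_t socket_id = rte_socket_id();

        for (i = 0; i < num_flows; i++) {
                uint32_t key = i;

                if (rte_efd_update(efd_table, socket_id, (void *)&key,
                                (efd_value_t)(i % num_nodes)) < 0)
                        rte_exit(EXIT_FAILURE, "Unable to add flow %u\n", i);
        }
}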
void
sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
{
        const char *name;
        const struct acl4_rules *rules_out, *rules_in;
        unsigned nb_out_rules, nb_in_rules;

        if (ctx == NULL)
                rte_exit(EXIT_FAILURE, "NULL context.\n");

        if (ctx->sp_ipv4_in != NULL)
                rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already "
                                "initialized\n", socket_id);

        if (ctx->sp_ipv4_out != NULL)
                rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already "
                                "initialized\n", socket_id);

        if (ep == 0) {
                rules_out = acl4_rules_in;
                nb_out_rules = RTE_DIM(acl4_rules_in);
                rules_in = acl4_rules_out;
                nb_in_rules = RTE_DIM(acl4_rules_out);
        } else if (ep == 1) {
                rules_out = acl4_rules_out;
                nb_out_rules = RTE_DIM(acl4_rules_out);
                rules_in = acl4_rules_in;
                nb_in_rules = RTE_DIM(acl4_rules_in);
        } else
                rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
                                "Only 0 or 1 supported.\n", ep);

        name = "sp_ipv4_in";
        ctx->sp_ipv4_in = (struct sp_ctx *)acl4_init(name, socket_id,
                        rules_in, nb_in_rules);

        name = "sp_ipv4_out";
        ctx->sp_ipv4_out = (struct sp_ctx *)acl4_init(name, socket_id,
                        rules_out, nb_out_rules);
}
/* create memory configuration in shared/mmap memory. Take out
 * a write lock on the memsegs, so we can auto-detect primary/secondary.
 * This means we never close the file while running (auto-close on exit).
 * We also don't lock the whole file, so that in future we can use read-locks
 * on other parts, e.g. memzones, to detect if there are running secondary
 * processes. */
static void
rte_eal_config_create(void)
{
        void *rte_mem_cfg_addr;
        int retval;

        const char *pathname = eal_runtime_config_path();

        if (internal_config.no_shconf)
                return;

        /* map the config before hugepage address so that we don't waste a page */
        if (internal_config.base_virtaddr != 0)
                rte_mem_cfg_addr = (void *)
                        RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
                        sizeof(struct rte_mem_config), sysconf(_SC_PAGE_SIZE));
        else
                rte_mem_cfg_addr = NULL;

        if (mem_cfg_fd < 0) {
                mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
                if (mem_cfg_fd < 0)
                        rte_panic("Cannot open '%s' for rte_mem_config\n",
                                        pathname);
        }

        retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
        if (retval < 0) {
                close(mem_cfg_fd);
                rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
        }

        retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
        if (retval < 0) {
                close(mem_cfg_fd);
                rte_exit(EXIT_FAILURE,
                        "Cannot create lock on '%s'. Is another primary "
                        "process running?\n", pathname);
        }

        rte_mem_cfg_addr = mmap(rte_mem_cfg_addr,
                        sizeof(*rte_config.mem_config),
                        PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);

        if (rte_mem_cfg_addr == MAP_FAILED) {
                rte_panic("Cannot mmap memory for rte_config\n");
        }
        memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
        rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;

        /* store address of the config in the config itself so that secondary
         * processes could later map the config into this exact location */
        rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
}
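/*
 * A hedged sketch of the secondary-process counterpart: instead of creating
 * and truncating the file, a secondary process maps the existing config
 * read-only and lets EAL remap it read-write later. Details vary by DPDK
 * release; this mirrors the general shape of rte_eal_config_attach().
 */
static void
rte_eal_config_attach(void)
{
        struct rte_mem_config *mem_config;
        const char *pathname = eal_runtime_config_path();

        if (internal_config.no_shconf)
                return;

        if (mem_cfg_fd < 0) {
                mem_cfg_fd = open(pathname, O_RDWR);
                if (mem_cfg_fd < 0)
                        rte_panic("Cannot open '%s' for rte_mem_config\n",
                                        pathname);
        }

        /* map it as read-only first; the primary owns the write lock */
        mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
                        PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
        if (mem_config == MAP_FAILED)
                rte_panic("Cannot mmap memory for rte_config\n");

        rte_config.mem_config = mem_config;
}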
static void
process_pkts(struct rte_mbuf *buf[], int n)
{
        int i;
        int ret;
        uint32_t ft[5];
        unsigned char *payload;
        int len;

        for (i = 0; i < n; i++) {
#ifdef EXEC_MBUF_PA_CNT
                uint32_t lcoreid = rte_lcore_id();
                uint32_t *count;
                struct rte_hash *h = lcore_args[lcoreid].pa_ht;

                /* Count how often each physical buffer address is seen. */
                if (rte_hash_lookup_data(h,
                                (const void *)&(buf[i]->buf_physaddr),
                                (void **)&count) >= 0) {
                        *count = *count + 1;
                } else {
                        if (pacnt_hash_add(h,
                                        (const void *)&(buf[i]->buf_physaddr),
                                        1) < 0) {
                                rte_exit(EINVAL,
                                        "pacnt hash add failed in lcore %d\n",
                                        lcoreid);
                        }
                }
#endif

#if defined(EXEC_PC) || defined(EXEC_HASH)
                parse_packet_to_tuple(buf[i], ft);
#ifdef EXEC_PC
                ret = packet_classifier_search(ft);
                if (ret < 0) {
                        fprintf(stderr, "packet classifying failed!\n");
                }
#else
                ret = hash_table_lkup((void *)ft);
#endif
#endif

#ifdef EXEC_CRC
                calc_chk_sum(buf[i]);
#endif

#ifdef EXEC_DPI
                ret = get_payload(buf[i], &payload, &len);
                if (ret < 0) {
                        fprintf(stderr, "packet get payload failed!\n");
                        continue;
                }
                ret = dpi_engine_exec(payload, len);
#endif
        }
}
int
main(int argc, char *argv[])
{
        int eal_init_ret = rte_eal_init(argc, argv);
        if (eal_init_ret < 0) {
                rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
        }

        FLAGS_logtostderr = 1;
        google::InitGoogleLogging(argv[0]);
        ::testing::InitGoogleTest(&argc, argv);

        return RUN_ALL_TESTS();
}
struct onvm_service_chain *
onvm_sc_create(void)
{
        struct onvm_service_chain *chain;

        chain = rte_calloc("ONVM_service_chain", 1,
                        sizeof(struct onvm_service_chain), 0);
        if (chain == NULL) {
                rte_exit(EXIT_FAILURE,
                        "Cannot allocate memory for service chain\n");
        }

        return chain;
}
int
MAIN(int argc, char **argv)
{
        int ret;
        struct cmdline *cl;

        rte_set_log_level(RTE_LOG_INFO);

        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

        setup_shared_variables();

        cl = cmdline_stdin_new(qwctl_ctx, "qwctl> ");
        if (cl == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");

        cmdline_interact(cl);
        cmdline_stdin_exit(cl);

        return 0;
}
/**
 * CALLED BY NF:
 * Create a new nf_info struct for this NF.
 * Pass a unique tag for this NF.
 */
static struct onvm_nf_info *
ovnm_nf_info_init(const char *tag)
{
        void *mempool_data;
        struct onvm_nf_info *info;

        if (rte_mempool_get(nf_info_mp, &mempool_data) < 0) {
                rte_exit(EXIT_FAILURE, "Failed to get client info memory\n");
        }

        if (mempool_data == NULL) {
                rte_exit(EXIT_FAILURE, "Client info struct not allocated\n");
        }

        info = (struct onvm_nf_info *) mempool_data;
        info->instance_id = initial_instance_id;
        info->service_id = service_id;
        info->status = NF_WAITING_FOR_ID;
        info->tag = tag;

        return info;
}
void
setup_shared_variables(void)
{
        const struct rte_memzone *qw_memzone;

        qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
                        2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
        if (qw_memzone == NULL)
                rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

        quota = qw_memzone->addr;
        /* Same layout as the lookup side: the watermark is the second int
         * in the memzone, hence "+ 1" in pointer arithmetic rather than
         * "+ sizeof(int)". */
        low_watermark = (unsigned int *) qw_memzone->addr + 1;
}
static uint8_t
parse_portid(const char *portid_str)
{
        char *end;
        unsigned id;

        id = strtoul(portid_str, &end, 10);

        /* Valid ids are 0 .. RTE_MAX_ETHPORTS - 1, so the check must be
         * ">=", not ">". */
        if (end == portid_str || *end != '\0' || id >= RTE_MAX_ETHPORTS)
                rte_exit(EXIT_FAILURE, "Invalid port number\n");

        return (uint8_t) id;
}
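/*
 * Example use, assuming a "port0 port1" application command line left over
 * after rte_eal_init() has consumed the EAL arguments (names hypothetical):
 */
if (argc != 3)
        rte_exit(EXIT_FAILURE, "Usage: %s [EAL options] -- port0 port1\n",
                        argv[0]);
port0 = parse_portid(argv[1]);
port1 = parse_portid(argv[2]);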
/**
 * Allocate a rte_ring for newly created NFs
 */
static int
init_info_queue(void)
{
        incoming_msg_queue = rte_ring_create(_MGR_MSG_QUEUE_NAME, MAX_NFS,
                        rte_socket_id(),
                        RING_F_SC_DEQ); /* MP enqueue (default), SC dequeue */

        if (incoming_msg_queue == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create incoming msg queue\n");

        return 0;
}
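/*
 * A minimal sketch of how the manager side might drain this queue; the
 * message struct and handler are assumptions. rte_ring_dequeue() is used
 * because its signature is stable across DPDK releases.
 */
static void
handle_incoming_msgs(void)
{
        void *msg;

        while (rte_ring_dequeue(incoming_msg_queue, &msg) == 0)
                process_nf_msg((struct onvm_nf_msg *)msg); /* hypothetical */
}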
/*
 * Parse the port mask given as argument (hexadecimal string) and set
 * the global configuration of forwarding ports.
 */
static void
parse_fwd_portmask(const char *portmask)
{
        char *end;
        unsigned long long int pm;

        /* parse hexadecimal string */
        end = NULL;
        pm = strtoull(portmask, &end, 16);
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
                rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
        else
                set_fwd_ports_mask((uint64_t) pm);
}
int
main(int argc, char **argv)
{
        /* EAL args */
        int num_args = rte_eal_init(argc, argv);
        if (num_args < 0)
                rte_exit(EXIT_FAILURE, "init failed\n");
        argc -= num_args;
        argv += num_args;

        /* our args: [-s] port1 port2 */
        uint8_t port1, port2;
        int opt = getopt(argc, argv, "s");      /* getopt() returns int */
        bool simple_tx = opt == 's';
        if (simple_tx) {
                printf("Requesting simple tx path\n");
                argc--;
                argv++;
        } else {
                printf("Requesting full-featured tx path\n");
        }
        if (argc != 3) {
                printf("usage: [-s] port1 port2\n");
                return -1;
        }
        port1 = atoi(argv[1]);
        port2 = atoi(argv[2]);
        printf("Using ports %d and %d\n", port1, port2);

        if (!config_port(port1, simple_tx)) return -1;
        if (!config_port(port2, simple_tx)) return -1;

        struct rte_mempool *pool = make_mempool();

        uint64_t sent = 0;
        uint64_t next_print = rte_get_tsc_hz();
        uint64_t last_sent = 0;
        while (true) {
                sent += send_pkts(port1, pool);
                sent += send_pkts(port2, pool);
                uint64_t time = rte_rdtsc();
                if (time >= next_print) {
                        /* cast before dividing: both operands are uint64_t,
                         * so the original expression truncated to whole
                         * seconds */
                        double elapsed = (double) (time - next_print
                                        + rte_get_tsc_hz()) / rte_get_tsc_hz();
                        uint64_t pkts = sent - last_sent;
                        printf("Packet rate: %.2f Mpps\n",
                                        (double) pkts / elapsed / 1000000);
                        next_print = time + rte_get_tsc_hz();
                        last_sent = sent;
                }
        }
        return 0;
}
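/*
 * make_mempool() is not shown in this snippet; a plausible minimal version
 * built on the standard helper (element count and cache size are guesses):
 */
static struct rte_mempool *
make_mempool(void)
{
        struct rte_mempool *pool = rte_pktmbuf_pool_create("mempool",
                        8191 /* n */, 256 /* cache size */, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (pool == NULL)
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        return pool;
}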
static int
init_mbufpool(unsigned nb_mbuf)
{
        int socketid;
        unsigned lcore_id;
        char s[64];

        for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) == 0)
                        continue;

                socketid = rte_lcore_to_socket_id(lcore_id);
                if (socketid >= NB_SOCKETS) {
                        rte_exit(EXIT_FAILURE,
                                "Socket %d of lcore %u is out of range %d\n",
                                socketid, lcore_id, NB_SOCKETS);
                }
                if (mbufpool[socketid] == NULL) {
                        snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
                        mbufpool[socketid] = rte_mempool_create(s, nb_mbuf,
                                MBUF_SIZE, MEMPOOL_CACHE_SIZE,
                                sizeof(struct rte_pktmbuf_pool_private),
                                rte_pktmbuf_pool_init, NULL,
                                rte_pktmbuf_init, NULL,
                                socketid, 0);
                        if (mbufpool[socketid] == NULL)
                                rte_exit(EXIT_FAILURE,
                                        "Cannot init mbuf pool on socket %d\n",
                                        socketid);
                        else
                                printf("Allocated mbuf pool on socket %d\n",
                                        socketid);
                }
        }
        return 0;
}
int
main(int argc, char *argv[])
{
        int arg_offset;
        const char *progname = argv[0];

        if ((arg_offset = onvm_nf_init(argc, argv, NF_TAG)) < 0)
                return -1;
        argc -= arg_offset;
        argv += arg_offset;

        if (parse_app_args(argc, argv, progname) < 0)
                rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");

        struct rte_mempool *pktmbuf_pool;
        struct rte_mbuf *pkts[NUM_PKTS];
        int i;

        pktmbuf_pool = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (pktmbuf_pool == NULL) {
                rte_exit(EXIT_FAILURE, "Cannot find mbuf pool!\n");
        }

        printf("Creating %d packets to send to %d\n", NUM_PKTS, destination);
        for (i = 0; i < NUM_PKTS; i++) {
                struct onvm_pkt_meta *pmeta;

                pkts[i] = rte_pktmbuf_alloc(pktmbuf_pool);
                pmeta = onvm_get_pkt_meta(pkts[i]);
                pmeta->destination = destination;
                pmeta->action = ONVM_NF_ACTION_TONF;
                pkts[i]->port = 3;
                pkts[i]->hash.rss = i;
                onvm_nf_return_pkt(pkts[i]);
        }

        onvm_nf_run(nf_info, &packet_handler);
        printf("If we reach here, program is ending\n");
        return 0;
}
/**
 * Set up the DPDK rings which will be used to pass packets, via
 * pointers, between the multi-process server and client processes.
 * Each client needs one RX queue.
 */
static int
init_shm_rings(void)
{
        unsigned i;
        unsigned socket_id;
        const char *q_name;
        const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;

        clients = rte_malloc("client details",
                        sizeof(*clients) * num_rings, 0);
        if (clients == NULL)
                rte_exit(EXIT_FAILURE,
                        "Cannot allocate memory for client program details\n");

        for (i = 0; i < num_rings; i++) {
                /* Create an RX queue for each client */
                socket_id = rte_socket_id();
                q_name = get_ring_name(i);
                if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                        /* the primary created the rings; just look them up */
                        clients[i].rx_q = rte_ring_lookup(q_name);
                } else {
                        clients[i].rx_q = rte_ring_create(q_name,
                                        ringsize, socket_id,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ); /* single prod, single cons */
                }
                if (clients[i].rx_q == NULL)
                        rte_exit(EXIT_FAILURE,
                                "Cannot create rx ring queue for client %u\n", i);
        }
        /* note: the original snippet returned an uninitialized retval here */
        return 0;
}
/*
 * rte_eal_mp_remote_launch() blocks the calling thread, so dpdk_main()
 * is run in its own thread.
 */
static void *
dpdk_init_worker(CC_UNUSED void *arg)
{
        const char *argv[5] = { "vrouter", "-m", HUGEPAGE_MEMORY_SZ,
                                "-w", PCI_DEVICE_BDF };

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
        if (dpdk_main(0, 5, (char **)argv) < 0)
                rte_exit(EXIT_FAILURE, "DPDK Main failed\n");
#pragma GCC diagnostic pop

        return NULL;
}
static void
bond_port_init(struct rte_mempool *mbuf_pool)
{
        int retval;
        uint8_t i;

        retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB,
                        0 /* SOCKET_ID_ANY */);
        if (retval < 0)
                rte_exit(EXIT_FAILURE, "Failed to create bond port\n");

        BOND_PORT = (uint8_t)retval;

        retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
        if (retval != 0)
                rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
                                BOND_PORT, retval);

        /* RX setup */
        retval = rte_eth_rx_queue_setup(BOND_PORT, 0, RTE_RX_DESC_DEFAULT,
                        rte_eth_dev_socket_id(BOND_PORT), NULL, mbuf_pool);
        if (retval < 0)
                rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
                                BOND_PORT, retval);

        /* TX setup */
        retval = rte_eth_tx_queue_setup(BOND_PORT, 0, RTE_TX_DESC_DEFAULT,
                        rte_eth_dev_socket_id(BOND_PORT), NULL);
        if (retval < 0)
                rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
                                BOND_PORT, retval);

        for (i = 0; i < slaves_count; i++) {
                if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
                        rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
                                        slaves[i], BOND_PORT);
        }

        retval = rte_eth_dev_start(BOND_PORT);
        if (retval < 0)
                rte_exit(retval, "Start port %d failed (res=%d)",
                                BOND_PORT, retval);

        rte_eth_promiscuous_enable(BOND_PORT);

        struct ether_addr addr;
        rte_eth_macaddr_get(BOND_PORT, &addr);
        printf("Port %u MAC: ", (unsigned)BOND_PORT);
        PRINT_MAC(addr);
        printf("\n");
}
void
init_ring(int lcore_id, uint8_t port_id)
{
        struct rte_ring *ring;
        char ring_name[RTE_RING_NAMESIZE];

        rte_snprintf(ring_name, RTE_RING_NAMESIZE, "core%d_port%d",
                        lcore_id, port_id);

        ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
                        RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (ring == NULL)
                rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

        /* signal "almost full" once the ring is 80% occupied */
        rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);

        rings[lcore_id][port_id] = ring;
}
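/*
 * With the legacy watermark API used above (removed in DPDK 17.05), the
 * enqueue path reports the threshold via its return value; a hedged sketch:
 */
int ret = rte_ring_enqueue(rings[lcore_id][port_id], (void *)pkt);
if (ret == -EDQUOT) {
        /* enqueued, but ring is above the 80% watermark: apply backpressure */
} else if (ret == -ENOBUFS) {
        /* ring full: drop or retry */
}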
int
kni_init(void)
{
        int i;
        char poolname[32];

        for (i = 0; i < get_numa_nodes(); i++) {
                memset(poolname, 0, sizeof(poolname));
                snprintf(poolname, sizeof(poolname) - 1, "kni_mbuf_pool_%d", i);

                kni_mbuf_pool[i] = rte_pktmbuf_pool_create(poolname,
                                KNI_MBUFPOOL_ELEMS, KNI_MBUFPOOL_CACHE_SIZE,
                                0, RTE_MBUF_DEFAULT_BUF_SIZE, i);
                if (!kni_mbuf_pool[i])
                        rte_exit(EXIT_FAILURE,
                                "Failed to create pktmbuf_pool for kni.\n");
        }

        return EDPVS_OK;
}
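/*
 * A hedged sketch of how one of these pools might later back a KNI device;
 * the vEth naming and NULL ops are illustrative, not taken from the source:
 */
struct rte_kni_conf conf;
struct rte_kni *kni;

memset(&conf, 0, sizeof(conf));
snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id); /* hypothetical */
conf.group_id = port_id;
conf.mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE;

kni = rte_kni_alloc(kni_mbuf_pool[rte_socket_id()], &conf, NULL);
if (kni == NULL)
        rte_exit(EXIT_FAILURE, "Failed to allocate KNI device\n");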
int
lpm_entry_add(unsigned int ip, int depth, int next_hop, int socketid)
{
        int ret = -1;

        /* populate the LPM table */
        ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
                        ip, depth, next_hop);
        if (ret < 0) {
                rte_exit(EXIT_FAILURE,
                        "Unable to add entry to the l3fwd LPM table on socket %d\n",
                        socketid);
        }

        printf("LPM: Adding route 0x%08x / %d (%d)\n",
                        (unsigned)ip, depth, next_hop);

        return 1;
}
/*
 * This function parses the backend config. It takes the filename
 * and fills up the backend_server array. This includes the mac and ip
 * address of the backend servers
 */
static int
parse_backend_config(void)
{
        int ret, temp, i;
        char ip[32];
        char mac[32];
        FILE *cfg;

        cfg = fopen(lb->cfg_filename, "r");
        if (cfg == NULL) {
                rte_exit(EXIT_FAILURE, "Error opening server '%s' config\n",
                                lb->cfg_filename);
        }

        ret = fscanf(cfg, "%*s %d", &temp);
        if (temp <= 0) {
                rte_exit(EXIT_FAILURE,
                        "Error parsing config, need at least one server configuration\n");
        }
        lb->server_count = temp;

        lb->server = (struct backend_server *)rte_malloc("backend server info",
                        sizeof(struct backend_server) * lb->server_count, 0);
        if (lb->server == NULL) {
                rte_exit(EXIT_FAILURE,
                        "Malloc failed, can't allocate server information\n");
        }

        for (i = 0; i < lb->server_count; i++) {
                ret = fscanf(cfg, "%s %s", ip, mac);
                if (ret != 2) {
                        rte_exit(EXIT_FAILURE, "Invalid backend config structure\n");
                }

                ret = onvm_pkt_parse_ip(ip, &lb->server[i].d_ip);
                if (ret < 0) {
                        rte_exit(EXIT_FAILURE, "Error parsing config IP address #%d\n", i);
                }

                ret = onvm_pkt_parse_mac(mac, lb->server[i].d_addr_bytes);
                if (ret < 0) {
                        rte_exit(EXIT_FAILURE, "Error parsing config MAC address #%d\n", i);
                }
        }

        fclose(cfg);

        printf("\nARP config:\n");
        for (i = 0; i < lb->server_count; i++) {
                printf("%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8 " ",
                        lb->server[i].d_ip & 0xFF,
                        (lb->server[i].d_ip >> 8) & 0xFF,
                        (lb->server[i].d_ip >> 16) & 0xFF,
                        (lb->server[i].d_ip >> 24) & 0xFF);
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                        lb->server[i].d_addr_bytes[0], lb->server[i].d_addr_bytes[1],
                        lb->server[i].d_addr_bytes[2], lb->server[i].d_addr_bytes[3],
                        lb->server[i].d_addr_bytes[4], lb->server[i].d_addr_bytes[5]);
        }

        return ret;
}
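/*
 * Given the fscanf patterns above, the config file is assumed to look like
 * this (a hypothetical example: a skipped token plus a count, then one
 * "ip mac" pair per backend):
 *
 *   LIST 2
 *   10.0.0.1 aa:bb:cc:dd:ee:01
 *   10.0.0.2 aa:bb:cc:dd:ee:02
 */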
/*
 * set up output ports so that all traffic on port gets sent out
 * its paired port. Index using actual port numbers since that is
 * what comes in the mbuf structure.
 */
static void
configure_output_ports(const struct port_info *ports)
{
        int i;

        if (ports->num_ports > RTE_MAX_ETHPORTS)
                rte_exit(EXIT_FAILURE,
                        "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n",
                        (unsigned)RTE_MAX_ETHPORTS);

        /* note: an odd trailing port is left unpaired */
        for (i = 0; i < ports->num_ports - 1; i += 2) {
                uint8_t p1 = ports->id[i];
                uint8_t p2 = ports->id[i + 1];

                output_ports[p1] = p2;
                output_ports[p2] = p1;

                configure_tx_buffer(p1, MBQ_CAPACITY);
                configure_tx_buffer(p2, MBQ_CAPACITY);
        }
}
static int
app_load_cfg_profile(const char *profile)
{
        if (profile == NULL)
                return 0;

        struct cfg_file *cfg_file = cfg_load(profile, 0);
        if (cfg_file == NULL)
                rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n",
                                profile);

        cfg_load_port(cfg_file, &port_params);
        cfg_load_subport(cfg_file, subport_params);
        cfg_load_pipe(cfg_file, pipe_profiles);

        cfg_close(cfg_file);

        return 0;
}
static struct vhost_scsi_ctrlr *
vhost_scsi_ctrlr_construct(const char *ctrlr_name)
{
        int ret;
        struct vhost_scsi_ctrlr *ctrlr;
        char *path;
        char cwd[PATH_MAX];

        /* always use current directory */
        path = getcwd(cwd, PATH_MAX);
        if (!path) {
                fprintf(stderr, "Cannot get current working directory\n");
                return NULL;
        }
        snprintf(dev_pathname, sizeof(dev_pathname), "%s/%s", path, ctrlr_name);

        /* remove a stale socket file left over from a previous run */
        if (access(dev_pathname, F_OK) != -1) {
                if (unlink(dev_pathname) != 0)
                        rte_exit(EXIT_FAILURE, "Cannot remove %s.\n",
                                        dev_pathname);
        }

        if (rte_vhost_driver_register(dev_pathname, 0) != 0) {
                fprintf(stderr, "socket %s already exists\n", dev_pathname);
                return NULL;
        }

        fprintf(stdout, "socket file: %s created\n", dev_pathname);

        ret = rte_vhost_driver_set_features(dev_pathname, VIRTIO_SCSI_FEATURES);
        if (ret != 0) {
                fprintf(stderr, "Set vhost driver features failed\n");
                return NULL;
        }

        ctrlr = rte_zmalloc(NULL, sizeof(*ctrlr), RTE_CACHE_LINE_SIZE);
        if (!ctrlr)
                return NULL;

        rte_vhost_driver_callback_register(dev_pathname,
                        &vhost_scsi_device_ops);

        return ctrlr;
}
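/*
 * Registration alone does not start listening on the socket; the caller is
 * presumably expected to start it afterwards, e.g. (a sketch, error
 * handling elided; rte_vhost_driver_start() exists in DPDK 17.05+):
 *
 *         rte_vhost_driver_start(dev_pathname);
 */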
int
lpm_table_init(int socketid)
{
        char s[64];
        struct rte_lpm_config config_ipv4;

        /* create the LPM table */
        config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
        config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
        config_ipv4.flags = 0;

        snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
        ipv4_l3fwd_lpm_lookup_struct[socketid] =
                rte_lpm_create(s, socketid, &config_ipv4);
        if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
                rte_exit(EXIT_FAILURE,
                        "Unable to create the l3fwd LPM table on socket %d\n",
                        socketid);

        return 1;
}
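/*
 * Together with lpm_entry_add() above, a datapath lookup might look like
 * this (a hedged sketch; ip_dst is the IPv4 destination in host byte order):
 */
uint32_t next_hop;

if (rte_lpm_lookup(ipv4_l3fwd_lpm_lookup_struct[socketid],
                ip_dst, &next_hop) == 0) {
        /* hit: forward via next_hop */
} else {
        /* miss: drop or use a default route */
}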