/*
 * Fill the ivshmem metadata with the maximum number of memzone entries,
 * then check that adding one more entry is rejected.
 */
static int
test_ivshmem_create_lots_of_memzones(void)
{
	const struct rte_memzone *mz;
	char name[IVSHMEM_NAME_LEN];
	int idx;

	ASSERT(rte_ivshmem_metadata_create(METADATA_NAME) == 0,
			"Failed to create metadata");

	/* fill every available metadata slot */
	for (idx = 0; idx < RTE_LIBRTE_IVSHMEM_MAX_ENTRIES; idx++) {
		snprintf(name, sizeof(name), "mz_%i", idx);

		mz = rte_memzone_reserve(name, CACHE_LINE_SIZE,
				SOCKET_ID_ANY, 0);
		ASSERT(mz != NULL, "Failed to reserve memzone");

		ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) == 0,
				"Failed to add memzone");
	}

	/* the metadata is now full - one more memzone must be refused */
	mz = rte_memzone_reserve("one too many", CACHE_LINE_SIZE,
			SOCKET_ID_ANY, 0);
	ASSERT(mz != NULL, "Failed to reserve memzone");

	ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) < 0,
			"Metadata should have been full");

	return 0;
}
/*
 * Create the IPv4 SA context table in a memzone named "<name>_<socket_id>".
 *
 * Returns a pointer to the sa_ctx inside the memzone, or NULL with
 * rte_errno set on allocation failure.  Note: the memzone contents are
 * NOT zeroed here.
 */
static struct sa_ctx *
sa_ipv4_create(const char *name, int socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	unsigned mz_size;
	const struct rte_memzone *mz;

	/* NOTE(review): socket_id is int but printed with %u; SOCKET_ID_ANY
	 * (-1) would yield a surprising name - confirm callers pass >= 0 */
	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries\n",
			IPSEC_SA_MAX_ENTRIES);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		/* fix: rte_errno holds positive errno values, so ENOMEM,
		 * not -ENOMEM as before */
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}
static inline int rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data, int socket_id) { char mz_name[RTE_EVENTDEV_NAME_MAX_LEN]; const struct rte_memzone *mz; int n; /* Generate memzone name */ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id); if (n >= (int)sizeof(mz_name)) return -EINVAL; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { mz = rte_memzone_reserve(mz_name, sizeof(struct rte_eventdev_data), socket_id, 0); } else mz = rte_memzone_lookup(mz_name); if (mz == NULL) return -ENOMEM; *data = mz->addr; if (rte_eal_process_type() == RTE_PROC_PRIMARY) memset(*data, 0, sizeof(struct rte_eventdev_data)); return 0; }
/*
 * Check that rte_memzone_reserve() takes memory from the smallest free
 * segment that can satisfy the request, rather than the first one found.
 */
static int
test_memzone_reserve_memory_in_smallest_segment(void)
{
	const struct rte_memzone *mz;
	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
	size_t min_len, prev_min_len;
	const struct rte_config *config;
	int i;

	config = rte_eal_get_configuration();

	min_ms = NULL; /*< smallest segment */
	prev_min_ms = NULL; /*< second smallest segment */

	/* find two smallest segments */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		ms = &config->mem_config->free_memseg[i];

		/* free_memseg[] is filled from the front: NULL addr marks end */
		if (ms->addr == NULL)
			break;
		if (ms->len == 0)
			continue;

		if (min_ms == NULL)
			min_ms = ms;
		else if (min_ms->len > ms->len) {
			/* set last smallest to second last */
			prev_min_ms = min_ms;
			/* set new smallest */
			min_ms = ms;
		} else if ((prev_min_ms == NULL) ||
				(prev_min_ms->len > ms->len))
			prev_min_ms = ms;
	}

	if (min_ms == NULL || prev_min_ms == NULL) {
		printf("Smallest segments not found!\n");
		return -1;
	}

	/* snapshot lengths before the reservation mutates them */
	min_len = min_ms->len;
	prev_min_len = prev_min_ms->len;

	/* try reserving a memzone in the smallest memseg */
	mz = rte_memzone_reserve("smallest_mz", RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		printf("Failed to reserve memory from smallest memseg!\n");
		return -1;
	}
	/* expectation: second-smallest untouched, smallest shrank by one
	 * cache line.  NOTE(review): with '&&' the error fires only when
	 * BOTH conditions fail - looks like '||' may have been intended;
	 * confirm before changing test semantics. */
	if (prev_min_ms->len != prev_min_len &&
			min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
		printf("Reserved memory from wrong memseg!\n");
		return -1;
	}

	return 0;
}
/** * Main init function for the multi-process server app, * calls subfunctions to do each stage of the initialisation. */ int init(int argc, char *argv[]) { int retval; const struct rte_memzone *mz; uint8_t i, total_ports; /* init EAL, parsing EAL args */ retval = rte_eal_init(argc, argv); if (retval < 0) return -1; argc -= retval; argv += retval; /* initialise the nic drivers */ retval = init_drivers(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n"); /* get total number of ports */ total_ports = rte_eth_dev_count(); /* set up array for port data */ mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports), rte_socket_id(), NO_FLAGS); if (mz == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n"); memset(mz->addr, 0, sizeof(*ports)); ports = mz->addr; /* parse additional, application arguments */ retval = parse_app_args(total_ports, argc, argv); if (retval != 0) return -1; /* initialise mbuf pools */ retval = init_mbuf_pools(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n"); /* now initialise the ports we will use */ for (i = 0; i < ports->num_ports; i++) { retval = init_port(ports->id[i]); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", (unsigned)i); } check_all_ports_link_status(ports->num_ports, (~0x0)); /* initialise the client queues/rings for inter-eu comms */ init_shm_rings(); return 0; }
/*
 * Look the memzone up by name first; only reserve a new one when no
 * zone with that name exists yet (idempotent reserve).
 */
static const struct rte_memzone *
kni_memzone_reserve(const char *name, size_t len, int socket_id,
		unsigned flags)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz != NULL)
		return mz;

	return rte_memzone_reserve(name, len, socket_id, flags);
}
/* create the ring */ struct rte_ring * rte_ring_create(const char *name, unsigned count, int socket_id, unsigned flags) { char mz_name[RTE_MEMZONE_NAMESIZE]; struct rte_ring *r; struct rte_tailq_entry *te; const struct rte_memzone *mz; ssize_t ring_size; int mz_flags = 0; struct rte_ring_list* ring_list = NULL; ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list); ring_size = rte_ring_get_memsize(count); if (ring_size < 0) { rte_errno = ring_size; return NULL; } te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0); if (te == NULL) { RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n"); rte_errno = ENOMEM; return NULL; } snprintf(mz_name, sizeof(mz_name), "%s%s", RTE_RING_MZ_PREFIX, name); rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); /* reserve a memory zone for this ring. If we can't get rte_config or * we are secondary process, the memzone_reserve function will set * rte_errno for us appropriately - hence no check in this this function */ mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags); if (mz != NULL) { r = mz->addr; /* no need to check return value here, we already checked the * arguments above */ rte_ring_init(r, name, count, flags); te->data = (void *) r; TAILQ_INSERT_TAIL(ring_list, te, next); } else { r = NULL; RTE_LOG(ERR, RING, "Cannot reserve memory\n"); rte_free(te); } rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); return r; }
/*
 * Reserve a named memzone and return its zeroed virtual address,
 * or NULL when the reservation fails.
 */
void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id,
		unsigned flags)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve(name, len, socket_id, flags);
	if (mz == NULL)
		return NULL;

	memset(mz->addr, 0, len);
	return mz->addr;
}
/*
 * Reserve a memzone holding two adjacent ints shared between processes:
 * the quota followed by the low watermark.  Exits on failure.
 */
void
setup_shared_variables(void)
{
	const struct rte_memzone *qw_memzone;

	qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
			2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
	if (qw_memzone == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));

	quota = qw_memzone->addr;
	/* fix: the watermark is the second int in the zone, i.e. one
	 * element past the quota.  The old expression
	 * "(unsigned int *)addr + sizeof(int)" advanced sizeof(int)
	 * ELEMENTS (16 bytes on common ABIs), pointing outside the
	 * 2*sizeof(int) zone. */
	low_watermark = (unsigned int *)qw_memzone->addr + 1;
}
/*
 * reserve an extra memory zone and make it available for use by a particular
 * heap. This reserves the zone and sets a dummy malloc_elem header at the end
 * to prevent overflow. The rest of the zone is added to free list as a single
 * large free block
 */
static int
malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
{
	const unsigned mz_flags = 0;
	const size_t block_size = get_malloc_memzone_size();
	/* ensure the data we want to allocate will fit in the memzone */
	const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
	const struct rte_memzone *mz = NULL;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	/* heaps live in an array inside mem_config, so the pointer
	 * difference is both the heap index and its NUMA socket id */
	unsigned numa_socket = heap - mcfg->malloc_heaps;

	size_t mz_size = min_size;
	if (mz_size < block_size)
		mz_size = block_size;

	/* mz_count++ guarantees a unique memzone name per heap */
	char mz_name[RTE_MEMZONE_NAMESIZE];
	snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
			numa_socket, heap->mz_count++);

	/* try getting a block. if we fail and we don't need as big a block
	 * as given in the config, we can shrink our request and try again
	 */
	do {
		mz = rte_memzone_reserve(mz_name, mz_size, numa_socket,
				mz_flags);
		if (mz == NULL)
			mz_size /= 2;
	} while (mz == NULL && mz_size > min_size);
	if (mz == NULL)
		return -1;

	/* allocate the memory block headers, one at end, one at start */
	struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
	struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
			mz_size - MALLOC_ELEM_OVERHEAD);
	/* end header must sit on a cache-line boundary; round it down */
	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);

	const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
	malloc_elem_init(start_elem, heap, mz, elem_size);
	malloc_elem_mkend(end_elem, start_elem);
	malloc_elem_free_list_insert(start_elem);

	/* increase heap total size by size of new memzone */
	heap->total_size+=mz_size - MALLOC_ELEM_OVERHEAD;
	return 0;
}
/*
 * Reserve the "snstore" key/value memzone and clear every pair's key
 * so all slots start out empty.
 */
void
init_snstore(void)
{
	const struct rte_memzone *zone;
	struct snstore_kvpair *pairs;
	int slot;

	zone = rte_memzone_reserve("snstore",
			sizeof(struct snstore_kvpair) * SNSTORE_PAIRS,
			SOCKET_ID_ANY,
			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
	assert(zone);

	pairs = (struct snstore_kvpair *)zone->addr;
	for (slot = 0; slot < SNSTORE_PAIRS; slot++)
		pairs[slot].key[0] = '\0';
}
/*
 * Verify that the same memzone cannot be added to an ivshmem metadata
 * file twice: the first add succeeds, the duplicate must fail.
 */
static int
test_ivshmem_create_duplicate_memzone(void)
{
	const struct rte_memzone *zone;

	ASSERT(rte_ivshmem_metadata_create(METADATA_NAME) == 0,
			"Failed to create metadata");

	zone = rte_memzone_reserve("mz", CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
	ASSERT(zone != NULL, "Failed to reserve memzone");

	ASSERT(rte_ivshmem_metadata_add_memzone(zone, METADATA_NAME) == 0,
			"Failed to add memzone");
	ASSERT(rte_ivshmem_metadata_add_memzone(zone, METADATA_NAME) < 0,
			"Added the same memzone twice");

	return 0;
}
/*
 * Reserving a memzone of (size_t)-1 bytes can never succeed; make sure
 * the API refuses it (and that no such zone pre-exists).
 */
static int
test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
{
	const struct rte_memzone *zone;

	zone = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
	if (zone != NULL) {
		printf("zone_size_bigger_than_the_maximum has been reserved\n");
		return -1;
	}

	zone = rte_memzone_reserve("zone_size_bigger_than_the_maximum",
			(size_t)-1, SOCKET_ID_ANY, 0);
	if (zone != NULL) {
		printf("It is impossible to reserve such big a memzone\n");
		return -1;
	}

	return 0;
}
/*
 * create the ring: reserve a memzone holding the ring header plus count
 * slots, initialise the producer/consumer state inline, and link the new
 * ring into the global ring tail queue.  Returns NULL with rte_errno set
 * on failure.
 */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
		unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	struct rte_ring *r;
	const struct rte_memzone *mz;
	size_t ring_size;
	int mz_flags = 0;
	struct rte_ring_list* ring_list = NULL;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring) & CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) & CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) & CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) & CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) & CACHE_LINE_MASK) != 0);
#endif

	/* check that we have an initialised tail queue */
	if ((ring_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_RING,
			rte_ring_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* count must be a power of 2 */
	if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK )) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, RING, "Requested size is invalid, must be power of 2, and "
				"do not exceed the size limit %u\n", RTE_RING_SZ_MASK);
		return NULL;
	}

	rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name);
	/* NOTE(review): this multiplication is unchecked; count is already
	 * bounded by RTE_RING_SZ_MASK above, but confirm that bound keeps
	 * the product from overflowing size_t on 32-bit targets */
	ring_size = count * sizeof(void *) + sizeof(struct rte_ring);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* reserve a memory zone for this ring. If we can't get rte_config or
	 * we are secondary process, the memzone_reserve function will set
	 * rte_errno for us appropriately - hence no check in this function */
	mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
	if (mz != NULL) {
		r = mz->addr;

		/* init the ring structure */
		memset(r, 0, sizeof(*r));
		rte_snprintf(r->name, sizeof(r->name), "%s", name);
		r->flags = flags;
		r->prod.watermark = count;
		r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
		r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
		r->prod.size = r->cons.size = count;
		/* count is a power of two, so count-1 works as index mask */
		r->prod.mask = r->cons.mask = count-1;
		r->prod.head = r->cons.head = 0;
		r->prod.tail = r->cons.tail = 0;

		TAILQ_INSERT_TAIL(ring_list, r, next);
	} else {
		r = NULL;
		RTE_LOG(ERR, RING, "Cannot reserve memory\n");
	}
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return r;
}
/*
 * This function is run in the secondary instance to test that creation of
 * objects fails in a secondary
 */
static int
run_object_creation_tests(void)
{
	const unsigned flags = 0;
	const unsigned size = 1024;
	const unsigned elt_size = 64;
	const unsigned cache_size = 64;
	const unsigned priv_data_size = 32;

	printf("### Testing object creation - expect lots of mz reserve errors!\n");

	/* each check: create must fail in a secondary, but the object is
	 * acceptable if a lookup finds one created by the primary */
	rte_errno = 0;
	if ((rte_memzone_reserve("test_mz", size, rte_socket_id(),
			flags) == NULL) &&
			(rte_memzone_lookup("test_mz") == NULL)) {
		printf("Error: unexpected return value from rte_memzone_reserve\n");
		return -1;
	}
	printf("# Checked rte_memzone_reserve() OK\n");

	rte_errno = 0;
	if ((rte_ring_create(
			"test_ring", size, rte_socket_id(), flags) == NULL) &&
			(rte_ring_lookup("test_ring") == NULL)){
		printf("Error: unexpected return value from rte_ring_create()\n");
		return -1;
	}
	printf("# Checked rte_ring_create() OK\n");

	rte_errno = 0;
	if ((rte_mempool_create("test_mp", size, elt_size, cache_size,
			priv_data_size, NULL, NULL, NULL, NULL,
			rte_socket_id(), flags) == NULL) &&
			(rte_mempool_lookup("test_mp") == NULL)){
		printf("Error: unexpected return value from rte_mempool_create()\n");
		return -1;
	}
	printf("# Checked rte_mempool_create() OK\n");

#ifdef RTE_LIBRTE_HASH
	const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
	rte_errno=0;
	if ((rte_hash_create(&hash_params) != NULL) &&
			(rte_hash_find_existing(hash_params.name) == NULL)){
		printf("Error: unexpected return value from rte_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_hash_create() OK\n");

	const struct rte_fbk_hash_params fbk_params = { .name = "test_fbk_mp_hash" };
	rte_errno=0;
	if ((rte_fbk_hash_create(&fbk_params) != NULL) &&
			(rte_fbk_hash_find_existing(fbk_params.name) == NULL)){
		printf("Error: unexpected return value from rte_fbk_hash_create()\n");
		return -1;
	}
	printf("# Checked rte_fbk_hash_create() OK\n");
#endif

#ifdef RTE_LIBRTE_LPM
	rte_errno=0;
	struct rte_lpm_config config;

	/* NOTE(review): max_rules set to the socket id looks like a sloppy
	 * API conversion - confirm whether a real rule count was intended.
	 * Harmless here since creation is expected to fail in a secondary. */
	config.max_rules = rte_socket_id();
	config.number_tbl8s = 256;
	config.flags = 0;
	if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
			(rte_lpm_find_existing("test_lpm") == NULL)){
		printf("Error: unexpected return value from rte_lpm_create()\n");
		return -1;
	}
	printf("# Checked rte_lpm_create() OK\n");
#endif

	/* Run a test_pci call */
	if (test_pci() != 0) {
		printf("PCI scan failed in secondary\n");
		if (getuid() == 0) /* pci scans can fail as non-root */
			return -1;
	} else
		printf("PCI scan succeeded in secondary\n");

	return 0;
}

/* if called in a primary process, just spawns off a secondary process to
 * run validation tests - which brings us right back here again...
 * if called in a secondary process, this runs a series of API tests to check
 * how things run in a secondary instance.
 */
int
test_mp_secondary(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (!test_pci_run) {
			printf("=== Running pre-requisite test of test_pci\n");
			test_pci();
			printf("=== Requisite test done\n");
		}
		return run_secondary_instances();
	}

	printf("IN SECONDARY PROCESS\n");

	return run_object_creation_tests();
}

static struct test_command multiprocess_cmd = {
	.command = "multiprocess_autotest",
	.callback = test_mp_secondary,
};
REGISTER_TEST_COMMAND(multiprocess_cmd);
/** * Main init function for the multi-process distributor app, * calls subfunctions to do each stage of the initialisation. */ int init(int argc, char *argv[]) { int retval; const struct rte_memzone *mz; uint8_t i, total_ports; /* init EAL, parsing EAL args */ retval = rte_eal_init(argc, argv); if (retval < 0) return -1; argc -= retval; argv += retval; /* get total number of ports */ total_ports = rte_eth_dev_count(); /* set up array for port data */ mz = rte_memzone_reserve(MZ_SHARED_INFO, sizeof(*info), rte_socket_id(), NO_FLAGS); if (mz == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone " "for port information\n"); memset(mz->addr, 0, sizeof(*info)); info = mz->addr; /* parse additional, application arguments */ retval = parse_app_args(total_ports, argc, argv); if (retval != 0) return -1; /* initialise mbuf pools */ retval = init_mbuf_pools(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n"); /* now initialise the ports we will use */ for (i = 0; i < info->num_ports; i++) { retval = init_port(info->id[i]); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", (unsigned int) i); } check_all_ports_link_status(info->num_ports, (~0x0)); /* initialise the node queues/rings for inter-eu comms */ init_shm_rings(); /* Create the flow distributor table */ create_flow_distributor_table(); /* Populate the flow distributor table */ populate_flow_distributor_table(); /* Share the total number of nodes */ info->num_nodes = num_nodes; /* Share the total number of flows */ info->num_flows = num_flows; return 0; }
/**
 * Main init function for the multi-process server app,
 * calls subfunctions to do each stage of the initialisation.
 */
int
init(int argc, char *argv[])
{
	int retval;
	const struct rte_memzone *mz;
	unsigned i, total_ports;

	/* init EAL, parsing EAL args */
	retval = rte_eal_init(argc, argv);
	if (retval < 0)
		return -1;
	argc -= retval;
	argv += retval;

	/* get total number of ports */
	total_ports = rte_eth_dev_count();

	/* set up array for port data: secondary processes attach to the
	 * zone the primary created; the primary reserves and zeroes it */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		mz = rte_memzone_lookup(MZ_PORT_INFO);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
		ports = mz->addr;
	} else /* RTE_PROC_PRIMARY */ {
		mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
				rte_socket_id(), NO_FLAGS);
		if (mz == NULL)
			rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
		memset(mz->addr, 0, sizeof(*ports));
		ports = mz->addr;
	}

	/* parse additional, application arguments */
	retval = parse_app_args(total_ports, argc, argv);
	if (retval != 0)
		return -1;

	/* initialise mbuf pools */
	retval = init_mbuf_pools();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");

	/* now initialise the ports we will use (primary only) */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		for (i = 0; i < ports->num_ports; i++) {
			retval = init_port(ports->id[i]);
			if (retval != 0)
				rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
						(unsigned)i);
		}
	}
	check_all_ports_link_status(ports->num_ports, (~0x0));

	/* initialise the client queues/rings for inter-eu comms */
	init_shm_rings();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		RTE_LOG(INFO, APP, "HOST SHARE MEM Init.\n");
		/* disabled host-shmem variant kept for reference:
		create metadata, output cmdline
		if (rte_hostshmem_metadata_create(HOSTSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create HOSTSHMEM metadata\n");
		if (rte_hostshmem_metadata_add_memzone(mz, HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to HOSTSHMEM metadata\n");
		if (rte_hostshmem_metadata_add_mempool(pktmbuf_pool,
				HOSTSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to HOSTSHMEM metadata\n");
		for (i = 0; i < num_clients; i++) {
			if (rte_hostshmem_metadata_add_ring(clients[i].rx_q,
					HOSTSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to HOSTSHMEM metadata\n",
						i);
		}
		generate_hostshmem_cmdline(HOSTSHMEM_METADATA_NAME);
		*/

		const struct rte_mem_config *mcfg;
		/* get pointer to global configuration */
		mcfg = rte_eal_get_configuration()->mem_config;

		/* dump all populated memory segments for diagnostics */
		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if (mcfg->memseg[i].addr == NULL)
				break;
			printf("Segment %u: phys:0x%"PRIx64", len:%zu, "
					"virt:%p, socket_id:%"PRId32", "
					"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
					"nrank:%"PRIx32"\n", i,
					mcfg->memseg[i].phys_addr,
					mcfg->memseg[i].len,
					mcfg->memseg[i].addr,
					mcfg->memseg[i].socket_id,
					mcfg->memseg[i].hugepage_sz,
					mcfg->memseg[i].nchannel,
					mcfg->memseg[i].nrank);
		}

		RTE_LOG(INFO, APP, "HOST SHARE MEM Init. done\n");

		RTE_LOG(INFO, APP, "IV SHARE MEM Init.\n");
		/* create metadata, output cmdline */
		if (rte_ivshmem_metadata_create(IVSHMEM_METADATA_NAME) < 0)
			rte_exit(EXIT_FAILURE, "Cannot create IVSHMEM metadata\n");
		/* mz here is still the port-info zone reserved above */
		if (rte_ivshmem_metadata_add_memzone(mz, IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add memzone to IVSHMEM metadata\n");
		if (rte_ivshmem_metadata_add_mempool(pktmbuf_pool,
				IVSHMEM_METADATA_NAME))
			rte_exit(EXIT_FAILURE, "Cannot add mbuf mempool to IVSHMEM metadata\n");
		for (i = 0; i < num_clients; i++) {
			if (rte_ivshmem_metadata_add_ring(clients[i].rx_q,
					IVSHMEM_METADATA_NAME) < 0)
				rte_exit(EXIT_FAILURE, "Cannot add ring client %d to IVSHMEM metadata\n",
						i);
		}
		generate_ivshmem_cmdline(IVSHMEM_METADATA_NAME);
		RTE_LOG(INFO, APP, "IV SHARE MEM Done.\n");
	}

	return 0;
}
/*
 * Check that reserving exactly the remaining length of the smallest free
 * memseg behaves as expected: with extra alignment the allocation must
 * come from another segment; without it the segment must be consumed.
 */
static int
test_memzone_reserve_remainder(void)
{
	const struct rte_memzone *mz1, *mz2;
	const struct rte_memseg *ms, *min_ms = NULL;
	size_t min_len;
	const struct rte_config *config;
	int i, align;

	min_len = 0;
	align = RTE_CACHE_LINE_SIZE;

	config = rte_eal_get_configuration();

	/* find minimum free contiguous length */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		ms = &config->mem_config->free_memseg[i];

		/* free_memseg[] is filled from the front: NULL addr marks end */
		if (ms->addr == NULL)
			break;
		if (ms->len == 0)
			continue;

		if (min_len == 0 || ms->len < min_len) {
			min_len = ms->len;
			min_ms = ms;

			/* find maximum alignment this segment is able to hold
			 * by doubling until the address bit is set.
			 * NOTE(review): 'align' is int; for a sufficiently
			 * aligned addr_64 the shift could overflow - confirm
			 * segment addresses bound this in practice. */
			align = RTE_CACHE_LINE_SIZE;
			while ((ms->addr_64 & (align-1)) == 0) {
				align <<= 1;
			}
		}
	}

	if (min_ms == NULL) {
		printf("Minimal sized segment not found!\n");
		return -1;
	}

	/* try reserving min_len bytes with alignment - this should not affect our
	 * memseg, the memory will be taken from a different one.
	 */
	mz1 = rte_memzone_reserve_aligned("reserve_remainder_1", min_len,
			SOCKET_ID_ANY, 0, align);
	if (mz1 == NULL) {
		printf("Failed to reserve %zu bytes aligned on %i bytes\n",
				min_len, align);
		return -1;
	}
	if (min_ms->len != min_len) {
		printf("Memseg memory should not have been reserved!\n");
		return -1;
	}

	/* try reserving min_len bytes with less alignment - this should fill up
	 * the segment.
	 */
	mz2 = rte_memzone_reserve("reserve_remainder_2", min_len,
			SOCKET_ID_ANY, 0);
	if (mz2 == NULL) {
		printf("Failed to reserve %zu bytes\n", min_len);
		return -1;
	}
	if (min_ms->len != 0) {
		printf("Memseg memory should have been reserved!\n");
		return -1;
	}

	return 0;
}
static int test_ivshmem_api_test(void) { const struct rte_memzone * mz; struct rte_mempool * mp; struct rte_ring * r; char buf[BUFSIZ]; memset(buf, 0, sizeof(buf)); r = rte_ring_create("ring", 1, SOCKET_ID_ANY, 0); mp = rte_mempool_create("mempool", 1, 1, 1, 1, NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); mz = rte_memzone_reserve("memzone", 64, SOCKET_ID_ANY, 0); ASSERT(r != NULL, "Failed to create ring"); ASSERT(mp != NULL, "Failed to create mempool"); ASSERT(mz != NULL, "Failed to reserve memzone"); /* try to create NULL metadata */ ASSERT(rte_ivshmem_metadata_create(NULL) < 0, "Created metadata with NULL name"); /* create valid metadata to do tests on */ ASSERT(rte_ivshmem_metadata_create(METADATA_NAME) == 0, "Failed to create metadata"); /* test adding memzone */ ASSERT(rte_ivshmem_metadata_add_memzone(NULL, NULL) < 0, "Added NULL memzone to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_memzone(NULL, METADATA_NAME) < 0, "Added NULL memzone"); ASSERT(rte_ivshmem_metadata_add_memzone(mz, NULL) < 0, "Added memzone to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_memzone(mz, NONEXISTENT_METADATA) < 0, "Added memzone to nonexistent metadata"); /* test adding ring */ ASSERT(rte_ivshmem_metadata_add_ring(NULL, NULL) < 0, "Added NULL ring to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_ring(NULL, METADATA_NAME) < 0, "Added NULL ring"); ASSERT(rte_ivshmem_metadata_add_ring(r, NULL) < 0, "Added ring to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_ring(r, NONEXISTENT_METADATA) < 0, "Added ring to nonexistent metadata"); /* test adding mempool */ ASSERT(rte_ivshmem_metadata_add_mempool(NULL, NULL) < 0, "Added NULL mempool to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_mempool(NULL, METADATA_NAME) < 0, "Added NULL mempool"); ASSERT(rte_ivshmem_metadata_add_mempool(mp, NULL) < 0, "Added mempool to NULL metadata"); ASSERT(rte_ivshmem_metadata_add_mempool(mp, NONEXISTENT_METADATA) < 0, "Added mempool to nonexistent metadata"); /* test creating command line */ 
ASSERT(rte_ivshmem_metadata_cmdline_generate(NULL, sizeof(buf), METADATA_NAME) < 0, "Written command line into NULL buffer"); ASSERT(strnlen(buf, sizeof(buf)) == 0, "Buffer is not empty"); ASSERT(rte_ivshmem_metadata_cmdline_generate(buf, 0, METADATA_NAME) < 0, "Written command line into small buffer"); ASSERT(strnlen(buf, sizeof(buf)) == 0, "Buffer is not empty"); ASSERT(rte_ivshmem_metadata_cmdline_generate(buf, sizeof(buf), NULL) < 0, "Written command line for NULL metadata"); ASSERT(strnlen(buf, sizeof(buf)) == 0, "Buffer is not empty"); ASSERT(rte_ivshmem_metadata_cmdline_generate(buf, sizeof(buf), NONEXISTENT_METADATA) < 0, "Writen command line for nonexistent metadata"); ASSERT(strnlen(buf, sizeof(buf)) == 0, "Buffer is not empty"); /* add stuff to config */ ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) == 0, "Failed to add memzone to valid config"); ASSERT(rte_ivshmem_metadata_add_ring(r, METADATA_NAME) == 0, "Failed to add ring to valid config"); ASSERT(rte_ivshmem_metadata_add_mempool(mp, METADATA_NAME) == 0, "Failed to add mempool to valid config"); /* create config */ ASSERT(rte_ivshmem_metadata_cmdline_generate(buf, sizeof(buf), METADATA_NAME) == 0, "Failed to write command-line"); /* check if something was written */ ASSERT(strnlen(buf, sizeof(buf)) != 0, "Buffer is empty"); /* make sure we don't segfault */ rte_ivshmem_metadata_dump(stdout, NULL); /* dump our metadata */ rte_ivshmem_metadata_dump(stdout, METADATA_NAME); return 0; }
/*
 * Allocates a completion ring with vmem and stats optionally also allocating
 * a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info
 * to not allocate them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
			    struct bnxt_tx_ring_info *tx_ring_info,
			    struct bnxt_rx_ring_info *rx_ring_info,
			    struct bnxt_cp_ring_info *cp_ring_info,
			    const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* byte offsets of each sub-region within the single memzone; every
	 * region is rounded up to a whole cache line */
	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						  tx_ring_struct->vmem_size) : 0;
	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				   rx_ring_struct->vmem_size) : 0;
	int cp_ring_start = rx_vmem_start + rx_vmem_len;
	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));
	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;
	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				   sizeof(struct rx_prod_pkt_bd)) : 0;
	int total_alloc_len = rx_ring_start + rx_ring_len;

	/* name encodes PCI address + queue index so it is unique per queue
	 * and can be re-found after a restart */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	/* reuse an existing zone (e.g. after restart) before reserving */
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name, total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL)
			return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	/* cp vmem region is zero-length; this points at the stats area */
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz->phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;

	return 0;
}
static int test_memzone(void) { const struct rte_memzone *memzone1; const struct rte_memzone *memzone2; const struct rte_memzone *memzone3; const struct rte_memzone *memzone4; const struct rte_memzone *mz; memzone1 = rte_memzone_reserve("testzone1", 100, SOCKET_ID_ANY, 0); memzone2 = rte_memzone_reserve("testzone2", 1000, 0, 0); memzone3 = rte_memzone_reserve("testzone3", 1000, 1, 0); memzone4 = rte_memzone_reserve("testzone4", 1024, SOCKET_ID_ANY, 0); /* memzone3 may be NULL if we don't have NUMA */ if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL) return -1; rte_memzone_dump(stdout); /* check cache-line alignments */ printf("check alignments and lengths\n"); if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0) return -1; if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0) return -1; if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0) return -1; if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0) return -1; if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0) return -1; if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 || memzone3->len == 0)) return -1; if (memzone4->len != 1024) return -1; /* check that zones don't overlap */ printf("check overlapping\n"); if (is_memory_overlap(memzone1->phys_addr, memzone1->len, memzone2->phys_addr, memzone2->len)) return -1; if (memzone3 != NULL && is_memory_overlap(memzone1->phys_addr, memzone1->len, memzone3->phys_addr, memzone3->len)) return -1; if (memzone3 != NULL && is_memory_overlap(memzone2->phys_addr, memzone2->len, memzone3->phys_addr, memzone3->len)) return -1; printf("check socket ID\n"); /* memzone2 must be on socket id 0 and memzone3 on socket 1 */ if (memzone2->socket_id != 0) return -1; if (memzone3 != NULL && memzone3->socket_id != 1) return -1; printf("test zone lookup\n"); mz = rte_memzone_lookup("testzone1"); if (mz != memzone1) return -1; printf("test duplcate zone name\n"); mz = rte_memzone_reserve("testzone1", 100, 
SOCKET_ID_ANY, 0); if (mz != NULL) return -1; printf("test reserving memzone with bigger size than the maximum\n"); if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0) return -1; printf("test reserving memory in smallest segments\n"); if (test_memzone_reserve_memory_in_smallest_segment() < 0) return -1; printf("test reserving memory in segments with smallest offsets\n"); if (test_memzone_reserve_memory_with_smallest_offset() < 0) return -1; printf("test memzone_reserve flags\n"); if (test_memzone_reserve_flags() < 0) return -1; printf("test alignment for memzone_reserve\n"); if (test_memzone_aligned() < 0) return -1; printf("test boundary alignment for memzone_reserve\n"); if (test_memzone_bounded() < 0) return -1; printf("test invalid alignment for memzone_reserve\n"); if (test_memzone_invalid_alignment() < 0) return -1; printf("test reserving amounts of memory equal to segment's length\n"); if (test_memzone_reserve_remainder() < 0) return -1; printf("test reserving the largest size memzone possible\n"); if (test_memzone_reserve_max() < 0) return -1; printf("test reserving the largest size aligned memzone possible\n"); if (test_memzone_reserve_max_aligned() < 0) return -1; return 0; }
/*
 * Create the mempool.
 *
 * Allocates a ring (named "MP_<name>") to hold free objects, then reserves
 * a single memzone large enough for the rte_mempool header, the private
 * data area and all n objects, computes per-object header/trailer padding,
 * initialises every object and enqueues it on the ring.
 *
 * Returns a pointer to the new mempool, or NULL with rte_errno set on
 * parameter/tailq errors.  NOTE(review): when the memzone reservation
 * fails after the ring was created, the ring is intentionally leaked (see
 * comment below) and NULL is returned without rte_errno being set here —
 * presumably rte_memzone_reserve() set it; confirm with callers.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_mempool *mp = NULL;
	struct rte_ring *r;
	const struct rte_memzone *mz;
	size_t mempool_size, total_elt_size;
	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
	int rg_flags = 0;
	uint32_t header_size, trailer_size;
	unsigned i;
	void *obj;

	/* compilation-time checks: these structs must stay cache-aligned */
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
			  CACHE_LINE_MASK) != 0);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
			  CACHE_LINE_MASK) != 0);
#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
			  CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
			  CACHE_LINE_MASK) != 0);
#endif

	/* check that we have an initialised tail queue */
	if (RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_MEMPOOL,
			rte_mempool_list) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* asked cache too big */
	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE){
		rte_errno = EINVAL;
		return NULL;
	}

	/* "no cache align" implies "no spread" */
	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
		flags |= MEMPOOL_F_NO_SPREAD;

	/* ring flags: single-producer/consumer if the pool is */
	if (flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);

	/* allocate the ring that will be used to store objects */
	/* Ring functions will return appropriate errors if we are
	 * running as a secondary process etc., so no checks made
	 * in this function for that condition */
	rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name);
	/* ring size must be a power of two and hold all n objects */
	r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id,
			rg_flags);
	if (r == NULL)
		goto exit;

	/*
	 * In header, we have at least the pointer to the pool, and
	 * optionally a 64 bits cookie.
	 */
	header_size = 0;
	header_size += sizeof(struct rte_mempool *); /* ptr to pool */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	header_size += sizeof(uint64_t); /* cookie */
#endif
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
		header_size = (header_size + CACHE_LINE_MASK) &
				(~CACHE_LINE_MASK);

	/* trailer contains the cookie in debug mode */
	trailer_size = 0;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	trailer_size += sizeof(uint64_t); /* cookie */
#endif
	/* element size is 8 bytes-aligned at least */
	elt_size = (elt_size + 7) & (~7);

	/* expand trailer to next cache line */
	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
		total_elt_size = header_size + elt_size + trailer_size;
		trailer_size += ((CACHE_LINE_SIZE -
				  (total_elt_size & CACHE_LINE_MASK)) &
				 CACHE_LINE_MASK);
	}

	/*
	 * increase trailer to add padding between objects in order to
	 * spread them across memory channels/ranks
	 */
	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
		unsigned new_size;
		new_size = optimize_object_size(header_size + elt_size +
						trailer_size);
		trailer_size = new_size - header_size - elt_size;
	}

	/* this is the size of an object, including header and trailer */
	total_elt_size = header_size + elt_size + trailer_size;

	/* reserve a memory zone for this mempool: private data is
	 * cache-aligned */
	private_data_size = (private_data_size +
			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
	mempool_size = total_elt_size * n +
		sizeof(struct rte_mempool) + private_data_size;
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);

	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);

	/*
	 * no more memory: in this case we lose the previously reserved
	 * space for the ring as we cannot free it
	 */
	if (mz == NULL)
		goto exit;

	/* init the mempool structure */
	mp = mz->addr;
	memset(mp, 0, sizeof(*mp));
	rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
	mp->phys_addr = mz->phys_addr;
	mp->ring = r;
	mp->size = n;
	mp->flags = flags;
	mp->elt_size = elt_size;
	mp->header_size = header_size;
	mp->trailer_size = trailer_size;
	mp->cache_size = cache_size;
	mp->cache_flushthresh = (uint32_t)
		(cache_size * CACHE_FLUSHTHRESH_MULTIPLIER);
	mp->private_data_size = private_data_size;

	/* call the initializer */
	if (mp_init)
		mp_init(mp, mp_init_arg);

	/* fill the headers and trailers, and add objects in ring;
	 * objects are laid out contiguously after the private data area */
	obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size;
	for (i = 0; i < n; i++) {
		struct rte_mempool **mpp;
		obj = (char *)obj + header_size;

		/* set mempool ptr in header */
		mpp = __mempool_from_obj(obj);
		*mpp = mp;

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		__mempool_write_header_cookie(obj, 1);
		__mempool_write_trailer_cookie(obj);
#endif
		/* call the initializer */
		if (obj_init)
			obj_init(mp, obj_init_arg, obj, i);

		/* enqueue in ring */
		rte_ring_sp_enqueue(mp->ring, obj);
		obj = (char *)obj + elt_size + trailer_size;
	}

	RTE_EAL_TAILQ_INSERT_TAIL(RTE_TAILQ_MEMPOOL, rte_mempool_list, mp);

exit:
	rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);

	return mp;
}
/*
 * Create a KNI (kernel NIC interface) context for the given ethernet port.
 *
 * Opens /dev/KNI_DEVICE on first use, allocates the context, reserves one
 * memzone per FIFO (tx/rx/alloc/free/req/resp/sync), fills dev_info with
 * the PCI identity and physical addresses of those FIFOs, and asks the
 * kernel module to create the device via ioctl.
 *
 * Returns the new context, or NULL on failure (panics on allocation /
 * memzone failures, matching the surrounding code's policy).
 */
struct rte_kni *
rte_kni_create(uint8_t port_id, unsigned mbuf_size,
		struct rte_mempool *pktmbuf_pool, struct rte_kni_ops *ops)
{
	struct rte_kni_device_info dev_info;
	struct rte_eth_dev_info eth_dev_info;
	struct rte_kni *ctx;
	char itf_name[IFNAMSIZ];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	const struct rte_memzone *mz;

	if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL || !ops)
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	/* fixed: the '&' of '&eth_dev_info' had been mangled into 'ð' */
	rte_eth_dev_info_get(port_id, &eth_dev_info);
	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		eth_dev_info.pci_dev->addr.bus,
		eth_dev_info.pci_dev->addr.devid,
		eth_dev_info.pci_dev->addr.function,
		eth_dev_info.pci_dev->id.vendor_id,
		eth_dev_info.pci_dev->id.device_id);
	dev_info.bus = eth_dev_info.pci_dev->addr.bus;
	dev_info.devid = eth_dev_info.pci_dev->addr.devid;
	dev_info.function = eth_dev_info.pci_dev->addr.function;
	dev_info.vendor_id = eth_dev_info.pci_dev->id.vendor_id;
	dev_info.device_id = eth_dev_info.pci_dev->id.device_id;

	ctx = rte_zmalloc("kni devs", sizeof(struct rte_kni), 0);
	if (ctx == NULL)
		rte_panic("Cannot allocate memory for kni dev\n");
	memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	rte_snprintf(itf_name, IFNAMSIZ, "vEth%u", port_id);
	/* use "%s": never pass a non-literal string as the format */
	rte_snprintf(ctx->name, IFNAMSIZ, "%s", itf_name);
	rte_snprintf(dev_info.name, IFNAMSIZ, "%s", itf_name);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_tx_%d queue\n", port_id);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_rx_%d queue\n", port_id);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING: kernel takes mbufs from here to receive into */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_alloc_%d queue\n", port_id);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING: kernel returns consumed mbufs here */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_free_%d queue\n", port_id);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_req_%d ring\n", port_id);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_resp_%d ring\n", port_id);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_sync_%d mem\n", port_id);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool: located by its well-known memzone name */
	mz = rte_memzone_lookup("MP_mbuf_pool");
	if (mz == NULL) {
		RTE_LOG(ERR, KNI, "Can not find MP_mbuf_pool\n");
		goto fail;
	}
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->port_id = port_id;
	ctx->mbuf_size = mbuf_size;

	/* Configure the buffer size which will be checked in kernel module */
	dev_info.mbuf_size = ctx->mbuf_size;

	if (ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to create kni device\n");
		goto fail;
	}

	return ctx;

fail:
	if (ctx != NULL)
		rte_free(ctx);

	return NULL;
}
/** * Main init function for the multi-process server app, * calls subfunctions to do each stage of the initialisation. */ int init(int argc, char *argv[]) { int retval; const struct rte_memzone *mz; uint8_t i, total_ports; /* init EAL, parsing EAL args */ retval = rte_eal_init(argc, argv); if (retval < 0) return -1; argc -= retval; argv += retval; if (rte_eal_pci_probe()) rte_panic("Cannot probe PCI\n"); /* initialise the nic drivers */ retval = init_drivers(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n"); /* get total number of ports */ total_ports = rte_eth_dev_count(); /* set up array for port data */ mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports), rte_socket_id(), NO_FLAGS); if (mz == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n"); memset(mz->addr, 0, sizeof(*ports)); ports = mz->addr; RTE_LOG(INFO, APP, "memzone address is %lx\n", mz->phys_addr); /* set up array for statistics */ mz = rte_memzone_reserve(MZ_STATS_INFO, VPORT_STATS_SIZE, rte_socket_id(), NO_FLAGS); if (mz == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for statistics\n"); memset(mz->addr, 0, VPORT_STATS_SIZE); vport_stats = mz->addr; /* set up array for flow table data */ mz = rte_memzone_reserve(MZ_FLOW_TABLE, sizeof(*flow_table), rte_socket_id(), NO_FLAGS); if (mz == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n"); memset(mz->addr, 0, sizeof(*flow_table)); flow_table = mz->addr; /* parse additional, application arguments */ retval = parse_app_args(total_ports, argc, argv); if (retval != 0) return -1; /* initialise mbuf pools */ retval = init_mbuf_pools(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n"); /* now initialise the ports we will use */ for (i = 0; i < ports->num_ports; i++) { retval = init_port(ports->id[i]); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", (unsigned)i); } /* initialise the client queues/rings for inter 
process comms */ init_shm_rings(); /* initalise kni queues */ init_kni(); return 0; }
static int test_memzone_reserve_flags(void) { const struct rte_memzone *mz; const struct rte_memseg *ms; int hugepage_2MB_avail = 0; int hugepage_1GB_avail = 0; int hugepage_16MB_avail = 0; int hugepage_16GB_avail = 0; const size_t size = 100; int i = 0; ms = rte_eal_get_physmem_layout(); for (i = 0; i < RTE_MAX_MEMSEG; i++) { if (ms[i].hugepage_sz == RTE_PGSIZE_2M) hugepage_2MB_avail = 1; if (ms[i].hugepage_sz == RTE_PGSIZE_1G) hugepage_1GB_avail = 1; if (ms[i].hugepage_sz == RTE_PGSIZE_16M) hugepage_16MB_avail = 1; if (ms[i].hugepage_sz == RTE_PGSIZE_16G) hugepage_16GB_avail = 1; } /* Display the availability of 2MB ,1GB, 16MB, 16GB pages */ if (hugepage_2MB_avail) printf("2MB Huge pages available\n"); if (hugepage_1GB_avail) printf("1GB Huge pages available\n"); if (hugepage_16MB_avail) printf("16MB Huge pages available\n"); if (hugepage_16GB_avail) printf("16GB Huge pages available\n"); /* * If 2MB pages available, check that a small memzone is correctly * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag. * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an * available page size (i.e 1GB ) when 2MB pages are unavailable. 
*/ if (hugepage_2MB_avail) { mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY, RTE_MEMZONE_2MB); if (mz == NULL) { printf("MEMZONE FLAG 2MB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_2M) { printf("hugepage_sz not equal 2M\n"); return -1; } mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 2MB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_2M) { printf("hugepage_sz not equal 2M\n"); return -1; } /* Check if 1GB huge pages are unavailable, that function fails unless * HINT flag is indicated */ if (!hugepage_1GB_avail) { mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 1GB & HINT\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_2M) { printf("hugepage_sz not equal 2M\n"); return -1; } mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY, RTE_MEMZONE_1GB); if (mz != NULL) { printf("MEMZONE FLAG 1GB\n"); return -1; } } } /*As with 2MB tests above for 1GB huge page requests*/ if (hugepage_1GB_avail) { mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY, RTE_MEMZONE_1GB); if (mz == NULL) { printf("MEMZONE FLAG 1GB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_1G) { printf("hugepage_sz not equal 1G\n"); return -1; } mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 1GB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_1G) { printf("hugepage_sz not equal 1G\n"); return -1; } /* Check if 1GB huge pages are unavailable, that function fails unless * HINT flag is indicated */ if (!hugepage_2MB_avail) { mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL){ printf("MEMZONE FLAG 2MB & HINT\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_1G) { 
printf("hugepage_sz not equal 1G\n"); return -1; } mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY, RTE_MEMZONE_2MB); if (mz != NULL) { printf("MEMZONE FLAG 2MB\n"); return -1; } } if (hugepage_2MB_avail && hugepage_1GB_avail) { mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_2MB|RTE_MEMZONE_1GB); if (mz != NULL) { printf("BOTH SIZES SET\n"); return -1; } } } /* * This option is for IBM Power. If 16MB pages available, check * that a small memzone is correctly reserved from 16MB huge pages * when requested by the RTE_MEMZONE_16MB flag. Also check that * RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an available * page size (i.e 16GB ) when 16MB pages are unavailable. */ if (hugepage_16MB_avail) { mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY, RTE_MEMZONE_16MB); if (mz == NULL) { printf("MEMZONE FLAG 16MB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16M) { printf("hugepage_sz not equal 16M\n"); return -1; } mz = rte_memzone_reserve("flag_zone_16M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 2MB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16M) { printf("hugepage_sz not equal 16M\n"); return -1; } /* Check if 1GB huge pages are unavailable, that function fails * unless HINT flag is indicated */ if (!hugepage_16GB_avail) { mz = rte_memzone_reserve("flag_zone_16G_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 16GB & HINT\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16M) { printf("hugepage_sz not equal 16M\n"); return -1; } mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY, RTE_MEMZONE_16GB); if (mz != NULL) { printf("MEMZONE FLAG 16GB\n"); return -1; } } } /*As with 16MB tests above for 16GB huge page requests*/ if (hugepage_16GB_avail) { mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY, RTE_MEMZONE_16GB); if (mz == NULL) { 
printf("MEMZONE FLAG 16GB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16G) { printf("hugepage_sz not equal 16G\n"); return -1; } mz = rte_memzone_reserve("flag_zone_16G_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 16GB\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16G) { printf("hugepage_sz not equal 16G\n"); return -1; } /* Check if 1GB huge pages are unavailable, that function fails * unless HINT flag is indicated */ if (!hugepage_16MB_avail) { mz = rte_memzone_reserve("flag_zone_16M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("MEMZONE FLAG 16MB & HINT\n"); return -1; } if (mz->hugepage_sz != RTE_PGSIZE_16G) { printf("hugepage_sz not equal 16G\n"); return -1; } mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY, RTE_MEMZONE_16MB); if (mz != NULL) { printf("MEMZONE FLAG 16MB\n"); return -1; } } if (hugepage_16MB_avail && hugepage_16GB_avail) { mz = rte_memzone_reserve("flag_zone_16M_HINT", size, SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_16GB); if (mz != NULL) { printf("BOTH SIZES SET\n"); return -1; } } } return 0; }
/* this test is a bit tricky, and thus warrants explanation.
 *
 * first, we find two smallest memsegs to conduct our experiments on.
 *
 * then, we bring them within alignment from each other: if second segment is
 * twice+ as big as the first, reserve memory from that segment; if second
 * segment is comparable in length to the first, then cut the first segment
 * down until it becomes less than half of second segment, and then cut down
 * the second segment to be within alignment of the first.
 *
 * then, we have to pass the following test: if segments are within alignment
 * of each other (that is, the difference is less than 256 bytes, which is what
 * our alignment will be), segment with smallest offset should be picked.
 *
 * we know that min_ms will be our smallest segment, so we need to make sure
 * that we adjust the alignments so that the bigger segment has smallest
 * alignment (in our case, smallest segment will have 64-byte alignment, while
 * bigger segment will have 128-byte alignment).
 */
static int
test_memzone_reserve_memory_with_smallest_offset(void)
{
	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
	size_t len, min_len, prev_min_len;
	const struct rte_config *config;
	int i, align;

	config = rte_eal_get_configuration();

	min_ms = NULL;  /*< smallest segment */
	prev_min_ms = NULL; /*< second smallest segment */

	/* 256 bytes with the default 64-byte cache line */
	align = RTE_CACHE_LINE_SIZE * 4;

	/* find two smallest segments */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		ms = &config->mem_config->free_memseg[i];

		/* free_memseg is populated front-to-back, so a NULL addr
		 * means we have walked past the last segment */
		if (ms->addr == NULL)
			break;
		if (ms->len == 0)
			continue;

		if (min_ms == NULL)
			min_ms = ms;
		else if (min_ms->len > ms->len) {
			/* set last smallest to second last */
			prev_min_ms = min_ms;

			/* set new smallest */
			min_ms = ms;
		} else if ((prev_min_ms == NULL)
			|| (prev_min_ms->len > ms->len)) {
			prev_min_ms = ms;
		}
	}

	if (min_ms == NULL || prev_min_ms == NULL) {
		printf("Smallest segments not found!\n");
		return -1;
	}

	prev_min_len = prev_min_ms->len;
	min_len = min_ms->len;

	/* if smallest segment is bigger than half of bigger segment */
	if (prev_min_ms->len - min_ms->len <= min_ms->len) {

		len = (min_ms->len * 2) - prev_min_ms->len;

		/* make sure final length is *not* aligned */
		while (((min_ms->addr_64 + len) & (align-1)) == 0)
			len += RTE_CACHE_LINE_SIZE;

		if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
			printf("Cannot reserve memory!\n");
			return -1;
		}

		/* check if we got memory from correct segment: reserving
		 * shrinks the free segment it came from */
		if (min_ms->len != min_len - len) {
			printf("Reserved memory from wrong segment!\n");
			return -1;
		}
	}
	/* if we don't need to touch smallest segment but it's aligned */
	else if ((min_ms->addr_64 & (align-1)) == 0) {
		if (rte_memzone_reserve("align_mz1", RTE_CACHE_LINE_SIZE,
				SOCKET_ID_ANY, 0) == NULL) {
			printf("Cannot reserve memory!\n");
			return -1;
		}
		if (min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
			printf("Reserved memory from wrong segment!\n");
			return -1;
		}
	}

	/* if smallest segment is less than half of bigger segment */
	if (prev_min_ms->len - min_ms->len > min_ms->len) {
		len = prev_min_ms->len - min_ms->len - align;

		/* make sure final length is aligned */
		while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
			len += RTE_CACHE_LINE_SIZE;

		if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
			printf("Cannot reserve memory!\n");
			return -1;
		}

		/* check if we got memory from correct segment */
		if (prev_min_ms->len != prev_min_len - len) {
			printf("Reserved memory from wrong segment!\n");
			return -1;
		}
	}
	len = RTE_CACHE_LINE_SIZE;

	/* re-read lengths after the adjustments above */
	prev_min_len = prev_min_ms->len;
	min_len = min_ms->len;

	/* the two segments must now be within 'align' of each other */
	if (min_len >= prev_min_len || prev_min_len - min_len > (unsigned) align) {
		printf("Segments are of wrong lengths!\n");
		return -1;
	}

	/* try reserving from a bigger segment */
	if (rte_memzone_reserve_aligned("smallest_offset", len, SOCKET_ID_ANY, 0, align) ==
			NULL) {
		printf("Cannot reserve memory!\n");
		return -1;
	}

	/* check if we got memory from correct segment: the bigger segment
	 * (prev_min_ms) should have shrunk, the smaller one be untouched */
	if (min_ms->len != min_len &&
			prev_min_ms->len != (prev_min_len - len)) {
		printf("Reserved memory from segment with smaller offset!\n");
		return -1;
	}

	return 0;
}
static int test_memzone_reserve_max(void) { const struct rte_memzone *mz; const struct rte_config *config; const struct rte_memseg *ms; int memseg_idx = 0; int memzone_idx = 0; size_t len = 0; void* last_addr; size_t maxlen = 0; /* get pointer to global configuration */ config = rte_eal_get_configuration(); ms = rte_eal_get_physmem_layout(); for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){ /* ignore smaller memsegs as they can only get smaller */ if (ms[memseg_idx].len < maxlen) continue; /* align everything */ last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE); len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr); len &= ~((size_t) RTE_CACHE_LINE_MASK); /* cycle through all memzones */ for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) { /* stop when reaching last allocated memzone */ if (config->mem_config->memzone[memzone_idx].addr == NULL) break; /* check if the memzone is in our memseg and subtract length */ if ((config->mem_config->memzone[memzone_idx].addr >= ms[memseg_idx].addr) && (config->mem_config->memzone[memzone_idx].addr < (RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) { /* since the zones can now be aligned and occasionally skip * some space, we should calculate the length based on * reported length and start addresses difference. Addresses * are allocated sequentially so we don't need to worry about * them being in the right order. 
*/ len -= RTE_PTR_DIFF( config->mem_config->memzone[memzone_idx].addr, last_addr); len -= config->mem_config->memzone[memzone_idx].len; last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr, (size_t) config->mem_config->memzone[memzone_idx].len); } } /* we don't need to calculate offset here since length * is always cache-aligned */ if (len > maxlen) maxlen = len; } if (maxlen == 0) { printf("There is no space left!\n"); return 0; } mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0); if (mz == NULL){ printf("Failed to reserve a big chunk of memory\n"); rte_dump_physmem_layout(stdout); rte_memzone_dump(stdout); return -1; } if (mz->len != maxlen) { printf("Memzone reserve with 0 size did not return bigest block\n"); printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len); rte_dump_physmem_layout(stdout); rte_memzone_dump(stdout); return -1; } return 0; }
int init(int argc, char *argv[]) { int retval; const struct rte_memzone *mz_nf; const struct rte_memzone *mz_port; const struct rte_memzone *mz_cores; const struct rte_memzone *mz_scp; const struct rte_memzone *mz_services; const struct rte_memzone *mz_nf_per_service; uint8_t i, total_ports, port_id; /* init EAL, parsing EAL args */ retval = rte_eal_init(argc, argv); if (retval < 0) return -1; argc -= retval; argv += retval; #ifdef RTE_LIBRTE_PDUMP rte_pdump_init(NULL); #endif /* get total number of ports */ total_ports = rte_eth_dev_count_avail(); /* set up array for NF tx data */ mz_nf = rte_memzone_reserve(MZ_NF_INFO, sizeof(*nfs) * MAX_NFS, rte_socket_id(), NO_FLAGS); if (mz_nf == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for nf information\n"); memset(mz_nf->addr, 0, sizeof(*nfs) * MAX_NFS); nfs = mz_nf->addr; /* set up ports info */ mz_port = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports), rte_socket_id(), NO_FLAGS); if (mz_port == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n"); ports = mz_port->addr; /* set up core status */ mz_cores = rte_memzone_reserve(MZ_CORES_STATUS, sizeof(*cores) * onvm_threading_get_num_cores(), rte_socket_id(), NO_FLAGS); if (mz_cores == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for core information\n"); memset(mz_cores->addr, 0, sizeof(*cores) * 64); cores = mz_cores->addr; /* set up array for NF tx data */ mz_services = rte_memzone_reserve(MZ_SERVICES_INFO, sizeof(uint16_t *) * num_services, rte_socket_id(), NO_FLAGS); if (mz_services == NULL) rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for services information\n"); services = mz_services->addr; for (i = 0; i < num_services; i++) { services[i] = rte_calloc("one service NFs", MAX_NFS_PER_SERVICE, sizeof(uint16_t), 0); } mz_nf_per_service = rte_memzone_reserve(MZ_NF_PER_SERVICE_INFO, sizeof(uint16_t) * num_services, rte_socket_id(), NO_FLAGS); if (mz_nf_per_service == NULL) { rte_exit(EXIT_FAILURE, "Cannot 
reserve memory zone for NF per service information.\n"); } nf_per_service_count = mz_nf_per_service->addr; /* parse additional, application arguments */ retval = parse_app_args(total_ports, argc, argv); if (retval != 0) return -1; /* initialise mbuf pools */ retval = init_mbuf_pools(); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n"); /* initialise nf info pool */ retval = init_nf_info_pool(); if (retval != 0) { rte_exit(EXIT_FAILURE, "Cannot create nf info mbuf pool: %s\n", rte_strerror(rte_errno)); } /* initialise pool for NF messages */ retval = init_nf_msg_pool(); if (retval != 0) { rte_exit(EXIT_FAILURE, "Cannot create nf message pool: %s\n", rte_strerror(rte_errno)); } /* now initialise the ports we will use */ for (i = 0; i < ports->num_ports; i++) { port_id = ports->id[i]; rte_eth_macaddr_get(port_id, &ports->mac[port_id]); retval = init_port(port_id); if (retval != 0) rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", port_id); char event_msg_buf[20]; sprintf(event_msg_buf, "Port %d initialized", port_id); onvm_stats_add_event(event_msg_buf, NULL); } check_all_ports_link_status(ports->num_ports, (~0x0)); /* initialise the NF queues/rings for inter-eu comms */ init_shm_rings(); /* initialise a queue for newly created NFs */ init_info_queue(); /*initialize a default service chain*/ default_chain = onvm_sc_create(); retval = onvm_sc_append_entry(default_chain, ONVM_NF_ACTION_TONF, 1); if (retval == ENOSPC) { printf("chain length can not be larger than the maximum chain length\n"); exit(1); } printf("Default service chain: send to sdn NF\n"); /* set up service chain pointer shared to NFs*/ mz_scp = rte_memzone_reserve(MZ_SCP_INFO, sizeof(struct onvm_service_chain *), rte_socket_id(), NO_FLAGS); if (mz_scp == NULL) rte_exit(EXIT_FAILURE, "Canot reserve memory zone for service chain pointer\n"); memset(mz_scp->addr, 0, sizeof(struct onvm_service_chain *)); default_sc_p = mz_scp->addr; *default_sc_p = default_chain; 
onvm_sc_print(default_chain); onvm_flow_dir_init(); return 0; }