static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
        struct task_gen_server *task = (struct task_gen_server *)tbase;
        const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

        /* Mempool names must be unique; the static name is bumped on
           every call so that each task instance gets its own pool. */
        static char name[] = "server_mempool";
        name[0]++;
        task->mempool = rte_mempool_create(name, 4*1024 - 1, MBUF_SIZE,
                        targ->nb_cache_mbuf,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, 0,
                        socket_id, 0);
        PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

        int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
        PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

        lua_len(prox_lua(), -1);
        uint32_t n_listen = lua_tointeger(prox_lua(), -1);
        lua_pop(prox_lua(), 1);
        PROX_PANIC(n_listen == 0, "No services specified to listen on\n");

        task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id);
        plogx_info("n_listen = %d\n", n_listen);

        struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams");
        if (hs == NULL) {
                /* Expected number of streams per bundle = 1, hash_set
                   will grow if full. */
                hs = hash_set_create(n_listen, socket_id);
                prox_sh_add_socket(socket_id, "genl4_streams", hs);
        }

        const struct rte_hash_parameters listen_table = {
                .name = name,
                .entries = n_listen * 4,
                .key_len = sizeof(struct new_tuple),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = socket_id,
        };
        name[0]++;

        task->listen_hash = rte_hash_create(&listen_table);
        task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id);

        int idx = 0;
        lua_pushnil(prox_lua());
        while (lua_next(prox_lua(), -2)) {
                task->bundle_cfgs[idx].n_stream_cfgs = 1;
                task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id);

                int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs);
                PROX_PANIC(ret, "Failed to load stream cfg\n");

                struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0];

                // TODO: check mask and add to hash for each host
                struct new_tuple nt = {
                        .dst_addr = stream->servers.ip,
                        .proto_id = stream->proto,
                        .dst_port = stream->servers.port,
                        .l2_types[0] = 0x0008, /* ETHER_TYPE_IPv4 (0x0800) in network byte order */
                };

                ret = rte_hash_add_key(task->listen_hash, &nt);
                PROX_PANIC(ret < 0, "Failed to add\n");
                task->listen_entries[ret] = &task->bundle_cfgs[idx];

                plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port));
                ++idx;
                lua_pop(prox_lua(), 1);
        }

        static char name2[] = "task_gen_hash2";
        name2[0]++;
        plogx_dbg("Creating bundle ctx pool\n");
        if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) {
                cmd_mem_stats();
                PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
        }

        task->heap = heap_create(targ->n_concur_conn * 2, socket_id);
        task->seed = rte_rdtsc();

        /* TODO: calculate the CDF of the reply distribution and the
           number of replies as the number to cover for 99% of the
           replies. For now, assume that this number is 2. */
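        /* Sizing sketch for the assumption above: with at most 2 queued
           replies per concurrent connection, the local queue needs
           n_concur_conn * 2 slots; rte_align32pow2() rounds that up to
           the next power of two (e.g. 2000 connections -> 4000 -> 4096).
           A result of 0 means the 32-bit multiplication overflowed. */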
        uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2);
        PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n");
        task->fqueue = fqueue_create(queue_size, socket_id);
        PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n");

        uint32_t n_descriptors;

        if (targ->nb_txports) {
                PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n");
                n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd;
        } else {
                PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n");
                /* Transmitting to a ring rather than a port: fall back to a fixed descriptor count. */
                n_descriptors = 256;
        }

        struct token_time_cfg tt_cfg = {
                .bpp = targ->rate_bps,
                .period = rte_get_tsc_hz(),
                .bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
        };

        token_time_init(&task->token_time, &tt_cfg);
}

static void init_task_gen_client(struct task_base *tbase, struct task_args *targ)
{
        struct task_gen_client *task = (struct task_gen_client *)tbase;
        static char name[] = "gen_pool";
        const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);

        name[0]++;
        task->mempool = rte_mempool_create(name, 4*1024 - 1, MBUF_SIZE,
                        targ->nb_cache_mbuf,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, 0,
                        socket, 0);
        PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1);

        /* streams contains a lua table. Go through it and read each
           stream with its associated imix_fraction. */
        uint32_t imix;
        uint32_t i = 0;

        int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams);
        PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams);

        lua_len(prox_lua(), -1);
        uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1);
        lua_pop(prox_lua(), 1);
        PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n");
        plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs);

        struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams");
        if (hs == NULL) {
                /* Expected number of streams per bundle = 8, hash_set
                   will grow if full. */
                hs = hash_set_create(n_bundle_cfgs * 8, socket);
                prox_sh_add_socket(socket, "genl4_streams", hs);
        }

        task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket);
        lua_pushnil(prox_lua());

        int total_imix = 0;
        uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket);
        struct cdf *cdf = cdf_create(n_bundle_cfgs, socket);

        while (lua_next(prox_lua(), -2)) {
                PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) ||
                           lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs),
                           "Failed to load bundle cfg:\n%s\n", get_lua_to_errors());
                cdf_add(cdf, imix);
                occur[i] = imix;
                total_imix += imix;
                ++i;
                lua_pop(prox_lua(), 1);
        }

        lua_pop(prox_lua(), pop);
        cdf_setup(cdf);

        PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n");
        /* TSC cycles that must elapse between two new connections to
           respect the configured maximum setup rate. */
        task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate;

        static char name2[] = "task_gen_hash";
        name2[0]++;
        plogx_dbg("Creating bundle ctx pool\n");
        if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) {
                cmd_mem_stats();
                PROX_PANIC(1, "Failed to create conn_ctx_pool\n");
        }

        task->heap = heap_create(targ->n_concur_conn, socket);
        task->seed = rte_rdtsc();

        /* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */

        /* To avoid overflowing the tx descriptors, the token bucket
           size needs to be limited. The descriptors are filled most
           quickly with the smallest packets. For that reason, the
           token bucket size is given by "number of tx descriptors" *
           "smallest Ethernet packet". */
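        /* Rough worked example of the rule above (assuming a TX ring of
           512 descriptors and DPDK's ETHER_MIN_LEN of 64 bytes):
           bytes_max = 512 * (64 + 20) = 43008 bytes. The extra 20 bytes
           per packet presumably account for the per-frame overhead on
           the wire (preamble, SFD and inter-frame gap), so rate_bps is
           enforced against the physical line rate. */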
        PROX_ASSERT(targ->nb_txports == 1);

        struct token_time_cfg tt_cfg = {
                .bpp = targ->rate_bps,
                .period = rte_get_tsc_hz(),
                .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
        };

        token_time_init(&task->token_time, &tt_cfg);
}

static void start_task_gen_client(struct task_base *tbase)
{
        struct task_gen_client *task = (struct task_gen_client *)tbase;

        token_time_reset(&task->token_time, rte_rdtsc(), 0);
        task->new_conn_tokens = 0;
        task->new_conn_last_tsc = rte_rdtsc();
}

static void stop_task_gen_client(struct task_base *tbase)
{
        struct task_gen_client *task = (struct task_gen_client *)tbase;
        struct bundle_ctx *bundle;

        while (!heap_is_empty(task->heap)) {
                bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
                bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
        }
}

static void start_task_gen_server(struct task_base *tbase)
{
        struct task_gen_server *task = (struct task_gen_server *)tbase;

        token_time_reset(&task->token_time, rte_rdtsc(), 0);
}

static void stop_task_gen_server(struct task_base *tbase)
{
        struct task_gen_server *task = (struct task_gen_server *)tbase;
        struct bundle_ctx *bundle;
        uint8_t out[MAX_PKT_BURST];

        while (!heap_is_empty(task->heap)) {
                bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap));
                bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats);
        }

        if (task->cancelled) {
                struct rte_mbuf *mbuf = task->mbuf_saved;

                out[0] = OUT_DISCARD;
                task->cancelled = 0;
                task->base.tx_pkt(&task->base, &mbuf, 1, out);
        }

        /* Drain and discard any mbufs still pending in the local queue. */
        do {
                if (task->cur_mbufs_beg == task->cur_mbufs_end) {
                        task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
                        task->cur_mbufs_beg = 0;
                        if (task->cur_mbufs_end == 0)
                                break;
                }

                uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
                struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

                /* Advance past the mbufs handled in this iteration so the
                   loop terminates instead of resending the same batch. */
                task->cur_mbufs_beg += n_pkts;

                if (n_pkts) {
                        for (uint16_t j = 0; j < n_pkts; ++j) {
                                out[j] = OUT_DISCARD;
                        }
                        task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
                }
        } while (1);
}

static struct task_init task_init_gen1 = {
        .mode_str = "genl4",
        .sub_mode_str = "server",
        .init = init_task_gen,
        .handle = handle_gen_bulk,
        .start = start_task_gen_server,
        .stop = stop_task_gen_server,
        .flag_features = TASK_FEATURE_ZERO_RX,
        .size = sizeof(struct task_gen_server),
        .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

static struct task_init task_init_gen2 = {
        .mode_str = "genl4",
        .init = init_task_gen_client,
        .handle = handle_gen_bulk_client,
        .start = start_task_gen_client,
        .stop = stop_task_gen_client,
        .flag_features = TASK_FEATURE_ZERO_RX,
        .size = sizeof(struct task_gen_client),
        .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};

__attribute__((constructor)) static void reg_task_gen(void)
{
        reg_task(&task_init_gen1);
        reg_task(&task_init_gen2);
}