/* Client-side L4 traffic-generator fast path.
 *
 * Runs three stages in order:
 *  1. RX: match each received packet against the connection hash and feed it
 *     to its bundle's stream state machine; unmatched TCP packets get a RST.
 *  2. Timers: pop expired heap entries and let the bundle emit retransmit /
 *     delayed-TX packets (at most MAX_PKT_BURST per call).
 *  3. Connection setup: open up to MAX_PKT_BURST new connections, throttled
 *     by a small token bucket (new_conn_tokens refilled at new_conn_cost
 *     TSC cycles per token, capped at 16).
 *
 * NOTE(review): an identically-named void variant of this function exists
 * later in this chunk — only one of the two can be compiled into a single
 * translation unit; presumably these are two revisions. Confirm which one
 * the build actually uses.
 *
 * @param tbase  base task, actually a struct task_gen_client
 * @param mbufs  burst of received packets (may be consumed by tx_pkt)
 * @param n_pkts number of packets in mbufs
 * @return result of the final tx_pkt() call for newly created connections,
 *         or 0 on early exit (no mbufs available, or rate limiter empty)
 */
static int handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_client *task = (struct task_gen_client *)tbase;
	uint8_t out[MAX_PKT_BURST] = {0};
	struct bundle_ctx *conn;
	int ret;

	/* Stage 1: process received packets, if any. */
	if (n_pkts) {
		for (int i = 0; i < n_pkts; ++i) {
			struct pkt_tuple pt;
			struct l4_meta l4_meta;

			if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
				plogdx_err(mbufs[i], "Parsing failed\n");
				out[i] = OUT_DISCARD;
				continue;
			}

			/* rte_hash_lookup returns the key's slot index (>= 0)
			   or a negative error when the tuple is unknown. */
			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);

			if (ret < 0) {
				plogx_dbg("Client: packet RX that does not belong to connection:" "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port), IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
				plogdx_dbg(mbufs[i], NULL);
				if (pt.proto_id == IPPROTO_TCP) {
					/* Reuse the received mbuf to answer the
					   stray segment with a RST in-place. */
					stream_tcp_create_rst(mbufs[i], &l4_meta, &pt);
					out[i] = 0;
					continue;
				}
				else {
					out[i] = OUT_DISCARD;
					continue;
				}
			}

			/* Known connection: advance its state machine. A zero
			   return means "mbuf rewritten, transmit it". */
			conn = task->bundle_ctx_pool.hash_entries[ret];
			ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
			out[i] = ret == 0? 0: OUT_HANDLED;
		}
		task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
	}

	/* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
	if (heap_top_is_lower(task->heap, rte_rdtsc())) {
		/* Make sure a full burst of fresh mbufs is staged; bail out
		   and retry next call if the pool is temporarily empty. */
		if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
			return 0;

		uint16_t n_called_back = 0;
		while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < MAX_PKT_BURST) {
			conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

			/* handle packet TX (retransmit or delayed transmit) */
			ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

			/* Only consume the staged mbuf when the bundle actually
			   produced a packet; otherwise the slot is reused. */
			if (ret == 0) {
				out[n_called_back] = 0;
				n_called_back++;
			}
		}
		plogx_dbg("During callback, will send %d packets\n", n_called_back);

		task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
		task->n_new_mbufs -= n_called_back;
	}

	/* Stage 3: create new connections, bounded by free bundles... */
	uint32_t n_new = task->bundle_ctx_pool.n_free_bundles;
	n_new = n_new > MAX_PKT_BURST? MAX_PKT_BURST : n_new;

	/* ...and by the token bucket: one token accrues every new_conn_cost
	   TSC cycles; the bucket holds at most 16 tokens (burst limit). */
	uint64_t diff = (rte_rdtsc() - task->new_conn_last_tsc)/task->new_conn_cost;
	task->new_conn_last_tsc += diff * task->new_conn_cost;
	task->new_conn_tokens += diff;
	if (task->new_conn_tokens > 16)
		task->new_conn_tokens = 16;
	if (n_new > task->new_conn_tokens)
		n_new = task->new_conn_tokens;
	task->new_conn_tokens -= n_new;
	if (n_new == 0)
		return 0;

	if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
		return 0;

	for (uint32_t i = 0; i < n_new; ++i) {
		struct bundle_ctx *bundle_ctx = bundle_ctx_pool_get_w_cfg(&task->bundle_ctx_pool);

		PROX_ASSERT(bundle_ctx);

		struct pkt_tuple *pt = &bundle_ctx->tuple;

		/* Re-roll the source tuple until it does not collide with an
		   existing connection. NOTE(review): if the tuple space is
		   exhausted this loops forever and only logs once at 1000
		   attempts — confirm this is acceptable under load. */
		int n_retries = 0;
		do {
			/* Note that the actual packet sent will
			   contain swapped addresses and ports
			   (i.e. pkt.src <=> tuple.dst). The incoming
			   packet will match this struct. */
			bundle_init(bundle_ctx, task->heap, PEER_CLIENT, &task->seed);

			ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
			if (ret >= 0) {
				if (n_retries++ == 1000) {
					plogx_err("Already tried 1K times\n");
				}
			}
		} while (ret >= 0);

		ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

		if (ret < 0) {
			/* Hash table full: give the bundle back and drop. */
			plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
			bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);
			pkt_tuple_debug2(pt);
			out[i] = OUT_DISCARD;
			continue;
		}

		task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

		if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
			task->l4_stats.tcp_created++;
		else
			task->l4_stats.udp_created++;
		task->l4_stats.bundles_created++;

		/* First kick of the new connection (SYN / first datagram). */
		ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
		out[i] = ret == 0? 0: OUT_HANDLED;
	}
	int ret2 = task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
	task->n_new_mbufs -= n_new;
	return ret2;
}
static void handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) { struct task_gen_client *task = (struct task_gen_client *)tbase; uint8_t out[MAX_PKT_BURST] = {0}; struct bundle_ctx *conn; int ret; if (n_pkts) { for (int i = 0; i < n_pkts; ++i) { struct pkt_tuple pt; struct l4_meta l4_meta; if (parse_pkt(mbufs[i], &pt, &l4_meta)) { plogdx_err(mbufs[i], "Parsing failed\n"); out[i] = NO_PORT_AVAIL; continue; } ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt); if (ret < 0) { plogx_dbg("Client: packet RX that does not belong to connection:" "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port), IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port)); plogdx_dbg(mbufs[i], NULL); // if tcp, send RST /* pkt_tuple_debug2(&pt); */ out[i] = NO_PORT_AVAIL; continue; } conn = task->bundle_ctx_pool.hash_entries[ret]; ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); out[i] = ret == 0? 
0: NO_PORT_AVAIL; } task->base.tx_pkt(&task->base, mbufs, n_pkts, out); } if (task->n_new_mbufs < MAX_PKT_BURST) { if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) { plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST); return ; } for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) { init_mbuf_seg(task->new_mbufs[i]); } task->n_new_mbufs = MAX_PKT_BURST; } /* If there is at least one callback to handle, handle at most MAX_PKT_BURST */ if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) { uint16_t n_called_back = 0; while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) { conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap)); /* handle packet TX (retransmit or delayed transmit) */ ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); if (ret == 0) { out[n_called_back] = 0; n_called_back++; } } plogx_dbg("During callback, will send %d packets\n", n_called_back); task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out); task->n_new_mbufs -= n_called_back; } int n_new = task->bundle_ctx_pool.n_free_bundles; n_new = n_new > MAX_PKT_BURST? 
MAX_PKT_BURST : n_new; if (n_new == 0) return ; if (task->n_new_mbufs < MAX_PKT_BURST) { if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) { plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST); return ; } for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) { init_mbuf_seg(task->new_mbufs[i]); } task->n_new_mbufs = MAX_PKT_BURST; } for (int i = 0; i < n_new; ++i) { int32_t ret = cdf_sample(task->cdf, &task->seed); /* Select a new bundle_cfg according to imix */ struct bundle_cfg *bundle_cfg = &task->bundle_cfgs[ret]; struct bundle_ctx *bundle_ctx; bundle_ctx = bundle_ctx_pool_get(&task->bundle_ctx_pool); /* Should be an assert: */ if (!bundle_ctx) { plogx_err("No more available bundles\n"); exit(-1); } struct pkt_tuple *pt = &bundle_ctx->tuple; int n_retries = 0; do { /* Note that the actual packet sent will contain swapped addresses and ports (i.e. pkt.src <=> tuple.dst). The incoming packet will match this struct. */ bundle_init(bundle_ctx, bundle_cfg, task->heap, PEER_CLIENT, &task->seed); ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt); if (n_retries == 1000) { plogx_err("Already tried 1K times\n"); } if (ret >= 0) { n_retries++; } } while (ret >= 0); ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt); if (ret < 0) { plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles); bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx); pkt_tuple_debug2(pt); out[i] = NO_PORT_AVAIL; continue; } task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx; if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP) task->l4_stats.tcp_created++; else task->l4_stats.udp_created++; ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); out[i] = ret == 0? 0: NO_PORT_AVAIL; } task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out); task->n_new_mbufs -= n_new; }