/* Create a new flow table made of an rte_hash table and a fixed size
 * data array for storing values. Only supports IPv4 5-tuple lookups. */
struct onvm_ft*
onvm_ft_create(int cnt, int entry_size) {
    struct rte_hash* hash;
    struct onvm_ft* ft;
    struct rte_hash_parameters ipv4_hash_params = {
        .name = NULL,
        .entries = cnt,
        .key_len = sizeof(struct onvm_ft_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    char s[64];

    /* Create the ipv4 hash table. Use the lcore id and cycle counter to get a unique name. */
    ipv4_hash_params.name = s;
    ipv4_hash_params.socket_id = rte_socket_id();
    snprintf(s, sizeof(s), "onvm_ft_%d-%"PRIu64, rte_lcore_id(), rte_get_tsc_cycles());
    hash = rte_hash_create(&ipv4_hash_params);
    if (hash == NULL) {
        return NULL;
    }
    ft = (struct onvm_ft*)rte_calloc("table", 1, sizeof(struct onvm_ft), 0);
    if (ft == NULL) {
        rte_hash_free(hash);
        return NULL;
    }
    ft->hash = hash;
    ft->cnt = cnt;
    ft->entry_size = entry_size;
    /* Create the data array for storing values */
    ft->data = rte_calloc("entry", cnt, entry_size, 0);
    if (ft->data == NULL) {
        rte_hash_free(hash);
        rte_free(ft);
        return NULL;
    }
    return ft;
}

/* Add an entry to the flow table and set data to point to the new value.
 * Returns:
 *   - the index in the array on success
 *   - -EPROTONOSUPPORT if the packet is not IPv4
 *   - -EINVAL if the parameters are invalid
 *   - -ENOSPC if there is no space in the hash for this key */
int
onvm_ft_add_pkt(struct onvm_ft* table, struct rte_mbuf *pkt, char** data) {
    int32_t tbl_index;
    struct onvm_ft_ipv4_5tuple key;
    int err;

    err = onvm_ft_fill_key(&key, pkt);
    if (err < 0) {
        return err;
    }
    tbl_index = rte_hash_add_key_with_hash(table->hash, (const void *)&key, pkt->hash.rss);
    if (tbl_index >= 0) {
        *data = &table->data[tbl_index * table->entry_size];
    }
    return tbl_index;
}
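/* Usage sketch (not from the original source; assumes an initialized EAL and a
 * caller-defined value type). Shows how the two functions above compose: the
 * index returned by onvm_ft_add_pkt() selects a fixed-size slot in ft->data
 * that the caller treats as its per-flow state. */
struct flow_entry {
    uint64_t pkt_count;
};

static int
track_flow(struct onvm_ft *ft, struct rte_mbuf *pkt) {
    struct flow_entry *entry = NULL;
    int idx = onvm_ft_add_pkt(ft, pkt, (char **)&entry);
    if (idx < 0)
        return idx;          /* not IPv4, bad args, or table full */
    entry->pkt_count++;      /* per-flow state lives in ft->data */
    return 0;
}
/* At init time: struct onvm_ft *ft = onvm_ft_create(1024, sizeof(struct flow_entry)); */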
// FIXME: support packet sizes here
static inline void main_loop_poisson(struct rte_ring* ring, uint8_t device, uint16_t queue, uint32_t target, uint32_t link_speed) {
    uint64_t tsc_hz = rte_get_tsc_hz();
    // control IPGs instead of IDTs as IDTs < packet_time are physically impossible
    std::default_random_engine rand;
    uint64_t next_send = 0;
    struct rte_mbuf* bufs[batch_size];
    while (1) {
        int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
        uint64_t cur = rte_get_tsc_cycles();
        // nothing sent for 10 ms, restart rate control
        if (((int64_t) cur - (int64_t) next_send) > (int64_t) tsc_hz / 100) {
            next_send = cur;
        }
        if (rc == 0) {
            uint32_t sent = 0;
            while (sent < batch_size) {
                // wire time of the packet, including 24 B of framing (preamble + CRC + IFG)
                uint64_t pkt_time = (bufs[sent]->pkt.pkt_len + 24) * 8 / (link_speed / 1000);
                uint64_t avg = (uint64_t) (tsc_hz / (1000000000 / target) - pkt_time);
                std::exponential_distribution<double> distribution(1.0 / avg);
                while ((cur = rte_get_tsc_cycles()) < next_send);
                next_send += distribution(rand) + pkt_time;
                sent += rte_eth_tx_burst(device, queue, bufs + sent, 1);
            }
        }
    }
}
/**
 * @brief Pause for a requested time in ns (the delay is held in ticksDelay_, precomputed in TSC ticks)
 */
void DPDKAdapter::StreamInfo::nPause()
{
    const uint64_t start = rte_get_tsc_cycles();

    while ((rte_get_tsc_cycles() - start) < ticksDelay_)
    {
        rte_pause();
    }
}
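/* Hedged sketch (helper name and delayNs are assumptions, not part of the
 * adapter's API): the busy-wait above compares TSC ticks, so a nanosecond
 * request has to be converted once, when the stream is configured.
 * Multiplying before dividing avoids truncating sub-microsecond delays to
 * zero ticks on multi-GHz TSCs. */
static inline uint64_t ns_to_tsc_ticks(uint64_t ns)
{
    return ns * rte_get_tsc_hz() / 1000000000ULL; // ticks = ns * hz / 1e9
}
// e.g. ticksDelay_ = ns_to_tsc_ticks(delayNs);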
void counter_firewall_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx) {
    struct counter_t *counter = (struct counter_t *) arg;
    poll_counter(counter);

    if (nb_rx != 0) {
        uint64_t start_c = rte_get_tsc_cycles(), diff_c;

        // look each packet up in the index table and tally the recorded decisions:
        // once at least <drop_at> chain members have voted, the packet is forwarded
        // to the wrapper, otherwise it is freed
        struct indextable_entry *entry;
        struct rte_mbuf *ok_pkt;
        struct metadata_t *meta;

        for (int i = 0; i < nb_rx; ++i) {
            struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i], struct ether_hdr *);
            if (!is_same_ether_addr(&counter->fw_port_mac, &eth->d_addr)) {
                RTE_LOG(INFO, COUNTER, "Wrong d_MAC... "FORMAT_MAC"\n", ARG_V_MAC(eth->d_addr));
                continue;
            }

            entry = indextable_get(counter->indextable, buffer[i]);
            if (entry != NULL) {
                ok_pkt = entry->packet;
                meta = &entry->meta;
                meta->decissions |= 1 << counter->chain_index;
                int decission_count = count_decissions(meta->decissions);
                counter->pkts_received_fw++;
                if (decission_count >= counter->drop_at) {
                    fwd_to_wrapper(counter, ok_pkt, meta);
                } else {
                    rte_pktmbuf_free(ok_pkt);
                    counter->pkts_dropped++;
                }
                indextable_delete(counter->indextable, entry);
                counter->nb_mbuf--;
            } else {
                RTE_LOG(WARNING, COUNTER, "Received unregistered packet.\n");
                // print_packet_hex(buffer[i]);
            }
        }

        diff_c = rte_get_tsc_cycles() - start_c;
        counter->cTime += diff_c; // converted later: * 1000.0 / rte_get_tsc_hz()
    }
}
/*
 * This function displays stats. It uses ANSI terminal codes to clear the
 * screen when called. It is called from a single non-master thread in the
 * server process, when the process is run with more than one lcore enabled.
 */
static void
do_stats_display(struct rte_mbuf* pkt) {
    static uint64_t last_cycles;
    static uint64_t cur_pkts = 0;
    static uint64_t last_pkts = 0;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    (void)pkt;

    uint64_t cur_cycles = rte_get_tsc_cycles();
    cur_pkts += print_delay;

    /* Clear screen and move to top left */
    printf("%s%s", clr, topLeft);

    printf("Total packets: %9"PRIu64" \n", cur_pkts);
    printf("TX pkts per second: %9"PRIu64" \n",
           (cur_pkts - last_pkts) * rte_get_timer_hz() / (cur_cycles - last_cycles));
    printf("Packets per group: %d\n", NUM_PKTS);

    last_pkts = cur_pkts;
    last_cycles = cur_cycles;

    printf("\n\n");
}
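/* The TX rate line above is plain cycle arithmetic: with delta_pkts packets
 * since the last refresh and delta_cycles elapsed TSC cycles,
 *     pkts/s = delta_pkts * hz / delta_cycles.
 * Worked example (assumed numbers): delta_pkts = 1000000, hz = 2.4e9 and
 * delta_cycles = 4.8e9 (two seconds) give 1000000 * 2.4e9 / 4.8e9 = 500000 pkts/s.
 * Multiplying before dividing preserves integer precision; note that the very
 * first call divides by (cur_cycles - 0), so its printed rate is meaningless. */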
/**
 * @brief Initiate a capture
 *
 * @param devId Port number
 * @param captureData A pointer to the capture buffer
 * @param captureDataLength Size of the capture buffer
 *
 * @return true on success
 */
bool DPDKAdapter::startRx(uint8_t devId, char *captureData, unsigned int captureDataLength)
{
    if (devId >= RTE_MAX_ETHPORTS)
    {
        qCritical("Device ID is out of range");
        return false;
    }

    qDebug("devId %u, allocated a capture buffer of size %u bytes", devId, captureDataLength);

    memset(captureData, 0, captureDataLength);

    DeviceInfo& devInfo = devices[devId];

    devInfo.captureDataLength = captureDataLength;
    devInfo.captureDataSize = 0;
    devInfo.captureData = captureData;

    // store the number of used descriptors in the RX ring
    devInfo.rxQueueCount = rte_eth_rx_queue_count(devId, 0);
    qDebug("RX queue 0 count %d\n", devInfo.rxQueueCount);

    devInfo.rxDevStart = true;
    devInfo.rxTicksStart = rte_get_tsc_cycles();

    rte_mb();

    return true;
}
/**
 * functional test for rte_meter_trtcm_color_blind_check
 */
static inline int
tm_test_trtcm_color_blind_check(void)
{
#define TRTCM_BLIND_CHECK_MSG "trtcm_blind_check"

    uint64_t time;
    struct rte_meter_trtcm tm;
    uint64_t hz = rte_get_tsc_hz();

    /* Test green */
    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_blind_check(&tm, time, TM_TEST_TRTCM_CBS_DF - 1)
            != e_RTE_METER_GREEN)
        melog(TRTCM_BLIND_CHECK_MSG" GREEN");

    /* Test yellow */
    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_blind_check(&tm, time, TM_TEST_TRTCM_CBS_DF + 1)
            != e_RTE_METER_YELLOW)
        melog(TRTCM_BLIND_CHECK_MSG" YELLOW");

    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_blind_check(&tm, time, TM_TEST_TRTCM_PBS_DF - 1)
            != e_RTE_METER_YELLOW)
        melog(TRTCM_BLIND_CHECK_MSG" YELLOW");

    /* Test red */
    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_blind_check(&tm, time, TM_TEST_TRTCM_PBS_DF + 1)
            != e_RTE_METER_RED)
        melog(TRTCM_BLIND_CHECK_MSG" RED");

    return 0;
}
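/* Note on the time argument (a reading of the test, not from the source): each
 * check passes rte_get_tsc_cycles() + hz, i.e. a timestamp one second after
 * the meter was configured. The committed bucket evolves as
 *     tc(t) = min(cbs, tc(0) + cir * t)
 * and likewise for the peak bucket, so for the default test rates one second
 * is more than enough to fill both buckets to their caps. The returned color
 * then depends only on whether the packet length fits within cbs or pbs,
 * which is exactly what the cbs/pbs +/- 1 probe lengths exercise. */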
/**
 * functional test for rte_meter_srtcm_color_blind_check
 */
static inline int
tm_test_srtcm_color_blind_check(void)
{
#define SRTCM_BLIND_CHECK_MSG "srtcm_blind_check"

    struct rte_meter_srtcm sm;
    uint64_t time;
    uint64_t hz = rte_get_tsc_hz();

    /* Test green */
    if (rte_meter_srtcm_config(&sm, &sparams) != 0)
        melog(SRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_srtcm_color_blind_check(&sm, time, TM_TEST_SRTCM_CBS_DF - 1)
            != e_RTE_METER_GREEN)
        melog(SRTCM_BLIND_CHECK_MSG" GREEN");

    /* Test yellow */
    if (rte_meter_srtcm_config(&sm, &sparams) != 0)
        melog(SRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_srtcm_color_blind_check(&sm, time, TM_TEST_SRTCM_CBS_DF + 1)
            != e_RTE_METER_YELLOW)
        melog(SRTCM_BLIND_CHECK_MSG" YELLOW");

    if (rte_meter_srtcm_config(&sm, &sparams) != 0)
        melog(SRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_srtcm_color_blind_check(&sm, time, (uint32_t)sm.ebs - 1)
            != e_RTE_METER_YELLOW)
        melog(SRTCM_BLIND_CHECK_MSG" YELLOW");

    /* Test red */
    if (rte_meter_srtcm_config(&sm, &sparams) != 0)
        melog(SRTCM_BLIND_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_srtcm_color_blind_check(&sm, time, TM_TEST_SRTCM_EBS_DF + 1)
            != e_RTE_METER_RED)
        melog(SRTCM_BLIND_CHECK_MSG" RED");

    return 0;
}
static inline void
rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
                    struct rte_event_eth_rx_adapter_stats *stats)
{
    if (unlikely(!stats->rx_enq_start_ts))
        stats->rx_enq_start_ts = rte_get_tsc_cycles();

    if (likely(!rx_enq_blocked(rx_adapter)))
        return;

    rx_adapter->enq_block_count = 0;
    if (rx_adapter->rx_enq_block_start_ts) {
        stats->rx_enq_end_ts = rte_get_tsc_cycles();
        stats->rx_enq_block_cycles +=
            stats->rx_enq_end_ts - rx_adapter->rx_enq_block_start_ts;
        rx_adapter->rx_enq_block_start_ts = 0;
    }
}
static void
check_io(void)
{
    uint64_t end, tsc_complete;

    rte_mb();
#if HAVE_LIBAIO
    if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
        aio_check_io();
    } else
#endif
    {
        spdk_nvme_qpair_process_completions(g_ns->u.nvme.qpair, 0);
    }
    rte_mb();
    end = rte_get_tsc_cycles();
    if (g_ns->current_queue_depth == 1) {
        /*
         * Account for race condition in AIO case where interrupt occurs
         * after checking for queue depth. If the timestamp capture
         * is too big compared to the last capture, assume that an
         * interrupt fired, and do not bump the start tsc forward. This
         * will ensure this extra time is accounted for next time through
         * when we see current_queue_depth drop to 0.
         */
        if (g_ns->type == ENTRY_TYPE_NVME_NS || (end - g_complete_tsc_start) < 500) {
            g_complete_tsc_start = end;
        }
    } else {
        tsc_complete = end - g_complete_tsc_start;
        g_tsc_complete += tsc_complete;
        if (tsc_complete < g_tsc_complete_min) {
            g_tsc_complete_min = tsc_complete;
        }
        if (tsc_complete > g_tsc_complete_max) {
            g_tsc_complete_max = tsc_complete;
        }
        g_io_completed++;
        if (!g_ns->is_draining) {
            submit_single_io();
        }
        g_complete_tsc_start = rte_get_tsc_cycles();
    }
}
void counter_register_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx) {
    if (nb_rx == 0)
        return;

    struct counter_t *counter = (struct counter_t *) arg;
    uint64_t start_a = rte_get_tsc_cycles(), diff_a;

    if (nb_rx > rte_ring_free_count(counter->ring)) {
        RTE_LOG(ERR, COUNTER, "Not enough free entries in ring!\n");
    }

    // enqueue the packets in the ring; this method must be thread safe
    struct rte_mbuf *bulk[nb_rx];
    unsigned nb_registered = 0;
    for (int i = 0; i < nb_rx; ++i) {
        struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i], struct ether_hdr *);
        if (!is_same_ether_addr(&counter->rx_register->mac, &eth->d_addr)) {
            continue;
        }
        bulk[nb_registered] = rte_pktmbuf_clone(buffer[i], counter->clone_pool);
        if (bulk[nb_registered] == NULL) {
            RTE_LOG(ERR, COUNTER, "Could not clone mbuf!\n");
            continue;
        }
        nb_registered += 1;
    }

    unsigned n = rte_ring_enqueue_burst(counter->ring, (void * const *) bulk, nb_registered);
    if (n < nb_registered) {
        RTE_LOG(ERR, COUNTER, "Could not enqueue every new packet for registration! "
                "(%u/%u) free: %u\n", n, nb_registered, rte_ring_free_count(counter->ring));
    }

    diff_a = rte_get_tsc_cycles() - start_a;
    counter->aTime += diff_a; // converted later: * 1000.0 / rte_get_tsc_hz()
    counter->nb_measurements_a += nb_rx;
}
static void
submit_single_io(void)
{
    uint64_t offset_in_ios;
    uint64_t start;
    int rc;
    struct ns_entry *entry = g_ns;
    uint64_t tsc_submit;

    offset_in_ios = rand_r(&seed) % entry->size_in_ios;

    start = rte_get_tsc_cycles();
    rte_mb();
#if HAVE_LIBAIO
    if (entry->type == ENTRY_TYPE_AIO_FILE) {
        rc = aio_submit(g_ns->u.aio.ctx, &g_task->iocb, entry->u.aio.fd, IO_CMD_PREAD,
                        g_task->buf, g_io_size_bytes, offset_in_ios * g_io_size_bytes, g_task);
    } else
#endif
    {
        rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, g_ns->u.nvme.qpair, g_task->buf,
                                   offset_in_ios * entry->io_size_blocks,
                                   entry->io_size_blocks, io_complete, g_task, 0);
    }
    rte_mb();
    tsc_submit = rte_get_tsc_cycles() - start;
    g_tsc_submit += tsc_submit;
    if (tsc_submit < g_tsc_submit_min) {
        g_tsc_submit_min = tsc_submit;
    }
    if (tsc_submit > g_tsc_submit_max) {
        g_tsc_submit_max = tsc_submit;
    }

    if (rc != 0) {
        fprintf(stderr, "starting I/O failed\n");
    }

    g_ns->current_queue_depth++;
}
/**
 * @brief Check if it is time to transmit
 *
 * @return true if time is up
 */
bool DPDKAdapter::StreamInfo::isReadyTransmit()
{
    uint64_t currentTicks = rte_get_tsc_cycles();

    if ((lastTx_ == 0) || (currentTicks - lastTx_ >= ticksDelay_))
    {
        lastTx_ = currentTicks;
        return true;
    }

    return false;
}
static inline void
rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
{
    if (rx_adapter->rx_enq_block_start_ts)
        return;

    rx_adapter->enq_block_count++;
    if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
        return;

    rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
}
static void
fwd_to_wrapper(struct counter_t *counter, struct rte_mbuf *m, struct metadata_t *meta) {
    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    ether_addr_copy(&counter->next_mac, &eth->d_addr);
    ether_addr_copy(&counter->tx->send_port_mac, &eth->s_addr);

    if (!counter->decap_on_send) {
        wrapper_add_data(m, meta);
    }

    uint64_t start_d = rte_get_tsc_cycles(), diff_d;
    int send = tx_put(counter->tx, &m, 1);
    while (send == 0) {
        send = tx_put(counter->tx, &m, 1);
    }
    diff_d = rte_get_tsc_cycles() - start_d;
    counter->dTime += diff_d;

    counter->pkts_send += send;
    counter->nb_mbuf--;
}
/**
 * @in[4]  : the flags the packets carry.
 * @out[4] : the flags the function is expected to return.
 * It does the aware check at the time of 1 second from the beginning.
 * At that time, it uses packet lengths of cbs - 1, cbs + 1,
 * pbs - 1 and pbs + 1 with flags in[0], in[1], in[2] and in[3],
 * expecting flags out[0], out[1], out[2] and out[3].
 */
static inline int
tm_test_trtcm_aware_check(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define TRTCM_AWARE_CHECK_MSG "trtcm_aware_check"

    struct rte_meter_trtcm tm;
    uint64_t time;
    uint64_t hz = rte_get_tsc_hz();

    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_AWARE_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_aware_check(&tm, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
        melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);

    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_AWARE_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_aware_check(&tm, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
        melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);

    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_AWARE_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_aware_check(&tm, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
        melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);

    if (rte_meter_trtcm_config(&tm, &tparams) != 0)
        melog(TRTCM_AWARE_CHECK_MSG);
    time = rte_get_tsc_cycles() + hz;
    if (rte_meter_trtcm_color_aware_check(&tm, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
        melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);

    return 0;
}
static inline void main_loop_cbr(struct rte_ring* ring, uint8_t device, uint16_t queue, uint32_t target) {
    uint64_t tsc_hz = rte_get_tsc_hz();
    // convert the target inter-departure time (ns) to TSC cycles
    uint64_t id_cycles = (uint64_t) (target / (1000000000.0 / ((double) tsc_hz)));
    uint64_t next_send = 0;
    struct rte_mbuf* bufs[batch_size];
    while (1) {
        int rc = ring_dequeue(ring, reinterpret_cast<void**>(bufs), batch_size);
        uint64_t cur = rte_get_tsc_cycles();
        // nothing sent for 10 ms, restart rate control
        if (((int64_t) cur - (int64_t) next_send) > (int64_t) tsc_hz / 100) {
            next_send = cur;
        }
        if (rc == 0) {
            uint32_t sent = 0;
            while (sent < batch_size) {
                while ((cur = rte_get_tsc_cycles()) < next_send);
                next_send += id_cycles;
                sent += rte_eth_tx_burst(device, queue, bufs + sent, 1);
            }
        }
    }
}
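// Worked example for the CBR loop above (assumed numbers): with target = 1000 ns
// between packets and tsc_hz = 2.4 GHz,
//     id_cycles = 1000 / (1e9 / 2.4e9) = 2400 cycles,
// i.e. one packet every 2400 TSC cycles, or 1 Mpps. Advancing next_send by the
// constant id_cycles (rather than cur + id_cycles) lets the loop catch up after
// a late wakeup instead of accumulating drift.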
void poll_receiver(struct receiver_t *receiver) {
    const uint16_t port = receiver->in_port;
    struct rte_mbuf **pkts_burst = receiver->burst_buffer;

    uint64_t start_a = rte_get_tsc_cycles();

    uint16_t nb_rx = rte_eth_rx_burst((uint8_t) port, 0, pkts_burst, BURST_SIZE);
    if (nb_rx > 0) {
        receiver->time_b += rte_get_tsc_cycles() - start_a;
    }

    receiver->pkts_received += nb_rx;
    if (nb_rx != 0) {
        receiver->nb_polls++;
    }
    receiver->nb_rec += nb_rx;

    /* Hand the burst over to each registered handler. */
    for (unsigned h_index = 0; h_index < receiver->nb_handler; ++h_index) {
        receiver->handler[h_index](receiver->args[h_index], pkts_burst, nb_rx);
    }

    for (unsigned p_index = 0; p_index < nb_rx; ++p_index) {
        // if (rte_mbuf_refcnt_read(pkts_burst[p_index]) > 1) {
        //     rte_mbuf_refcnt_update(pkts_burst[p_index], -1);
        // } else {
        rte_pktmbuf_free(pkts_burst[p_index]);
        // }
    }

    if (nb_rx > 0) {
        receiver->time_a += rte_get_tsc_cycles() - start_a;
        receiver->nb_measurements += nb_rx;
    }
}
/**
 * @brief DPDKProfiler destructor
 */
DPDKProfiler::~DPDKProfiler()
{
    uint64_t end = rte_get_tsc_cycles();

    stats_[coreId_][name_].last_duration = (end - start_) * SEC_TO_NSEC / rte_get_tsc_hz();
    stats_[coreId_][name_].total_duration += stats_[coreId_][name_].last_duration;
    stats_[coreId_][name_].invoke_cnt += 1;

    if (stats_[coreId_][name_].invoke_cnt == 10000000)
    {
        qWarning("%s on core %u : last duration %lu, median duration %lu",
                 name_.c_str(), coreId_,
                 lastDurationGet(coreId_, name_), medDurationGet(coreId_, name_));
        reset(coreId_, name_);
    }
}
static int
work_fn(void)
{
    uint64_t tsc_end;

    printf("Starting work_fn on core %u\n", rte_lcore_id());

    /* Allocate a queue pair for each namespace. */
    if (init_ns_worker_ctx() != 0) {
        printf("ERROR: init_ns_worker_ctx() failed\n");
        return 1;
    }

    tsc_end = rte_get_tsc_cycles() + g_time_in_sec * g_tsc_rate;

    /* Submit initial I/O for each namespace. */
    submit_single_io();

    g_complete_tsc_start = rte_get_tsc_cycles();

    while (1) {
        /*
         * Check for completed I/O for each controller. A new
         * I/O will be submitted in the io_complete callback
         * to replace each I/O that is completed.
         */
        check_io();

        if (rte_get_tsc_cycles() > tsc_end) {
            break;
        }
    }

    drain_io();
    cleanup_ns_worker_ctx();

    return 0;
}
/**
 * @brief Save mbuf burst to the capture buffer
 *
 * @param devId Port number
 * @param burstBuf mbuf burst
 * @param pktCount Number of packets in the burst
 */
void DPDKAdapter::saveToBuf(uint8_t devId, MBuf_t** burstBuf, uint8_t pktCount)
{
    MBuf_t* m = NULL;

    DeviceInfo& devInfo = devices[devId];

    uint64_t rxTicksEnd = rte_get_tsc_cycles();
    uint64_t ticksDiff = rxTicksEnd - devInfo.rxTicksStart;
    uint64_t timestamp = (SEC_TO_NSEC * ticksDiff) / rte_get_tsc_hz();

    struct pcap_pkthdr hdr;
    memset(&hdr, 0, sizeof(pcap_pkthdr));

    hdr.ts.tv_sec = timestamp / SEC_TO_NSEC;
    hdr.ts.tv_usec = (timestamp % SEC_TO_NSEC) / 1000; // remaining ns -> us

    for (uint8_t pkt = 0; pkt < pktCount; pkt++)
    {
        m = burstBuf[pkt];

        hdr.caplen = m->pkt.data_len;
        hdr.len = hdr.caplen;

        // wrap around when the capture buffer is full
        if (devInfo.captureDataSize + sizeof(hdr) + m->pkt.data_len > devInfo.captureDataLength)
        {
            qDebug("Capture buffer is full with %u bytes", devInfo.captureDataSize);
            devInfo.captureDataSize = 0;
        }

        memcpy(devInfo.captureData + devInfo.captureDataSize, &hdr, sizeof(hdr));
        devInfo.captureDataSize += sizeof(hdr);

        memcpy(devInfo.captureData + devInfo.captureDataSize, m->pkt.data, m->pkt.data_len);
        devInfo.captureDataSize += m->pkt.data_len;
    }
}
    int tbl_index = onvm_ft_lookup_key(lb->ft, &key, (char **)&data);
    if (tbl_index == -ENOENT) {
        return table_add_entry(&key, flow);
    } else if (tbl_index < 0) {
        printf("Some other error occurred with the packet hashing\n");
        return -1;
    } else {
        data->last_pkt_cycles = lb->elapsed_cycles;
        *flow = data;
        return 0;
    }
}

static int
callback_handler(__attribute__((unused)) struct onvm_nf_info *nf_info) {
    lb->elapsed_cycles = rte_get_tsc_cycles();

    if ((lb->elapsed_cycles - lb->last_cycles) / rte_get_timer_hz() > lb->expire_time) {
        lb->last_cycles = lb->elapsed_cycles;
    }

    return 0;
}

static int
packet_handler(struct rte_mbuf *pkt, struct onvm_pkt_meta *meta,
               __attribute__((unused)) struct onvm_nf_info *nf_info) {
    static uint32_t counter = 0;
    struct ipv4_hdr* ip;
    struct ether_hdr *ehdr;
    struct flow_info *flow_info;
    int i, ret;
/**
 * @brief DPDKProfiler constructor
 *
 * @param coreId Core number
 * @param name Name of a profiled section
 */
DPDKProfiler::DPDKProfiler(uint8_t coreId, const char* name)
    : start_(0),
      coreId_(coreId),
      name_(name)
{
    start_ = rte_get_tsc_cycles();
}
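// Usage sketch (the profiled function is hypothetical, not from the source):
// the constructor stamps the TSC and the destructor above accounts the elapsed
// cycles, so a scoped instance times whatever block it lives in (RAII).
void rx_iteration(uint8_t coreId)
{
    DPDKProfiler prof(coreId, "rx_iteration"); // timing starts here
    // ... packet processing under measurement ...
} // prof's destructor records the duration for ("rx_iteration", coreId)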
int app_thread(void *arg)
{
    struct app_params *app = (struct app_params *) arg;
    uint32_t core_id = rte_lcore_id(), i, j;
    struct app_thread_data *t = &app->thread_data[core_id];
    uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
    uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));

    for (i = 0; ; i++) {
        /* Run regular pipelines */
        for (j = 0; j < n_regular; j++) {
            struct app_thread_pipeline_data *data = &t->regular[j];
            struct pipeline *p = data->be;

            rte_pipeline_run(p->p);
        }

        /* Run custom pipelines */
        for (j = 0; j < n_custom; j++) {
            struct app_thread_pipeline_data *data = &t->custom[j];

            data->f_run(data->be);
        }

        /* Timer */
        if ((i & 0xF) == 0) {
            uint64_t time = rte_get_tsc_cycles();
            uint64_t t_deadline = UINT64_MAX;

            if (time < t->deadline)
                continue;

            /* Timer for regular pipelines */
            for (j = 0; j < n_regular; j++) {
                struct app_thread_pipeline_data *data = &t->regular[j];
                uint64_t p_deadline = data->deadline;

                if (p_deadline <= time) {
                    data->f_timer(data->be);
                    p_deadline = time + data->timer_period;
                    data->deadline = p_deadline;
                }

                if (p_deadline < t_deadline)
                    t_deadline = p_deadline;
            }

            /* Timer for custom pipelines */
            for (j = 0; j < n_custom; j++) {
                struct app_thread_pipeline_data *data = &t->custom[j];
                uint64_t p_deadline = data->deadline;

                if (p_deadline <= time) {
                    data->f_timer(data->be);
                    p_deadline = time + data->timer_period;
                    data->deadline = p_deadline;
                }

                if (p_deadline < t_deadline)
                    t_deadline = p_deadline;
            }

            t->deadline = t_deadline;
        }
    }

    return 0;
}
static void
app_init_threads(struct app_params *app)
{
    uint64_t time = rte_get_tsc_cycles();
    uint32_t p_id;

    for (p_id = 0; p_id < app->n_pipelines; p_id++) {
        struct app_pipeline_params *params = &app->pipeline_params[p_id];
        struct app_pipeline_data *data = &app->pipeline_data[p_id];
        struct pipeline_type *ptype;
        struct app_thread_data *t;
        struct app_thread_pipeline_data *p;
        int lcore_id;

        lcore_id = cpu_core_map_get_lcore_id(app->core_map,
                                             params->socket_id,
                                             params->core_id,
                                             params->hyper_th_id);
        if (lcore_id < 0)
            rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
                      params->socket_id,
                      params->core_id,
                      (params->hyper_th_id) ? "h" : "");

        t = &app->thread_data[lcore_id];

        t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
        t->thread_req_deadline = time + t->timer_period;

        t->headroom_cycles = 0;
        t->headroom_time = rte_get_tsc_cycles();
        t->headroom_ratio = 0.0;

        t->msgq_in = app_thread_msgq_in_get(app,
                                            params->socket_id,
                                            params->core_id,
                                            params->hyper_th_id);
        if (t->msgq_in == NULL)
            rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
                      lcore_id);

        t->msgq_out = app_thread_msgq_out_get(app,
                                              params->socket_id,
                                              params->core_id,
                                              params->hyper_th_id);
        if (t->msgq_out == NULL)
            rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
                      lcore_id);

        ptype = app_pipeline_type_find(app, params->type);
        if (ptype == NULL)
            rte_panic("Init error: Unknown pipeline type \"%s\"\n",
                      params->type);

        p = (ptype->be_ops->f_run == NULL) ?
            &t->regular[t->n_regular] :
            &t->custom[t->n_custom];

        p->pipeline_id = p_id;
        p->be = data->be;
        p->f_run = ptype->be_ops->f_run;
        p->f_timer = ptype->be_ops->f_timer;
        p->timer_period = data->timer_period;
        p->deadline = time + data->timer_period;

        data->enabled = 1;

        if (ptype->be_ops->f_run == NULL)
            t->n_regular++;
        else
            t->n_custom++;
    }
}
void replay_packets()
{
    int ret;
    uint64_t tick_start;
    struct pcaprec_hdr_t hdr;
    struct rte_mbuf *m = NULL;
    FILE *file;

    /* Open the trace */
    printf("Opening file: %s\n", file_name);
    printf("Replay on %d interface(s)\n", number_of_ports);
    file = fopen(file_name, "r");
    if (file == NULL) {
        printf("Unable to open file: %s\n", file_name);
        exit(1);
    }

    /* Prepare the file pointer, skipping the pcap header, and set a large buffer */
    fseek(file, sizeof(struct pcap_hdr_t), SEEK_SET);
    ret = setvbuf(file, NULL, _IOFBF, 33554432);
    if (ret != 0)
        FATAL_ERROR("Cannot set the size of the file pointer to the trace...\n");

    /* Init start time */
    ret = gettimeofday(&start_time, NULL);
    if (ret != 0)
        FATAL_ERROR("Error: gettimeofday failed. Quitting...\n");
    last_time = start_time;
    tick_start = rte_get_tsc_cycles();

    /* Start stats */
    alarm(1);

    /* Infinite loop */
    for (;;) {
        /* If the system is quitting, break the cycle */
        if (unlikely(do_shutdown))
            break;

        /* Read the packet header from the trace */
        ret = fread((void *)&hdr, sizeof(hdr), 1, file);
        if (unlikely(ret <= 0))
            break;

        /* Alloc the buffer */
        m = rte_pktmbuf_alloc(pktmbuf_pool);

        /* Read packet data from the trace */
        ret = fread((void *)((char *)m->buf_addr + m->data_off), hdr.incl_len, 1, file);
        if (unlikely(ret <= 0))
            break;

        /* Set the buffer length */
        m->data_len = m->pkt_len = hdr.incl_len;

        //while (rte_eth_tx_burst(0 /*port_id*/, 0, &m, 1) != 1)
        while (vmxnet3_xmit_pkts(0 /*port_id*/, 0, &m, 1) != 1)
            if (unlikely(do_shutdown))
                break;

        /* Update stats; 8 B preamble + 4 B CRC + 12 B IFG on the wire */
        num_pkt_good_sent += times;
        num_bytes_good_sent += (hdr.incl_len + 24) * times;
    }
}
void replay_packets_full()
{
    int ret;
    uint64_t tick_start;
    struct pcaprec_hdr_t *hdr;
    struct rte_mbuf *m = NULL;
    unsigned char *buffer = NULL;
    unsigned char *cur_buffer = NULL;
    unsigned char *endofbuffer = NULL;
    long fsize = 0;
    FILE *f = NULL;
    size_t items_read = 0;
    struct rte_mbuf *tx_pkts[BURST_SIZE];
    int nb_pkts;
    int bytessent = 0;

    /* Open the trace and slurp the whole file into memory */
    printf("Opening file: %s\n", file_name);
    printf("Replay on %d interface(s)\n", number_of_ports);
    f = fopen(file_name, "r");
    if (f == NULL) {
        printf("Unable to open file: %s\n", file_name);
        exit(1);
    }
    fseek(f, 0, SEEK_END);
    fsize = ftell(f);
    fseek(f, 0, SEEK_SET);

    buffer = malloc(fsize + 1);
    items_read = fread(buffer, fsize, 1, f);
    fclose(f);
    if (items_read != 1) {
        printf("Read less than the original file size\n");
    }

    /* Init start time */
    ret = gettimeofday(&start_time, NULL);
    if (ret != 0)
        FATAL_ERROR("Error: gettimeofday failed. Quitting...\n");
    last_time = start_time;
    tick_start = rte_get_tsc_cycles();

    /* Start stats */
    alarm(1);

    endofbuffer = buffer + fsize;

    /* Infinite loop: replay the in-memory trace over and over */
    for (;;) {
        /* If the system is quitting, break the cycle */
        if (unlikely(do_shutdown)) {
            printf("Stop sending packets...outer loop\n");
            break;
        }

        /* Skip the global pcap header */
        cur_buffer = buffer + sizeof(struct pcap_hdr_t);

        for (;;) {
            if (unlikely(do_shutdown))
                break;

            /* Build a burst of packets from the in-memory trace */
            nb_pkts = 0;
            bytessent = 0;
            while (nb_pkts < BURST_SIZE) {
                if ((cur_buffer + sizeof(struct pcaprec_hdr_t)) >= endofbuffer) {
                    /* reached end of buffer while reading the header */
                    break;
                }
                hdr = (struct pcaprec_hdr_t *) cur_buffer;
                if ((cur_buffer + hdr->incl_len) > endofbuffer) {
                    /* reached end of buffer while reading the packet */
                    break;
                }
                cur_buffer += sizeof(struct pcaprec_hdr_t);

                /* Alloc the buffer and copy the packet into it */
                m = rte_pktmbuf_alloc(pktmbuf_pool);
                rte_memcpy((char *)m->buf_addr + m->data_off, cur_buffer, hdr->incl_len);

                /* Set the buffer length */
                m->data_len = m->pkt_len = hdr->incl_len;

                tx_pkts[nb_pkts++] = m;
                cur_buffer += hdr->incl_len;
                bytessent += (hdr->incl_len + 24) * times; /* 8 B preamble + 4 B CRC + 12 B IFG */
            }

            /* Transmit the burst, retrying only the part that was not sent */
            if (nb_pkts > 0) {
                int sent = 0;
                while (sent < nb_pkts) {
                    sent += rte_eth_tx_burst(0 /*port_id*/, 0, &tx_pkts[sent], nb_pkts - sent);
                    if (unlikely(do_shutdown)) {
                        printf("Stop sending packets while on transmit...\n");
                        break;
                    }
                }
            }

            if (unlikely(do_shutdown)) {
                printf("Stop sending packets...\n");
                break;
            }

            /* Update stats */
            num_pkt_good_sent += (nb_pkts * times);
            num_bytes_good_sent += bytessent;

            /* End of trace reached: restart from the top of the buffer */
            if (nb_pkts < BURST_SIZE)
                break;
        }
    }
}