/*
 * Test case to time the number of cycles to round-trip a cache line
 * between two cores and back again.
 */
static void
time_cache_line_switch(void)
{
    /* allocate three cache lines of data; we use only the first element */
    uint64_t data[RTE_CACHE_LINE_SIZE * 3 / sizeof(uint64_t)];

    unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
    volatile uint64_t *pdata = &data[0];

    *pdata = 1;
    rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
    while (*pdata)
        rte_pause();

    const uint64_t start_time = rte_rdtsc();
    for (i = 0; i < (1 << ITER_POWER); i++) {
        while (*pdata)
            rte_pause();
        *pdata = 1;
    }
    const uint64_t end_time = rte_rdtsc();

    while (*pdata)
        rte_pause();
    *pdata = 2;
    rte_eal_wait_lcore(slaveid);

    printf("==== Cache line switch test ====\n");
    printf("Time for %u iterations = %"PRIu64" ticks\n", (1 << ITER_POWER),
            end_time - start_time);
    printf("Ticks per iteration = %"PRIu64"\n\n",
            (end_time - start_time) >> ITER_POWER);
}
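/*
 * For reference, a minimal sketch of the flip_bit() worker launched above;
 * an assumption reconstructed from the handshake the timing loop relies on:
 * the slave core waits for the flag to become non-zero, clears it, and exits
 * once it observes the sentinel value 2.
 */
static int
flip_bit(volatile uint64_t *arg)
{
    uint64_t old_val = 0;

    while (old_val != 2) {
        /* spin until the other core hands the cache line back */
        while (!*arg)
            rte_pause();
        old_val = *arg;
        *arg = 0;
    }
    return 0;
}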
/* set affinity for current thread */
int
rw_piot_thread_set_affinity(void)
{
    int s;
    pthread_t thread;

/*
 * According to the section VERSIONS of the CPU_ALLOC man page:
 *
 * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
 * in glibc 2.3.3.
 *
 * CPU_COUNT() first appeared in glibc 2.6.
 *
 * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(),
 * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(),
 * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
 * first appeared in glibc 2.7.
 */
#if defined(CPU_ALLOC)
    size_t size;
    cpu_set_t *cpusetp;

    cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
    if (cpusetp == NULL) {
        RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
        return -1;
    }

    size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
    CPU_ZERO_S(size, cpusetp);
    CPU_SET_S(rte_lcore_id(), size, cpusetp);

    thread = pthread_self();
    s = pthread_setaffinity_np(thread, size, cpusetp);
    if (s != 0) {
        RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
        CPU_FREE(cpusetp);
        return -1;
    }

    CPU_FREE(cpusetp);
#else /* CPU_ALLOC */
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(rte_lcore_id(), &cpuset);

    thread = pthread_self();
    s = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
    if (s != 0) {
        RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
        return -1;
    }
#endif
    return 0;
}
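/*
 * Usage sketch (assumption): pin each launched worker to its own lcore
 * before entering its work loop. worker_main() and its argument are
 * illustrative names, not part of the function above.
 */
static int
worker_main(__rte_unused void *arg)
{
    if (rw_piot_thread_set_affinity() != 0)
        return -1;
    /* ... per-lcore work loop ... */
    return 0;
}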
static void
spdk_nvmf_conn_destruct(struct spdk_nvmf_conn *conn)
{
    struct spdk_event *event;

    SPDK_TRACELOG(SPDK_TRACE_DEBUG, "conn %p\n", conn);
    conn->state = CONN_STATE_INVALID;

    event = spdk_event_allocate(rte_lcore_id(), _conn_destruct, conn,
                                NULL, NULL);
    spdk_poller_unregister(&conn->poller, event);
    rte_atomic32_dec(&g_num_connections[rte_lcore_id()]);
}
/*
 * Create a scheduler on the current lcore
 */
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
    int status;
    struct lthread_sched *new_sched;
    unsigned lcoreid = rte_lcore_id();

    RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

    if (stack_size == 0)
        stack_size = LTHREAD_MAX_STACK_SIZE;

    new_sched = rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
                                  RTE_CACHE_LINE_SIZE, rte_socket_id());
    if (new_sched == NULL) {
        RTE_LOG(CRIT, LTHREAD,
            "Failed to allocate memory for scheduler\n");
        return NULL;
    }

    _lthread_key_pool_init();

    new_sched->stack_size = stack_size;
    new_sched->birth = rte_rdtsc();
    THIS_SCHED = new_sched;

    status = _lthread_sched_alloc_resources(new_sched);
    if (status != SCHED_ALLOC_OK) {
        RTE_LOG(CRIT, LTHREAD,
            "Failed to allocate resources for scheduler code = %d\n",
            status);
        rte_free(new_sched);
        return NULL;
    }

    bzero(&new_sched->ctx, sizeof(struct ctx));

    new_sched->lcore_id = lcoreid;
    schedcore[lcoreid] = new_sched;
    new_sched->run_flag = 1;

    DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);

    rte_wmb();
    return new_sched;
}
/*
 * Run the lthread scheduler
 * This loop is the heart of the system
 */
void lthread_run(void)
{
    struct lthread_sched *sched = THIS_SCHED;
    struct lthread *lt = NULL;

    RTE_LOG(INFO, LTHREAD,
        "starting scheduler %p on lcore %u phys core %u\n",
        sched, rte_lcore_id(),
        rte_lcore_index(rte_lcore_id()));

    /* if more than one, wait for all schedulers to start */
    _lthread_schedulers_sync_start();

    /*
     * This is the main scheduling loop.
     * As long as there are tasks in existence we run this loop.
     * We check for:
     *   expired timers,
     *   the local ready queue,
     *   and the peer ready queue,
     * and resume lthreads ad infinitum.
     */
    while (!_lthread_sched_isdone(sched)) {

        rte_timer_manage();

        lt = _lthread_queue_poll(sched->ready);
        if (lt != NULL)
            _lthread_resume(lt);

        lt = _lthread_queue_poll(sched->pready);
        if (lt != NULL)
            _lthread_resume(lt);
    }

    /* if more than one, wait for all schedulers to stop */
    _lthread_schedulers_sync_stop();

    (THIS_SCHED) = NULL;

    RTE_LOG(INFO, LTHREAD,
        "stopping scheduler %p on lcore %u phys core %u\n",
        sched, rte_lcore_id(),
        rte_lcore_index(rte_lcore_id()));
    fflush(stdout);
}
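/*
 * A minimal bring-up sketch, assuming direct use of the internal creator
 * shown earlier: each lcore creates its scheduler and then spins in
 * lthread_run(). The launch-function name is illustrative; a stack size of
 * 0 requests the default per the creator above.
 */
static int
sched_launch(__rte_unused void *arg)
{
    if (_lthread_sched_create(0) == NULL)
        return -1;
    lthread_run();
    return 0;
}

/*
 * Typically invoked from main() after rte_eal_init():
 *     rte_eal_mp_remote_launch(sched_launch, NULL, CALL_MASTER);
 *     rte_eal_mp_wait_lcore();
 */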
int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
    int s;
    unsigned lcore_id;
    pthread_t tid;

    tid = pthread_self();

    s = pthread_setaffinity_np(tid, sizeof(rte_cpuset_t), cpusetp);
    if (s != 0) {
        RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
        return -1;
    }

    /* store socket_id in TLS for quick access */
    RTE_PER_LCORE(_socket_id) = eal_cpuset_socket_id(cpusetp);

    /* store cpuset in TLS for quick access */
    memmove(&RTE_PER_LCORE(_cpuset), cpusetp, sizeof(rte_cpuset_t));

    lcore_id = rte_lcore_id();
    if (lcore_id != (unsigned)LCORE_ID_ANY) {
        /* EAL thread will update lcore_config */
        lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
        memmove(&lcore_config[lcore_id].cpuset, cpusetp,
            sizeof(rte_cpuset_t));
    }

    return 0;
}
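/*
 * Usage sketch (assumption): pin the calling thread to CPU 2. On Linux,
 * rte_cpuset_t is an alias for cpu_set_t, so the standard CPU_* macros
 * apply; pin_to_cpu2() is an illustrative name.
 */
void
pin_to_cpu2(void)
{
    rte_cpuset_t cpuset;

    CPU_ZERO(&cpuset);
    CPU_SET(2, &cpuset);
    if (rte_thread_set_affinity(&cpuset) != 0)
        RTE_LOG(ERR, EAL, "could not pin thread to CPU 2\n");
}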
/*
 * Default diagnostic callback
 */
static uint64_t
_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
                         uint64_t diag_ref, const char *text,
                         uint64_t p1, uint64_t p2)
{
    uint64_t _p2;
    int lcore = (int) rte_lcore_id();

    switch (diag_event) {
    case LT_DIAG_LTHREAD_CREATE:
    case LT_DIAG_MUTEX_CREATE:
    case LT_DIAG_COND_CREATE:
        _p2 = dummy_ref;
        break;
    default:
        _p2 = p2;
        break;
    }

    /* use PRIx64 so the 64-bit arguments print portably */
    printf("%"PRIu64" %d %8.8"PRIx64" %8.8"PRIx64" %s %8.8"PRIx64" %8.8"PRIx64"\n",
           time, lcore, (uint64_t)(uintptr_t)lt, diag_ref, text, p1, _p2);

    return dummy_ref++;
}
int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
    /* run service on calling core, using all-ones as the service mask */
    if (!service_valid(id))
        return -EINVAL;

    struct core_state *cs = &lcore_states[rte_lcore_id()];
    struct rte_service_spec_impl *s = &rte_services[id];

    /* Atomically add this core to the mapped cores first, then examine if
     * we can run the service. This avoids a race condition between
     * checking the value, and atomically adding to the mapped count.
     */
    if (serialize_mt_unsafe)
        rte_atomic32_inc(&s->num_mapped_cores);

    if (service_mt_safe(s) == 0 &&
            rte_atomic32_read(&s->num_mapped_cores) > 1) {
        if (serialize_mt_unsafe)
            rte_atomic32_dec(&s->num_mapped_cores);
        return -EBUSY;
    }

    int ret = service_run(id, cs, UINT64_MAX);

    if (serialize_mt_unsafe)
        rte_atomic32_dec(&s->num_mapped_cores);

    return ret;
}
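/*
 * Usage sketch (assumption): poll one registered service from an ordinary
 * application lcore, backing off when another core currently holds an
 * MT-unsafe service. 'service_id' would be obtained elsewhere, e.g. via
 * rte_service_get_by_name(); poll_service() is an illustrative name.
 */
static void
poll_service(uint32_t service_id)
{
    int32_t ret = rte_service_run_iter_on_app_lcore(service_id, 1);

    if (ret == -EBUSY)
        rte_pause();    /* another core ran the MT-unsafe service */
    else if (ret == -EINVAL)
        RTE_LOG(ERR, USER1, "invalid service id %u\n", service_id);
}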
static void
l2sw_main_process(struct lcore_env *env)
{
    struct rte_mbuf *pkt_burst[MAX_PKT_BURST];
    uint8_t n_ports = rte_eth_dev_count();
    unsigned lcore_id = rte_lcore_id();
    uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
        / US_PER_S * BURST_TX_DRAIN_US;

    //RTE_LOG(INFO, MARIO, "[%u] Starting main processing.\n", lcore_id);

    prev_tsc = 0;
    timer_tsc = 0;

    while (1) {
        cur_tsc = rte_rdtsc();

        /* TX drain: flush queued packets after the drain period expires */
        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            uint8_t port_id;
            for (port_id = 0; port_id < n_ports; port_id++) {
                if (env->tx_mbufs[port_id].len == 0)
                    continue;
                l2sw_send_burst(env, port_id,
                                env->tx_mbufs[port_id].len);
                env->tx_mbufs[port_id].len = 0;
            }

            /* if timer is enabled */
            if (timer_period > 0) {
                /* advance the timer */
                timer_tsc += diff_tsc;
                /* if timer has reached its timeout */
                if (unlikely(timer_tsc >= (uint64_t)timer_period)) {
                    /* do this only on master core */
                    if (lcore_id == rte_get_master_lcore()) {
                        //print_stats(env);
                        /* reset the timer */
                        timer_tsc = 0;
                    }
                }
            }
            prev_tsc = cur_tsc;
        }

        /* RX */
        uint8_t port_id;
        for (port_id = 0; port_id < n_ports; port_id++) {
            unsigned n_rx = rte_eth_rx_burst(port_id, lcore_id,
                                             pkt_burst, MAX_PKT_BURST);
            if (n_rx != 0) {
                //RTE_LOG(INFO, MARIO, "[%u-%u] %u packet(s) came.\n",
                //        lcore_id, port_id, n_rx);
                __sync_fetch_and_add(&port_statistics[port_id].rx,
                                     n_rx);
                ether_in(env, pkt_burst, n_rx, port_id);
            }
        }
    }
    return;
}
/**************************************************************************//**
 * dbgPrintInfo - Print debug information code routine.
 *
 * DESCRIPTION
 * A routine called from the dbgPrintf() macro to print a debug message on the
 * console port. The output displays the message plus the prepended task name,
 * file name, function name and line number.
 *
 * The routine is called with a set of values and a message string.
 *
 * \is
 * \i <mInfo> Pointer to the mInfo_t structure of the current task.
 * \i <pFunc> String pointer of the function from the __FUNCTION__ macro.
 * \i <pFile> String pointer of the filename from the __FILE__ macro.
 * \i <line> A 32 bit integer of the line number __LINE__ where the message
 * statement appeared.
 * \i <pBuf> Pointer to the formatted message string.
 * \ie
 *
 * RETURNS: N/A.
 *
 * ERRNO: N/A
 */
void
dbgPrintInfo(mInfo_t *mInfo, c8_t *pFunc, c8_t *pFile, const int32_t line,
             int8_t *pBuf)
{
    int8_t data[MAX_DBG_MESSAGE + 1];
    c8_t *p;
    int32_t len;

    /* strip the directory part of the filename, if any */
    p = (c8_t *)strrchr(pFile, '\\');
    if (p == NULL)
        p = (c8_t *)strrchr(pFile, '/');

    /*
     * Make sure the message fits in the buffer space and format the first
     * part of the message.
     */
    snprintf((char *)data, sizeof(data), "%d:%-10s (%s:%d)",
             rte_lcore_id(),
             (mInfo->curr) ? mInfo->curr->name : "???",
             (!p) ? pFile : ++p, line);

    /*
     * If the prefix is shorter than 45 characters, pad the output so the
     * prefix and function name line up at a fixed 45-character column
     * before the message.
     */
    len = (strlen((char *)data) < 45) ? 45 - strlen((char *)data) : 0;

    fprintf(dbgFile, "%*s%*s:%s", 45 - len, (char *)data, len, pFunc,
            (char *)pBuf);
    fflush(dbgFile);
}
int
dpdk_packet_io(void)
{
    int ret;
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[rte_lcore_id()];

wait_for_connection:
    RTE_LOG(DEBUG, VROUTER, "%s[%lx]: waiting for packet transport\n",
            __func__, pthread_self());
    while (!vr_dpdk.packet_transport) {
        /* handle an IPC command */
        if (unlikely(vr_dpdk_lcore_cmd_handle(lcore)))
            return -1;
        usleep(VR_DPDK_SLEEP_SERVICE_US);
    }

    RTE_LOG(DEBUG, VROUTER, "%s[%lx]: FD %d\n", __func__, pthread_self(),
            ((struct vr_usocket *)vr_dpdk.packet_transport)->usock_fd);

    ret = vr_usocket_io(vr_dpdk.packet_transport);
    if (ret < 0) {
        vr_dpdk.packet_transport = NULL;
        /* handle an IPC command */
        if (unlikely(vr_dpdk_lcore_cmd_handle(lcore)))
            return -1;
        usleep(VR_DPDK_SLEEP_SERVICE_US);
        goto wait_for_connection;
    }

    return ret;
}
void
vr_dpdk_packet_wakeup(struct vr_interface *vif)
{
    struct vr_interface_stats *stats;
    struct vrouter *router;

    if (unlikely(vif == NULL)) {
        /* get global agent vif */
        router = vrouter_get(0);
        vif = router->vr_agent_if;
    }

    if (likely(vr_dpdk.packet_event_sock != NULL)) {
        if (likely(vif != NULL)) {
            stats = vif_get_stats(vif, rte_lcore_id());
            stats->vis_port_osyscalls++;
        } else {
            /* no agent interface - no counter */
        }

        if (vr_usocket_eventfd_write(vr_dpdk.packet_event_sock) < 0) {
            vr_usocket_close(vr_dpdk.packet_event_sock);
            vr_dpdk.packet_event_sock = NULL;
        }
    }
}
void
app_main_loop_rx(void)
{
    uint32_t i;
    int ret;

    RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id());

    for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
        uint16_t n_mbufs;

        n_mbufs = rte_eth_rx_burst(
            app.ports[i],
            0,
            app.mbuf_rx.array,
            app.burst_size_rx_read);

        if (n_mbufs == 0)
            continue;

        do {
            ret = rte_ring_sp_enqueue_bulk(
                app.rings_rx[i],
                (void **) app.mbuf_rx.array,
                n_mbufs);
        } while (ret < 0);
    }
}
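/*
 * Setup sketch (assumption): the single-producer enqueue above requires the
 * RX rings to be created with matching producer/consumer flags. The ring
 * size and name format here are illustrative.
 */
static struct rte_ring *
create_rx_ring(uint32_t port)
{
    char name[32];

    snprintf(name, sizeof(name), "app_ring_rx_%u", port);
    return rte_ring_create(name, 1024, rte_socket_id(),
                           RING_F_SP_ENQ | RING_F_SC_DEQ);
}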
void
app_main_loop_worker(void)
{
    struct app_mbuf_array *worker_mbuf;
    uint32_t i;

    RTE_LOG(INFO, USER1, "Core %u is doing work (no pipeline)\n",
            rte_lcore_id());

    worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
            RTE_CACHE_LINE_SIZE, rte_socket_id());
    if (worker_mbuf == NULL)
        rte_panic("Worker thread: cannot allocate buffer space\n");

    for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
        int ret;

        ret = rte_ring_sc_dequeue_bulk(
            app.rings_rx[i],
            (void **) worker_mbuf->array,
            app.burst_size_worker_read);

        if (ret == -ENOENT)
            continue;

        do {
            ret = rte_ring_sp_enqueue_bulk(
                app.rings_tx[i ^ 1],
                (void **) worker_mbuf->array,
                app.burst_size_worker_write);
        } while (ret < 0);
    }
}
/**
 * @brief RX routine
 */
void DPDKAdapter::rxRoutine()
{
    uint8_t pkt = 0;
    uint8_t rxPktCount = 0;
    uint8_t devId = 0;

    /* lcore ids can exceed 255, so use the full-width type */
    unsigned lcoreId = rte_lcore_id();
    LcoreInfo& coreInfo = cores[lcoreId];

    for (PortList_t::iterator itor = coreInfo.rxPortList.begin();
         itor != coreInfo.rxPortList.end(); itor++) {
        devId = *itor;
        DeviceInfo& devInfo = devices[devId];

        struct rte_eth_dev *dev = &rte_eth_devices[devId];
        if (!dev || !dev->data->dev_started) {
            continue;
        }

        rxPktCount = rte_eth_rx_burst(devId, 0, devInfo.rxBurstBuf,
                                      DPDK_RX_MAX_PKT_BURST);

        if (isRxStarted(devId)) {
            saveToBuf(devId, devInfo.rxBurstBuf, rxPktCount);
        }

        for (pkt = 0; pkt < rxPktCount; pkt++) {
            rte_pktmbuf_free(devInfo.rxBurstBuf[pkt]);
        }
    }
}
/**
 * Real function entrance run in the slave process
 **/
static int
slave_proc_func(void)
{
    struct rte_config *config;
    unsigned slave_id = rte_lcore_id();
    struct lcore_stat *cfg = &core_cfg[slave_id];

    if (prctl(PR_SET_PDEATHSIG, SIG_PARENT_EXIT, 0, 0, 0, 0) != 0)
        printf("Warning: Slave can't register for being notified in "
               "case master process exited\n");
    else {
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        act.sa_handler = sighand_parent_exit;
        if (sigaction(SIG_PARENT_EXIT, &act, NULL) != 0)
            printf("Fail to register signal handler:%d\n",
                   SIG_PARENT_EXIT);
    }

    /* Set slave process to SECONDARY to avoid operations like
     * dev_start/stop etc. */
    config = rte_eal_get_configuration();
    if (NULL == config)
        printf("Warning: Can't get rte_config\n");
    else
        config->process_type = RTE_PROC_SECONDARY;

    printf("Core %u is ready (pid=%d)\n", slave_id, (int)cfg->pid);

    exit(cfg->f(cfg->arg));
}
/*
 * Main thread that does the work, reading from INPUT_PORT
 * and writing to OUTPUT_PORT
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
    uint8_t port = 0;

    if (rte_eth_dev_socket_id(port) > 0 &&
            rte_eth_dev_socket_id(port) != (int)rte_socket_id())
        printf("WARNING, port %u is on remote NUMA node to "
                "polling thread.\n\tPerformance will "
                "not be optimal.\n", port);

    printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
            rte_lcore_id());

    for (;;) {
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                bufs, BURST_SIZE);
        uint16_t buf;

        if (unlikely(nb_rx == 0))
            continue;

        for (buf = 0; buf < nb_rx; buf++) {
            struct rte_mbuf *mbuf = bufs[buf];
            unsigned int len = rte_pktmbuf_data_len(mbuf);

            rte_pktmbuf_dump(stdout, mbuf, len);
            rte_pktmbuf_free(mbuf);
        }
    }
}
static void
init_per_lcore(void)
{
    lcore_conf_t *qconf;
    unsigned lcore_id = rte_lcore_id();

    qconf = &sk.lcore_conf[lcore_id];
    qconf->tsc_hz = rte_get_tsc_hz();
    qconf->start_us = (uint64_t)ustime();
    qconf->start_tsc = rte_rdtsc();
}
static int
l2sw_launch_one_lcore(void *env)
{
    //RTE_LOG(INFO, MARIO, "[%u] processing launch\n", rte_lcore_id());
    /* lcore ids can exceed 255, so use the full-width type */
    unsigned lcore_id = rte_lcore_id();

    l2sw_main_process(((struct lcore_env **)env)[lcore_id]);
    return 0;
}
/* signal handler to be notified after the parent process exits */
static void
sighand_parent_exit(int sig)
{
    printf("lcore = %u : parent process exited, sig=%d\n",
           rte_lcore_id(), sig);
    printf("Child leaving\n");
    exit(0);
}
void
lcore_cfg_alloc_hp(void)
{
    size_t mem_size = RTE_MAX_LCORE * sizeof(struct lcore_cfg);

    lcore_cfg = prox_zmalloc(mem_size, rte_socket_id());
    PROX_PANIC(lcore_cfg == NULL,
               "Could not allocate memory for core control structures\n");
    rte_memcpy(lcore_cfg, lcore_cfg_init, mem_size);

    /* get thread ID for master core */
    lcore_cfg[rte_lcore_id()].thread_id = pthread_self();
}
void
mcos_init_callback(void *mInfo, void *arg)
{
    mcos_config_t *cfg = (mcos_config_t *)arg;
    struct lcore_conf *qconf = &lcore_conf[rte_lcore_id()];

    // Save the mInfo structure pointer
    qconf->mInfo = mInfo;
    qconf->arg = cfg->arg;

    phil_demo_start(mInfo, cfg->arg);
}
static int
packet_launch_one_lcore(__rte_unused void *unused)
{
    unsigned lcore = rte_lcore_id();

    /* Stub: per-lcore RX/TX processing is not implemented yet. */
    (void)lcore;
    return 0;
}
/* set affinity for current EAL thread */
static int
eal_thread_set_affinity(void)
{
    unsigned lcore_id = rte_lcore_id();

    /* acquire system unique id */
    rte_gettid();

    /* update EAL thread core affinity */
    return rte_thread_set_affinity(&lcore_config[lcore_id].cpuset);
}
static void
process_pkts(struct rte_mbuf *buf[], int n)
{
    int i;
    int ret;
    uint32_t ft[5];
    unsigned char *payload;
    int len;

    for (i = 0; i < n; i++) {
#ifdef EXEC_MBUF_PA_CNT
        uint32_t lcoreid = rte_lcore_id();
        uint32_t *count;
        struct rte_hash *h = lcore_args[lcoreid].pa_ht;

        /* count how often each physical buffer address is seen */
        if (rte_hash_lookup_data(h,
                (const void *)&(buf[i]->buf_physaddr),
                (void **)&count) >= 0) {
            *count = *count + 1;
        } else {
            if (pacnt_hash_add(h,
                    (const void *)&(buf[i]->buf_physaddr), 1) < 0) {
                rte_exit(EINVAL,
                    "pacnt hash add failed in lcore %d\n",
                    lcoreid);
            }
        }
#endif

#if defined(EXEC_PC) || defined(EXEC_HASH)
        parse_packet_to_tuple(buf[i], ft);
#ifdef EXEC_PC
        ret = packet_classifier_search(ft);
        if (ret < 0)
            fprintf(stderr, "packet classifying failed!\n");
#else
        ret = hash_table_lkup((void *)ft);
#endif
#endif

#ifdef EXEC_CRC
        calc_chk_sum(buf[i]);
#endif

#ifdef EXEC_DPI
        ret = get_payload(buf[i], &payload, &len);
        if (ret < 0) {
            fprintf(stderr, "packet get payload failed!\n");
            continue;
        }
        ret = dpi_engine_exec(payload, len);
#endif
    }
}
void
vr_dpdk_packet_wakeup(void)
{
    /* to wake up the pkt0 thread we always use the current lcore event sock */
    struct vr_dpdk_lcore *lcorep = vr_dpdk.lcores[rte_lcore_id()];

    if (likely(lcorep->lcore_event_sock != NULL)) {
        if (vr_usocket_eventfd_write(lcorep->lcore_event_sock) < 0) {
            vr_usocket_close(lcorep->lcore_event_sock);
            lcorep->lcore_event_sock = NULL;
        }
    }
}
static inline void
app_lcore_io_rx_buffer_to_send(
    struct app_lcore_params_io *lp,
    uint32_t worker,
    struct rte_mbuf *mbuf,
    uint32_t bsz)
{
    uint32_t pos;
    int ret;

    pos = lp->rx.mbuf_out[worker].n_mbufs;
    lp->rx.mbuf_out[worker].array[pos++] = mbuf;
    if (likely(pos < bsz)) {
        lp->rx.mbuf_out[worker].n_mbufs = pos;
        return;
    }

    ret = rte_ring_sp_enqueue_bulk(
        lp->rx.rings[worker],
        (void **) lp->rx.mbuf_out[worker].array,
        bsz);

    if (unlikely(ret == -ENOBUFS)) {
        uint32_t k;
        for (k = 0; k < bsz; k++) {
            struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
            rte_pktmbuf_free(m);
        }
    }

    lp->rx.mbuf_out[worker].n_mbufs = 0;
    lp->rx.mbuf_out_flush[worker] = 0;

#if APP_STATS
    lp->rx.rings_iters[worker]++;
    if (likely(ret == 0))
        lp->rx.rings_count[worker]++;
    if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
        unsigned lcore = rte_lcore_id();

        printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
            lcore,
            (unsigned)worker,
            ((double) lp->rx.rings_count[worker]) /
            ((double) lp->rx.rings_iters[worker]));
        lp->rx.rings_iters[worker] = 0;
        lp->rx.rings_count[worker] = 0;
    }
#endif
}
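/*
 * Companion sketch (assumption): mbuf_out_flush is cleared above so that a
 * periodic flush routine can push out partially filled buffers that have
 * been idle for a full flush period. A minimal version might look like this;
 * the function name and n_workers parameter are illustrative.
 */
static inline void
app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
{
    uint32_t worker;

    for (worker = 0; worker < n_workers; worker++) {
        int ret;

        /* skip buffers that were just flushed, or that are empty */
        if (likely((lp->rx.mbuf_out_flush[worker] == 0) ||
                   (lp->rx.mbuf_out[worker].n_mbufs == 0))) {
            lp->rx.mbuf_out_flush[worker] = 1;
            continue;
        }

        ret = rte_ring_sp_enqueue_bulk(
            lp->rx.rings[worker],
            (void **) lp->rx.mbuf_out[worker].array,
            lp->rx.mbuf_out[worker].n_mbufs);

        if (unlikely(ret < 0)) {
            uint32_t k;
            for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k++)
                rte_pktmbuf_free(lp->rx.mbuf_out[worker].array[k]);
        }

        lp->rx.mbuf_out[worker].n_mbufs = 0;
        lp->rx.mbuf_out_flush[worker] = 1;
    }
}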
/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(void)
{
    const uint8_t nb_ports = rte_eth_dev_count();
    uint8_t port;

    /*
     * Check that the port is on the same NUMA node as the polling thread
     * for best performance.
     */
    for (port = 0; port < nb_ports; port++)
        if (rte_eth_dev_socket_id(port) > 0 &&
                rte_eth_dev_socket_id(port) != (int)rte_socket_id())
            printf("WARNING, port %u is on remote NUMA node to "
                    "polling thread.\n\tPerformance will "
                    "not be optimal.\n", port);

    printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
            rte_lcore_id());

    /* Run until the application is quit or killed. */
    for (;;) {
        /*
         * Receive packets on a port and forward them on the paired
         * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
         */
        for (port = 0; port < nb_ports; port++) {
            /* Get burst of RX packets, from first port of pair. */
            struct rte_mbuf *bufs[BURST_SIZE];
            const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                    bufs, BURST_SIZE);

            if (unlikely(nb_rx == 0))
                continue;

            /* Send burst of TX packets, to second port of pair. */
            const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                    bufs, nb_rx);

            /* Free any unsent packets. */
            if (unlikely(nb_tx < nb_rx)) {
                uint16_t buf;
                for (buf = nb_tx; buf < nb_rx; buf++)
                    rte_pktmbuf_free(bufs[buf]);
            }
        }
    }
}
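/*
 * Launch sketch (assumption): in the style of the DPDK skeleton example,
 * after EAL, port and mempool initialization the forwarding loop is simply
 * called on the master lcore. The initialization steps are elided here.
 */
int
main(int argc, char *argv[])
{
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* ... port and mempool initialization elided ... */

    /* Call lcore_main on the master core only. */
    lcore_main();

    return 0;
}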
void
stat_mbuf(struct rte_mbuf *mbuf, uint8_t in, uint8_t drop)
{
    struct net_device *ndev;
    unsigned lcore_id = rte_lcore_id();

    ndev = net_device_get(mbuf->port);
    if ((ndev->flag & NET_DEV_F_DISABLE) == NET_DEV_F_DISABLE)
        return;

    if (in) {
        if (drop)
            ndev->stat.rx.drop[lcore_id]++;
        else
            ndev->stat.rx.recv[lcore_id]++;
    } else {
        if (drop)
            ndev->stat.tx.drop[lcore_id]++;
        else
            ndev->stat.tx.xmit[lcore_id]++;
    }
}
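/*
 * Aggregation sketch (assumption): because stat_mbuf() keeps one counter
 * slot per lcore, a reader can sum the slots without locking. The helper
 * below illustrates this for the RX receive counter; the function name is
 * illustrative.
 */
static uint64_t
stat_rx_recv_total(struct net_device *ndev)
{
    uint64_t total = 0;
    unsigned lcore_id;

    RTE_LCORE_FOREACH(lcore_id)
        total += ndev->stat.rx.recv[lcore_id];
    return total;
}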
static struct h_scalar *
ftp_cache_create_fn(struct h_table *ht, void *key)
{
    struct ftp_cache_key *k = (struct ftp_cache_key *)key;
    struct ftp_cache *fc;
    uint32_t lcore_id = rte_lcore_id();

    if (rte_mempool_mc_get(ftp_cache_tbl.mem_cache[lcore_id],
                           (void **)&fc) < 0)
        return NULL;

    fc->sip = k->sip;
    fc->dip = k->dip;
    fc->dport = k->dport;
    fc->proto_mark = k->proto_mark;
    fc->lcore_id = lcore_id;
    rte_spinlock_init(&fc->lock);

    return &fc->h_scalar;
}
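/*
 * Counterpart sketch (assumption): entries obtained with
 * rte_mempool_mc_get() above must eventually be returned to the same
 * per-lcore pool, which is why fc->lcore_id is recorded at creation time.
 * A destroy callback might look like this; the function name follows the
 * create_fn naming but is illustrative.
 */
static void
ftp_cache_destroy_fn(struct h_scalar *ent)
{
    struct ftp_cache *fc = container_of(ent, struct ftp_cache, h_scalar);

    rte_mempool_mp_put(ftp_cache_tbl.mem_cache[fc->lcore_id], fc);
}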