/**
 * Burst enqueue: place crypto operations on the ingress queue for
 * processing.
 *
 * @param __qp Queue pair to process
 * @param ops Crypto operations for processing
 * @param nb_ops Number of crypto operations for processing
 *
 * @return
 * - Number of crypto operations enqueued
 */
static uint16_t
aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct aesni_mb_qp *qp = __qp;
	unsigned int count;

	/* best-effort enqueue; free-space output not needed, hence NULL */
	count = rte_ring_enqueue_burst(qp->ingress_queue, (void **)ops,
			nb_ops, NULL);

	qp->stats.enqueued_count += count;

	return count;
}
/** * This thread receives mbufs from the port and affects them an internal * sequence number to keep track of their order of arrival through an * mbuf structure. * The mbufs are then passed to the worker threads via the rx_to_workers * ring. */ static int rx_thread(struct rte_ring *ring_out) { const uint8_t nb_ports = rte_eth_dev_count(); uint32_t seqn = 0; uint16_t i, ret = 0; uint16_t nb_rx_pkts; uint8_t port_id; struct rte_mbuf *pkts[MAX_PKTS_BURST]; RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id()); while (!quit_signal) { for (port_id = 0; port_id < nb_ports; port_id++) { if ((portmask & (1 << port_id)) != 0) { /* receive packets */ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKTS_BURST); if (nb_rx_pkts == 0) { LOG_DEBUG(REORDERAPP, "%s():Received zero packets\n", __func__); continue; } app_stats.rx.rx_pkts += nb_rx_pkts; /* mark sequence number */ for (i = 0; i < nb_rx_pkts; ) pkts[i++]->seqn = seqn++; /* enqueue to rx_to_workers ring */ ret = rte_ring_enqueue_burst(ring_out, (void *) pkts, nb_rx_pkts); app_stats.rx.enqueue_pkts += ret; if (unlikely(ret < nb_rx_pkts)) { app_stats.rx.enqueue_failed_pkts += (nb_rx_pkts-ret); pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret); } } } } return 0; }
/** * This thread takes bursts of packets from the rx_to_workers ring and * Changes the input port value to output port value. And feds it to * workers_to_tx */ static int worker_thread(void *args_ptr) { const uint8_t nb_ports = rte_eth_dev_count(); uint16_t i, ret = 0; uint16_t burst_size = 0; struct worker_thread_args *args; struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL }; struct rte_ring *ring_in, *ring_out; const unsigned xor_val = (nb_ports > 1); args = (struct worker_thread_args *) args_ptr; ring_in = args->ring_in; ring_out = args->ring_out; RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id()); while (!quit_signal) { /* dequeue the mbufs from rx_to_workers ring */ burst_size = rte_ring_dequeue_burst(ring_in, (void *)burst_buffer, MAX_PKTS_BURST); if (unlikely(burst_size == 0)) continue; __sync_fetch_and_add(&app_stats.wkr.dequeue_pkts, burst_size); /* just do some operation on mbuf */ for (i = 0; i < burst_size;) burst_buffer[i++]->port ^= xor_val; /* enqueue the modified mbufs to workers_to_tx ring */ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size); __sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret); if (unlikely(ret < burst_size)) { /* Return the mbufs to their respective pool, dropping packets */ __sync_fetch_and_add(&app_stats.wkr.enqueue_failed_pkts, (int)burst_size - ret); pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret); } } return 0; }
void counter_register_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx) { if (nb_rx == 0) return; struct counter_t *counter = (struct counter_t *) arg; uint64_t start_a = rte_get_tsc_cycles(), diff_a; if (nb_rx > rte_ring_free_count(counter->ring)) { RTE_LOG(ERR, COUNTER, "Not enough free entries in ring!\n"); } // enqueue packet in ring // this methode must be thread safe struct rte_mbuf *bulk[nb_rx]; unsigned nb_registered = 0; for (unsigned i = 0; i < nb_rx; ++i) { struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i], struct ether_hdr *); if (!is_same_ether_addr(&counter->rx_register->mac, ð->d_addr)) { continue; } bulk[nb_registered] = rte_pktmbuf_clone(buffer[i], counter->clone_pool); if (bulk[nb_registered] == NULL) { RTE_LOG(ERR, COUNTER, "Could not clone mbuf!\n"); continue; } nb_registered += 1; } int n = rte_ring_enqueue_burst(counter->ring,(void * const*) &bulk, nb_registered); if (n < nb_rx) { RTE_LOG(ERR, COUNTER, "Could not enqueue every new packtes for registration! " "(%"PRIu32"/%"PRIu32") free: %"PRIu32"\n", n, nb_rx, rte_ring_free_count(counter->ring)); } diff_a = rte_get_tsc_cycles() - start_a; counter->aTime += diff_a;//* 1000.0 / rte_get_tsc_hz(); counter->nb_measurements_a += nb_rx; }
/*
 * Main transmit loop: fills a constant payload buffer with random bytes,
 * then repeatedly allocates mbufs (from an OVS queue or an app-level
 * mempool, selected at compile time via ALLOC_METHOD), copies the payload
 * into each mbuf, and pushes them onto tx_ring until SIGINT sets `stop`.
 * With USE_BURST defined it processes BURST_SIZE packets at a time and
 * also drains rx_ring for statistics.
 * NOTE(review): writes through mbuf->buf_addr, i.e. raw buffer start,
 * not the data offset — presumably intentional for this benchmark;
 * confirm against the receiving side.
 */
void send_loop(void)
{
	RTE_LOG(INFO, APP, "send_loop()\n");
	char pkt[PKT_SIZE] = {0};
	int nreceived;

	int retval = 0;
	(void) retval; /* only used in some preprocessor configurations */
#ifdef CALC_CHECKSUM
	unsigned int kk = 0;
#endif

	srand(time(NULL));

	//Initializate packet contents with random bytes
	int i;
	for(i = 0; i < PKT_SIZE; i++)
		pkt[i] = rand()%256;

#if ALLOC_METHOD == ALLOC_APP
	/* look up a pre-existing mempool by name (created by OVS/DPDK) */
	struct rte_mempool * packets_pool = rte_mempool_lookup("ovs_mp_1500_0_262144");
	//struct rte_mempool * packets_pool = rte_mempool_lookup("packets");

	//Create mempool
	//struct rte_mempool * packets_pool = rte_mempool_create(
	//	"packets",
	//	NUM_PKTS,
	//	MBUF_SIZE,
	//	CACHE_SIZE,			//This is the size of the mempool cache
	//	sizeof(struct rte_pktmbuf_pool_private),
	//	rte_pktmbuf_pool_init,
	//	NULL,
	//	rte_pktmbuf_init,
	//	NULL,
	//	rte_socket_id(),
	//	0 /*NO_FLAGS*/);

	if(packets_pool == NULL)
	{
		RTE_LOG(INFO, APP, "rte_errno: %s\n", rte_strerror(rte_errno));
		rte_exit(EXIT_FAILURE, "Cannot find memory pool\n");
	}

	RTE_LOG(INFO, APP, "There are %d free packets in the pool\n",
		rte_mempool_count(packets_pool));
#endif

#ifdef USE_BURST
	struct rte_mbuf * packets_array[BURST_SIZE] = {0};
	struct rte_mbuf * packets_array_rx[BURST_SIZE] = {0};
	int ntosend;
	int n;
	(void) n;

	/* prealloc packets: spin until the bulk get succeeds or we stop */
	do
	{
		n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
	} while(n != 0 && !stop);
	ntosend = BURST_SIZE;
#else
	struct rte_mbuf * mbuf;
	/* prealloc packet */
	do {
		mbuf = rte_pktmbuf_alloc(packets_pool);
	} while(mbuf == NULL);
#endif

	RTE_LOG(INFO, APP, "Starting sender loop\n");
	signal (SIGINT, crtl_c_handler);
	stop = 0;
	while(likely(!stop))
	{
		/* busy-wait while externally paused */
		while(pause_);
#ifdef USE_BURST
	#if ALLOC_METHOD == ALLOC_OVS
		//Try to get BURS_SIZE free slots
		ntosend = rte_ring_dequeue_burst(alloc_q, (void **) packets_array, BURST_SIZE);
	#elif ALLOC_METHOD == ALLOC_APP
		/* mbufs were preallocated once above and are reused every
		 * iteration; ntosend stays at BURST_SIZE */
		//do
		//{
		//	n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
		//} while(n != 0 && !stop);
		//ntosend = BURST_SIZE;
	#else
	#error "No implemented"
	#endif

		//Copy data to the buffers
		for(i = 0; i < ntosend; i++)
		{
			rte_memcpy(packets_array[i]->buf_addr, pkt, PKT_SIZE);
			//fill_packet(packets_array[i]->pkt.data);
			packets_array[i]->next = NULL;
			packets_array[i]->pkt_len = PKT_SIZE;
			packets_array[i]->data_len = PKT_SIZE;

#ifdef CALC_CHECKSUM
			/* NOTE(review): this inner block reuses loop index i,
			 * which terminates the outer copy loop early — looks
			 * like a latent bug in the CALC_CHECKSUM build; confirm */
			for(i = 0; i < ntosend; i++)
				for(kk = 0; kk < 8; kk++)
					checksum += ((uint64_t *)packets_array[i]->buf_addr)[kk];
#endif
		}

		//Enqueue data (try until all the allocated packets are enqueue)
		i = 0;
		while(i < ntosend && !stop)
		{
			i += rte_ring_enqueue_burst(tx_ring, (void **) &packets_array[i], ntosend - i);

			/* also dequeue some packets */
			nreceived= rte_ring_dequeue_burst(rx_ring, (void **) packets_array_rx, BURST_SIZE);
			rx += nreceived; /* update statistics */
		}
#else // [NO] USE_BURST
	#if ALLOC_METHOD == ALLOC_OVS //Method 1
		//Read a buffer to be used as a buffer for a packet
		retval = rte_ring_dequeue(alloc_q, (void **)&mbuf);
		if(retval != 0)
		{
#ifdef CALC_ALLOC_STATS
			//stats.alloc_fails++;
#endif
			continue;
		}
	#elif ALLOC_METHOD == ALLOC_APP //Method 2
		//mbuf = rte_pktmbuf_alloc(packets_pool);
		//if(mbuf == NULL)
		//{
		//#ifdef CALC_ALLOC_STATS
		//	stats.alloc_fails++;
		//#endif
		//	continue;
		//}
	#else
	#error "ALLOC_METHOD has a non valid value"
	#endif

#if DELAY_CYCLES > 0
		//This loop increases number of packets per second (don't ask me why)
		unsigned long long j = 0;
		for(j = 0; j < DELAY_CYCLES; j++)
			asm("");
#endif

		//Copy packet to the correct buffer
		rte_memcpy(mbuf->buf_addr, pkt, PKT_SIZE);
		//fill_packet(mbuf->pkt.data);
		//mbuf->pkt.next = NULL;
		//mbuf->pkt.pkt_len = PKT_SIZE;
		//mbuf->pkt.data_len = PKT_SIZE;
		(void) pkt;
		mbuf->next = NULL;
		mbuf->pkt_len = PKT_SIZE;
		mbuf->data_len = PKT_SIZE;
#ifdef CALC_CHECKSUM
		for(kk = 0; kk < 8; kk++)
			checksum += ((uint64_t *)mbuf->buf_addr)[kk];
#endif

		//this method avoids dropping packets:
		//Simply tries until the packet is inserted in the queue
		tryagain:
		retval = rte_ring_enqueue(tx_ring, (void *) mbuf);
		if(retval == -ENOBUFS && !stop)
		{
#ifdef CALC_TX_TRIES
			//stats.tx_retries++;
#endif
			goto tryagain;
		}

#ifdef CALC_TX_STATS
		//stats.tx++;
#endif

#endif //USE_BURST
	}

#ifdef CALC_CHECKSUM
	printf("Checksum was %" PRIu64 "\n", checksum);
#endif
}