Example #1
/* test case to time the number of cycles to round-trip a cache line between
 * two cores and back again.
 */
static void
time_cache_line_switch(void)
{
	/* allocate a full cache line for data; we use only the first byte of it */
	uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];

	unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
	volatile uint64_t *pdata = &data[0];
	*pdata = 1;
	rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
	while (*pdata)
		rte_pause();

	const uint64_t start_time = rte_rdtsc();
	for (i = 0; i < (1 << ITER_POWER); i++) {
		while (*pdata)
			rte_pause();
		*pdata = 1;
	}
	const uint64_t end_time = rte_rdtsc();

	while (*pdata)
		rte_pause();
	*pdata = 2;
	rte_eal_wait_lcore(slaveid);
	printf("==== Cache line switch test ===\n");
	printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER),
			end_time-start_time);
	printf("Ticks per iteration = %"PRIu64"\n\n",
			(end_time-start_time) >> ITER_POWER);
}
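The snippet above relies on a flip_bit() worker running on the peer lcore that is not shown here. A minimal sketch of that counterpart, assuming the 0/1/2 handshake implied by the test (wait for a non-zero flag, clear it, exit on the sentinel value 2):

/* Hedged sketch of the peer-lcore worker assumed by the test above: it
 * clears the flag each time the other core sets it, and exits once it
 * observes the sentinel value 2. */
static int
flip_bit(volatile uint64_t *arg)
{
	uint64_t old_val = 0;

	while (old_val != 2) {
		while (!*arg)
			rte_pause();
		old_val = *arg;
		*arg = 0;
	}
	return 0;
}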
Example #2
/*
 * Functional description: token-bucket initialization module
 * Author: jzheng
 * Date: 2014-06-02
 */
int sched_module_tb_init(dbg_local struct rte_mbuf *pktbuf, struct sched_stat_str *lpstat)
{
	if (lpstat->iDiscard == TRUE)
		return -1;
#if 0
	if (lpstat->iFlowIdx == -1)
		return -1;
#endif
	if (gFlow[lpstat->iFlowIdx].b_tb_initialized == TRUE)
		return 1;

	gFlow[lpstat->iFlowIdx].tb_uptream.eMeterMode = COLOR_BLIND;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiCBS = DEF_COMMIT_BURST_SIZE;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiCIR = DEF_COMMIT_INFO_RATE;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiPBS = DEF_PEEK_BURST_SIZE;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiPIR = DEF_PEEK_INFO_RATE;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiCToken = gFlow[lpstat->iFlowIdx].tb_uptream.uiCBS;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiPToken = gFlow[lpstat->iFlowIdx].tb_uptream.uiPBS;
	gFlow[lpstat->iFlowIdx].tb_uptream.uiCntLast = rte_rdtsc();

	gFlow[lpstat->iFlowIdx].tb_downstream.eMeterMode = COLOR_BLIND;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiCBS = DEF_COMMIT_BURST_SIZE;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiCIR = DEF_COMMIT_INFO_RATE;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiPBS = DEF_PEEK_BURST_SIZE;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiPIR = DEF_PEEK_INFO_RATE;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiCToken = gFlow[lpstat->iFlowIdx].tb_downstream.uiCBS;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiPToken = gFlow[lpstat->iFlowIdx].tb_downstream.uiPBS;
	gFlow[lpstat->iFlowIdx].tb_downstream.uiCntLast = rte_rdtsc();

	gFlow[lpstat->iFlowIdx].b_tb_initialized = TRUE;
	return 0;
}
Example #3
/**
 * get the clk frequency in Hz
 */
static uint64_t get_machclk_freq(void)
{
	uint64_t start = 0;
	uint64_t end = 0;
	uint64_t diff = 0;
	uint64_t clk_freq_hz = 0;
	struct timespec tv_start = {0, 0}, tv_end = {0, 0};
	struct timespec req = {0, 0};

	req.tv_sec = 1;
	req.tv_nsec = 0;

	clock_gettime(CLOCK_REALTIME, &tv_start);
	start = rte_rdtsc();

	if (nanosleep(&req, NULL) != 0) {
		perror("get_machclk_freq()");
		exit(EXIT_FAILURE);
	}

	clock_gettime(CLOCK_REALTIME, &tv_end);
	end = rte_rdtsc();

	diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * USEC_PER_SEC
		+ ((tv_end.tv_nsec - tv_start.tv_nsec + TEST_NSEC_MARGIN) /
		   USEC_PER_MSEC); /**< diff is in micro secs */

	if (diff == 0)
		return 0;

	clk_freq_hz = ((end - start) * USEC_PER_SEC / diff);
	return clk_freq_hz;
}
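One natural use of the returned frequency is converting later rte_rdtsc() deltas back to wall-clock time. A small illustrative helper (the function name is an assumption, not part of the snippet; the multiplication can overflow for very large deltas):

/* Illustrative helper: convert a TSC delta to microseconds using the
 * frequency measured by get_machclk_freq(). Overflows if
 * delta_cycles * USEC_PER_SEC exceeds 64 bits. */
static inline uint64_t
tsc_delta_to_us(uint64_t delta_cycles, uint64_t clk_freq_hz)
{
	return (delta_cycles * USEC_PER_SEC) / clk_freq_hz;
}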
Example #4
void
pktgen_interact(struct cmdline *cl)
{
	char c;
	struct pollfd fds;
	uint64_t curr_tsc;
	uint64_t next_poll;
	uint64_t reload;

	fds.fd      = cl->s_in;
	fds.events  = POLLIN;
	fds.revents = 0;

	c = -1;
	reload = (pktgen.hz / 1000);
	next_poll = rte_rdtsc() + reload;

	for (;; ) {
		rte_timer_manage();
		curr_tsc = rte_rdtsc();
		if (unlikely(curr_tsc >= next_poll)  ) {
			next_poll = curr_tsc + reload;
			if (poll(&fds, 1, 0) ) {
				if ( (fds.revents & (POLLERR | POLLNVAL | POLLHUP)) )
					break;
				if ( (fds.revents & POLLIN) ) {
					if (read(cl->s_in, &c, 1) < 0)
						break;
					if (cmdline_in(cl, &c, 1) < 0)
						break;
				}
			}
		}
	}
}
Example #5
static int
timed_deletes(struct efd_perf_params *params)
{
	unsigned int i, a;
	const uint64_t start_tsc = rte_rdtsc();
	int32_t ret;

	for (i = 0; i < KEYS_TO_ADD; i++) {
		ret = rte_efd_delete(params->efd_table, test_socket_id, keys[i],
				NULL);

		if (ret != 0) {
			printf("Error %d in rte_efd_delete - key=0x", ret);
			for (a = 0; a < params->key_size; a++)
				printf("%02x", keys[i][a]);
			printf("\n");

			return -1;
		}
	}

	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[params->cycle][DELETE] = time_taken / KEYS_TO_ADD;

	return 0;
}
Example #6
static int
timed_deletes(unsigned with_hash, unsigned with_data, unsigned table_index)
{
	unsigned i;
	const uint64_t start_tsc = rte_rdtsc();
	int32_t ret;

	for (i = 0; i < KEYS_TO_ADD; i++) {
		/* There are no delete functions with data, so just call two functions */
		if (with_hash)
			ret = rte_hash_del_key_with_hash(h[table_index],
							(const void *) keys[i],
							signatures[i]);
		else
			ret = rte_hash_del_key(h[table_index],
							(const void *) keys[i]);
		if (ret >= 0)
			positions[i] = ret;
		else {
			printf("Failed to add key number %u\n", ret);
			return -1;
		}
	}

	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][DELETE][with_hash][with_data] = time_taken/KEYS_TO_ADD;

	return 0;
}
Example #7
static int
timed_lookups(struct efd_perf_params *params)
{
	unsigned int i, j, a;
	const uint64_t start_tsc = rte_rdtsc();
	efd_value_t ret_data;

	for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
		for (j = 0; j < KEYS_TO_ADD; j++) {
			ret_data = rte_efd_lookup(params->efd_table,
					test_socket_id, keys[j]);
			if (ret_data != data[j]) {
				printf("Value mismatch using rte_efd_lookup: "
						"key #%u (0x", j);
				for (a = 0; a < params->key_size; a++)
					printf("%02x", keys[j][a]);
				printf(")\n");
				printf("  Expected %d, got %d\n", data[j],
						ret_data);

				return -1;
			}

		}
	}

	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[params->cycle][LOOKUP] = time_taken / NUM_LOOKUPS;

	return 0;
}
Example #8
uint16_t rx_pkt_sw(struct rte_mbuf **rx_mbuf, struct task_base *ptask)
{
	START_EMPTY_MEASSURE();
#ifdef BRAS_RX_BULK
	if (unlikely(rte_ring_sc_dequeue_bulk(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST) < 0)) {
		++ptask->rx_params_sw.last_read_ring;
		if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
			ptask->rx_params_sw.last_read_ring = 0;
		}
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
	else {

		return MAX_RING_BURST;
	}
#else
	uint16_t nb_rx = rte_ring_sc_dequeue_burst(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST);
	++ptask->rx_params_sw.last_read_ring;
	if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
		ptask->rx_params_sw.last_read_ring = 0;
	}

	if (nb_rx != 0) {
		return nb_rx;
	}
	else {
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
#endif
}
Example #9
s32 time_update(void)
{
#if 0
	//int ret = US_RET_OK;
	//int sec_delta = 0;
	//struct timeval tv;

	//cycles = rte_get_hpet_cycles();
	cycles = rte_rdtsc();

	if ((ret = gettimeofday((struct timeval *)&tv, NULL)) < 0) {
		return ret;
	}

	sec_delta = ((tv.tv_sec - Ts.tv_sec) * 1000000 + (tv.tv_usec - Ts.tv_nsec / 1000)) / 1000;

	if (sec_delta > 0) {
		jiffies += sec_delta;

		Ts.tv_sec = tv.tv_sec;
		Ts.tv_nsec = tv.tv_usec * 1000;
	}

#else

	u64 sec_1_cnt;
	u64 ms_1_cnt;
	u64 us_1_cnt;
	u64 t_delta;

	sec_1_cnt = rte_get_timer_hz();
	ms_1_cnt  = sec_1_cnt / 1000;
	us_1_cnt  = ms_1_cnt / 1000;

	cycles = rte_rdtsc();

	t_delta = cycles - last_1us_cycle;
	if (t_delta > us_1_cnt) {
		tv_usec += t_delta / us_1_cnt;
		if (Ts.tv_nsec > 1000000) {
			Ts.tv_nsec = 0;
			Ts.tv_sec++;
		} else {
			Ts.tv_nsec = tv_usec * 1000;
		}
		last_1us_cycle = cycles;
	}

	t_delta = cycles - last_1ms_cycle;
	if (t_delta > ms_1_cnt) {
		jiffies += t_delta / ms_1_cnt;
		last_1ms_cycle = cycles;
	}

	return US_RET_OK;
#endif
}
Example #10
/*
 * Softnic packet forward
 */
static void
softnic_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* Receive packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
			pkts_burst, nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);

	/* Retry if necessary */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif

	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
Example #11
static uint64_t
estimate_tsc_freq(void)
{
	RTE_LOG(WARNING, EAL, "WARNING: TSC frequency estimated roughly"
		" - clock timings may be less accurate.\n");
	/* assume that the sleep(1) will sleep for 1 second */
	uint64_t start = rte_rdtsc();
	sleep(1);
	return rte_rdtsc() - start;
}
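Because sleep(1) only sleeps for roughly one second, callers often round the raw estimate to a coarser granularity. A hedged sketch of such rounding (the 10 MHz step is an illustrative choice, not taken from the snippet):

/* Round the raw estimate to the nearest 10 MHz to absorb sleep() jitter
 * (the granularity here is chosen for illustration only). */
static uint64_t
round_tsc_freq(uint64_t raw_hz)
{
	const uint64_t granularity = 10 * 1000 * 1000; /* 10 MHz */

	return ((raw_hz + granularity / 2) / granularity) * granularity;
}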
Example #12
static __rte_always_inline uint32_t
enqueue_check(struct opdl_port *p,
		const struct rte_event ev[],
		uint16_t num,
		uint16_t num_events)
{
	uint16_t i;

	if (p->opdl->do_validation) {

		for (i = 0; i < num; i++) {
			if (ev[i].queue_id != p->next_external_qid) {
				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
					     "ERROR - port:[%u] - event wants"
					     " to enq to q_id[%u],"
					     " but should be [%u]",
					     opdl_pmd_dev_id(p->opdl),
					     p->id,
					     ev[i].queue_id,
					     p->next_external_qid);
				rte_errno = -EINVAL;
				return 0;
			}
		}

		/* Stats */
		if (p->p_type == OPDL_PURE_RX_PORT ||
				p->p_type == OPDL_ASYNC_PORT) {
			/* Stats */
			if (num_events) {
				p->port_stat[claim_pkts_requested] += num;
				p->port_stat[claim_pkts_granted] += num_events;
				p->port_stat[claim_non_empty]++;
				p->start_cycles = rte_rdtsc();
			} else {
				p->port_stat[claim_empty]++;
				p->start_cycles = 0;
			}
		} else {
			if (p->start_cycles) {
				uint64_t end_cycles = rte_rdtsc();
				p->port_stat[total_cycles] +=
					end_cycles - p->start_cycles;
			}
		}
	} else {
		if (num > 0 &&
				ev[0].queue_id != p->next_external_qid) {
			rte_errno = -EINVAL;
			return 0;
		}
	}

	return num;
}
Example #13
int thread_lb(struct lcore_cfg *lconf)
{
	struct rte_mbuf *rx_mbuf[MAX_RING_BURST] __rte_cache_aligned;
	struct task_base *task[MAX_TASKS_PER_CORE];
	uint64_t cur_tsc = rte_rdtsc();
	uint64_t next_term_tsc = cur_tsc + TERM_TIMEOUT;
	uint64_t drain_tsc = cur_tsc + DRAIN_TIMEOUT;
	const uint8_t nb_tasks = lconf->nb_tasks;

	for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {
		task[task_id] = lconf->task[task_id];
	}

	for (;;) {
		cur_tsc = rte_rdtsc();
		if (cur_tsc > drain_tsc) {
			drain_tsc = cur_tsc + DRAIN_TIMEOUT;

			FLUSH_STATS(lconf);
			/* check for termination request every timeout */
			if (cur_tsc > next_term_tsc) {
				next_term_tsc = cur_tsc + TERM_TIMEOUT;
				if (is_terminated(lconf)) {
					break;
				}


			}
			for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {

				if (!(task[task_id]->flags & (FLAG_TX_FLUSH | FLAG_NEVER_FLUSH))) {
				// Do not flush packets if we transmitted packets within the last drain_timeout;
				// this avoids flushing the queue under load every x seconds
					task[task_id]->flags |= FLAG_TX_FLUSH;
					continue;
				}
				/* This part of the code is only run on low load - when we need to flush,
				   i.e. when we did not send a bulk packets within last drain_timeout (16kpps if DRAIN_TIMEOUT=2msec).
				   All queues are flushed in this case */
				task[task_id]->flush_queues(task[task_id]);
			}
		}

		for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {
			uint16_t nb_rx = task[task_id]->rx_pkt(rx_mbuf, task[task_id]);

			if (likely(nb_rx)) {
				INCR_NBRX(nb_rx);
				INCR_RX_PKT_COUNT(task[task_id]->stats, nb_rx);
				task[task_id]->handle_pkt_bulk(rx_mbuf, task[task_id], nb_rx);
			}
		}
	}
	return 0;
}
Example #14
static int
timed_adds(unsigned with_hash, unsigned with_data, unsigned table_index)
{
	unsigned i;
	const uint64_t start_tsc = rte_rdtsc();
	void *data;
	int32_t ret;

	for (i = 0; i < KEYS_TO_ADD; i++) {
		data = (void *) ((uintptr_t) signatures[i]);
		if (with_hash && with_data) {
			ret = rte_hash_add_key_with_hash_data(h[table_index],
						(const void *) keys[i],
						signatures[i], data);
			if (ret < 0) {
				printf("Failed to add key number %u\n", ret);
				return -1;
			}
		} else if (with_hash && !with_data) {
			ret = rte_hash_add_key_with_hash(h[table_index],
						(const void *) keys[i],
						signatures[i]);
			if (ret >= 0)
				positions[i] = ret;
			else {
				printf("Failed to add key number %u\n", ret);
				return -1;
			}
		} else if (!with_hash && with_data) {
			ret = rte_hash_add_key_data(h[table_index],
						(const void *) keys[i],
						data);
			if (ret < 0) {
				printf("Failed to add key number %u\n", ret);
				return -1;
			}
		} else {
			ret = rte_hash_add_key(h[table_index], keys[i]);
			if (ret >= 0)
				positions[i] = ret;
			else {
				printf("Failed to add key number %u\n", ret);
				return -1;
			}
		}
	}

	const uint64_t end_tsc = rte_rdtsc();
	const uint64_t time_taken = end_tsc - start_tsc;

	cycles[table_index][ADD][with_hash][with_data] = time_taken/KEYS_TO_ADD;

	return 0;
}
Example #15
File: init.c Project: Cosios/dpdk
void
app_ping(void)
{
	unsigned i;
	uint64_t timestamp, diff_tsc;

	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
		    (p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT) &&
			(p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
				rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
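The worker side of this ping/response handshake is not shown. A purely hypothetical responder, taking the same request and response rings as arguments (only APP_MSG_REQ_PING and the ring usage are reused from the snippet; everything else is assumed), could look like:

/* Hypothetical responder: echo each request mbuf back on the response
 * ring so that app_ping() sees the core as alive. */
static void
app_core_handle_ping(struct rte_ring *ring_req, struct rte_ring *ring_resp)
{
	void *msg;

	if (rte_ring_sc_dequeue(ring_req, &msg) != 0)
		return; /* no request pending */

	/* For a ping, the payload is left untouched: reuse the same mbuf. */
	while (rte_ring_sp_enqueue(ring_resp, msg) == -ENOBUFS)
		rte_pause();
}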
Example #16
/*
 * Do a single performance test, of one type of operation.
 *
 * @param h
 *   hash table to run test on
 * @param func
 *   function to call (add, delete or lookup function)
 * @param avg_occupancy
 *   The average number of entries in each bucket of the hash table
 * @param invalid_pos_count
 *   The number of errors (e.g. due to a full bucket).
 * @return
 *   The average number of ticks per hash function call. A negative number
 *   signifies failure.
 */
static double
run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func,
		const struct tbl_perf_test_params *params, double *avg_occupancy,
		uint32_t *invalid_pos_count)
{
	uint64_t begin, end, ticks = 0;
	uint8_t *key = NULL;
	uint32_t *bucket_occupancies = NULL;
	uint32_t num_buckets, i, j;
	int32_t pos;

	/* Initialise */
	num_buckets = params->entries / params->bucket_entries;
	key = (uint8_t *) rte_zmalloc("hash key",
			params->key_len * sizeof(uint8_t), 16);
	if (key == NULL)
		return -1;

	bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies",
			num_buckets * sizeof(uint32_t), 16);
	if (bucket_occupancies == NULL) {
		rte_free(key);
		return -1;
	}

	ticks = 0;
	*invalid_pos_count = 0;

	for (i = 0; i < params->num_iterations; i++) {
		/* Prepare inputs for the current iteration */
		for (j = 0; j < params->key_len; j++)
			key[j] = (uint8_t) rte_rand();

		/* Perform operation, and measure time it takes */
		begin = rte_rdtsc();
		pos = func(h, key);
		end = rte_rdtsc();
		ticks += end - begin;

		/* Other work per iteration */
		if (pos < 0)
			*invalid_pos_count += 1;
		else
			bucket_occupancies[pos / params->bucket_entries]++;
	}
	*avg_occupancy = get_avg(bucket_occupancies, num_buckets);

	rte_free(bucket_occupancies);
	rte_free(key);

	return (double)ticks / params->num_iterations;
}
Example #17
static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
			       struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}
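The cycles_spent and calls counters accumulated above lend themselves to a simple average-cost query; an illustrative helper (not part of the DPDK service API, field names taken from the snippet):

/* Illustrative: average TSC cycles per service callback, guarding
 * against division by zero when the service has not run yet. */
static inline uint64_t
service_avg_cycles(const struct rte_service_spec_impl *s)
{
	return s->calls ? s->cycles_spent / s->calls : 0;
}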
Example #18
uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
	uint8_t last_read_portid;
	uint16_t nb_rx;

	START_EMPTY_MEASSURE();
	*mbufs = tbase->ws_mbuf->mbuf[0] +
		(RTE_ALIGN_CEIL(tbase->ws_mbuf->idx[0].prod, 2) & WS_MBUF_MASK);

	last_read_portid = tbase->rx_params_hw.last_read_portid;
	nb_rx = rte_eth_rx_burst(tbase->rx_params_hw.rx_pq[last_read_portid].port,
				 tbase->rx_params_hw.rx_pq[last_read_portid].queue,
				 *mbufs, MAX_PKT_BURST);

	++tbase->rx_params_hw.last_read_portid;
	if (unlikely(tbase->rx_params_hw.last_read_portid == tbase->rx_params_hw.nb_rxports)) {
		tbase->rx_params_hw.last_read_portid = 0;
	}
	if (likely(nb_rx > 0)) {
		TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
		return nb_rx;
	}
	TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
	return 0;
}
Example #19
/* Get CPU frequency */
static void
measure_cpu_frequency(void)
{
	uint64_t before = 0;
	uint64_t after = 0;

	/* How TSC changed in 1 second - it is the CPU frequency */
	before = rte_rdtsc();
	sleep(1);
	after = rte_rdtsc();
	cpu_freq = after - before;

	/* Round to millions */
	cpu_freq /= 1000000;
	cpu_freq *= 1000000;
}
Example #20
static inline void
do_vswitchd(void)
{
	static uint64_t last_stats_display_tsc = 0;
	static uint64_t next_tsc = 0;
	uint64_t curr_tsc_local;

	/* handle any packets from vswitchd */
	handle_request_from_vswitchd();

	/* 
	 * curr_tsc is accessed by all cores but is updated here for each loop
	 * which causes cacheline contention. By setting a defined update
	 * period for curr_tsc of 1us this contention is removed.
	 */
	curr_tsc_local = rte_rdtsc();
	if (curr_tsc_local >= next_tsc) {
		curr_tsc = curr_tsc_local;
		next_tsc = curr_tsc_local + tsc_update_period;
	}

	/* display stats every 'stats' sec */
	if ((curr_tsc - last_stats_display_tsc) / cpu_freq >= stats_display_interval
	              && stats_display_interval != 0)
	{
		last_stats_display_tsc = curr_tsc;
		stats_display();
	}
	flush_clients();
	flush_ports();
	flush_vhost_devs();
}
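The comment implies tsc_update_period is a cycle count corresponding to 1 µs. A hedged sketch of how it might be derived from the cpu_freq measured elsewhere in this code base (the initialiser itself is an assumption, not part of the snippet):

/* Illustrative initialisation: one microsecond expressed in TSC cycles,
 * derived from the previously measured cpu_freq global. */
static void
init_tsc_update_period(void)
{
	tsc_update_period = cpu_freq / 1000000; /* cycles per microsecond */
}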
Example #21
struct rte_mbuf *
ipv4_reassemble(lcore_conf_t *qconf, struct rte_mbuf *m,
                struct ether_hdr **eth_hdr_pp,
                struct ipv4_hdr **ip_hdr_pp)
{
    struct ipv4_hdr *ip_hdr = *ip_hdr_pp;
    struct rte_ip_frag_tbl *tbl;
    struct rte_ip_frag_death_row *dr;

    /* If it is a fragmented packet, then try to reassemble. */
    if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
        struct rte_mbuf *mo;

        tbl = qconf->frag_tbl;
        dr = &qconf->death_row;

        /* process this fragment. */
        mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, rte_rdtsc(), ip_hdr);
        if (mo == NULL)
            /* no packet to send out. */
            return NULL;

        /* we have our packet reassembled. */
        if (mo != m) {
            m = mo;
            *eth_hdr_pp = rte_pktmbuf_mtod(m, struct ether_hdr *);
            *ip_hdr_pp = (struct ipv4_hdr *)(*eth_hdr_pp + 1);
        }
    }

    /* Return the original packet, or the reassembled one. */
    return m;
}
Example #22
static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
	/* Assume there is no ethernet header */
	struct ipv6_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv6_hdr *);

	struct ipv6_extension_fragment *frag_hdr;
	uint16_t frag_data = 0;
	frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(pkt_hdr);
	if (frag_hdr != NULL)
		frag_data = rte_be_to_cpu_16(frag_hdr->frag_data);

	/* If it is a fragmented packet, then try to reassemble */
	if ((frag_data & RTE_IPV6_FRAG_USED_MASK) == 0)
		p->tx_buf[p->tx_buf_count++] = pkt;
	else {
		struct rte_mbuf *mo;
		struct rte_ip_frag_tbl *tbl = p->frag_tbl;
		struct rte_ip_frag_death_row *dr = &p->death_row;

		pkt->l3_len = sizeof(*pkt_hdr) + sizeof(*frag_hdr);

		/* Process this fragment */
		mo = rte_ipv6_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(), pkt_hdr,
				frag_hdr);
		if (mo != NULL)
			p->tx_buf[p->tx_buf_count++] = mo;

		rte_ip_frag_free_death_row(&p->death_row, 3);
	}
}
Example #23
static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
	/* Assume there is no ethernet header */
	struct ipv4_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv4_hdr *);

	/* Get "More fragments" flag and fragment offset */
	uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
	uint16_t frag_offset = (uint16_t)(frag_field & IPV4_HDR_OFFSET_MASK);
	uint16_t frag_flag = (uint16_t)(frag_field & IPV4_HDR_MF_FLAG);

	/* If it is a fragmented packet, then try to reassemble */
	if ((frag_flag == 0) && (frag_offset == 0))
		p->tx_buf[p->tx_buf_count++] = pkt;
	else {
		struct rte_mbuf *mo;
		struct rte_ip_frag_tbl *tbl = p->frag_tbl;
		struct rte_ip_frag_death_row *dr = &p->death_row;

		pkt->l3_len = sizeof(*pkt_hdr);

		/* Process this fragment */
		mo = rte_ipv4_frag_reassemble_packet(tbl, dr, pkt, rte_rdtsc(),
				pkt_hdr);
		if (mo != NULL)
			p->tx_buf[p->tx_buf_count++] = mo;

		rte_ip_frag_free_death_row(&p->death_row, 3);
	}
}
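Both reassembly examples pass rte_rdtsc() as the packet arrival timestamp, so the fragmentation table they use must be created with its timeout expressed in TSC cycles as well. An illustrative setup using rte_ip_frag_table_create() (bucket and entry counts are placeholder values, not taken from the snippets):

/* Illustrative setup: fragments older than roughly one second (in TSC
 * cycles) are aged out; sizing values are placeholders. */
static struct rte_ip_frag_tbl *
create_frag_table(int socket_id)
{
	uint64_t frag_cycles = rte_get_tsc_hz(); /* ~1 s timeout in cycles */

	return rte_ip_frag_table_create(4096 /* buckets */,
			16 /* entries per bucket */,
			4096 * 16 /* max entries */,
			frag_cycles, socket_id);
}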
Example #24
File: router.c Project: sdnnfv/dpdk
static void
l2sw_main_process(struct lcore_env *env)
{
    struct rte_mbuf *pkt_burst[MAX_PKT_BURST];
    uint8_t n_ports = rte_eth_dev_count();
    unsigned lcore_id = rte_lcore_id();
    uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
    const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
                               * BURST_TX_DRAIN_US;

    //RTE_LOG(INFO, MARIO, "[%u] Starting main processing.\n", lcore_id);

    prev_tsc = 0;
    timer_tsc = 0;
    while(1) {
        cur_tsc = rte_rdtsc();

        diff_tsc = cur_tsc - prev_tsc;
        if (unlikely(diff_tsc > drain_tsc)) {
            uint8_t port_id;
            for(port_id = 0; port_id < n_ports; port_id++) {
                if (env->tx_mbufs[port_id].len == 0)
                    continue;
                l2sw_send_burst(env, port_id, env->tx_mbufs[port_id].len);
                env->tx_mbufs[port_id].len = 0;
            }

            /* if timer is enabled */
            if (timer_period > 0) {
                /* advance the timer */
                timer_tsc += diff_tsc;
                /* if timer has reached its timeout */
                if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
                    /* do this only on master core */
                    if (lcore_id == rte_get_master_lcore()) {
                        //print_stats(env);
                        /* reset the timer */
                        timer_tsc = 0;
                    }
                }
            }
            prev_tsc = cur_tsc;
        }

        /* RX */
        uint8_t port_id;
        for (port_id = 0; port_id < n_ports; port_id++) {
            unsigned n_rx = rte_eth_rx_burst(port_id, lcore_id,
                                             pkt_burst, MAX_PKT_BURST);
            if (n_rx != 0) {
                //RTE_LOG(INFO, MARIO, "[%u-%u] %u packet(s) came.\n",
                //        lcore_id, port_id,  n_rx);
                __sync_fetch_and_add(&port_statistics[port_id].rx, n_rx);
            }

            ether_in(env, pkt_burst, n_rx, port_id);
        }
    }
    return ;
}
Example #25
static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_gen_server *task = (struct task_gen_server *)tbase;
	struct bundle_ctx *conn;
	int ret, ret2 = 0;

	token_time_update(&task->token_time, rte_rdtsc());

	if ((ret = fqueue_put(task->fqueue, mbufs, n_pkts)) != n_pkts) {
		uint8_t out[MAX_PKT_BURST];
		for (uint16_t j = 0; j < n_pkts - ret; ++j)
			out[j] = OUT_DISCARD;

		ret2 = task->base.tx_pkt(&task->base, mbufs + ret, n_pkts - ret, out);
	}
	if (task->handle_state == HANDLE_QUEUED) {
		if (handle_gen_queued(task) == 0) {
			if (handle_gen_scheduled(task) != 0)
				task->handle_state = HANDLE_SCHEDULED;
		}
	}
	else {
		if (handle_gen_scheduled(task) == 0) {
			if (handle_gen_queued(task) != 0)
				task->handle_state = HANDLE_QUEUED;
		}
	}
	return ret2;
}
Example #26
static __rte_always_inline void
update_on_dequeue(struct opdl_port *p,
		struct rte_event ev[],
		uint16_t num,
		uint16_t num_events)
{
	if (p->opdl->do_validation) {
		int16_t i;
		for (i = 0; i < num; i++)
			ev[i].queue_id =
				p->opdl->queue[p->queue_id].external_qid;

		/* Stats */
		if (num_events) {
			p->port_stat[claim_pkts_requested] += num;
			p->port_stat[claim_pkts_granted] += num_events;
			p->port_stat[claim_non_empty]++;
			p->start_cycles = rte_rdtsc();
		} else {
			p->port_stat[claim_empty]++;
			p->start_cycles = 0;
		}
	} else {
		if (num > 0)
			ev[0].queue_id =
				p->opdl->queue[p->queue_id].external_qid;
	}
}
Example #27
void input_proc_until(uint64_t deadline)
{
    struct timeval tv;
    fd_set in_fd;
    int ret = 1;

    /* Keep checking for input until select() returned 0 (timeout
       occurred before input was read) or current time has passed
       the deadline (which occurs when time progresses past the
       deadline between return of select() and the next
       iteration). */
    while (ret != 0 && tsc_diff_to_tv(rte_rdtsc(), deadline, &tv) == 0) {
        FD_ZERO(&in_fd);

        for (int i = 0; i < n_inputs; ++i) {
            FD_SET(inputs[i]->fd, &in_fd);
        }

        ret = select(max_input_fd + 1, &in_fd, NULL, NULL, &tv);

        if (ret > 0) {
            for (int i = 0; i < n_inputs; ++i) {
                if (FD_ISSET(inputs[i]->fd, &in_fd)) {
                    inputs[i]->proc_input(inputs[i]);
                }
            }
        }
    }
}
Example #28
void
rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
	void *ptr_data)
{
	struct rte_keepalive *keepcfg = ptr_data;
	int idx_core;

	for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES; idx_core++) {
		if (keepcfg->active_cores[idx_core] == 0)
			continue;

		switch (keepcfg->state_flags[idx_core]) {
		case ALIVE: /* Alive */
			keepcfg->state_flags[idx_core] = MISSING;
			keepcfg->last_alive[idx_core] = rte_rdtsc();
			break;
		case MISSING: /* MIA */
			print_trace("Core MIA. ", keepcfg, idx_core);
			keepcfg->state_flags[idx_core] = DEAD;
			break;
		case DEAD: /* Dead */
			keepcfg->state_flags[idx_core] = GONE;
			print_trace("Core died. ", keepcfg, idx_core);
			if (keepcfg->callback)
				keepcfg->callback(
					keepcfg->callback_data,
					idx_core
					);
			break;
		case GONE: /* Buried */
			break;
		}
	}
}
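The dispatcher above only advances cores towards DEAD; each monitored lcore is expected to reset its own state periodically with rte_keepalive_mark_alive(). A minimal sketch of such a worker loop (the loop body is illustrative; the core is assumed to have been registered with rte_keepalive_register_core() by the monitor):

/* Illustrative worker loop body: mark this lcore alive so that the
 * dispatcher's ALIVE -> MISSING -> DEAD progression keeps being reset. */
static int
worker_main(void *arg)
{
	struct rte_keepalive *keepcfg = arg;

	for (;;) {
		rte_keepalive_mark_alive(keepcfg);
		/* ... do the lcore's real work here ... */
	}
	return 0;
}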
Example #29
uint16_t rx_pkt_hw(struct rte_mbuf **rx_mbuf, struct task_base *ptask)
{
	START_EMPTY_MEASSURE();
#ifdef BRAS_RX_BULK
	uint16_t nb_rx = rte_eth_rx_burst(ptask->rx_params_hw.rx_port, ptask->rx_params_hw.rx_queue, rx_mbuf + ptask->rx_params_hw.nb_rxbulk, MAX_PKT_BURST - ptask->rx_params_hw.nb_rxbulk);
	if (likely(nb_rx > 0)) {
		ptask->rx_params_hw.nb_rxbulk += nb_rx;
		if (ptask->rx_params_hw.nb_rxbulk == MAX_PKT_BURST) {
			ptask->rx_params_hw.nb_rxbulk = 0;
			return MAX_PKT_BURST;
		}
		else {
			/* Don't increment EMPTY cycles. */
			return 0;
		}
	}
#else
	uint16_t nb_rx = rte_eth_rx_burst(ptask->rx_params_hw.rx_port, ptask->rx_params_hw.rx_queue, rx_mbuf, MAX_PKT_BURST);
	if (likely(nb_rx > 0)) {
		return nb_rx;
	}
#endif
	INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
	return 0;
}
Example #30
int user_on_transmission_opportunity(struct socket *sock)
{
	struct rte_mbuf *mbuf;
	struct msghdr msghdr;
	struct sockaddr_in sockaddrin;
	struct iovec iov;
	int i = 0;
	uint32_t to_send_this_time;
	uint64_t ts = rte_rdtsc();
	user_on_tx_opportunity_called++;

	to_send_this_time = app_glue_calc_size_of_data_to_send(sock);

	if(likely(to_send_this_time > 0))
	{
		mbuf = app_glue_get_buffer();
		if (unlikely(mbuf == NULL)) {
			user_on_tx_opportunity_cannot_get_buff++;
			return 0;
		}
		mbuf->pkt.data_len = 1448;
		sockaddrin.sin_family = AF_INET;
		sockaddrin.sin_addr.s_addr = inet_addr("192.168.1.2");
		sockaddrin.sin_port = htons(7777);
		msghdr.msg_namelen = sizeof(sockaddrin);
		msghdr.msg_name = &sockaddrin;
		msghdr.msg_iov = &iov;
		iov.head = mbuf;
		msghdr.msg_iovlen = 1;
		msghdr.msg_controllen = 0;
		msghdr.msg_control = 0;
		msghdr.msg_flags = 0;
		sock->sk->sk_route_caps |= NETIF_F_SG | NETIF_F_ALL_CSUM;
		i = kernel_sendmsg(sock, &msghdr, 1448);
		if(i <= 0) {
                        rte_pktmbuf_free(mbuf);
			user_on_tx_opportunity_api_failed++;
                }
	}
	else
	{
		user_on_tx_opportunity_api_not_called++;
	}
	user_on_tx_opportunity_cycles += rte_rdtsc() - ts;
	return i;
}