Example #1
/*
 * Run a single instance of the throughput test. When attempting to determine
 * the maximum packet rate this will be invoked multiple times with the only
 * difference between runs being the target PPS rate.
 */
static int run_test_single(odp_cpumask_t *thd_mask_tx,
			   odp_cpumask_t *thd_mask_rx,
			   test_status_t *status)
{
	odph_odpthread_t thd_tbl[MAX_WORKERS];
	thread_args_t args_tx, args_rx;
	uint64_t expected_tx_cnt;
	int num_tx_workers, num_rx_workers;
	odph_odpthread_params_t thr_params;

	memset(&thr_params, 0, sizeof(thr_params));
	thr_params.thr_type = ODP_THREAD_WORKER;
	thr_params.instance = gbl_args->instance;

	odp_atomic_store_u32(&shutdown, 0);

	memset(thd_tbl, 0, sizeof(thd_tbl));
	memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
	memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size);

	expected_tx_cnt = status->pps_curr * gbl_args->args.duration;

	/* start receiver threads first */
	thr_params.start  = run_thread_rx;
	thr_params.arg    = &args_rx;
	args_rx.batch_len = gbl_args->args.rx_batch_len;
	odph_odpthreads_create(&thd_tbl[0], thd_mask_rx, &thr_params);
	odp_barrier_wait(&gbl_args->rx_barrier);
	num_rx_workers = odp_cpumask_count(thd_mask_rx);

	/* then start transmitters */
	thr_params.start  = run_thread_tx;
	thr_params.arg    = &args_tx;
	num_tx_workers    = odp_cpumask_count(thd_mask_tx);
	args_tx.pps       = status->pps_curr / num_tx_workers;
	args_tx.duration  = gbl_args->args.duration;
	args_tx.batch_len = gbl_args->args.tx_batch_len;
	odph_odpthreads_create(&thd_tbl[num_rx_workers], thd_mask_tx,
			       &thr_params);
	odp_barrier_wait(&gbl_args->tx_barrier);

	/* wait for transmitter threads to terminate */
	odph_odpthreads_join(&thd_tbl[num_rx_workers]);

	/* delay to allow transmitted packets to reach the receivers */
	odp_time_wait_ns(SHUTDOWN_DELAY_NS);

	/* indicate to the receivers to exit */
	odp_atomic_store_u32(&shutdown, 1);

	/* wait for receivers */
	odph_odpthreads_join(&thd_tbl[0]);

	if (!status->warmup)
		return process_results(expected_tx_cnt, status);

	return 1;
}
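
How this function gets driven is outside the snippet, so here is a minimal
sketch (an assumption, not code from the example): re-run with an updated
target until run_test_single()'s return value, fed by process_results(),
reports that the search is finished. next_pps() is a hypothetical helper
that picks the next rate to try.

static int find_max_rate(odp_cpumask_t *thd_mask_tx,
			 odp_cpumask_t *thd_mask_rx,
			 test_status_t *status)
{
	/* keep testing while run_test_single() asks for another pass */
	while (run_test_single(thd_mask_tx, thd_mask_rx, status))
		status->pps_curr = next_pps(status); /* hypothetical step */

	return 0;
}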
Example #2
/*
 * Run a single instance of the throughput test. When attempting to determine
 * the maximum packet rate this will be invoked multiple times with the only
 * difference between runs being the target PPS rate.
 */
static int run_test_single(odp_cpumask_t *thd_mask_tx,
			   odp_cpumask_t *thd_mask_rx,
			   test_status_t *status)
{
	odph_linux_pthread_t thd_tbl[MAX_WORKERS];
	thread_args_t args_tx, args_rx;
	uint64_t expected_tx_cnt;
	int num_tx_workers, num_rx_workers;

	odp_atomic_store_u32(&shutdown, 0);

	memset(thd_tbl, 0, sizeof(thd_tbl));
	memset(&gbl_args->rx_stats, 0, sizeof(gbl_args->rx_stats));
	memset(&gbl_args->tx_stats, 0, sizeof(gbl_args->tx_stats));

	expected_tx_cnt = status->pps_curr * gbl_args->args.duration;

	/* start receiver threads first */
	args_rx.batch_len = gbl_args->args.rx_batch_len;
	odph_linux_pthread_create(&thd_tbl[0], thd_mask_rx,
				  run_thread_rx, &args_rx);
	odp_barrier_wait(&gbl_args->rx_barrier);
	num_rx_workers = odp_cpumask_count(thd_mask_rx);

	/* then start transmitters */
	num_tx_workers    = odp_cpumask_count(thd_mask_tx);
	args_tx.pps       = status->pps_curr / num_tx_workers;
	args_tx.duration  = gbl_args->args.duration;
	args_tx.batch_len = gbl_args->args.tx_batch_len;
	odph_linux_pthread_create(&thd_tbl[num_rx_workers], thd_mask_tx,
				  run_thread_tx, &args_tx);
	odp_barrier_wait(&gbl_args->tx_barrier);

	/* wait for transmitter threads to terminate */
	odph_linux_pthread_join(&thd_tbl[num_rx_workers],
				num_tx_workers);

	/* delay to allow transmitted packets to reach the receivers */
	busy_loop_ns(SHUTDOWN_DELAY_NS);

	/* indicate to the receivers to exit */
	odp_atomic_store_u32(&shutdown, 1);

	/* wait for receivers */
	odph_linux_pthread_join(&thd_tbl[0], num_rx_workers);

	return process_results(expected_tx_cnt, status);
}
Example #3
/**
 * @internal Worker thread
 *
 * @param ptr  Pointer to test arguments
 *
 * @return Pointer to exit status
 */
static void *run_thread(void *ptr)
{
	int thr;
	odp_pool_t msg_pool;
	test_globals_t *gbls;

	gbls = ptr;
	thr  = odp_thread_id();

	printf("Thread %i starts on cpu %i\n", thr, odp_cpu_id());

	/*
	 * Find the pool
	 */
	msg_pool = odp_pool_lookup("msg_pool");

	if (msg_pool == ODP_POOL_INVALID) {
		EXAMPLE_ERR("  [%i] msg_pool not found\n", thr);
		return NULL;
	}

	odp_barrier_wait(&gbls->test_barrier);

	test_abs_timeouts(thr, gbls);

	printf("Thread %i exits\n", thr);
	fflush(NULL);
	return NULL;
}
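
The odp_pool_lookup("msg_pool") call above only succeeds if the pool was
created beforehand, typically by the main thread. A sketch of that setup,
assuming a timeout pool as the timer test implies (the element count is
illustrative):

static odp_pool_t create_msg_pool(void)
{
	odp_pool_param_t params;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = 1000; /* illustrative pool size */

	return odp_pool_create("msg_pool", &params);
}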
Example #4
/**
 * Packet IO worker thread using bursts from/to IO resources
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_ifburst_thread(void *arg)
{
	int thr;
	thread_args_t *thr_args;
	int pkts, pkts_ok;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int src_idx, dst_idx;
	odp_pktio_t pktio_src, pktio_dst;

	thr = odp_thread_id();
	thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));
	*thr_args->stats = stats;

	src_idx = thr_args->src_idx;
	dst_idx = (src_idx % 2 == 0) ? src_idx+1 : src_idx-1;
	pktio_src = gbl_args->pktios[src_idx];
	pktio_dst = gbl_args->pktios[dst_idx];

	printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
	       " dpktio:%02" PRIu64 " BURST mode\n",
	       thr,
	       gbl_args->appl.if_names[src_idx],
	       gbl_args->appl.if_names[dst_idx],
	       odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
		if (pkts <= 0)
			continue;

		/* Drop packets with errors */
		pkts_ok = drop_err_pkts(pkt_tbl, pkts);
		if (pkts_ok > 0) {
			int sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts_ok);

			sent = sent > 0 ? sent : 0;
			if (odp_unlikely(sent < pkts_ok)) {
				stats->drops += pkts_ok - sent;
				do
					odp_packet_free(pkt_tbl[sent]);
				while (++sent < pkts_ok);
			}
		}

		if (odp_unlikely(pkts_ok != pkts))
			stats->drops += pkts - pkts_ok;

		if (pkts_ok == 0)
			continue;

		stats->packets += pkts_ok;
	}

	free(stats);
	return NULL;
}
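
drop_err_pkts() is not shown in this example; as called here it must
compact the burst and return the number of good packets. A minimal sketch
under that assumption:

static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
{
	unsigned i, good = 0;

	for (i = 0; i < num; i++) {
		if (odp_packet_has_error(pkt_tbl[i])) {
			odp_packet_free(pkt_tbl[i]); /* drop bad packet */
			continue;
		}
		pkt_tbl[good++] = pkt_tbl[i]; /* keep order, compact table */
	}

	return good;
}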
Example #5
static int chaos_thread(void *arg)
{
	uint64_t i, wait;
	int rc;
	chaos_buf *cbuf;
	odp_event_t ev;
	odp_queue_t from;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	int me = odp_thread_id();
	odp_time_t start_time, end_time, diff;

	if (CHAOS_DEBUG)
		printf("Chaos thread %d starting...\n", me);

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);
	start_time = odp_time_local();

	/* Run the test */
	wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
	for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
		ev = odp_schedule(&from, wait);
		if (ev == ODP_EVENT_INVALID)
			continue;

		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		CU_ASSERT_FATAL(cbuf != NULL);
		if (CHAOS_DEBUG)
			printf("Thread %d received event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s, sending to Q %s\n",
			       me, cbuf->evno, cbuf->seqno,
			       globals->chaos_q[CHAOS_PTR_TO_NDX(
					odp_queue_context(from))].name,
			       globals->chaos_q[cbuf->seqno %
					CHAOS_NUM_QUEUES].name);

		rc = odp_queue_enq(
			globals->
			chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
			ev);
		CU_ASSERT_FATAL(rc == 0);
	}

	if (CHAOS_DEBUG)
		printf("Thread %d completed %d rounds...terminating\n",
		       odp_thread_id(), CHAOS_NUM_ROUNDS);

	exit_schedule_loop();

	end_time = odp_time_local();
	diff = odp_time_diff(end_time, start_time);

	printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
	       odp_thread_id(), odp_time_to_ns(diff) / 1000);

	return 0;
}
Example #6
static void *run_thread(void *arg)
{
	pthrd_arg *parg = (pthrd_arg *)arg;
	int thr;

	thr = odp_thread_id();

	LOG_DBG("Thread %i starts\n", thr);

	/* Wait here until all threads have arrived */
	/* Use multiple barriers to verify that it handles wrap around and
	 * has no race conditions which could be exposed when invoked back-
	 * to-back */
	odp_barrier_wait(&barrier);
	odp_barrier_wait(&barrier);
	odp_barrier_wait(&barrier);
	odp_barrier_wait(&barrier);

	gettimeofday(&tv0[thr], NULL);

	switch (parg->testcase) {
	case TEST_MIX:
		test_atomic_basic();
		break;
	case TEST_INC_DEC_U32:
		test_atomic_inc_dec_u32();
		break;
	case TEST_ADD_SUB_U32:
		test_atomic_add_sub_u32();
		break;
	case TEST_INC_DEC_64:
		test_atomic_inc_dec_64();
		break;
	case TEST_ADD_SUB_64:
		test_atomic_add_sub_64();
		break;
	}
	gettimeofday(&tv1[thr], NULL);
	fflush(NULL);

	printf("Time taken in thread %02d to complete op is %lld usec\n", thr,
	       (tv1[thr].tv_sec - tv0[thr].tv_sec) * 1000000ULL +
	       (tv1[thr].tv_usec - tv0[thr].tv_usec));

	return parg;
}
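
The dispatched test_atomic_*() functions are not part of this snippet; each
one presumably hammers a shared atomic from every thread so the timings
printed above are comparable across cases. A sketch of one case, with CNT
and the shared counter as assumptions:

#define CNT 100000 /* illustrative iteration count */

static odp_atomic_u32_t a32u; /* shared counter, assumed initialized */

static void test_atomic_inc_dec_u32(void)
{
	int i;

	for (i = 0; i < CNT; i++)
		odp_atomic_inc_u32(&a32u);

	for (i = 0; i < CNT; i++)
		odp_atomic_dec_u32(&a32u);
}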
Example #7
void *other_thread(void *arg)
{
	odp_barrier_t *barrier = (odp_barrier_t *)arg;

	/* Initialize this thread's ofp_errno. */
	ofp_errno = 0;

	/* Test 1 */
	odp_barrier_wait(barrier);
	/* ... */
	odp_barrier_wait(barrier);
	CU_ASSERT_EQUAL(ofp_errno, 0);

	/* Test 2 */
	odp_barrier_wait(barrier);
	ofp_errno = OFP_ENOENT;
	odp_barrier_wait(barrier);
	CU_ASSERT_EQUAL(ofp_errno, OFP_ENOENT);

	return NULL;
}
Example #8
/**
 * Print statistics
 *
 * @param num_workers Number of worker threads
 * @param thr_stats   Pointer to stats storage
 * @param duration    Number of seconds to run for (0 loops forever)
 * @param timeout     Interval in seconds between stats updates
 *                    (<= 0 disables stats prints)
 *
 * @retval 0 on success
 * @retval -1 on failure
 */
static int print_speed_stats(int num_workers, stats_t *thr_stats,
			     int duration, int timeout)
{
	uint64_t pkts = 0;
	uint64_t pkts_prev = 0;
	uint64_t pps;
	uint64_t rx_drops, tx_drops;
	uint64_t maximum_pps = 0;
	int i;
	int elapsed = 0;
	int stats_enabled = 1;
	int loop_forever = (duration == 0);

	if (timeout <= 0) {
		stats_enabled = 0;
		timeout = 1;
	}
	/* Wait for all threads to be ready */
	odp_barrier_wait(&barrier);

	do {
		pkts = 0;
		rx_drops = 0;
		tx_drops = 0;

		sleep(timeout);

		for (i = 0; i < num_workers; i++) {
			pkts += LOAD_U64(thr_stats[i].s.packets);
			rx_drops += LOAD_U64(thr_stats[i].s.rx_drops);
			tx_drops += LOAD_U64(thr_stats[i].s.tx_drops);
		}
		if (stats_enabled) {
			pps = (pkts - pkts_prev) / timeout;
			if (pps > maximum_pps)
				maximum_pps = pps;
			printf("%" PRIu64 " pps, %" PRIu64 " max pps, ",  pps,
			       maximum_pps);

			printf(" %" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
			       rx_drops, tx_drops);

			pkts_prev = pkts;
		}
		elapsed += timeout;
	} while (loop_forever || (elapsed < duration));

	if (stats_enabled)
		printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
		       maximum_pps);

	return pkts > 100 ? 0 : -1;
}
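
LOAD_U64() is assumed to be a relaxed atomic read of a counter that the
worker threads update concurrently, something like the sketch below; the
LOAD_U32()/STORE_U32() macros in the barrier example later follow the same
pattern.

#define LOAD_U64(a) __atomic_load_n(&(a), __ATOMIC_RELAXED)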
Example #9
static void test_tls_errno(void)
{
	odp_cpumask_t cpumask;
	odph_linux_pthread_t threads;
	odp_barrier_t barrier__;
	odp_barrier_t *barrier;

	CU_ASSERT(1 == odp_cpumask_default_worker(&cpumask, 1));

	barrier = &barrier__;
	odp_barrier_init(barrier, 2);

	CU_ASSERT(1 == ofp_linux_pthread_create(
			&threads,
			&cpumask,
			other_thread,
			(void *)barrier,
			ODP_THREAD_CONTROL));

	/* Initialize this thread's ofp_errno. */
	ofp_errno = 0;

	/* Test 1 - Test that an assignment to the current thread's ofp_errno
	*           does not modify the ofp_errno of other_thread.
	*/
	odp_barrier_wait(barrier);
	ofp_errno = OFP_EIO;
	odp_barrier_wait(barrier);
	CU_ASSERT_EQUAL(ofp_errno, OFP_EIO);

	/* Test 2 - Test both threads. */
	odp_barrier_wait(barrier);
	ofp_errno = OFP_EPERM;
	odp_barrier_wait(barrier);
	CU_ASSERT_EQUAL(ofp_errno, OFP_EPERM);

	odph_linux_pthread_join(&threads, 1);
}
Example #10
static void *run_thread_rx(void *arg)
{
	test_globals_t *globals;
	int thr_id, batch_len;
	odp_queue_t pollq = ODP_QUEUE_INVALID;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));

	pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];

	if (gbl_args->args.schedule == 0) {
		pollq = odp_pktio_inq_getdef(globals->pktio_rx);
		if (pollq == ODP_QUEUE_INVALID)
			LOG_ABORT("Invalid input queue.\n");
	}

	odp_barrier_wait(&globals->rx_barrier);
	while (1) {
		odp_event_t ev[BATCH_LEN_MAX];
		int i, n_ev;

		n_ev = receive_packets(pollq, ev, batch_len);

		for (i = 0; i < n_ev; ++i) {
			if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
				odp_packet_t pkt = odp_packet_from_event(ev[i]);
				if (pktio_pkt_has_magic(pkt))
					stats->s.rx_cnt++;
				else
					stats->s.rx_ignore++;
			}
			odp_buffer_free(odp_buffer_from_event(ev[i]));
		}
		if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
			break;
	}

	return NULL;
}
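
receive_packets() is not shown; given how it is called it must poll the
plain queue when one was configured and otherwise pull events from the
scheduler. A sketch under that assumption, with a short wait so the
shutdown flag is re-checked regularly:

static int receive_packets(odp_queue_t pollq, odp_event_t *event_tbl,
			   int num_pkts)
{
	if (pollq != ODP_QUEUE_INVALID)
		return odp_queue_deq_multi(pollq, event_tbl, num_pkts);

	return odp_schedule_multi(NULL,
				  odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS),
				  event_tbl, num_pkts);
}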
Example #11
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_queue_t outq_def;
	odp_packet_t pkt;
	odp_event_t ev;
	thread_args_t *thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));
	*thr_args->stats = stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		/* Use schedule to get buf from any input queue */
		ev  = odp_schedule(NULL, ODP_SCHED_WAIT);
		pkt = odp_packet_from_event(ev);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			stats->drops += 1;
			continue;
		}

		outq_def = lookup_dest_q(pkt);

		/* Enqueue the packet for output */
		if (odp_queue_enq(outq_def, ev)) {
			printf("  [%i] Queue enqueue failed.\n", thr);
			odp_packet_free(pkt);
			continue;
		}

		stats->packets += 1;
	}

	free(stats);
	return NULL;
}
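
lookup_dest_q() is left out of the snippet; assuming the even/odd port
pairing used by the other forwarding examples and the gbl_args layout they
share, it could look like this sketch:

static odp_queue_t lookup_dest_q(odp_packet_t pkt)
{
	odp_pktio_t pktio_src = odp_packet_input(pkt);
	int i, dst_idx = 0;

	/* find the input port index, then pick its partner port */
	for (i = 0; i < gbl_args->appl.if_count; i++)
		if (gbl_args->pktios[i] == pktio_src)
			dst_idx = (i % 2 == 0) ? i + 1 : i - 1;

	return odp_pktio_outq_getdef(gbl_args->pktios[dst_idx]);
}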
Example #12
/**
 * Print statistics
 *
 * @param num_workers Number of worker threads
 * @param thr_stats   Pointer to stats storage
 * @param duration    Number of seconds to run for (0 loops forever)
 * @param timeout     Interval in seconds between stats updates
 */
static void print_speed_stats(int num_workers, stats_t **thr_stats,
			      int duration, int timeout)
{
	uint64_t pkts, pkts_prev = 0, pps, drops, maximum_pps = 0;
	int i, elapsed = 0;
	int loop_forever = (duration == 0);

	/* Wait for all threads to be ready */
	odp_barrier_wait(&barrier);

	do {
		pkts = 0;
		drops = 0;

		sleep(timeout);

		for (i = 0; i < num_workers; i++) {
			pkts += thr_stats[i]->packets;
			drops += thr_stats[i]->drops;
		}
		pps = (pkts - pkts_prev) / timeout;
		if (pps > maximum_pps)
			maximum_pps = pps;
		printf("%" PRIu64 " pps, %" PRIu64 " max pps, ",  pps,
		       maximum_pps);

		printf(" %" PRIu64 " total drops\n", drops);

		elapsed += timeout;
		pkts_prev = pkts;
	} while (loop_forever || (elapsed < duration));

	printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
	       maximum_pps);
}
Example #13
static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
			     odp_bool_t no_barrier_test)
{
	global_shared_mem_t *global_mem;
	uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
	uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
	uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;

	thread_num = odp_thread_id();
	global_mem = per_thread_mem->global_mem;
	num_threads = global_mem->g_num_threads;
	iterations = BARRIER_ITERATIONS;

	barrier_errs = 0;
	lock_owner_delay = SLOW_BARRIER_DELAY;

	for (cnt = 1; cnt < iterations; cnt++) {
		/* Wait here until all of the threads reach this point */
		custom_barrier_wait(&global_mem->custom_barrier1[cnt]);

		barrier_cnt1 = LOAD_U32(global_mem->barrier_cnt1);
		barrier_cnt2 = LOAD_U32(global_mem->barrier_cnt2);

		if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
			printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
				   " %" PRIu32 " cnt=%" PRIu32 "\n",
			       thread_num, barrier_cnt1, barrier_cnt2, cnt);
			barrier_errs++;
		}

		/* Wait here until all of the threads reach this point */
		custom_barrier_wait(&global_mem->custom_barrier2[cnt]);

		slow_thread_num = LOAD_U32(global_mem->slow_thread_num);
		i_am_slow_thread = thread_num == slow_thread_num;
		next_slow_thread = slow_thread_num + 1;
		if (num_threads < next_slow_thread)
			next_slow_thread = 1;

		/*
		* Now run the test, which involves having all but one thread
		* immediately calling odp_barrier_wait(), and one thread wait a
		* moderate amount of time and then calling odp_barrier_wait().
		* The test fails if any of the first group of threads
		* has not waited for the "slow" thread. The "slow" thread is
		* responsible for re-initializing the barrier for next trial.
		*/
		if (i_am_slow_thread) {
			thread_delay(per_thread_mem, lock_owner_delay);
			lock_owner_delay += BASE_DELAY;
			if ((LOAD_U32(global_mem->barrier_cnt1) != cnt) ||
			    (LOAD_U32(global_mem->barrier_cnt2) != cnt) ||
			    (LOAD_U32(global_mem->slow_thread_num)
					!= slow_thread_num))
				barrier_errs++;
		}

		if (no_barrier_test == 0)
			odp_barrier_wait(&global_mem->test_barriers[cnt]);

		STORE_U32(global_mem->barrier_cnt1, cnt + 1);
		odp_mb_full();

		if (i_am_slow_thread) {
			STORE_U32(global_mem->slow_thread_num, next_slow_thread);
			STORE_U32(global_mem->barrier_cnt2, cnt + 1);
			odp_mb_full();
		} else {
			uint32_t cnt2 = LOAD_U32(global_mem->barrier_cnt2);
			while (cnt2 != (cnt + 1)) {
				thread_delay(per_thread_mem, BASE_DELAY);
				cnt2 = LOAD_U32(global_mem->barrier_cnt2);
			}
		}
	}

	if ((global_mem->g_verbose) && (barrier_errs != 0))
		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
		       " barrier_errs in %" PRIu32 " iterations\n", thread_num,
		       per_thread_mem->thread_id,
		       per_thread_mem->thread_core, barrier_errs, iterations);

	return barrier_errs;
}
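
The custom_barrier used above is a test-local construct; since each
instance sits in a per-iteration array, a single-use counting barrier built
on an ODP atomic is the likely shape. A sketch under that assumption:

typedef struct {
	odp_atomic_u32_t wait_cnt;
} custom_barrier_t;

static void custom_barrier_init(custom_barrier_t *b, uint32_t num_threads)
{
	odp_atomic_init_u32(&b->wait_cnt, num_threads);
}

static void custom_barrier_wait(custom_barrier_t *b)
{
	odp_atomic_sub_u32(&b->wait_cnt, 1);

	/* single-use: spin until every thread has decremented the count */
	while (odp_atomic_load_u32(&b->wait_cnt) != 0)
		odp_cpu_pause();
}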
Example #14
static void *chaos_thread(void *arg)
{
	uint64_t i, wait;
	int rc;
	chaos_buf *cbuf;
	odp_event_t ev;
	odp_queue_t from;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	int me = odp_thread_id();

	if (CHAOS_DEBUG)
		printf("Chaos thread %d starting...\n", me);

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);

	/* Run the test */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		CU_ASSERT_FATAL(cbuf != NULL);
		INVALIDATE(cbuf);
		if (CHAOS_DEBUG)
			printf("Thread %d received event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s, sending to Q %s\n",
			       me, cbuf->evno, cbuf->seqno,
			       globals->chaos_q[CHAOS_PTR_TO_NDX(
					odp_queue_context(from))].name,
			       globals->chaos_q[cbuf->seqno %
					CHAOS_NUM_QUEUES].name);

		rc = odp_queue_enq(
			globals->
			chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
			ev);
		CU_ASSERT(rc == 0);
	}

	if (CHAOS_DEBUG)
		printf("Thread %d completed %d rounds...terminating\n",
		       odp_thread_id(), CHAOS_NUM_ROUNDS);

	/* Thread complete--drain locally cached scheduled events */
	odp_schedule_pause();

	while (odp_atomic_load_u32(&globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;
		odp_atomic_dec_u32(&globals->chaos_pending_event_count);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Thread %d drained event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s\n",
			       odp_thread_id(), cbuf->evno, cbuf->seqno,
			       globals->chaos_q[CHAOS_PTR_TO_NDX(
					odp_queue_context(from))].name);
		odp_event_free(ev);
	}

	return NULL;
}
Example #15
/**
 * Print statistics
 *
 * @param num_workers Number of worker threads
 * @param thr_stats   Pointer to stats storage
 * @param duration    Number of seconds to run for (0 loops forever)
 * @param timeout     Interval in seconds between stats updates
 *                    (<= 0 disables stats prints)
 *
 * @retval 0 on success
 * @retval -1 on failure
 */
static int print_speed_stats(int num_workers, stats_t (*thr_stats)[MAX_PKTIOS],
			     int duration, int timeout)
{
	uint64_t rx_pkts_prev[MAX_PKTIOS] = {0};
	uint64_t tx_pkts_prev[MAX_PKTIOS] = {0};
	uint64_t rx_pkts_tot;
	uint64_t tx_pkts_tot;
	uint64_t rx_pps;
	uint64_t tx_pps;
	int i, j;
	int elapsed = 0;
	int stats_enabled = 1;
	int loop_forever = (duration == 0);
	int num_ifaces = gbl_args->appl.if_count;

	if (timeout <= 0) {
		stats_enabled = 0;
		timeout = 1;
	}
	/* Wait for all threads to be ready */
	odp_barrier_wait(&barrier);

	do {
		uint64_t rx_pkts[MAX_PKTIOS] = {0};
		uint64_t tx_pkts[MAX_PKTIOS] = {0};
		uint64_t rx_drops = 0;
		uint64_t tx_drops = 0;

		rx_pkts_tot = 0;
		tx_pkts_tot = 0;

		sleep(timeout);
		elapsed += timeout;

		for (i = 0; i < num_workers; i++) {
			for (j = 0; j < num_ifaces; j++) {
				rx_pkts[j] += thr_stats[i][j].s.rx_packets;
				tx_pkts[j] += thr_stats[i][j].s.tx_packets;
				rx_drops += thr_stats[i][j].s.rx_drops;
				tx_drops += thr_stats[i][j].s.tx_drops;
			}
		}

		if (!stats_enabled)
			continue;

		for (j = 0; j < num_ifaces; j++) {
			rx_pps = (rx_pkts[j] - rx_pkts_prev[j]) / timeout;
			tx_pps = (tx_pkts[j] - tx_pkts_prev[j]) / timeout;
			printf("  Port %d: %" PRIu64 " rx pps, %" PRIu64
			       " tx pps, %" PRIu64 " rx pkts, %" PRIu64
			       " tx pkts\n", j, rx_pps, tx_pps, rx_pkts[j],
			       tx_pkts[j]);

			rx_pkts_prev[j] = rx_pkts[j];
			tx_pkts_prev[j] = tx_pkts[j];
			rx_pkts_tot += rx_pkts[j];
			tx_pkts_tot += tx_pkts[j];
		}

		printf("Total: %" PRIu64 " rx pkts, %" PRIu64 " tx pkts, %"
		       PRIu64 " rx drops, %" PRIu64 " tx drops\n", rx_pkts_tot,
		       tx_pkts_tot, rx_drops, tx_drops);

	} while (loop_forever || (elapsed < duration));

	return rx_pkts_tot >= 100 ? 0 : -1;
}
Example #16
/**
 * Switch worker thread
 *
 * @param arg  Thread arguments of type 'thread_args_t *'
 */
static int run_worker(void *arg)
{
	thread_args_t *thr_args = arg;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	odp_pktin_queue_t pktin;
	odp_pktout_queue_t pktout;
	unsigned num_pktio;
	unsigned pktio = 0;
	uint8_t port_in;
	uint8_t port_out;
	int pkts;

	num_pktio = thr_args->num_rx_pktio;
	pktin     = thr_args->rx_pktio[pktio].pktin;
	port_in   = thr_args->rx_pktio[pktio].port_idx;

	odp_barrier_wait(&barrier);

	while (!exit_threads) {
		int sent;
		unsigned drops;

		if (num_pktio > 1) {
			pktin     = thr_args->rx_pktio[pktio].pktin;
			port_in   = thr_args->rx_pktio[pktio].port_idx;
			pktio++;
			if (pktio == num_pktio)
				pktio = 0;
		}

		pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
		if (odp_unlikely(pkts <= 0))
			continue;

		thr_args->stats[port_in]->s.rx_packets += pkts;

		/* Sort packets to thread local tx buffers */
		forward_packets(pkt_tbl, pkts, thr_args, port_in);

		/* Empty all thread local tx buffers */
		for (port_out = 0; port_out < gbl_args->appl.if_count;
				port_out++) {
			unsigned tx_pkts;
			odp_packet_t *tx_pkt_tbl;

			if (port_out == port_in ||
			    thr_args->tx_pktio[port_out].buf.len == 0)
				continue;

			tx_pkts = thr_args->tx_pktio[port_out].buf.len;
			thr_args->tx_pktio[port_out].buf.len = 0;

			tx_pkt_tbl = thr_args->tx_pktio[port_out].buf.pkt;

			pktout = thr_args->tx_pktio[port_out].pktout;

			sent = odp_pktout_send(pktout, tx_pkt_tbl, tx_pkts);
			sent = odp_unlikely(sent < 0) ? 0 : sent;

			thr_args->stats[port_out]->s.tx_packets += sent;

			drops = tx_pkts - sent;

			if (odp_unlikely(drops)) {
				unsigned i;

				thr_args->stats[port_out]->s.tx_drops += drops;

				/* Drop rejected packets */
				for (i = sent; i < tx_pkts; i++)
					odp_packet_free(tx_pkt_tbl[i]);
			}
		}
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return 0;
}
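
forward_packets() is where the switching decision happens; it is not part
of this snippet. A sketch of the buffering step it must perform, with
lookup_dest_port() standing in for the real forwarding-table lookup (a
real switch would also handle broadcast and unknown destinations; the
two-argument signature is an assumption):

static void forward_packets(odp_packet_t pkt_tbl[], int pkts,
			    thread_args_t *thr_args, uint8_t port_in)
{
	int i;

	for (i = 0; i < pkts; i++) {
		int port_out = lookup_dest_port(pkt_tbl[i], port_in);
		unsigned len = thr_args->tx_pktio[port_out].buf.len;

		/* append to the thread-local tx buffer for that port */
		thr_args->tx_pktio[port_out].buf.pkt[len] = pkt_tbl[i];
		thr_args->tx_pktio[port_out].buf.len = len + 1;
	}
}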
Example #17
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	odp_event_t  ev_tbl[MAX_PKT_BURST];
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int pkts;
	int thr;
	uint64_t wait;
	int dst_idx;
	odp_pktio_t pktio_dst;
	thread_args_t *thr_args = arg;
	stats_t *stats = thr_args->stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * 100);

	/* Loop packets */
	while (!exit_threads) {
		int sent, i;
		unsigned tx_drops;

		pkts = odp_schedule_multi(NULL, wait, ev_tbl, MAX_PKT_BURST);

		if (pkts <= 0)
			continue;

		for (i = 0; i < pkts; i++)
			pkt_tbl[i] = odp_packet_from_event(ev_tbl[i]);

		if (gbl_args->appl.error_check) {
			int rx_drops;

			/* Drop packets with errors */
			rx_drops = drop_err_pkts(pkt_tbl, pkts);

			if (odp_unlikely(rx_drops)) {
				stats->s.rx_drops += rx_drops;
				if (pkts == rx_drops)
					continue;

				pkts -= rx_drops;
			}
		}

		/* packets from the same queue are from the same interface */
		dst_idx = lookup_dest_port(pkt_tbl[0]);
		fill_eth_addrs(pkt_tbl, pkts, dst_idx);
		pktio_dst = gbl_args->pktios[dst_idx];

		sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);

		sent     = odp_unlikely(sent < 0) ? 0 : sent;
		tx_drops = pkts - sent;

		if (odp_unlikely(tx_drops)) {
			stats->s.tx_drops += tx_drops;

			/* Drop rejected packets */
			for (i = sent; i < pkts; i++)
				odp_packet_free(pkt_tbl[i]);
		}

		stats->s.packets += pkts;
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return NULL;
}
Example #18
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static int run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_pktout_queue_t pktout;
	pkt_tx_stats_t *stats;
	odp_time_t cur_time, send_time_end, send_duration;
	odp_time_t burst_gap_end, burst_gap;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_packet_t tx_packet[BATCH_LEN_MAX];
	odp_time_t idle_start = ODP_TIME_NULL;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	if (odp_pktout_queue(globals->pktio_tx, &pktout, 1) != 1)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	burst_gap = odp_time_local_from_ns(
			ODP_TIME_SEC_IN_NS / (targs->pps / targs->batch_len));
	send_duration =
		odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS);

	odp_barrier_wait(&globals->tx_barrier);

	cur_time     = odp_time_local();
	send_time_end = odp_time_sum(cur_time, send_duration);
	burst_gap_end = cur_time;
	while (odp_time_cmp(send_time_end, cur_time) > 0) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (odp_time_cmp(burst_gap_end, cur_time) > 0) {
			cur_time = odp_time_local();
			if (!odp_time_cmp(idle_start, ODP_TIME_NULL))
				idle_start = cur_time;
			continue;
		}

		if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
			odp_time_t diff = odp_time_diff(cur_time, idle_start);

			stats->s.idle_ticks =
				odp_time_sum(diff, stats->s.idle_ticks);

			idle_start = ODP_TIME_NULL;
		}

		burst_gap_end = odp_time_sum(burst_gap_end, burst_gap);

		alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len - unsent_pkts)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_time = odp_time_local();
	}

	VPRINT(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
	       " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_to_ns(stats->s.idle_ticks) /
	       (uint64_t)ODP_TIME_MSEC_IN_NS);

	return 0;
}
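
send_packets() must free whatever the driver refuses, or the unsent
packets counted above would leak. A sketch consistent with that
accounting:

static uint32_t send_packets(odp_pktout_queue_t pktout,
			     odp_packet_t *pkt_tbl, uint32_t num)
{
	int sent = odp_pktout_send(pktout, pkt_tbl, num);
	uint32_t i;

	if (sent < 0)
		sent = 0;

	/* free the tail of the batch that was not accepted */
	for (i = sent; i < num; i++)
		odp_packet_free(pkt_tbl[i]);

	return sent;
}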
Example #19
/**
 * Packet IO worker thread accessing IO resources directly
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_direct_recv_thread(void *arg)
{
	int thr;
	int pkts;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int src_idx, dst_idx;
	odp_pktio_t pktio_src, pktio_dst;
	thread_args_t *thr_args = arg;
	stats_t *stats = thr_args->stats;

	thr = odp_thread_id();

	src_idx = thr_args->src_idx;
	dst_idx = gbl_args->dst_port[src_idx];
	pktio_src = gbl_args->pktios[src_idx];
	pktio_dst = gbl_args->pktios[dst_idx];

	printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
	       " dpktio:%02" PRIu64 " DIRECT RECV mode\n",
	       thr,
	       gbl_args->appl.if_names[src_idx],
	       gbl_args->appl.if_names[dst_idx],
	       odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		int sent, i;
		unsigned tx_drops;

		pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
		if (odp_unlikely(pkts <= 0))
			continue;

		if (gbl_args->appl.error_check) {
			int rx_drops;

			/* Drop packets with errors */
			rx_drops = drop_err_pkts(pkt_tbl, pkts);

			if (odp_unlikely(rx_drops)) {
				stats->s.rx_drops += rx_drops;
				if (pkts == rx_drops)
					continue;

				pkts -= rx_drops;
			}
		}

		fill_eth_addrs(pkt_tbl, pkts, dst_idx);

		sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);

		sent     = odp_unlikely(sent < 0) ? 0 : sent;
		tx_drops = pkts - sent;

		if (odp_unlikely(tx_drops)) {
			stats->s.tx_drops += tx_drops;

			/* Drop rejected packets */
			for (i = sent; i < pkts; i++)
				odp_packet_free(pkt_tbl[i]);
		}

		stats->s.packets += pkts;
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return NULL;
}
Example #20
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev  = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;
			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence = bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;
				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;
				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
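
exit_schedule_loop() shows up in several of these tests; its job is to
clear any scheduling context and locally cached events before the thread
leaves the schedule loop. A sketch of the usual shape:

static int exit_schedule_loop(void)
{
	odp_event_t ev;
	int ret = 0;

	odp_schedule_pause();

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
	       != ODP_EVENT_INVALID) {
		odp_event_free(ev);
		ret++;
	}

	odp_schedule_resume();

	return ret;
}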
Example #21
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static void *run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_queue_t outq;
	pkt_tx_stats_t *stats;
	uint64_t next_tx_cycles, end_cycles, cur_cycles;
	uint64_t burst_gap_cycles;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_event_t  tx_event[BATCH_LEN_MAX];
	uint64_t idle_start = 0;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	outq = odp_pktio_outq_getdef(globals->pktio_tx);
	if (outq == ODP_QUEUE_INVALID)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	burst_gap_cycles = odp_time_ns_to_cycles(
		(ODP_TIME_SEC * 999) / (1000 * targs->pps / targs->batch_len));

	odp_barrier_wait(&globals->tx_barrier);

	cur_cycles     = odp_time_cycles();
	next_tx_cycles = cur_cycles;
	end_cycles     = cur_cycles +
			 odp_time_ns_to_cycles(targs->duration * ODP_TIME_SEC);

	while (cur_cycles < end_cycles) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (cur_cycles < next_tx_cycles) {
			cur_cycles = odp_time_cycles();
			if (idle_start == 0)
				idle_start = cur_cycles;
			continue;
		}

		if (idle_start) {
			stats->s.idle_cycles += odp_time_diff_cycles(
							idle_start, cur_cycles);
			idle_start = 0;
		}

		next_tx_cycles += burst_gap_cycles;

		alloc_cnt = alloc_packets(tx_event, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len - unsent_pkts)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(outq, tx_event, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_cycles = odp_time_cycles();
	}

	VPRINT(" %02d: TxPkts %-8"PRIu64" EnqFail %-6"PRIu64
	       " AllocFail %-6"PRIu64" Idle %"PRIu64"ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_cycles_to_ns(stats->s.idle_cycles)/1000/1000);

	return NULL;
}