Example #1
void scheduler_test_pause_resume(void)
{
	odp_queue_t queue;
	odp_buffer_t buf;
	odp_event_t ev;
	odp_queue_t from;
	odp_pool_t pool;
	int i;
	int local_bufs = 0;

	queue = odp_queue_lookup("sched_0_0_n");
	CU_ASSERT(queue != ODP_QUEUE_INVALID);

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		ev = odp_buffer_to_event(buf);
		if (odp_queue_enq(queue, ev))
			odp_buffer_free(buf);
	}

	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
		from = ODP_QUEUE_INVALID;
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	odp_schedule_pause();

	while (1) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;

		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
		local_bufs++;
	}

	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);

	odp_schedule_resume();

	for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	CU_ASSERT(exit_schedule_loop() == 0);
}
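Note: the snippet above relies on constants defined elsewhere in the test suite (MSG_POOL_NAME, NUM_BUFS_PAUSE, NUM_BUFS_BEFORE_PAUSE; exit_schedule_loop() is shown in Example #15). A minimal sketch of plausible definitions, assuming only that more buffers are enqueued than are drained before the pause:

/* Assumed values -- the real test suite defines its own. */
#define MSG_POOL_NAME          "msg_pool"
#define NUM_BUFS_PAUSE         1000
#define NUM_BUFS_BEFORE_PAUSE  10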
Example #2
void scheduler_test_wait_time(void)
{
	int i;
	odp_queue_t queue;
	uint64_t wait_time;
	odp_queue_param_t qp;
	odp_time_t lower_limit, upper_limit;
	odp_time_t start_time, end_time, diff;

	/* check on read */
	wait_time = odp_schedule_wait_time(0);
	wait_time = odp_schedule_wait_time(1);

	/* check ODP_SCHED_NO_WAIT */
	odp_queue_param_init(&qp);
	qp.type        = ODP_QUEUE_TYPE_SCHED;
	qp.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	qp.sched.prio  = ODP_SCHED_PRIO_NORMAL;
	qp.sched.group = ODP_SCHED_GROUP_ALL;
	queue = odp_queue_create("dummy_queue", &qp);
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
	start_time = odp_time_local();
	odp_schedule(&queue, ODP_SCHED_NO_WAIT);
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = ODP_TIME_NULL;
	upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

	/* check time correctness */
	start_time = odp_time_local();
	for (i = 1; i < 6; i++) {
		odp_schedule(&queue, wait_time);
		printf("%d..", i);
	}
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
							ODP_WAIT_TOLERANCE);
	upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
							ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
}
Example #3
void scheduler_test_wait_time(void)
{
	odp_queue_t queue;
	uint64_t wait_time;
	odp_queue_param_t qp;
	odp_time_t lower_limit, upper_limit;
	odp_time_t start_time, end_time, diff;

	/* check on read */
	wait_time = odp_schedule_wait_time(0);
	wait_time = odp_schedule_wait_time(1);
	(void)wait_time;

	/* check ODP_SCHED_NO_WAIT */
	odp_queue_param_init(&qp);
	queue = odp_queue_create("dummy_queue", ODP_QUEUE_TYPE_SCHED, &qp);
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
	start_time = odp_time_local();
	odp_schedule(&queue, ODP_SCHED_NO_WAIT);
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = ODP_TIME_NULL;
	upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

#ifndef MAGIC_SCALL
	int i;

	/* check time correctness */
	start_time = odp_time_local();
	for (i = 1; i < 6; i++) {
		odp_schedule(&queue, wait_time);
		/* printf("%d..", i); */
	}
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
							ODP_WAIT_TOLERANCE);
	upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
							ODP_WAIT_TOLERANCE);
	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
#endif

	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
}
Example #4
/*
 * Should receive timeouts only
 */
static void *event_dispatcher(void *arg)
{
	odp_event_t ev;

	(void)arg;

	ofp_init_local();

	while (1) {
		ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		if (ev == ODP_EVENT_INVALID)
			continue;

		if (odp_event_type(ev) == ODP_EVENT_TIMEOUT) {
			ofp_timer_handle(ev);
			continue;
		}

		OFP_ERR("Error: unexpected event type: %u\n",
			  odp_event_type(ev));

		odp_buffer_free(odp_buffer_from_event(ev));
	}

	/* Never reached */
	return NULL;
}
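The dispatcher above is written as a plain thread entry point. A minimal sketch of launching it with POSIX threads, purely for illustration (OFP/ODP applications normally use their own thread helpers, and the helper name below is hypothetical):

#include <pthread.h>

/* Sketch: run the dispatcher on a separate thread. */
static pthread_t dispatcher_tid;

static int start_event_dispatcher(void)
{
	return pthread_create(&dispatcher_tid, NULL, event_dispatcher, NULL);
}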
Example #5
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;
	odp_queue_type_t q_type;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);

	q_type = odp_queue_type(inq);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(inq);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
		else
			break;
	}

	return odp_queue_destroy(inq);
}
Example #6
static int chaos_thread(void *arg)
{
	uint64_t i, wait;
	int rc;
	chaos_buf *cbuf;
	odp_event_t ev;
	odp_queue_t from;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	int me = odp_thread_id();
	odp_time_t start_time, end_time, diff;

	if (CHAOS_DEBUG)
		printf("Chaos thread %d starting...\n", me);

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);
	start_time = odp_time_local();

	/* Run the test */
	wait = odp_schedule_wait_time(5 * ODP_TIME_MSEC_IN_NS);
	for (i = 0; i < CHAOS_NUM_ROUNDS; i++) {
		ev = odp_schedule(&from, wait);
		if (ev == ODP_EVENT_INVALID)
			continue;

		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		CU_ASSERT_FATAL(cbuf != NULL);
		if (CHAOS_DEBUG)
			printf("Thread %d received event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s, sending to Q %s\n",
			       me, cbuf->evno, cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
			       globals->
			       chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);

		rc = odp_queue_enq(
			globals->
			chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
			ev);
		CU_ASSERT_FATAL(rc == 0);
	}

	if (CHAOS_DEBUG)
		printf("Thread %d completed %d rounds...terminating\n",
		       odp_thread_id(), CHAOS_NUM_EVENTS);

	exit_schedule_loop();

	end_time = odp_time_local();
	diff = odp_time_diff(end_time, start_time);

	printf("Thread %d ends, elapsed time = %" PRIu64 "us\n",
	       odp_thread_id(), odp_time_to_ns(diff) / 1000);

	return 0;
}
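The chaos_buf type is defined elsewhere in the test; only its evno and seqno fields are used here. A plausible minimal definition, given as an assumption rather than the original declaration:

/* Assumed layout -- the real test defines its own chaos_buf. */
typedef struct {
	uint64_t evno;  /* event number assigned when the queues are seeded */
	uint64_t seqno; /* hop counter used to pick the next destination queue */
} chaos_buf;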
Example #7
int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	if (0 > odp_pktio_inq_remdef(pktio))
		return -1;

	while (1) {
		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
		else
			break;
	}

	return odp_queue_destroy(inq);
}
Example #8
odp_packet_t receive_packet(odp_queue_t *queue, uint64_t ns)
{
	odp_event_t ev;

	/* Convert the nanosecond timeout into a scheduler wait value */
	ev = odp_schedule(queue, odp_schedule_wait_time(ns));
	return odp_packet_from_event(ev);
}
Example #9
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;
	odp_queue_type_t q_type;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID)
		return -1;

	odp_pktio_inq_remdef(pktio);

	q_type = odp_queue_type(inq);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(inq);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_buffer_free(odp_buffer_from_event(ev));
		else
			break;
	}

	return odp_queue_destroy(inq);
}
Example #10
static int receive_packets(odp_queue_t pollq,
			   odp_event_t *event_tbl, unsigned num_pkts)
{
	int n_ev = 0;

	if (num_pkts == 0)
		return 0;

	if (pollq != ODP_QUEUE_INVALID) {
		if (num_pkts == 1) {
			event_tbl[0] = odp_queue_deq(pollq);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
		}
	} else {
		if (num_pkts == 1) {
			event_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
			n_ev = event_tbl[0] != ODP_EVENT_INVALID;
		} else {
			n_ev = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT,
						  event_tbl, num_pkts);
		}
	}
	return n_ev;
}
Example #11
void pktio_test_inq_remdef(void)
{
	odp_pktio_t pktio;
	odp_queue_t inq;
	odp_event_t ev;
	uint64_t wait;
	int i;

	pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
			     ODP_PKTOUT_MODE_SEND);
	CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
	CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0);
	inq = odp_pktio_inq_getdef(pktio);
	CU_ASSERT(inq != ODP_QUEUE_INVALID);
	CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);

	wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
	for (i = 0; i < 100; i++) {
		ev = odp_schedule(NULL, wait);
		if (ev != ODP_EVENT_INVALID) {
			odp_event_free(ev);
			CU_FAIL("received unexpected event");
		}
	}

	CU_ASSERT(odp_queue_destroy(inq) == 0);
	CU_ASSERT(odp_pktio_close(pktio) == 0);
}
Example #12
static int empty_inq(odp_pktio_t pktio)
{
	odp_queue_t queue;
	odp_event_t ev;
	odp_queue_type_t q_type;

	if (odp_pktin_event_queue(pktio, &queue, 1) != 1)
		return -1;

	q_type = odp_queue_type(queue);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_PLAIN)
			ev = odp_queue_deq(queue);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_event_free(ev);
		else
			break;
	}

	return 0;
}
Example #13
/** @private test timeout */
static void remove_prescheduled_events(void)
{
	odp_event_t ev;
	odp_queue_t queue;
	odp_schedule_pause();
	while ((ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT)) !=
			ODP_EVENT_INVALID) {
		odp_event_free(ev);
	}
}
Example #14
static int drain_queues(void)
{
	odp_event_t ev;
	uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
	int ret = 0;

	while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID) {
		odp_event_free(ev);
		ret++;
	}

	return ret;
}
Example #15
static int exit_schedule_loop(void)
{
	odp_event_t ev;
	int ret = 0;

	odp_schedule_pause();

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
	      != ODP_EVENT_INVALID) {
		odp_event_free(ev);
		ret++;
	}

	return ret;
}
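Several tests above end with CU_ASSERT(exit_schedule_loop() == 0) (see Examples #1, #25 and #26). A minimal sketch of that teardown pattern with a hypothetical helper name; odp_schedule_resume() is only needed if the same thread keeps scheduling afterwards:

/* Sketch: a clean test run should leave nothing for the drain loop. */
static void check_no_stray_events(void)
{
	CU_ASSERT(exit_schedule_loop() == 0);
	odp_schedule_resume(); /* re-enable scheduling on this thread */
}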
Example #16
/**
 * Main receive function
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *gen_recv_thread(void *arg)
{
	int thr;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_packet_t pkt;
	odp_event_t ev;

	thr = odp_thread_id();
	thr_args = arg;

	pktio = odp_pktio_lookup(thr_args->pktio_dev);
	if (pktio == ODP_PKTIO_INVALID) {
		EXAMPLE_ERR("  [%02i] Error: lookup of pktio %s failed\n",
			    thr, thr_args->pktio_dev);
		return NULL;
	}

	printf("  [%02i] created mode: RECEIVE\n", thr);

	for (;;) {
		if (args->appl.number != -1 &&
		    odp_atomic_load_u64(&counters.icmp) >=
		    (unsigned int)args->appl.number) {
			break;
		}

		/* Use schedule to get buf from any input queue */
		ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		pkt = odp_packet_from_event(ev);
		/* Drop packets with errors */
		if (odp_unlikely(odp_packet_has_error(pkt))) {
			odp_packet_free(pkt);
			continue;
		}

		print_pkts(thr, &pkt, 1);

		odp_packet_free(pkt);
	}

	return arg;
}
Example #17
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_queue_t outq_def;
	odp_packet_t pkt;
	odp_event_t ev;
	thread_args_t *thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));
	*thr_args->stats = stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		/* Use schedule to get buf from any input queue */
		ev  = odp_schedule(NULL, ODP_SCHED_WAIT);
		pkt = odp_packet_from_event(ev);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			stats->drops += 1;
			continue;
		}

		outq_def = lookup_dest_q(pkt);

		/* Enqueue the packet for output */
		if (odp_queue_enq(outq_def, ev)) {
			printf("  [%i] Queue enqueue failed.\n", thr);
			odp_packet_free(pkt);
			continue;
		}

		stats->packets += 1;
	}

	free(stats);
	return NULL;
}
Example #18
static odp_packet_t wait_for_packet(pktio_info_t *pktio_rx,
				    uint32_t seq, uint64_t ns)
{
	odp_time_t wait_time, end;
	odp_event_t ev;
	odp_packet_t pkt;
	uint64_t wait;

	wait = odp_schedule_wait_time(ns);
	wait_time = odp_time_local_from_ns(ns);
	end = odp_time_sum(odp_time_local(), wait_time);
	do {
		pkt = ODP_PACKET_INVALID;

		if (pktio_rx->in_mode == ODP_PKTIN_MODE_RECV) {
			odp_pktio_recv(pktio_rx->id, &pkt, 1);
		} else {
			if (pktio_rx->in_mode == ODP_PKTIN_MODE_POLL)
				ev = queue_deq_wait_time(pktio_rx->inq, ns);
			else
				ev = odp_schedule(NULL, wait);

			if (ev != ODP_EVENT_INVALID) {
				if (odp_event_type(ev) == ODP_EVENT_PACKET)
					pkt = odp_packet_from_event(ev);
				else
					odp_event_free(ev);
			}
		}

		if (pkt != ODP_PACKET_INVALID) {
			if (pktio_pkt_seq(pkt) == seq)
				return pkt;

			odp_packet_free(pkt);
		}
	} while (odp_time_cmp(end, odp_time_local()) > 0);

	CU_FAIL("failed to receive transmitted packet");

	return ODP_PACKET_INVALID;
}
Example #19
void *pp_thread(void *arg)
{
	ALLOW_UNUSED_LOCAL(arg);
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	while (odp_atomic_load_u32(&still_running)) {
		odp_event_t event;
		odp_queue_t source_queue;

		event = odp_schedule(&source_queue, ODP_SCHED_WAIT);

		if (odp_event_type(event) != ODP_EVENT_TIMEOUT) {
			OFP_ERR("Unexpected event type %d",
				odp_event_type(event));
			continue;
		}

		ofp_timer_handle(event);
	}
	return NULL;
}
Example #20
static void schedule_shutdown(void)
{
	odp_event_t evt;
	odp_queue_t from;

	while (1) {
		evt = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (evt == ODP_EVENT_INVALID)
			break;
		switch (odp_event_type(evt)) {
		case ODP_EVENT_TIMEOUT:
			{
				ofp_timer_evt_cleanup(evt);
				break;
			}
		case ODP_EVENT_PACKET:
			{
				odp_packet_free(odp_packet_from_event(evt));
				break;
			}
		case ODP_EVENT_BUFFER:
			{
				odp_buffer_free(odp_buffer_from_event(evt));
				break;
			}
		case ODP_EVENT_CRYPTO_COMPL:
			{
				odp_crypto_compl_free(
					odp_crypto_compl_from_event(evt));
				break;
			}
		}
	}

	odp_schedule_pause();
}
Example #21
static inline
odp_event_t odp_schedule_cb(odp_queue_t *from)
{
	return odp_schedule(from, ODP_SCHED_WAIT);
}
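A wrapper like this is typically used so that blocking and non-blocking schedule calls can share one function-pointer signature. A minimal sketch of that idea; the sched_fn_t type and the second callback are hypothetical:

typedef odp_event_t (*sched_fn_t)(odp_queue_t *from);

static inline
odp_event_t odp_schedule_nowait_cb(odp_queue_t *from)
{
	return odp_schedule(from, ODP_SCHED_NO_WAIT);
}

/* Select the dispatch behavior at run time through a common signature. */
static sched_fn_t sched_fn = odp_schedule_cb;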
Example #22
/**
 * Packet IO loopback worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_queue_t outq_def;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];
	odp_queue_param_t qparam;
	odp_packet_t pkt;
	odp_buffer_t buf;
	int ret;
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed\n", thr);
		return NULL;
	}

	/*
	 * Create and set the default INPUT queue associated with the 'pktio'
	 * resource
	 */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
	snprintf(inq_name, sizeof(inq_name), "%i-pktio_inq_def", (int)pktio);
	inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';

	inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
	if (inq_def == ODP_QUEUE_INVALID) {
		ODP_ERR("  [%02i] Error: pktio queue creation failed\n", thr);
		return NULL;
	}

	ret = odp_pktio_inq_setdef(pktio, inq_def);
	if (ret != 0) {
		ODP_ERR("  [%02i] Error: default input-Q setup\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, queue mode (ATOMIC queues)\n"
	       "          default pktio%02i-INPUT queue:%u\n",
		thr, pktio, pktio, inq_def);

	/* Loop packets */
	for (;;) {
		odp_pktio_t pktio_tmp;

#if 1
		/* Use schedule to get buf from any input queue */
		buf = odp_schedule(NULL, ODP_SCHED_WAIT);
#else
		/* Always dequeue from the same input queue */
		buf = odp_queue_deq(inq_def);
		if (!odp_buffer_is_valid(buf))
			continue;
#endif

		pkt = odp_packet_from_buffer(buf);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			ODP_ERR("Drop frame - err_cnt:%lu\n", ++err_cnt);
			continue;
		}

		pktio_tmp = odp_pktio_get_input(pkt);
		outq_def = odp_pktio_outq_getdef(pktio_tmp);

		if (outq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("  [%02i] Error: def output-Q query\n", thr);
			return NULL;
		}

		/* Swap Eth MACs and possibly IP-addrs before sending back */
		swap_pkt_addrs(&pkt, 1);

		/* Enqueue the packet for output */
		odp_queue_enq(outq_def, buf);

		/* Print packet counts every once in a while */
		if (odp_unlikely(pkt_cnt++ % 100000 == 0)) {
			printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
			fflush(NULL);
		}
	}

/* unreachable */
}
Example #23
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
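The CHAOS_* constants and the queue-context conversion macros used above and in Example #6 are defined elsewhere in the test. A plausible sketch of the two macros, assuming the queue context simply encodes the queue index:

/* Assumed helpers -- the original test defines its own versions. */
#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)(n))
#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uintptr_t)(p))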
Example #24
void pktio_test_start_stop(void)
{
	odp_pktio_t pktio[MAX_NUM_IFACES];
	odp_packet_t pkt;
	odp_event_t tx_ev[100];
	odp_event_t ev;
	int i, pkts, ret, alloc = 0;
	odp_queue_t outq;
	uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);

	for (i = 0; i < num_ifaces; i++) {
		pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
					ODP_PKTOUT_MODE_SEND);
		CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
		create_inq(pktio[i],  ODP_QUEUE_TYPE_SCHED);
	}

	outq = odp_pktio_outq_getdef(pktio[0]);

	/* Interfaces are stopped by default;
	 * check that stopping an already stopped interface generates an error */
	ret = odp_pktio_stop(pktio[0]);
	CU_ASSERT(ret <= 0);

	/* start first */
	ret = odp_pktio_start(pktio[0]);
	CU_ASSERT(ret == 0);
	/* Check that start when started generates an error */
	ret = odp_pktio_start(pktio[0]);
	CU_ASSERT(ret < 0);

	/* Test Rx on a stopped interface. Only works if there are 2 */
	if (num_ifaces > 1) {
		for (alloc = 0; alloc < 100; alloc++) {
			pkt = odp_packet_alloc(default_pkt_pool, packet_len);
			if (pkt == ODP_PACKET_INVALID)
				break;
			pktio_init_packet(pkt);

			pktio_pkt_set_macs(pkt, pktio[0], pktio[1]);
			if (pktio_fixup_checksums(pkt) != 0) {
				odp_packet_free(pkt);
				break;
			}

			tx_ev[alloc] = odp_packet_to_event(pkt);
		}

		for (pkts = 0; pkts != alloc; ) {
			ret = odp_queue_enq_multi(outq, &tx_ev[pkts],
						  alloc - pkts);
			if (ret < 0) {
				CU_FAIL("unable to enqueue packet\n");
				break;
			}
			pkts += ret;
		}
		/* check that packets did not arrive */
		for (i = 0, pkts = 0; i < 1000; i++) {
			ev = odp_schedule(NULL, wait);
			if (ev == ODP_EVENT_INVALID)
				continue;

			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
				if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
					pkts++;
			}
			odp_event_free(ev);
		}
		if (pkts)
			CU_FAIL("pktio stopped, received unexpected events");

		/* start both, send and get packets */
		/* 0 already started */
		ret = odp_pktio_start(pktio[1]);
		CU_ASSERT(ret == 0);

		/* flush packets with magic number in pipes */
		for (i = 0; i < 1000; i++) {
			ev = odp_schedule(NULL, wait);
			if (ev != ODP_EVENT_INVALID)
				odp_event_free(ev);
		}
	}

	/* alloc */
	for (alloc = 0; alloc < 100; alloc++) {
		pkt = odp_packet_alloc(default_pkt_pool, packet_len);
		if (pkt == ODP_PACKET_INVALID)
			break;
		pktio_init_packet(pkt);
		if (num_ifaces > 1) {
			pktio_pkt_set_macs(pkt, pktio[0], pktio[1]);
			if (pktio_fixup_checksums(pkt) != 0) {
				odp_packet_free(pkt);
				break;
			}
		}
		tx_ev[alloc] = odp_packet_to_event(pkt);
	}

	/* send */
	for (pkts = 0; pkts != alloc; ) {
		ret = odp_queue_enq_multi(outq, &tx_ev[pkts], alloc - pkts);
		if (ret < 0) {
			CU_FAIL("unable to enqueue packet\n");
			break;
		}
		pkts += ret;
	}

	/* get */
	for (i = 0, pkts = 0; i < 100; i++) {
		ev = odp_schedule(NULL, wait);
		if (ev != ODP_EVENT_INVALID) {
			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
				if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
					pkts++;
			}
			odp_event_free(ev);
		}
	}
	CU_ASSERT(pkts == alloc);

	for (i = 0; i < num_ifaces; i++) {
		CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
		destroy_inq(pktio[i]);
		CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
	}
}
Example #25
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev  = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;
			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence = bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;
				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;
				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
Example #26
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_t queue_grp1, queue_grp2;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;
	odp_schedule_group_info_t info;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Info struct */
	memset(&info, 0, sizeof(odp_schedule_group_info_t));
	rc = odp_schedule_group_info(mygrp1, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		odp_queue_param_t qp;
		odp_queue_t queue, from;
		odp_schedule_group_t mygrp[NUM_GROUPS];
		odp_queue_t queue_grp[NUM_GROUPS];
		int num = NUM_GROUPS;

		odp_queue_param_init(&qp);
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Swap between two groups. Application should serve both
		 * groups to avoid potential head of line blocking in
		 * scheduler. */
		mygrp[0]     = mygrp1;
		mygrp[1]     = mygrp2;
		queue_grp[0] = queue_grp1;
		queue_grp[1] = queue_grp2;
		j = 0;

		/* Ensure that each test run starts from mygrp1 */
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);
		odp_schedule_group_join(mygrp1, &mymask);

		while (num) {
			queue = queue_grp[j];
			ev    = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID) {
				/* change group */
				rc = odp_schedule_group_leave(mygrp[j],
							      &mymask);
				CU_ASSERT_FATAL(rc == 0);

				j = (j + 1) % NUM_GROUPS;
				rc = odp_schedule_group_join(mygrp[j],
							     &mymask);
				CU_ASSERT_FATAL(rc == 0);
				continue;
			}

			CU_ASSERT_FATAL(from == queue);

			buf = odp_buffer_from_event(ev);
			u32 = odp_buffer_addr(buf);

			if (from == queue_grp1) {
				/* CU_ASSERT_FATAL needs these brackets */
				CU_ASSERT_FATAL(u32[0] == MAGIC1);
			} else {
				CU_ASSERT_FATAL(u32[0] == MAGIC2);
			}

			odp_buffer_free(buf);

			/* Tell scheduler we're about to request an event.
			 * Not needed, but a convenient place to test this API.
			 */
			odp_schedule_prefetch(1);

			num--;
		}

		/* Release scheduler context and leave groups */
		odp_schedule_group_join(mygrp1, &mymask);
		odp_schedule_group_join(mygrp2, &mymask);
		CU_ASSERT(exit_schedule_loop() == 0);
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Example #27
void scheduler_test_queue_destroy(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 1;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_destroy_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = ODP_SCHED_GROUP_ALL;

		queue = odp_queue_create("sched_destroy_queue", &qp);

		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
			odp_buffer_free(buf);

		ev = odp_schedule(&from, ODP_SCHED_WAIT);

		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);

		CU_ASSERT_FATAL(from == queue);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC);

		odp_buffer_free(buf);
		odp_schedule_release_ordered();

		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
	}

	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Example #28
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
    uint64_t    period;
    uint64_t    period_ns;
    odp_queue_t queue;
    uint64_t    tick;
    struct test_timer *ttp;
    odp_timeout_t tmo;
    uint32_t num_workers = gbls->num_workers;

    EXAMPLE_DBG("  [%i] test_timeouts\n", thr);

    queue      = odp_queue_lookup("timer_queue");

    period_ns  = gbls->args.period_us * ODP_TIME_USEC;
    period     = odp_timer_ns_to_tick(gbls->tp, period_ns);

    EXAMPLE_DBG("  [%i] period %d ticks,  %d ns\n", thr,
                period, period_ns);

    EXAMPLE_DBG("  [%i] current tick %d\n", thr,
                odp_timer_current_tick(gbls->tp));

    ttp = &gbls->tt[thr];
    ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
    if (ttp->tim == ODP_TIMER_INVALID) {
        EXAMPLE_ERR("Failed to allocate timer\n");
        return;
    }

    tmo = odp_timeout_alloc(gbls->pool);
    if (tmo == ODP_TIMEOUT_INVALID) {
        EXAMPLE_ERR("Failed to allocate timeout\n");
        return;
    }

    ttp->ev    = odp_timeout_to_event(tmo);
    tick       = odp_timer_current_tick(gbls->tp);

    while (1) {
        int wait = 0;
        odp_event_t ev;
        odp_timer_set_t rc;

        if (ttp) {
            tick  += period;
            rc     = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
            if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
                /* Too early or too late timeout requested */
                EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
                              timerset2str(rc));
        }

        /* Get the next expired timeout.
         * We invoke the scheduler in a loop with a timeout because
         * we are not guaranteed to receive any more timeouts. The
         * scheduler isn't guaranteeing fairness when scheduling
         * buffers to threads.
         * Use 1.5 second timeout for scheduler */
        uint64_t sched_tmo =
            odp_schedule_wait_time(1500000000ULL);
        do {
            ev = odp_schedule(&queue, sched_tmo);

            /* Check if odp_schedule() timed out, possibly there
             * are no remaining timeouts to receive */
            if ((++wait > WAIT_NUM)
                    && (odp_atomic_load_u32(&gbls->remain) < num_workers))
                EXAMPLE_ABORT("At least one TMO was lost\n");
        } while (ev == ODP_EVENT_INVALID
                 && (int)odp_atomic_load_u32(&gbls->remain) > 0);

        if (ev == ODP_EVENT_INVALID)
            break;  /* No more timeouts */

        if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
            /* Not a default timeout event */
            EXAMPLE_ABORT("Unexpected event type (%u) received\n",
                          odp_event_type(ev));

        odp_timeout_t tmo = odp_timeout_from_event(ev);
        tick       = odp_timeout_tick(tmo);
        ttp        = odp_timeout_user_ptr(tmo);
        ttp->ev    = ev;
        if (!odp_timeout_fresh(tmo))
            /* Not the expected expiration tick, timer has
             * been reset or cancelled or freed */
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);

        EXAMPLE_DBG("  [%i] timeout, tick %d\n", thr, tick);

        uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

        if (!rx_num)
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);
        else if (rx_num > num_workers)
            continue;

        odp_timeout_free(odp_timeout_from_event(ttp->ev));
        odp_timer_free(ttp->tim);
        ttp = NULL;
    }

    /* Remove any prescheduled events */
    remove_prescheduled_events();
}
Example #29
void *pp_thread(void *arg)
{
	ALLOW_UNUSED_LOCAL(arg);

#if ODP_VERSION >= 102
	if (odp_init_local(ODP_THREAD_WORKER)) {
#else
	if (odp_init_local()) {
#endif
		OFP_ERR("odp_init_local failed");
		return NULL;
	}
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	while (odp_atomic_load_u32(&still_running)) {
		odp_event_t event;
		odp_queue_t source_queue;

		event = odp_schedule(&source_queue, ODP_SCHED_WAIT);

		if (odp_event_type(event) != ODP_EVENT_TIMEOUT) {
			OFP_ERR("Unexpected event type %d",
				odp_event_type(event));
			continue;
		}

		ofp_timer_handle(event);
	}
	return NULL;
}

static void test_arp(void)
{
	struct ofp_ifnet mock_ifnet;
	struct in_addr ip;
	uint8_t mac[OFP_ETHER_ADDR_LEN] = { 0x00, 0xFF, 0x00, 0x00, 0xFF, 0x00, };

	/* The buffer passed into ofp_ipv4_lookup_mac() must be 8 bytes since
	 * a 64-bit operation is currently being used to copy a MAC address.
	 */
	uint8_t mac_result[OFP_ETHER_ADDR_LEN + 2];

	CU_ASSERT(0 == ofp_init_local());

	memset(&mock_ifnet, 0, sizeof(mock_ifnet));
	CU_ASSERT(0 != inet_aton("1.1.1.1", &ip));

	/* Test entry insert, lookup, and remove. */
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));

	memset(mac_result, 0xFF, OFP_ETHER_ADDR_LEN);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	CU_ASSERT(0 == memcmp(mac, mac_result, OFP_ETHER_ADDR_LEN));

	CU_ASSERT(0 == ofp_arp_ipv4_remove(ip.s_addr, &mock_ifnet));
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	/* Test entry is aged out. */
	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));
	OFP_INFO("Inserted ARP entry");
	sleep(ARP_AGE_INTERVAL + ARP_ENTRY_TIMEOUT);
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	/* Test entry is aged out after a few hits. */
	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));
	OFP_INFO("Inserted ARP entry");
	sleep(ARP_AGE_INTERVAL);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	sleep(ARP_AGE_INTERVAL);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	sleep(ARP_AGE_INTERVAL + ARP_ENTRY_TIMEOUT);
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
}

int main(void)
{
	CU_pSuite ptr_suite = NULL;
	int nr_of_failed_tests = 0;
	int nr_of_failed_suites = 0;

	/* Initialize the CUnit test registry */
	if (CUE_SUCCESS != CU_initialize_registry())
		return CU_get_error();

	/* add a suite to the registry */
	ptr_suite = CU_add_suite("ofp errno", init_suite, end_suite);
	if (NULL == ptr_suite) {
		CU_cleanup_registry();
		return CU_get_error();
	}
	if (NULL == CU_ADD_TEST(ptr_suite, test_arp)) {
		CU_cleanup_registry();
		return CU_get_error();
	}

#if defined(OFP_TESTMODE_AUTO)
	CU_set_output_filename("CUnit-Util");
	CU_automated_run_tests();
#else
	/* Run all tests using the CUnit Basic interface */
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
#endif

	nr_of_failed_tests = CU_get_number_of_tests_failed();
	nr_of_failed_suites = CU_get_number_of_suites_failed();
	CU_cleanup_registry();

	return (nr_of_failed_suites > 0 ?
		nr_of_failed_suites : nr_of_failed_tests);
}
Example #30
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG("  [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us*ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG("  [%i] period %"PRIu64" ticks,  %"PRIu64" ns\n", thr,
		    period, period_ns);

	EXAMPLE_DBG("  [%i] current tick %"PRIu64"\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo =
			odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}
		odp_timeout_t tmo = odp_timeout_from_event(ev);
		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}
		EXAMPLE_DBG("  [%i] timeout, tick %"PRIu64"\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");
	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}