Example No. 1
int odp_schedule_term_global(void)
{
	int ret = 0;
	int rc = 0;
	int i, j;

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		for (j = 0; j < QUEUES_PER_PRIO; j++) {
			odp_queue_t  pri_q;
			odp_event_t  ev;

			pri_q = sched->pri_queue[i][j];

			while ((ev = odp_queue_deq(pri_q)) !=
			      ODP_EVENT_INVALID) {
				odp_buffer_t buf;
				sched_cmd_t *sched_cmd;

				buf = odp_buffer_from_event(ev);
				sched_cmd = odp_buffer_addr(buf);

				if (sched_cmd->cmd == SCHED_CMD_DEQUEUE) {
					queue_entry_t *qe;
					odp_buffer_hdr_t *buf_hdr[1];
					int num;

					qe  = sched_cmd->qe;
					num = queue_deq_multi(qe, buf_hdr, 1);

					if (num < 0)
						queue_destroy_finalize(qe);

					if (num > 0)
						ODP_ERR("Queue not empty\n");
				} else
					odp_buffer_free(buf);
			}

			if (odp_queue_destroy(pri_q)) {
				ODP_ERR("Pri queue destroy fail.\n");
				rc = -1;
			}
		}
	}

	if (odp_pool_destroy(sched->pool) != 0) {
		ODP_ERR("Pool destroy fail.\n");
		rc = -1;
	}

	ret = odp_shm_free(sched->shm);
	if (ret < 0) {
		ODP_ERR("Shm free failed for odp_scheduler");
		rc = -1;
	}

	return rc;
}
Example No. 2
void scheduler_test_wait_time(void)
{
	int i;
	odp_queue_t queue;
	uint64_t wait_time;
	odp_queue_param_t qp;
	odp_time_t lower_limit, upper_limit;
	odp_time_t start_time, end_time, diff;

	/* check on read */
	wait_time = odp_schedule_wait_time(0);
	wait_time = odp_schedule_wait_time(1);

	/* check ODP_SCHED_NO_WAIT */
	odp_queue_param_init(&qp);
	qp.type        = ODP_QUEUE_TYPE_SCHED;
	qp.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	qp.sched.prio  = ODP_SCHED_PRIO_NORMAL;
	qp.sched.group = ODP_SCHED_GROUP_ALL;
	queue = odp_queue_create("dummy_queue", &qp);
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
	start_time = odp_time_local();
	odp_schedule(&queue, ODP_SCHED_NO_WAIT);
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = ODP_TIME_NULL;
	upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

	/* check time correctness */
	start_time = odp_time_local();
	for (i = 1; i < 6; i++) {
		odp_schedule(&queue, wait_time);
		printf("%d..", i);
	}
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
							ODP_WAIT_TOLERANCE);
	upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
							ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
}
Example No. 3
void scheduler_test_wait_time(void)
{
	odp_queue_t queue;
	uint64_t wait_time;
	odp_queue_param_t qp;
	odp_time_t lower_limit, upper_limit;
	odp_time_t start_time, end_time, diff;

	/* check on read */
	wait_time = odp_schedule_wait_time(0);
	wait_time = odp_schedule_wait_time(1);
	(void)wait_time;

	/* check ODP_SCHED_NO_WAIT */
	odp_queue_param_init(&qp);
	queue = odp_queue_create("dummy_queue", ODP_QUEUE_TYPE_SCHED, &qp);
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
	start_time = odp_time_local();
	odp_schedule(&queue, ODP_SCHED_NO_WAIT);
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = ODP_TIME_NULL;
	upper_limit = odp_time_local_from_ns(ODP_WAIT_TOLERANCE);

	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);

#ifndef MAGIC_SCALL
	int i;

	/* check time correctness */
	start_time = odp_time_local();
	for (i = 1; i < 6; i++) {
		odp_schedule(&queue, wait_time);
		/* printf("%d..", i); */
	}
	end_time = odp_time_local();

	diff = odp_time_diff(end_time, start_time);
	lower_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS -
							ODP_WAIT_TOLERANCE);
	upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
							ODP_WAIT_TOLERANCE);
	CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
	CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
#endif

	CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
}
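odp_schedule_wait_time() converts a nanosecond timeout into the opaque wait argument expected by odp_schedule(), which is what both versions of the test above exercise. A small usage sketch (hypothetical timeout value, not part of the test):

	/* Sketch: wait up to 100 ms for the next event. Passing NULL as the
	 * "from" pointer means the source queue handle is not needed. */
	uint64_t wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
	odp_event_t ev = odp_schedule(NULL, wait);

	if (ev != ODP_EVENT_INVALID)
		odp_event_free(ev);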
Example No. 4
File: queue.c Project: nmorey/odp
void queue_test_mode(void)
{
	odp_queue_param_t qparams;
	odp_queue_t queue;
	int i, j;
	odp_queue_op_mode_t mode[3] = { ODP_QUEUE_OP_MT,
					ODP_QUEUE_OP_MT_UNSAFE,
					ODP_QUEUE_OP_DISABLED };

	odp_queue_param_init(&qparams);

	/* Plain queue modes */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			/* Should not disable both enq and deq */
			if (i == 2 && j == 2)
				break;

			qparams.enq_mode = mode[i];
			qparams.deq_mode = mode[j];
			queue = odp_queue_create("test_queue", &qparams);
			CU_ASSERT(queue != ODP_QUEUE_INVALID);
			if (queue != ODP_QUEUE_INVALID)
				CU_ASSERT(odp_queue_destroy(queue) == 0);
		}
	}

	odp_queue_param_init(&qparams);
	qparams.type = ODP_QUEUE_TYPE_SCHED;

	/* Scheduled queue modes. Dequeue mode is fixed. */
	for (i = 0; i < 3; i++) {
		qparams.enq_mode = mode[i];
		queue = odp_queue_create("test_queue", &qparams);
		CU_ASSERT(queue != ODP_QUEUE_INVALID);
		if (queue != ODP_QUEUE_INVALID)
			CU_ASSERT(odp_queue_destroy(queue) == 0);
	}
}
Example No. 5
void classification_test_cos_set_queue(void)
{
	int retval;
	char cosname[ODP_COS_NAME_LEN];
	odp_cls_cos_param_t cls_param;
	odp_pool_t pool;
	odp_queue_t queue;
	odp_queue_t queue_cos;
	odp_cos_t cos_queue;
	odp_queue_t recvqueue;

	pool = pool_create("cls_basic_pool");
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	queue = queue_create("cls_basic_queue", true);
	CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

	sprintf(cosname, "CoSQueue");
	odp_cls_cos_param_init(&cls_param);
	cls_param.pool = pool;
	cls_param.queue = queue;
	cls_param.drop_policy = ODP_COS_DROP_POOL;
	cos_queue = odp_cls_cos_create(cosname, &cls_param);
	CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);

	queue_cos = queue_create("QueueCoS", true);
	CU_ASSERT_FATAL(queue_cos != ODP_QUEUE_INVALID);

	retval = odp_cos_queue_set(cos_queue, queue_cos);
	CU_ASSERT(retval == 0);
	recvqueue = odp_cos_queue(cos_queue);
	CU_ASSERT(recvqueue == queue_cos);

	odp_cos_destroy(cos_queue);
	odp_queue_destroy(queue_cos);
	odp_queue_destroy(queue);
	odp_pool_destroy(pool);
}
Example No. 6
int ofp_timer_term_global(void)
{
	int i;
	struct ofp_timer_internal *bufdata, *next;
	int rc = 0;

	if (ofp_timer_lookup_shared_memory())
		return -1;

	CHECK_ERROR(ofp_timer_stop_global(), rc);

/* Cleanup long timers*/
	for (i = 0; i < TIMER_NUM_LONG_SLOTS; i++) {
		bufdata = shm->long_table[i];
		if (!bufdata)
			continue;

		while (bufdata) {
			next = bufdata->next;
			odp_buffer_free(bufdata->buf);
			bufdata = next;
		}
	}

/* Cleanup timer related ODP objects*/
	if (shm->queue != ODP_QUEUE_INVALID) {
		CHECK_ERROR(odp_queue_destroy(shm->queue), rc);
		shm->queue = ODP_QUEUE_INVALID;
	}

	if (shm->socket_timer_pool != ODP_TIMER_POOL_INVALID) {
		odp_timer_pool_destroy(shm->socket_timer_pool);
		shm->socket_timer_pool = ODP_TIMER_POOL_INVALID;
	}

	if (shm->buf_pool != ODP_POOL_INVALID) {
		CHECK_ERROR(odp_pool_destroy(shm->buf_pool), rc);
		shm->buf_pool = ODP_POOL_INVALID;
	}

	if (shm->pool != ODP_POOL_INVALID) {
		CHECK_ERROR(odp_pool_destroy(shm->pool), rc);
		shm->pool = ODP_POOL_INVALID;
	}

	CHECK_ERROR(ofp_timer_free_shared_memory(), rc);

	return rc;
}
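The CHECK_ERROR() macro used above belongs to OFP and its definition is not part of this snippet. As a rough, hypothetical stand-in that matches how the call sites use it (record a failure in rc without returning early), it could be sketched as:

/* Hypothetical stand-in for OFP's CHECK_ERROR(), shown only to make the
 * call sites above readable: evaluate the expression and latch -1 into rc
 * on failure instead of returning immediately. Not OFP's definition. */
#define CHECK_ERROR(expr, rc) \
	do { \
		if ((expr) != 0) \
			(rc) = -1; \
	} while (0)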
Example No. 7
static int destroy_queue(const char *name)
{
	odp_queue_t q;
	queue_context *qctx;

	q = odp_queue_lookup(name);

	if (q == ODP_QUEUE_INVALID)
		return -1;
	qctx = odp_queue_context(q);
	if (qctx)
		odp_buffer_free(qctx->ctx_handle);

	return odp_queue_destroy(q);
}
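odp_queue_destroy() expects the queue to be empty, which is why several examples in this collection drain events before destroying. A minimal sketch of that pattern for a plain queue, using only calls that already appear in these examples, might be:

/* Sketch: dequeue and free any remaining events so the queue is empty
 * before odp_queue_destroy() is called. Assumes a plain (unscheduled)
 * queue whose remaining events are no longer needed. */
static int drain_and_destroy(odp_queue_t q)
{
	odp_event_t ev;

	while ((ev = odp_queue_deq(q)) != ODP_EVENT_INVALID)
		odp_event_free(ev);

	return odp_queue_destroy(q);
}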
Example No. 8
File: queue.c Project: nmorey/odp
void queue_test_capa(void)
{
	odp_queue_capability_t capa;
	odp_queue_param_t qparams;
	char name[ODP_QUEUE_NAME_LEN];
	odp_queue_t queue[MAX_QUEUES];
	uint32_t num_queues, i;

	memset(&capa, 0, sizeof(odp_queue_capability_t));
	CU_ASSERT(odp_queue_capability(&capa) == 0);

	CU_ASSERT(capa.max_queues != 0);
	CU_ASSERT(capa.max_ordered_locks != 0);
	CU_ASSERT(capa.max_sched_groups != 0);
	CU_ASSERT(capa.sched_prios != 0);

	for (i = 0; i < ODP_QUEUE_NAME_LEN; i++)
		name[i] = 'A' + (i % 26);

	name[ODP_QUEUE_NAME_LEN - 1] = 0;

	if (capa.max_queues > MAX_QUEUES)
		num_queues = MAX_QUEUES;
	else
		num_queues = capa.max_queues;

	odp_queue_param_init(&qparams);

	for (i = 0; i < num_queues; i++) {
		generate_name(name, i);
		queue[i] = odp_queue_create(name, &qparams);

		if (queue[i] == ODP_QUEUE_INVALID) {
			CU_FAIL("Queue create failed");
			num_queues = i;
			break;
		}

		CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
	}

	for (i = 0; i < num_queues; i++)
		CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
}
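The generate_name() helper is not included in this snippet. Judging from the caller, which pre-fills a maximum-length name and then needs a unique name per queue, a plausible (purely hypothetical) implementation encodes the index into the first characters and leaves the rest of the buffer untouched:

/* Hypothetical generate_name(): make the pre-filled, maximum-length name
 * unique by encoding the queue index into its first characters. This is a
 * guess at the helper's intent, not the test suite's actual code. */
static void generate_name(char *name, uint32_t index)
{
	name[0] = 'A' + ((index / (26 * 26 * 26)) % 26);
	name[1] = 'A' + ((index / (26 * 26)) % 26);
	name[2] = 'A' + ((index / 26) % 26);
	name[3] = 'A' + (index % 26);
}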
Example No. 9
int odp_pktio_term_global(void)
{
	pktio_entry_t *pktio_entry;
	int ret = 0;
	int id;
	int pktio_if;

	for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if)
		if (pktio_if_ops[pktio_if]->term)
			if (pktio_if_ops[pktio_if]->term())
				ODP_ERR("failed to terminate pktio type %d",
					pktio_if);

	for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) {
		pktio_entry = &pktio_tbl->entries[id - 1];
		odp_queue_destroy(pktio_entry->s.outq_default);
	}

	ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries"));
	if (ret < 0)
		ODP_ERR("shm free failed for odp_pktio_entries");

	return ret;
}
Example No. 10
static void classification_test_cos_set_queue(void)
{
	int retval;
	char cosname[ODP_COS_NAME_LEN];
	char queuename[ODP_QUEUE_NAME_LEN];
	odp_queue_param_t qparam;
	odp_queue_t queue_cos;
	odp_cos_t cos_queue;
	sprintf(cosname, "CoSQueue");
	cos_queue = odp_cos_create(cosname);
	CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);

	memset(&qparam, 0, sizeof(odp_queue_param_t));
	qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
	qparam.sched.sync = ODP_SCHED_SYNC_NONE;
	qparam.sched.group = ODP_SCHED_GROUP_ALL;
	sprintf(queuename, "%s", "QueueCoS");

	queue_cos = odp_queue_create(queuename,
				     ODP_QUEUE_TYPE_SCHED, &qparam);
	retval = odp_cos_set_queue(cos_queue, queue_cos);
	CU_ASSERT(retval == 0);
	odp_cos_destroy(cos_queue);
	odp_queue_destroy(queue_cos);
}
Example No. 11
int ofp_term_global(void)
{
	int rc = 0;
	uint16_t i;
	struct ofp_ifnet *ifnet;

	ofp_stop();

	/* Terminate CLI thread*/
	CHECK_ERROR(ofp_stop_cli_thread(), rc);

#ifdef SP
	/* Terminate Netlink thread*/
	if (shm->nl_thread_is_running) {
		odph_linux_pthread_join(&shm->nl_thread, 1);
		shm->nl_thread_is_running = 0;
	}
#endif /* SP */

	/* Cleanup interfaces: queues and pktios*/
	for (i = 0; i < VXLAN_PORTS; i++) {
		ifnet = ofp_get_ifnet((uint16_t)i, 0);
		if (!ifnet) {
			OFP_ERR("Failed to locate interface for port %d", i);
			rc = -1;
			continue;
		}
		if (ifnet->if_state == OFP_IFT_STATE_FREE)
			continue;

		if (ifnet->pktio == ODP_PKTIO_INVALID)
			continue;

		OFP_INFO("Cleaning device '%s' addr %s", ifnet->if_name,
			ofp_print_mac((uint8_t *)ifnet->mac));

		CHECK_ERROR(odp_pktio_stop(ifnet->pktio), rc);
#ifdef SP
		close(ifnet->fd);
		odph_linux_pthread_join(ifnet->rx_tbl, 1);
		odph_linux_pthread_join(ifnet->tx_tbl, 1);
		ifnet->fd = -1;
#endif /*SP*/

		/* Multicasting. */
		ofp_igmp_domifdetach(ifnet);
		ifnet->ii_inet.ii_igmp = NULL;

		if (ifnet->loopq_def != ODP_QUEUE_INVALID) {
			if (odp_queue_destroy(ifnet->loopq_def) < 0) {
				OFP_ERR("Failed to destroy loop queue for %s",
					ifnet->if_name);
				rc = -1;
			}
			ifnet->loopq_def = ODP_QUEUE_INVALID;
		}
#ifdef SP
		if (ifnet->spq_def != ODP_QUEUE_INVALID) {
			cleanup_pkt_queue(ifnet->spq_def);
			if (odp_queue_destroy(ifnet->spq_def) < 0) {
				OFP_ERR("Failed to destroy slow path "
					"queue for %s", ifnet->if_name);
				rc = -1;
			}
			ifnet->spq_def = ODP_QUEUE_INVALID;
		}
#endif /*SP*/
		ifnet->outq_def = ODP_QUEUE_INVALID;

		if (ifnet->pktio != ODP_PKTIO_INVALID) {
			if (odp_pktio_close(ifnet->pktio) < 0) {
				OFP_ERR("Failed to destroy pktio for %s",
					ifnet->if_name);
				rc = -1;
			}
			ifnet->pktio = ODP_PKTIO_INVALID;
		}

		if (ifnet->inq_def != ODP_QUEUE_INVALID) {
			cleanup_pkt_queue(ifnet->inq_def);
			if (odp_queue_destroy(ifnet->inq_def) < 0) {
				OFP_ERR("Failed to destroy default input "
					"queue for %s", ifnet->if_name);
				rc = -1;
			}
			ifnet->inq_def = ODP_QUEUE_INVALID;
		}
	}

	CHECK_ERROR(ofp_clean_vxlan_interface_queue(), rc);

	if (ofp_term_post_global(SHM_PACKET_POOL_NAME)) {
		OFP_ERR("Failed to cleanup resources\n");
		rc = -1;
	}

	return rc;
}
Example No. 12
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_t queue_grp1, queue_grp2;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;
	odp_schedule_group_info_t info;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Info struct */
	memset(&info, 0, sizeof(odp_schedule_group_info_t));
	rc = odp_schedule_group_info(mygrp1, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		odp_queue_param_t qp;
		odp_queue_t queue, from;
		odp_schedule_group_t mygrp[NUM_GROUPS];
		odp_queue_t queue_grp[NUM_GROUPS];
		int num = NUM_GROUPS;

		odp_queue_param_init(&qp);
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Swap between two groups. Application should serve both
		 * groups to avoid potential head of line blocking in
		 * scheduler. */
		mygrp[0]     = mygrp1;
		mygrp[1]     = mygrp2;
		queue_grp[0] = queue_grp1;
		queue_grp[1] = queue_grp2;
		j = 0;

		/* Ensure that each test run starts from mygrp1 */
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);
		odp_schedule_group_join(mygrp1, &mymask);

		while (num) {
			queue = queue_grp[j];
			ev    = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID) {
				/* change group */
				rc = odp_schedule_group_leave(mygrp[j],
							      &mymask);
				CU_ASSERT_FATAL(rc == 0);

				j = (j + 1) % NUM_GROUPS;
				rc = odp_schedule_group_join(mygrp[j],
							     &mymask);
				CU_ASSERT_FATAL(rc == 0);
				continue;
			}

			CU_ASSERT_FATAL(from == queue);

			buf = odp_buffer_from_event(ev);
			u32 = odp_buffer_addr(buf);

			if (from == queue_grp1) {
				/* CU_ASSERT_FATAL needs these brackets */
				CU_ASSERT_FATAL(u32[0] == MAGIC1);
			} else {
				CU_ASSERT_FATAL(u32[0] == MAGIC2);
			}

			odp_buffer_free(buf);

			/* Tell scheduler we're about to request an event.
			 * Not needed, but a convenient place to test this API.
			 */
			odp_schedule_prefetch(1);

			num--;
		}

		/* Release scheduler context and leave groups */
		odp_schedule_group_join(mygrp1, &mymask);
		odp_schedule_group_join(mygrp2, &mymask);
		CU_ASSERT(exit_schedule_loop() == 0);
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Example No. 13
static void chaos_run(unsigned int qtype)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	int i, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.type        = ODP_QUEUE_TYPE_SCHED;
	qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qp.sched.group = ODP_SCHED_GROUP_ALL;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);

		qp.sched.sync = sync[ndx];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[ndx]);

		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name, &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i), 0);
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	drain_queues();
	exit_schedule_loop();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Example No. 14
/**
 * Test main function
 */
int main(int argc, char *argv[])
{
	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
	int num_workers;
	odp_queue_t queue;
	uint64_t tick, ns;
	odp_queue_param_t param;
	odp_pool_param_t params;
	odp_timer_pool_param_t tparams;
	odp_timer_pool_info_t tpinfo;
	odp_cpumask_t cpumask;
	char cpumaskstr[ODP_CPUMASK_STR_SIZE];
	odp_shm_t shm = ODP_SHM_INVALID;
	test_globals_t *gbls = NULL;
	int err = 0;

	printf("\nODP timer example starts\n");

	if (odp_init_global(NULL, NULL)) {
		err = 1;
		printf("ODP global init failed.\n");
		goto err_global;
	}

	/* Init this thread. */
	if (odp_init_local(ODP_THREAD_CONTROL)) {
		err = 1;
		printf("ODP local init failed.\n");
		goto err_local;
	}

	printf("\n");
	printf("ODP system info\n");
	printf("---------------\n");
	printf("ODP API version: %s\n",        odp_version_api_str());
	printf("CPU model:       %s\n",        odp_cpu_model_str());
	printf("CPU freq (hz):   %"PRIu64"\n", odp_cpu_hz_max());
	printf("Cache line size: %i\n",        odp_sys_cache_line_size());
	printf("Max CPU count:   %i\n",        odp_cpu_count());

	printf("\n");

	/* Reserve memory for test_globals_t from shared mem */
	shm = odp_shm_reserve("shm_test_globals", sizeof(test_globals_t),
			      ODP_CACHE_LINE_SIZE, 0);
	if (ODP_SHM_INVALID == shm) {
		err = 1;
		EXAMPLE_ERR("Error: shared mem reserve failed.\n");
		goto err;
	}

	gbls = odp_shm_addr(shm);
	if (NULL == gbls) {
		err = 1;
		EXAMPLE_ERR("Error: shared mem alloc failed.\n");
		goto err;
	}
	memset(gbls, 0, sizeof(test_globals_t));
	gbls->pool = ODP_POOL_INVALID;
	gbls->tp = ODP_TIMER_POOL_INVALID;

	parse_args(argc, argv, &gbls->args);

	memset(thread_tbl, 0, sizeof(thread_tbl));

	/* Default to system CPU count unless user specified */
	num_workers = MAX_WORKERS;
	if (gbls->args.cpu_count)
		num_workers = gbls->args.cpu_count;

	/* Get default worker cpumask */
	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
	(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));

	printf("num worker threads: %i\n", num_workers);
	printf("first CPU:          %i\n", odp_cpumask_first(&cpumask));
	printf("cpu mask:           %s\n", cpumaskstr);

	printf("resolution:         %i usec\n", gbls->args.resolution_us);
	printf("min timeout:        %i usec\n", gbls->args.min_us);
	printf("max timeout:        %i usec\n", gbls->args.max_us);
	printf("period:             %i usec\n", gbls->args.period_us);
	printf("timeouts:           %i\n", gbls->args.tmo_count);

	/*
	 * Create pool for timeouts
	 */
	odp_pool_param_init(&params);
	params.tmo.num   = NUM_TMOS;
	params.type      = ODP_POOL_TIMEOUT;

	gbls->pool = odp_pool_create("msg_pool", &params);

	if (gbls->pool == ODP_POOL_INVALID) {
		err = 1;
		EXAMPLE_ERR("Pool create failed.\n");
		goto err;
	}

	tparams.res_ns = gbls->args.resolution_us * ODP_TIME_USEC_IN_NS;
	tparams.min_tmo = gbls->args.min_us * ODP_TIME_USEC_IN_NS;
	tparams.max_tmo = gbls->args.max_us * ODP_TIME_USEC_IN_NS;
	tparams.num_timers = num_workers; /* One timer per worker */
	tparams.priv = 0; /* Shared */
	tparams.clk_src = ODP_CLOCK_CPU;
	gbls->tp = odp_timer_pool_create("timer_pool", &tparams);
	if (gbls->tp == ODP_TIMER_POOL_INVALID) {
		err = 1;
		EXAMPLE_ERR("Timer pool create failed.\n");
		goto err;
	}
	odp_timer_pool_start();

	odp_shm_print_all();
	(void)odp_timer_pool_info(gbls->tp, &tpinfo);
	printf("Timer pool\n");
	printf("----------\n");
	printf("  name: %s\n", tpinfo.name);
	printf("  resolution: %"PRIu64" ns\n", tpinfo.param.res_ns);
	printf("  min tmo: %"PRIu64" ticks\n", tpinfo.param.min_tmo);
	printf("  max tmo: %"PRIu64" ticks\n", tpinfo.param.max_tmo);
	printf("\n");

	/*
	 * Create a queue for timer test
	 */
	odp_queue_param_init(&param);
	param.type        = ODP_QUEUE_TYPE_SCHED;
	param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	param.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	param.sched.group = ODP_SCHED_GROUP_ALL;

	queue = odp_queue_create("timer_queue", &param);

	if (queue == ODP_QUEUE_INVALID) {
		err = 1;
		EXAMPLE_ERR("Timer queue create failed.\n");
		goto err;
	}

	printf("CPU freq %"PRIu64" Hz\n", odp_cpu_hz_max());
	printf("Timer ticks vs nanoseconds:\n");
	ns = 0;
	tick = odp_timer_ns_to_tick(gbls->tp, ns);

	printf("  %12" PRIu64 " ns      ->  %12" PRIu64 " ticks\n", ns, tick);
	printf("  %12" PRIu64 " ticks   ->  %12" PRIu64 " ns\n", tick,
	       odp_timer_tick_to_ns(gbls->tp, tick));

	for (ns = 1; ns <= 100 * ODP_TIME_SEC_IN_NS; ns *= 10) {
		tick = odp_timer_ns_to_tick(gbls->tp, ns);

		printf("  %12" PRIu64 " ns      ->  %12" PRIu64 " ticks\n", ns,
		       tick);
		printf("  %12" PRIu64 " ticks   ->  %12" PRIu64 " ns\n", tick,
		       odp_timer_tick_to_ns(gbls->tp, tick));
	}

	printf("\n");

	gbls->num_workers = num_workers;

	/* Initialize number of timeouts to receive */
	odp_atomic_init_u32(&gbls->remain, gbls->args.tmo_count * num_workers);

	/* Barrier to sync test case execution */
	odp_barrier_init(&gbls->test_barrier, num_workers);

	/* Create and launch worker threads */
	odph_linux_pthread_create(thread_tbl, &cpumask,
				  run_thread, gbls, ODP_THREAD_WORKER);

	/* Wait for worker threads to exit */
	odph_linux_pthread_join(thread_tbl, num_workers);

	/* free resources */
	if (odp_queue_destroy(queue))
		err = 1;

err:

	if (gbls != NULL && gbls->tp != ODP_TIMER_POOL_INVALID)
		odp_timer_pool_destroy(gbls->tp);

	if (gbls != NULL && gbls->pool != ODP_POOL_INVALID)
		if (odp_pool_destroy(gbls->pool))
			err = 1;

	if (shm != ODP_SHM_INVALID)
		if (odp_shm_free(shm))
			err = 1;

	if (odp_term_local())
		err = 1;
err_local:
	if (odp_term_global())
		err = 1;
err_global:
	if (err) {
		printf("Err: ODP timer test failed\n\n");
		return -1;
	}

	printf("ODP timer test complete\n\n");
	return 0;
}
Example No. 15
void scheduler_test_queue_destroy(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 1;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_destroy_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = ODP_SCHED_GROUP_ALL;

		queue = odp_queue_create("sched_destroy_queue", &qp);

		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
			odp_buffer_free(buf);

		ev = odp_schedule(&from, ODP_SCHED_WAIT);

		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);

		CU_ASSERT_FATAL(from == queue);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC);

		odp_buffer_free(buf);
		odp_schedule_release_ordered();

		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
	}

	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Example No. 16
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue_grp1, queue_grp2, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < num_sync; i++) {
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp1, ev) == 0)))
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp2, ev) == 0)))
			odp_buffer_free(buf);

		/* Scheduler should give us the event from Group 2 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp2);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC2);

		odp_buffer_free(buf);

		/* Scheduler should not return anything now since we're
		 * not in Group 1 and Queue 2 is empty.  Do this several
		 * times to confirm.
		 */

		for (j = 0; j < 10; j++) {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
		}

		/* Now join group 1 and verify we can get the event */
		rc = odp_schedule_group_join(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* Tell scheduler we're about to request an event.
		 * Not needed, but a convenient place to test this API.
		 */
		odp_schedule_prefetch(1);

		/* Now get the event from Queue 1 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp1);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC1);

		odp_buffer_free(buf);

		/* Leave group 1 for next pass */
		rc = odp_schedule_group_leave(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* We must release order before destroying queues */
		odp_schedule_release_ordered();

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Example No. 17
static odp_cos_t build_cos_w_queue(const char *name)
{
	odp_cos_t cos;
	odp_queue_t queue_cos;
	odp_queue_param_t qparam;

	cos = odp_cos_create(name);
	if (cos == ODP_COS_INVALID) {
		OFP_ERR("Failed to create COS");
		return ODP_COS_INVALID;
	}

	memset(&qparam, 0, sizeof(odp_queue_param_t));
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_ALL;

	queue_cos = odp_queue_create(name,
				ODP_QUEUE_TYPE_SCHED,
				&qparam);
	if (queue_cos == ODP_QUEUE_INVALID) {
		OFP_ERR("Failed to create queue\n");
		odp_cos_destroy(cos);
		return ODP_COS_INVALID;
	}

#if ODP_VERSION < 104
	if (odp_cos_set_queue(cos, queue_cos) < 0) {
#else
	if (odp_cos_queue_set(cos, queue_cos) < 0) {
#endif
		OFP_ERR("Failed to set queue on COS");
		odp_cos_destroy(cos);
		odp_queue_destroy(queue_cos);
		return ODP_COS_INVALID;
	}

	return cos;
}

static odp_cos_t build_cos_set_queue(const char *name, odp_queue_t queue_cos)
{
	odp_cos_t cos;

	cos = odp_cos_create(name);
	if (cos == ODP_COS_INVALID) {
		OFP_ERR("Failed to create COS");
		return ODP_COS_INVALID;
	}

#if ODP_VERSION < 104
	if (odp_cos_set_queue(cos, queue_cos) < 0) {
#else
	if (odp_cos_queue_set(cos, queue_cos) < 0) {
#endif
		OFP_ERR("Failed to set queue on COS");
		odp_cos_destroy(cos);
		return ODP_COS_INVALID;
	}

	return cos;
}

static odp_pmr_t build_udp_prm(void)
{
	uint32_t pmr_udp_val = TEST_PORT;
	uint32_t pmr_udp_mask = 0xffffffff;

#if ODP_VERSION < 104
	return odp_pmr_create(ODP_PMR_UDP_DPORT,
			      &pmr_udp_val,
			      &pmr_udp_mask,
			      1);
#else
	const odp_pmr_match_t match = {
		.term = ODP_PMR_UDP_DPORT,
		.val = &pmr_udp_val,
		.mask = &pmr_udp_mask,
		.val_sz = 1
	};

	return odp_pmr_create(&match);
#endif
}

static void app_processing(void)
{
	int fd_rcv = -1;
	char buf[1500];
	int len = sizeof(buf);

	do {
		struct ofp_sockaddr_in addr = {0};

		fd_rcv = ofp_socket(OFP_AF_INET, OFP_SOCK_DGRAM,
				OFP_IPPROTO_UDP);
		if (fd_rcv == -1) {
			OFP_ERR("Failed to create RCV socket (errno = %d)\n",
				ofp_errno);
			break;
		}

		addr.sin_len = sizeof(struct ofp_sockaddr_in);
		addr.sin_family = OFP_AF_INET;
		addr.sin_port = odp_cpu_to_be_16(TEST_PORT);
		addr.sin_addr.s_addr = IP4(192, 168, 100, 1);

		if (ofp_bind(fd_rcv, (const struct ofp_sockaddr *)&addr,
			sizeof(struct ofp_sockaddr_in)) == -1) {
			OFP_ERR("Failed to bind socket (errno = %d)\n",
				ofp_errno);
			break;
		}

		len = ofp_recv(fd_rcv, buf, len, 0);
		if (len == -1)
			OFP_ERR("Failed to receive data (errno = %d)\n",
				ofp_errno);
		else
			OFP_INFO("Data received: length = %d.\n", len);

	} while (0);

	if (fd_rcv != -1) {
		ofp_close(fd_rcv);
		fd_rcv = -1;
	}
	OFP_INFO("Test ended.\n");
}
Example No. 18
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(shm);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Example No. 19
File: queue.c Project: nmorey/odp
void queue_test_param(void)
{
	odp_queue_t queue;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	odp_queue_param_t qparams;
	odp_buffer_t enbuf;

	/* Schedule type queue */
	odp_queue_param_init(&qparams);
	qparams.type       = ODP_QUEUE_TYPE_SCHED;
	qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(odp_queue_to_u64(queue) !=
		  odp_queue_to_u64(ODP_QUEUE_INVALID));
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_SCHED    == odp_queue_type(queue));
	CU_ASSERT(ODP_SCHED_PRIO_LOWEST   == odp_queue_sched_prio(queue));
	CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
	CU_ASSERT(ODP_SCHED_GROUP_WORKER  == odp_queue_sched_group(queue));

	CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
					     sizeof(queue_context)));

	CU_ASSERT(&queue_context == odp_queue_context(queue));
	CU_ASSERT(odp_queue_destroy(queue) == 0);

	/* Plain type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_PLAIN;
	qparams.context     = &queue_context;
	qparams.context_len = sizeof(queue_context);

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
	CU_ASSERT(&queue_context == odp_queue_context(queue));

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev  = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT(ev == odp_queue_deq(queue));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret  = odp_queue_deq_multi(queue, pev_tmp,
					       MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT(enev[i] == deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue) == 0);
}
Example No. 20
File: timer.c Project: nmorey/odp
void timer_test_odp_timer_cancel(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_timer_pool_param_t tparam;
	odp_timer_pool_t tp;
	odp_queue_t queue;
	odp_timer_t tim;
	odp_event_t ev;
	odp_timeout_t tmo;
	odp_timer_set_t rc;
	uint64_t tick;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = 1;

	pool = odp_pool_create("tmo_pool_for_cancel", &params);

	if (pool == ODP_POOL_INVALID)
		CU_FAIL_FATAL("Timeout pool create failed");

	tparam.res_ns     = 100 * ODP_TIME_MSEC_IN_NS;
	tparam.min_tmo    = 1   * ODP_TIME_SEC_IN_NS;
	tparam.max_tmo    = 10  * ODP_TIME_SEC_IN_NS;
	tparam.num_timers = 1;
	tparam.priv       = 0;
	tparam.clk_src    = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("timer_pool0", &tparam);
	if (tp == ODP_TIMER_POOL_INVALID)
		CU_FAIL_FATAL("Timer pool create failed");

	/* Start all created timer pools */
	odp_timer_pool_start();

	queue = odp_queue_create("timer_queue", NULL);
	if (queue == ODP_QUEUE_INVALID)
		CU_FAIL_FATAL("Queue create failed");

	#define USER_PTR ((void *)0xdead)
	tim = odp_timer_alloc(tp, queue, USER_PTR);
	if (tim == ODP_TIMER_INVALID)
		CU_FAIL_FATAL("Failed to allocate timer");
	LOG_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));

	ev = odp_timeout_to_event(odp_timeout_alloc(pool));
	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Failed to allocate timeout");

	tick = odp_timer_ns_to_tick(tp, 2 * ODP_TIME_SEC_IN_NS);

	rc = odp_timer_set_rel(tim, tick, &ev);
	if (rc != ODP_TIMER_SUCCESS)
		CU_FAIL_FATAL("Failed to set timer (relative time)");

	ev = ODP_EVENT_INVALID;
	if (odp_timer_cancel(tim, &ev) != 0)
		CU_FAIL_FATAL("Failed to cancel timer (relative time)");

	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Cancel did not return event");

	tmo = odp_timeout_from_event(ev);
	if (tmo == ODP_TIMEOUT_INVALID)
		CU_FAIL_FATAL("Cancel did not return timeout");
	LOG_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));

	if (odp_timeout_timer(tmo) != tim)
		CU_FAIL("Cancel invalid tmo.timer");

	if (odp_timeout_user_ptr(tmo) != USER_PTR)
		CU_FAIL("Cancel invalid tmo.user_ptr");

	odp_timeout_free(tmo);

	ev = odp_timer_free(tim);
	if (ev != ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Free returned event");

	odp_timer_pool_destroy(tp);

	if (odp_queue_destroy(queue) != 0)
		CU_FAIL_FATAL("Failed to destroy queue");

	if (odp_pool_destroy(pool) != 0)
		CU_FAIL_FATAL("Failed to destroy pool");
}
Example No. 21
void queue_test_sunnydays(void)
{
	odp_queue_t queue_creat_id, queue_id;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	void *prtn = NULL;
	odp_queue_param_t qparams;

	odp_queue_param_init(&qparams);
	qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync = ODP_SCHED_SYNC_NONE;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue_creat_id = odp_queue_create("test_queue",
					  ODP_QUEUE_TYPE_POLL, &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);

	CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
			odp_queue_type(queue_creat_id));

	queue_id = odp_queue_lookup("test_queue");
	CU_ASSERT_EQUAL(queue_creat_id, queue_id);

	CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
			odp_queue_sched_group(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST, odp_queue_sched_prio(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE, odp_queue_sched_type(queue_id));

	CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));

	prtn = odp_queue_context(queue_id);
	CU_ASSERT(&queue_contest == (int *)prtn);

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev  = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue_id, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT_EQUAL(ev, odp_queue_deq(queue_id));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue_id, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret  = odp_queue_deq_multi(queue_id, pev_tmp,
					       MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT_EQUAL(enev[i], deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue_id) == 0);
}
Example No. 22
static int loopback_close(pktio_entry_t *pktio_entry)
{
	return odp_queue_destroy(pktio_entry->s.pkt_loop.loopq);
}
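Taken together, the examples above follow the same lifecycle: create a queue (or look up one created elsewhere), make sure it is empty, then destroy it and check the return value. A minimal, self-contained sketch of that lifecycle for a plain queue, assuming ODP is already initialized on the calling thread and using the two-argument odp_queue_create() variant seen in the newer examples:

#include <odp_api.h>

/* Sketch of the basic create/lookup/destroy lifecycle demonstrated above. */
static int queue_lifecycle_example(void)
{
	odp_queue_param_t qparam;
	odp_queue_t queue;

	odp_queue_param_init(&qparam);
	qparam.type = ODP_QUEUE_TYPE_PLAIN;

	queue = odp_queue_create("example_queue", &qparam);
	if (queue == ODP_QUEUE_INVALID)
		return -1;

	/* The same handle can be recovered by name */
	if (odp_queue_lookup("example_queue") != queue) {
		odp_queue_destroy(queue);
		return -1;
	}

	/* The queue must be empty; odp_queue_destroy() returns 0 on success */
	return odp_queue_destroy(queue);
}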