Example #1
void test_atomic_inc_u32(void)
{
	int i;

	for (i = 0; i < CNT; i++)
		odp_atomic_inc_u32(&a32u);
}
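For context, the loop above relies on an atomic counter a32u and an iteration count CNT that the test defines elsewhere. Below is a minimal self-contained sketch of the same pattern with the initialization and the final check filled in; the wrapper name, the CNT value and the odp_api.h include are illustrative additions, only the odp_atomic_* calls are the actual ODP API.

#include <stdio.h>
#include <odp_api.h>

#define CNT 1000			/* illustrative value */

static odp_atomic_u32_t a32u;		/* shared counter under test */

static void check_atomic_inc_u32(void)	/* illustrative wrapper */
{
	int i;

	/* The counter must be initialized before the first atomic op */
	odp_atomic_init_u32(&a32u, 0);

	for (i = 0; i < CNT; i++)
		odp_atomic_inc_u32(&a32u);	/* atomic a32u++ */

	/* Single-threaded here, so the final value is exactly CNT; with N
	 * threads running the loop concurrently it would be N * CNT. */
	if (odp_atomic_load_u32(&a32u) != CNT)
		printf("unexpected counter value\n");
}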
Example #2
File: timer.c Project: nmorey/odp
/* @private Handle a received (timeout) event */
static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
{
	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
	if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
		/* Not a timeout event */
		CU_FAIL("Unexpected event type received");
		return;
	}
	/* Read the metadata from the timeout */
	odp_timeout_t tmo = odp_timeout_from_event(ev);
	odp_timer_t tim = odp_timeout_timer(tmo);
	uint64_t tick = odp_timeout_tick(tmo);
	struct test_timer *ttp = odp_timeout_user_ptr(tmo);

	if (tim == ODP_TIMER_INVALID)
		CU_FAIL("odp_timeout_timer() invalid timer");
	if (!ttp)
		CU_FAIL("odp_timeout_user_ptr() null user ptr");

	if (ttp && ttp->ev2 != ev)
		CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
	if (ttp && ttp->tim != tim)
		CU_FAIL("odp_timeout_timer() wrong timer");
	if (stale) {
		if (odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (fresh) for stale timeout");
		/* Stale timeout => local timer must have invalid tick */
		if (ttp && ttp->tick != TICK_INVALID)
			CU_FAIL("Stale timeout for active timer");
	} else {
		if (!odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (stale) for fresh timeout");
		/* Fresh timeout => local timer must have matching tick */
		if (ttp && ttp->tick != tick) {
			LOG_DBG("Wrong tick: expected %" PRIu64
				" actual %" PRIu64 "\n",
				ttp->tick, tick);
			CU_FAIL("odp_timeout_tick() wrong tick");
		}
		/* Check that timeout was delivered 'timely' */
		if (tick > odp_timer_current_tick(tp))
			CU_FAIL("Timeout delivered early");
		if (tick < prev_tick) {
			LOG_DBG("Too late tick: %" PRIu64
				" prev_tick %" PRIu64"\n",
				tick, prev_tick);
			/* We don't report late timeouts using CU_FAIL */
			odp_atomic_inc_u32(&ndelivtoolate);
		}
	}

	if (ttp) {
		/* Internal error */
		CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
		ttp->ev = ev;
	}
}
Example #3
void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
{
	/* The cast goes through intptr_t so the u32 atomic API can be
	 * applied to the lock's counter field. */
	odp_atomic_inc_u32((odp_atomic_u32_t *)(intptr_t)&rwlock->cnt);
}
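The increment releases the write lock because the implementation apparently keeps the counter at (uint32_t)-1 (all ones) while the write lock is held, so the wrapping increment returns it to 0, i.e. unlocked. A rough sketch of the matching lock side under that assumption; this is not the actual ODP implementation, just an illustration of the convention.

/* Sketch only: spin until the counter is 0 (no readers, no writer),
 * then claim the write lock by setting it to (uint32_t)-1. */
static void write_lock_sketch(odp_rwlock_t *rwlock)
{
	odp_atomic_u32_t *cnt =
		(odp_atomic_u32_t *)(intptr_t)&rwlock->cnt;
	uint32_t expected;

	do {
		expected = 0;	/* reset; a failed CAS overwrites it */
	} while (!odp_atomic_cas_u32(cnt, &expected, (uint32_t)-1));
}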
Example #4
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Example #5
static int traffic_generator(uint32_t pkts_to_send)
{
	odp_pool_param_t pool_params;
	odp_tm_queue_t   tm_queue;
	odp_packet_t     pkt;
	odp_bool_t       tm_is_idle;
	uint32_t         svc_class, queue_num, pkt_len, pkts_into_tm;
	uint32_t         pkts_from_tm, pkt_cnt, millisecs, odp_tm_enq_errs;
	int              rc;

	memset(&pool_params, 0, sizeof(odp_pool_param_t));
	pool_params.type           = ODP_POOL_PACKET;
	pool_params.pkt.num        = pkts_to_send + 10;
	pool_params.pkt.len        = 1600;
	pool_params.pkt.seg_len    = 0;
	pool_params.pkt.uarea_size = 0;

	odp_pool        = odp_pool_create("MyPktPool", &pool_params);
	odp_tm_enq_errs = 0;

	pkt_cnt = 0;
	while (pkt_cnt < pkts_to_send) {
		svc_class = pkt_service_class();
		queue_num = random_16() & (TM_QUEUES_PER_CLASS - 1);
		tm_queue  = queue_num_tbls[svc_class][queue_num + 1];
		pkt_len   = ((uint32_t)((random_8() & 0x7F) + 2)) * 32;
		pkt_len   = MIN(pkt_len, 1500);
		pkt       = make_odp_packet(pkt_len);

		pkt_cnt++;
		rc = odp_tm_enq(tm_queue, pkt);
		if (rc < 0) {
			odp_tm_enq_errs++;
			continue;
		}

		odp_atomic_inc_u32(&atomic_pkts_into_tm);
	}

	printf("%s odp_tm_enq_errs=%u\n", __func__, odp_tm_enq_errs);

	/* Wait until the main traffic mgmt worker thread is idle and has no
	 * outstanding events (i.e. no timers, empty work queue, etc), but
	 * not longer than 60 seconds.
	 */
	for (millisecs = 0; millisecs < 600000; millisecs++) {
		usleep(100);
		tm_is_idle = odp_tm_is_idle(odp_tm_test);
		if (tm_is_idle)
			break;
	}

	if (!tm_is_idle)
		printf("%s WARNING stopped waiting for the TM system "
		       "to be IDLE!\n", __func__);

	/* Wait for up to 2 seconds for pkts_from_tm to match pkts_into_tm. */
	for (millisecs = 0; millisecs < 2000; millisecs++) {
		usleep(1000);
		pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
		pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
		if (pkts_into_tm <= pkts_from_tm)
			break;
	}

	return 0;
}
Example #6
void tester_egress_fcn(odp_packet_t odp_pkt ODP_UNUSED)
{
	odp_atomic_inc_u32(&atomic_pkts_from_tm);
}
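tester_egress_fcn only counts packets as they leave the TM system. For context, here is a sketch of how such a callback is typically registered when the TM system is created; the egress_kind/egress_fcn fields and the init/create calls follow the ODP traffic manager API, but the helper name and the other parameters are illustrative, not taken from this test.

static odp_tm_t create_tm_with_egress_fcn(void)	/* illustrative helper */
{
	odp_tm_requirements_t req;
	odp_tm_egress_t       egress;

	odp_tm_requirements_init(&req);
	odp_tm_egress_init(&egress);

	/* Deliver transmitted packets to a callback instead of a pktio
	 * interface, so the test can count them via the atomic above. */
	egress.egress_kind = ODP_TM_EGRESS_FN;
	egress.egress_fcn  = tester_egress_fcn;

	return odp_tm_create("tm_test", &req, &egress);
}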