Example #1
int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
{
	/* We read 'next_ticket' and 'cur_ticket' non-atomically, which
	 * should not be a problem as they are not independent of each other.
	 * 'cur_ticket' is never greater than 'next_ticket', so if we see an
	 * older value of 'cur_ticket', this only means the lock will
	 * look busy and trylock will fail. */
	uint32_t next = odp_atomic_load_u32(&tklock->next_ticket);
	uint32_t cur = odp_atomic_load_u32(&tklock->cur_ticket);
	/* First check that lock is available and possible to take without
	 * spinning. */
	if (next == cur) {
		/* Then try to take the lock by incrementing 'next_ticket'
		 * but only if it still has the original value which is
		 * equal to 'cur_ticket'.
		 * We don't have to include 'cur_ticket' in the comparison
		 * because it cannot be larger than 'next_ticket' (only
		 * smaller if the lock is busy).
		 * If the CAS fails, another thread took a ticket in the
		 * meantime, which means the lock is no longer
		 * available. */
		if (odp_atomic_cas_acq_u32(&tklock->next_ticket,
					   &next, next + 1))
			return 1;
	}
	return 0;
}
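
For reference, a minimal sketch of the blocking counterpart that this trylock mirrors. It assumes the standard ticket-lock scheme (fetch-and-increment a ticket, then spin with an acquire load) and is not taken from this listing:

void odp_ticketlock_lock(odp_ticketlock_t *tklock)
{
	/* Take the next free ticket; a relaxed RMW is sufficient here */
	uint32_t ticket = odp_atomic_fetch_inc_u32(&tklock->next_ticket);

	/* Spin until our ticket is served. The load-acquire pairs with
	 * the store-release in odp_ticketlock_unlock() (Example #7). */
	while (ticket != odp_atomic_load_acq_u32(&tklock->cur_ticket))
		odp_cpu_pause();
}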
Example #2
int odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
{
	/* Compare 'cur_ticket' with 'next_ticket'. Ideally we should read
	 * both variables atomically, but the information can become stale
	 * immediately anyway, so the function can only be used reliably in
	 * a quiescent system where non-atomic loads do not pose a
	 * problem. */
	return odp_atomic_load_u32(&ticketlock->cur_ticket) !=
		odp_atomic_load_u32(&ticketlock->next_ticket);
}
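
Since the result can become stale immediately, odp_ticketlock_is_locked() is mainly useful for debug checks in a quiescent system. A hedged usage sketch (my_lock is a hypothetical, already initialized ticketlock):

	/* Teardown-time sanity check; assumes no thread can take the
	 * lock anymore */
	if (odp_ticketlock_is_locked(&my_lock))
		printf("Warning: lock still held at teardown\n");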
Example #3
int main(int argc, char *argv[])
{
	struct sigaction signal_action;
	struct rlimit    rlimit;
	uint32_t pkts_into_tm, pkts_from_tm;
	odp_instance_t instance;
	int rc;

	memset(&signal_action, 0, sizeof(signal_action));
	signal_action.sa_handler = signal_handler;
	sigfillset(&signal_action.sa_mask);
	sigaction(SIGILL,  &signal_action, NULL);
	sigaction(SIGFPE,  &signal_action, NULL);
	sigaction(SIGSEGV, &signal_action, NULL);
	sigaction(SIGTERM, &signal_action, NULL);
	sigaction(SIGBUS,  &signal_action, NULL);

	getrlimit(RLIMIT_CORE, &rlimit);
	rlimit.rlim_cur = rlimit.rlim_max;
	setrlimit(RLIMIT_CORE, &rlimit);

	rc = odp_init_global(&instance, &ODP_INIT_PARAMS, NULL);
	if (rc != 0) {
		printf("Error: odp_init_global() failed, rc = %d\n", rc);
		abort();
	}
	rc = odp_init_local(instance, ODP_THREAD_CONTROL);
	if (rc != 0) {
		printf("Error: odp_init_local() failed, rc = %d\n", rc);
		abort();
	}

	if (process_cmd_line_options(argc, argv) < 0)
		return -1;

	create_and_config_tm();

	odp_random_data(random_buf, RANDOM_BUF_LEN, 1);
	next_rand_byte = 0;

	odp_atomic_init_u32(&atomic_pkts_into_tm, 0);
	odp_atomic_init_u32(&atomic_pkts_from_tm, 0);

	traffic_generator(g_num_pkts_to_send);

	pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
	pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
	printf("pkts_into_tm=%u pkts_from_tm=%u\n", pkts_into_tm, pkts_from_tm);

	odp_tm_stats_print(odp_tm_test);
	return 0;
}
Example #4
int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
{
	odp_buffer_hdr_t *hdr;
	int len = 0;

	if (!odp_buffer_is_valid(buf)) {
		ODP_PRINT("Buffer is not valid.\n");
		return len;
	}

	hdr = odp_buf_to_hdr(buf);

	len += snprintf(&str[len], n-len,
			"Buffer\n");
	len += snprintf(&str[len], n-len,
			"  pool         %" PRIu64 "\n",
			odp_pool_to_u64(hdr->pool_hdl));
	len += snprintf(&str[len], n-len,
			"  addr         %p\n",        hdr->addr);
	len += snprintf(&str[len], n-len,
			"  size         %" PRIu32 "\n",        hdr->size);
	len += snprintf(&str[len], n-len,
			"  ref_count    %" PRIu32 "\n",
			odp_atomic_load_u32(&hdr->ref_count));
	len += snprintf(&str[len], n-len,
			"  type         %i\n",        hdr->type);

	return len;
}
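
A short usage sketch for the formatter above; buf is assumed to be a valid odp_buffer_t obtained elsewhere (e.g. from odp_buffer_alloc()):

	char dump[512];

	(void)odp_buffer_snprint(dump, sizeof(dump), buf);
	printf("%s", dump);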
Example #5
int odp_pool_destroy(odp_pool_t pool_hdl)
{
	uint32_t pool_id = pool_handle_to_index(pool_hdl);
	pool_entry_t *pool = get_pool_entry(pool_id);
	int i;

	if (pool == NULL)
		return -1;

	POOL_LOCK(&pool->s.lock);

	/* The call fails if the pool is not allocated or is predefined */
	if (pool->s.pool_shm == ODP_SHM_INVALID ||
	    pool->s.flags.predefined) {
		POOL_UNLOCK(&pool->s.lock);
		return -1;
	}

	/* Make sure local caches are empty */
	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
		flush_cache(&pool->s.local_cache[i], &pool->s);

	/* Call fails if pool has allocated buffers */
	if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.buf_num) {
		POOL_UNLOCK(&pool->s.lock);
		return -1;
	}

	odp_shm_free(pool->s.pool_shm);
	pool->s.pool_shm = ODP_SHM_INVALID;
	POOL_UNLOCK(&pool->s.lock);

	return 0;
}
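
A hedged usage sketch of the destroy path above, using the two-argument odp_pool_create() seen elsewhere in this listing; the pool name and sizes are illustrative, and all buffers must be freed back to the pool for the destroy to succeed:

	odp_pool_param_t params;
	odp_pool_t pool;

	memset(&params, 0, sizeof(params));
	params.type     = ODP_POOL_BUFFER;
	params.buf.size = 256;
	params.buf.num  = 64;

	pool = odp_pool_create("example_pool", &params);
	if (pool != ODP_POOL_INVALID && odp_pool_destroy(pool) != 0)
		printf("odp_pool_destroy() failed\n");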
Example #6
int test_atomic_validate(void)
{
	if (odp_atomic_load_u32(&a32u) != U32_INIT_VAL) {
		LOG_ERR("Atomic u32 usual functions failed\n");
		return -1;
	}

	if (odp_atomic_load_u64(&a64u) != U64_INIT_VAL) {
		LOG_ERR("Atomic u64 usual functions failed\n");
		return -1;
	}

	return 0;
}
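
The validation above only checks final values, so it presumes the test ran paired operations that cancel out. A sketch of the assumed setup and exercise (a32u, a64u and the *_INIT_VAL constants come from the test harness):

	odp_atomic_init_u32(&a32u, U32_INIT_VAL);
	odp_atomic_init_u64(&a64u, U64_INIT_VAL);

	/* Paired operations that must cancel out before validation */
	odp_atomic_inc_u32(&a32u);
	odp_atomic_dec_u32(&a32u);
	odp_atomic_add_u64(&a64u, 32);
	odp_atomic_sub_u64(&a64u, 32);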
Example #7
void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
{
	/* Release the lock by incrementing 'cur_ticket'. As we are the
	 * lock owner and thus the only thread that is allowed to write
	 * 'cur_ticket', we don't need an (expensive) atomic RMW
	 * operation. Instead, a relaxed load of the current value is
	 * followed by a store-release of the incremented value. */
	uint32_t cur = odp_atomic_load_u32(&ticketlock->cur_ticket);

	odp_atomic_store_rel_u32(&ticketlock->cur_ticket, cur + 1);

#if defined __OCTEON__
	odp_sync_stores(); /* SYNCW to flush write buffer */
#endif
}
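
Note that this store-release pairs with the acquire operations used when taking the lock (the CAS-acquire in Example #1), so all writes made inside the critical section are visible to the next lock owner.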
Example #8
static void custom_barrier_wait(custom_barrier_t *custom_barrier)
{
	volatile_u64_t counter = 1;
	uint32_t delay_cnt, wait_cnt;

	odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);

	wait_cnt = 1;
	while (wait_cnt != 0) {
		for (delay_cnt = 1; delay_cnt <= BARRIER_DELAY; delay_cnt++)
			counter++;

		wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
	}
}
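
For this barrier to work, wait_cnt must first be initialized to the number of participating threads. A minimal sketch of the assumed init (the struct layout is inferred from the usage above):

static void custom_barrier_init(custom_barrier_t *custom_barrier,
				uint32_t num_threads)
{
	odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
}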
Example #9
static void *run_thread_rx(void *arg)
{
	test_globals_t *globals;
	int thr_id, batch_len;
	odp_queue_t pollq = ODP_QUEUE_INVALID;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));

	pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];

	if (gbl_args->args.schedule == 0) {
		pollq = odp_pktio_inq_getdef(globals->pktio_rx);
		if (pollq == ODP_QUEUE_INVALID)
			LOG_ABORT("Invalid input queue.\n");
	}

	odp_barrier_wait(&globals->rx_barrier);
	while (1) {
		odp_event_t ev[BATCH_LEN_MAX];
		int i, n_ev;

		n_ev = receive_packets(pollq, ev, batch_len);

		for (i = 0; i < n_ev; ++i) {
			if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
				odp_packet_t pkt = odp_packet_from_event(ev[i]);
				if (pktio_pkt_has_magic(pkt))
					stats->s.rx_cnt++;
				else
					stats->s.rx_ignore++;
			}
			odp_buffer_free(odp_buffer_from_event(ev[i]));
		}
		if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
			break;
	}

	return NULL;
}
Example #10
void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
{
	uint32_t cnt;
	int  is_locked = 0;

	while (is_locked == 0) {
		cnt = odp_atomic_load_u32(&rwlock->cnt);
		/* A writer holds the lock (cnt < 0); wait until released */
		if ((int32_t)cnt < 0) {
			odp_cpu_pause();
			continue;
		}
		is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
						   &cnt, cnt + 1);
	}
}
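
The matching release path, sketched under the assumption that a reader simply decrements the counter with release semantics (mirroring the CAS-acquire above):

void odp_rwlock_read_unlock(odp_rwlock_t *rwlock)
{
	/* Release: one less reader */
	odp_atomic_sub_rel_u32(&rwlock->cnt, 1);
}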
Example #11
void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
{
	uint32_t cnt;
	int is_locked = 0;

	while (is_locked == 0) {
		uint32_t zero = 0;

		cnt = odp_atomic_load_u32(&rwlock->cnt);
		/* Lock is held by readers or a writer; wait until free */
		if (cnt != 0) {
			odp_cpu_pause();
			continue;
		}
		is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
						   &zero, (uint32_t)-1);
	}
}
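
And the corresponding writer release, assuming the same counter encoding (readers > 0, writer = (uint32_t)-1, free = 0):

void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
{
	/* Release: mark the lock free for readers and writers */
	odp_atomic_store_rel_u32(&rwlock->cnt, 0);
}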
Example #12
void *pp_thread(void *arg)
{
	ALLOW_UNUSED_LOCAL(arg);
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	while (odp_atomic_load_u32(&still_running)) {
		odp_event_t event;
		odp_queue_t source_queue;

		event = odp_schedule(&source_queue, ODP_SCHED_WAIT);

		if (odp_event_type(event) != ODP_EVENT_TIMEOUT) {
			OFP_ERR("Unexpected event type %d",
				odp_event_type(event));
			continue;
		}

		ofp_timer_handle(event);
	}
	return NULL;
}
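
The loop above runs until still_running is cleared. A sketch of the assumed shutdown trigger, executed by a control thread or signal handler:

	odp_atomic_store_u32(&still_running, 0);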
Example #13
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG("  [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us*ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG("  [%i] period %"PRIu64" ticks,  %"PRIu64" ns\n", thr,
		    period, period_ns);

	EXAMPLE_DBG("  [%i] current tick %"PRIu64"\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo =
			odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}
		odp_timeout_t tmo = odp_timeout_from_event(ev);
		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}
		EXAMPLE_DBG("  [%i] timeout, tick %"PRIu64"\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");
	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
Example #14
void *pp_thread(void *arg)
{
	ALLOW_UNUSED_LOCAL(arg);

#if ODP_VERSION >= 102
	if (odp_init_local(ODP_THREAD_WORKER)) {
#else
	if (odp_init_local()) {
#endif
		OFP_ERR("odp_init_local failed");
		return NULL;
	}
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	while (odp_atomic_load_u32(&still_running)) {
		odp_event_t event;
		odp_queue_t source_queue;

		event = odp_schedule(&source_queue, ODP_SCHED_WAIT);

		if (odp_event_type(event) != ODP_EVENT_TIMEOUT) {
			OFP_ERR("Unexpected event type %d",
				odp_event_type(event));
			continue;
		}

		ofp_timer_handle(event);
	}
	return NULL;
}

static void test_arp(void)
{
	struct ofp_ifnet mock_ifnet;
	struct in_addr ip;
	uint8_t mac[OFP_ETHER_ADDR_LEN] = { 0x00, 0xFF, 0x00, 0x00, 0xFF, 0x00, };

	/* The buffer passed into ofp_ipv4_lookup_mac() must be 8 bytes since
	 * a 64-bit operation is currently being used to copy a MAC address.
	 */
	uint8_t mac_result[OFP_ETHER_ADDR_LEN + 2];

	CU_ASSERT(0 == ofp_init_local());

	memset(&mock_ifnet, 0, sizeof(mock_ifnet));
	CU_ASSERT(0 != inet_aton("1.1.1.1", &ip));

	/* Test entry insert, lookup, and remove. */
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));

	memset(mac_result, 0xFF, OFP_ETHER_ADDR_LEN);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	CU_ASSERT(0 == memcmp(mac, mac_result, OFP_ETHER_ADDR_LEN));

	CU_ASSERT(0 == ofp_arp_ipv4_remove(ip.s_addr, &mock_ifnet));
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	/* Test entry is aged out. */
	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));
	OFP_INFO("Inserted ARP entry");
	sleep(ARP_AGE_INTERVAL + ARP_ENTRY_TIMEOUT);
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));

	/* Test entry is aged out after a few hits. */
	CU_ASSERT(0 == ofp_arp_ipv4_insert(ip.s_addr, mac, &mock_ifnet));
	OFP_INFO("Inserted ARP entry");
	sleep(ARP_AGE_INTERVAL);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	sleep(ARP_AGE_INTERVAL);
	CU_ASSERT(0 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
	sleep(ARP_AGE_INTERVAL + ARP_ENTRY_TIMEOUT);
	CU_ASSERT(-1 == ofp_ipv4_lookup_mac(ip.s_addr, mac_result, &mock_ifnet));
}

int main(void)
{
	CU_pSuite ptr_suite = NULL;
	int nr_of_failed_tests = 0;
	int nr_of_failed_suites = 0;

	/* Initialize the CUnit test registry */
	if (CUE_SUCCESS != CU_initialize_registry())
		return CU_get_error();

	/* add a suite to the registry */
	ptr_suite = CU_add_suite("ofp errno", init_suite, end_suite);
	if (NULL == ptr_suite) {
		CU_cleanup_registry();
		return CU_get_error();
	}
	if (NULL == CU_ADD_TEST(ptr_suite, test_arp)) {
		CU_cleanup_registry();
		return CU_get_error();
	}

#if defined(OFP_TESTMODE_AUTO)
	CU_set_output_filename("CUnit-Util");
	CU_automated_run_tests();
#else
	/* Run all tests using the CUnit Basic interface */
	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
#endif

	nr_of_failed_tests = CU_get_number_of_tests_failed();
	nr_of_failed_suites = CU_get_number_of_suites_failed();
	CU_cleanup_registry();

	return (nr_of_failed_suites > 0 ?
		nr_of_failed_suites : nr_of_failed_tests);
}
Example #15
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
    uint64_t    period;
    uint64_t    period_ns;
    odp_queue_t queue;
    uint64_t    tick;
    struct test_timer *ttp;
    odp_timeout_t tmo;
    uint32_t num_workers = gbls->num_workers;

    EXAMPLE_DBG("  [%i] test_timeouts\n", thr);

    queue      = odp_queue_lookup("timer_queue");

    period_ns  = gbls->args.period_us * ODP_TIME_USEC;
    period     = odp_timer_ns_to_tick(gbls->tp, period_ns);

    EXAMPLE_DBG("  [%i] period %d ticks,  %d ns\n", thr,
                period, period_ns);

    EXAMPLE_DBG("  [%i] current tick %d\n", thr,
                odp_timer_current_tick(gbls->tp));

    ttp = &gbls->tt[thr];
    ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
    if (ttp->tim == ODP_TIMER_INVALID) {
        EXAMPLE_ERR("Failed to allocate timer\n");
        return;
    }

    tmo = odp_timeout_alloc(gbls->pool);
    if (tmo == ODP_TIMEOUT_INVALID) {
        EXAMPLE_ERR("Failed to allocate timeout\n");
        return;
    }

    ttp->ev    = odp_timeout_to_event(tmo);
    tick       = odp_timer_current_tick(gbls->tp);

    while (1) {
        int wait = 0;
        odp_event_t ev;
        odp_timer_set_t rc;

        if (ttp) {
            tick  += period;
            rc     = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
            if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
                /* Too early or too late timeout requested */
                EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
                              timerset2str(rc));
        }

        /* Get the next expired timeout.
         * We invoke the scheduler in a loop with a timeout because
         * we are not guaranteed to receive any more timeouts. The
         * scheduler isn't guaranteeing fairness when scheduling
         * buffers to threads.
         * Use 1.5 second timeout for scheduler */
        uint64_t sched_tmo =
            odp_schedule_wait_time(1500000000ULL);
        do {
            ev = odp_schedule(&queue, sched_tmo);

            /* Check if odp_schedule() timed out, possibly there
             * are no remaining timeouts to receive */
            if ((++wait > WAIT_NUM)
                    && (odp_atomic_load_u32(&gbls->remain) < num_workers))
                EXAMPLE_ABORT("At least one TMO was lost\n");
        } while (ev == ODP_EVENT_INVALID
                 && (int)odp_atomic_load_u32(&gbls->remain) > 0);

        if (ev == ODP_EVENT_INVALID)
            break;  /* No more timeouts */

        if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
            /* Not a default timeout event */
            EXAMPLE_ABORT("Unexpected event type (%u) received\n",
                          odp_event_type(ev));

        odp_timeout_t tmo = odp_timeout_from_event(ev);
        tick       = odp_timeout_tick(tmo);
        ttp        = odp_timeout_user_ptr(tmo);
        ttp->ev    = ev;
        if (!odp_timeout_fresh(tmo))
            /* Not the expected expiration tick, timer has
             * been reset or cancelled or freed */
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);

        EXAMPLE_DBG("  [%i] timeout, tick %d\n", thr, tick);

        uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

        if (!rx_num)
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);
        else if (rx_num > num_workers)
            continue;

        odp_timeout_free(odp_timeout_from_event(ttp->ev));
        odp_timer_free(ttp->tim);
        ttp = NULL;
    }

    /* Remove any prescheduled events */
    remove_prescheduled_events();
}
Example #16
static void *chaos_thread(void *arg)
{
	uint64_t i, wait;
	int rc;
	chaos_buf *cbuf;
	odp_event_t ev;
	odp_queue_t from;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	int me = odp_thread_id();

	if (CHAOS_DEBUG)
		printf("Chaos thread %d starting...\n", me);

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);

	/* Run the test */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		CU_ASSERT_FATAL(cbuf != NULL);
		INVALIDATE(cbuf);
		if (CHAOS_DEBUG)
			printf("Thread %d received event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s, sending to Q %s\n",
			       me, cbuf->evno, cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
			       globals->
			       chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);

		rc = odp_queue_enq(
			globals->
			chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
			ev);
		CU_ASSERT(rc == 0);
	}

	if (CHAOS_DEBUG)
		printf("Thread %d completed %d rounds...terminating\n",
		       odp_thread_id(), CHAOS_NUM_EVENTS);

	/* Thread complete--drain locally cached scheduled events */
	odp_schedule_pause();

	while (odp_atomic_load_u32(&globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;
		odp_atomic_dec_u32(&globals->chaos_pending_event_count);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Thread %d drained event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s\n",
			       odp_thread_id(), cbuf->evno, cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	return NULL;
}
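
The drain loop relies on chaos_pending_event_count tracking in-flight events. A sketch of the assumed producer-side bookkeeping (not shown in this listing):

	/* At test setup */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	/* Once per event enqueued into the chaos queues */
	odp_atomic_inc_u32(&globals->chaos_pending_event_count);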
Example #17
void odp_pool_print(odp_pool_t pool_hdl)
{
	pool_entry_t *pool;
	uint32_t pool_id;

	pool_id = pool_handle_to_index(pool_hdl);
	pool    = get_pool_entry(pool_id);

	uint32_t bufcount  = odp_atomic_load_u32(&pool->s.bufcount);
	uint32_t blkcount  = odp_atomic_load_u32(&pool->s.blkcount);
	uint64_t bufallocs = odp_atomic_load_u64(&pool->s.poolstats.bufallocs);
	uint64_t buffrees  = odp_atomic_load_u64(&pool->s.poolstats.buffrees);
	uint64_t blkallocs = odp_atomic_load_u64(&pool->s.poolstats.blkallocs);
	uint64_t blkfrees  = odp_atomic_load_u64(&pool->s.poolstats.blkfrees);
	uint64_t bufempty  = odp_atomic_load_u64(&pool->s.poolstats.bufempty);
	uint64_t blkempty  = odp_atomic_load_u64(&pool->s.poolstats.blkempty);
	uint64_t hiwmct    =
		odp_atomic_load_u64(&pool->s.poolstats.high_wm_count);
	uint64_t lowmct    =
		odp_atomic_load_u64(&pool->s.poolstats.low_wm_count);

	ODP_DBG("Pool info\n");
	ODP_DBG("---------\n");
	ODP_DBG(" pool            %" PRIu64 "\n",
		odp_pool_to_u64(pool->s.pool_hdl));
	ODP_DBG(" name            %s\n",
		pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
	ODP_DBG(" pool type       %s\n",
		pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
	       (pool->s.params.type == ODP_POOL_PACKET ? "packet" :
	       (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
		"unknown")));
	ODP_DBG(" pool storage    ODP managed shm handle %" PRIu64 "\n",
		odp_shm_to_u64(pool->s.pool_shm));
	ODP_DBG(" pool status     %s\n",
		pool->s.quiesced ? "quiesced" : "active");
	ODP_DBG(" pool opts       %s, %s, %s\n",
		pool->s.flags.unsegmented ? "unsegmented" : "segmented",
		pool->s.flags.zeroized ? "zeroized" : "non-zeroized",
		pool->s.flags.predefined  ? "predefined" : "created");
	ODP_DBG(" pool base       %p\n",  pool->s.pool_base_addr);
	ODP_DBG(" pool size       %lu(k)\n",
		pool->s.pool_size / 1024);
	ODP_DBG(" pool mdata base %p\n",  pool->s.pool_mdata_addr);
	ODP_DBG(" udata size      %u\n", pool->s.udata_size);
	ODP_DBG(" headroom        %u\n",  pool->s.headroom);
	ODP_DBG(" tailroom        %u\n",  pool->s.tailroom);
	if (pool->s.params.type == ODP_POOL_BUFFER) {
		ODP_DBG(" buf size        %1u\n", pool->s.params.buf.size);
		ODP_DBG(" buf align       %u requested, %u used\n",
			pool->s.params.buf.align, pool->s.buf_align);
	} else if (pool->s.params.type == ODP_POOL_PACKET) {
		ODP_DBG(" seg length      %u requested, %u used\n",
			pool->s.params.pkt.seg_len, pool->s.seg_size);
		ODP_DBG(" pkt length      %u requested, %u used\n",
			pool->s.params.pkt.len, pool->s.blk_size);
	}
	ODP_DBG(" num bufs        %u\n",  pool->s.buf_num);
	ODP_DBG(" bufs available  %u %s\n", bufcount,
		pool->s.low_wm_assert ? " **low wm asserted**" : "");
	ODP_DBG(" bufs in use     %u\n",  pool->s.buf_num - bufcount);
	ODP_DBG(" buf allocs      %lu\n", bufallocs);
	ODP_DBG(" buf frees       %lu\n", buffrees);
	ODP_DBG(" buf empty       %lu\n", bufempty);
	ODP_DBG(" blk size        %1u\n",
		pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
	ODP_DBG(" blks available  %u\n",  blkcount);
	ODP_DBG(" blk allocs      %lu\n", blkallocs);
	ODP_DBG(" blk frees       %lu\n", blkfrees);
	ODP_DBG(" blk empty       %lu\n", blkempty);
	ODP_DBG(" high wm value   %u\n", pool->s.high_wm);
	ODP_DBG(" high wm count   %lu\n", hiwmct);
	ODP_DBG(" low wm value    %u\n", pool->s.low_wm);
	ODP_DBG(" low wm count    %lu\n", lowmct);
}
Example #18
static int traffic_generator(uint32_t pkts_to_send)
{
	odp_pool_param_t pool_params;
	odp_tm_queue_t   tm_queue;
	odp_packet_t     pkt;
	odp_bool_t       tm_is_idle;
	uint32_t         svc_class, queue_num, pkt_len, pkts_into_tm;
	uint32_t         pkts_from_tm, pkt_cnt, millisecs, odp_tm_enq_errs;
	int              rc;

	memset(&pool_params, 0, sizeof(odp_pool_param_t));
	pool_params.type           = ODP_POOL_PACKET;
	pool_params.pkt.num        = pkts_to_send + 10;
	pool_params.pkt.len        = 1600;
	pool_params.pkt.seg_len    = 0;
	pool_params.pkt.uarea_size = 0;

	odp_pool        = odp_pool_create("MyPktPool", &pool_params);
	odp_tm_enq_errs = 0;

	pkt_cnt = 0;
	while (pkt_cnt < pkts_to_send) {
		svc_class = pkt_service_class();
		queue_num = random_16() & (TM_QUEUES_PER_CLASS - 1);
		tm_queue  = queue_num_tbls[svc_class][queue_num + 1];
		pkt_len   = ((uint32_t)((random_8() & 0x7F) + 2)) * 32;
		pkt_len   = MIN(pkt_len, 1500);
		pkt       = make_odp_packet(pkt_len);

		pkt_cnt++;
		rc = odp_tm_enq(tm_queue, pkt);
		if (rc < 0) {
			odp_tm_enq_errs++;
			continue;
		}

		odp_atomic_inc_u32(&atomic_pkts_into_tm);
	}

	printf("%s odp_tm_enq_errs=%u\n", __func__, odp_tm_enq_errs);

	/* Wait until the main traffic mgmt worker thread is idle and has no
	 * outstanding events (i.e. no timers, empty work queue, etc.), but
	 * no longer than 60 seconds. */
	for (millisecs = 0; millisecs < 600000; millisecs++) {
		usleep(100);
		tm_is_idle = odp_tm_is_idle(odp_tm_test);
		if (tm_is_idle)
			break;
	}

	if (!tm_is_idle)
		printf("%s WARNING stopped waiting for the TM system "
		       "to be IDLE!\n", __func__);

	/* Wait for up to 2 seconds for pkts_from_tm to match pkts_into_tm. */
	for (millisecs = 0; millisecs < 2000; millisecs++) {
		usleep(1000);
		pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
		pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
		if (pkts_into_tm <= pkts_from_tm)
			break;
	}

	return 0;
}