Example #1
0
int odp_queue_term_global(void)
{
	int ret = 0;
	int rc = 0;
	queue_entry_t *queue;
	int i;

	for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
		queue = &queue_tbl->queue[i];
		LOCK(queue);
		if (LOAD_S32(queue->s.status) != QUEUE_STATUS_FREE) {
			ODP_ERR("Queue not destroyed: %s\n", queue->s.name);
			rc = -1;
		}
		UNLOCK(queue);
	}

	ret = odp_shm_free(odp_shm_lookup("odp_queues"));
	if (ret < 0) {
		ODP_ERR("shm free failed for odp_queues");
		rc = -1;
	}

	return rc;
}
Example #2
0
static void schedule_common(odp_schedule_sync_t sync, int num_queues,
			    int num_prio, int enable_schd_multi)
{
	thread_args_t args;
	odp_shm_t shm;
	test_globals_t *globals;

	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	memset(&args, 0, sizeof(thread_args_t));
	args.globals = globals;
	args.sync = sync;
	args.num_queues = num_queues;
	args.num_prio = num_prio;
	args.num_bufs = BUFS_PER_QUEUE;
	args.num_workers = 1;
	args.enable_schd_multi = enable_schd_multi;
	args.enable_excl_atomic = 0;	/* Not needed with a single CPU */

	fill_queues(&args);

	schedule_common_(&args);
	if (sync == ODP_SCHED_SYNC_ORDERED)
		reset_queues(&args);
}
Example #3
0
int odp_pool_term_global(void)
{
	int i;
	pool_entry_t *pool;
	int ret = 0;
	int rc = 0;

	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		pool = get_pool_entry(i);

		POOL_LOCK(&pool->s.lock);
		if (pool->s.pool_shm != ODP_SHM_INVALID) {
			ODP_ERR("Not destroyed pool: %s\n", pool->s.name);
			rc = -1;
		}
		POOL_UNLOCK(&pool->s.lock);
	}

	ret = odp_shm_free(odp_shm_lookup(SHM_DEFAULT_NAME));
	if (ret < 0) {
		ODP_ERR("shm free failed for %s", SHM_DEFAULT_NAME);
		rc = -1;
	}

	return rc;
}
Example #4
0
odph_table_t odph_linear_table_create(const char *name, uint32_t capacity,
				      uint32_t ODP_IGNORED, uint32_t value_size)
{
	int idx;
	uint32_t node_num;
	odp_shm_t shmem;
	odph_linear_table_imp *tbl;

	if (strlen(name) >= ODPH_TABLE_NAME_LEN || capacity < 1 ||
	    capacity >= 0x1000 || value_size == 0) {
		printf("create para input error or less than !");
		return NULL;
	}
	/* check for a name conflict in shm */
	tbl = (odph_linear_table_imp *)odp_shm_addr(odp_shm_lookup(name));
	if (tbl != NULL) {
		ODPH_DBG("name already exist\n");
		return NULL;
	}

	/* reserve a block of capacity megabytes from shm */
	shmem = odp_shm_reserve(name, capacity << 20, 64, ODP_SHM_SW_ONLY);
	if (shmem == ODP_SHM_INVALID) {
		ODPH_DBG("shm reserve fail\n");
		return NULL;
	}
	tbl = (odph_linear_table_imp *)odp_shm_addr(shmem);

	/* clean this block of memory */
	memset(tbl, 0, capacity << 20);

	tbl->init_cap = capacity << 20;

	strncpy(tbl->name, name, ODPH_TABLE_NAME_LEN - 1);

	/* For a linear table the key is simply the index, so there are no
	 * conflicts and only the value content needs to be recorded.
	 * An rwlock is kept at the head of every node.
	 */

	tbl->value_size = value_size + sizeof(odp_rwlock_t);

	node_num = tbl->init_cap / tbl->value_size;
	tbl->node_sum = node_num;

	tbl->value_array = (void *)((char *)tbl
			+ sizeof(odph_linear_table_imp));

	/* initialize rwlock*/
	for (idx = 0; idx < tbl->node_sum; idx++) {
		odp_rwlock_t *lock = (odp_rwlock_t *)((char *)tbl->value_array
				+ idx * tbl->value_size);
		odp_rwlock_init(lock);
	}

	tbl->magicword = ODPH_LINEAR_TABLE_MAGIC_WORD;

	return (odph_table_t)(tbl);
}
Example #5
0
static int test_term(void)
{
	char pool_name[ODP_POOL_NAME_LEN];
	odp_pool_t pool;
	int i;
	int ret = 0;

	if (gbl_args->pktio_tx != gbl_args->pktio_rx) {
		if (odp_pktio_stop(gbl_args->pktio_tx)) {
			LOG_ERR("Failed to stop pktio_tx\n");
			return -1;
		}

		if (odp_pktio_close(gbl_args->pktio_tx)) {
			LOG_ERR("Failed to close pktio_tx\n");
			ret = -1;
		}
	}

	empty_inq(gbl_args->pktio_rx);

	if (odp_pktio_stop(gbl_args->pktio_rx)) {
		LOG_ERR("Failed to stop pktio_rx\n");
		return -1;
	}

	if (odp_pktio_close(gbl_args->pktio_rx) != 0) {
		LOG_ERR("Failed to close pktio_rx\n");
		ret = -1;
	}

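	/* Look up and destroy the per-interface packet pools */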
	for (i = 0; i < gbl_args->args.num_ifaces; ++i) {
		snprintf(pool_name, sizeof(pool_name),
			 "pkt_pool_%s", gbl_args->args.ifaces[i]);
		pool = odp_pool_lookup(pool_name);
		if (pool == ODP_POOL_INVALID)
			continue;

		if (odp_pool_destroy(pool) != 0) {
			LOG_ERR("Failed to destroy pool %s\n", pool_name);
			ret = -1;
		}
	}

	if (odp_pool_destroy(transmit_pkt_pool) != 0) {
		LOG_ERR("Failed to destroy transmit pool\n");
		ret = -1;
	}

	free(gbl_args->args.if_str);

	if (odp_shm_free(odp_shm_lookup("test_globals")) != 0) {
		LOG_ERR("Failed to free test_globals\n");
		ret = -1;
	}

	return ret;
}
Example #6
0
static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
			     int num_prio, int enable_schd_multi,
			     int enable_excl_atomic)
{
	odp_shm_t shm;
	test_globals_t *globals;
	thread_args_t *args;

	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->sync = sync;
	args->num_queues = num_queues;
	args->num_prio = num_prio;
	if (enable_excl_atomic)
		args->num_bufs = BUFS_PER_QUEUE_EXCL;
	else
		args->num_bufs = BUFS_PER_QUEUE;
	args->num_workers = globals->num_workers;
	args->enable_schd_multi = enable_schd_multi;
	args->enable_excl_atomic = enable_excl_atomic;

	fill_queues(args);

	/* Create and launch worker threads */
	args->cu_thr.numthrds = globals->num_workers;
	odp_cunit_thread_create(schedule_common_, &args->cu_thr);

	/* Wait for worker threads to terminate */
	odp_cunit_thread_exit(&args->cu_thr);

	/* Cleanup ordered queues for next pass */
	if (sync == ODP_SCHED_SYNC_ORDERED)
		reset_queues(args);
}
Example #7
0
int odp_thread_term_global(void)
{
	int ret;

	ret = odp_shm_free(odp_shm_lookup("odp_thread_globals"));
	if (ret < 0)
		ODP_ERR("shm free failed for odp_thread_globals");

	return ret;
}
Example #8
0
int ofp_shared_memory_free_raw(const char *name)
{
	odp_shm_t shm_h;

	shm_h = odp_shm_lookup(name);
	if (shm_h == ODP_SHM_INVALID)
		return -1;

	odp_shm_free(shm_h);
	return 0;
}
Example #9
0
static void *run_thread_rx(void *arg)
{
	test_globals_t *globals;
	int thr_id, batch_len;
	odp_queue_t pollq = ODP_QUEUE_INVALID;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));

	pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];

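	/* In poll mode, read packets from the pktio's default input queue */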
	if (gbl_args->args.schedule == 0) {
		pollq = odp_pktio_inq_getdef(globals->pktio_rx);
		if (pollq == ODP_QUEUE_INVALID)
			LOG_ABORT("Invalid input queue.\n");
	}

	odp_barrier_wait(&globals->rx_barrier);
	while (1) {
		odp_event_t ev[BATCH_LEN_MAX];
		int i, n_ev;

		n_ev = receive_packets(pollq, ev, batch_len);

		for (i = 0; i < n_ev; ++i) {
			if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
				odp_packet_t pkt = odp_packet_from_event(ev[i]);
				if (pktio_pkt_has_magic(pkt))
					stats->s.rx_cnt++;
				else
					stats->s.rx_ignore++;
			}
			odp_buffer_free(odp_buffer_from_event(ev[i]));
		}
		if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
			break;
	}

	return NULL;
}
Example #10
0
odph_table_t odph_linear_table_lookup(const char *name)
{
	odph_linear_table_imp *tbl;

	if (name == NULL || strlen(name) >= ODPH_TABLE_NAME_LEN)
		return NULL;

	tbl = (odph_linear_table_imp *)odp_shm_addr(odp_shm_lookup(name));

	/* check magicword to make sure the memory block is used by a table */
	if (tbl != NULL &&
	    tbl->magicword == ODPH_LINEAR_TABLE_MAGIC_WORD &&
	    strcmp(tbl->name, name) == 0)
		return (odph_table_t)tbl;

	return NULL;
}
Example #11
0
void *ofp_shared_memory_lookup_raw(const char *name)
{
	odp_shm_t shm_h;
	void *shm;

	shm_h = odp_shm_lookup(name);
	if (shm_h == ODP_SHM_INVALID)
		return NULL;

	shm = odp_shm_addr(shm_h);
	if (shm == NULL) {
		odp_shm_free(shm_h);
		return NULL;
	}

	return shm;
}
Example #12
0
int odph_linear_table_destroy(odph_table_t table)
{
	int ret;
	odph_linear_table_imp *linear_tbl = NULL;

	if (table != NULL) {
		linear_tbl = (odph_linear_table_imp *)table;

		/* check magicword, make sure the memory is used by a table */
		if (linear_tbl->magicword != ODPH_LINEAR_TABLE_MAGIC_WORD)
			return ODPH_FAIL;

		ret = odp_shm_free(odp_shm_lookup(linear_tbl->name));
		if (ret != 0) {
			ODPH_DBG("free fail\n");
			return ret;
		}

		return ODPH_SUCCESS;
	}
	return ODPH_FAIL;
}
Example #13
0
/* Initialise per-thread memory */
static per_thread_mem_t *thread_init(void)
{
	global_shared_mem_t *global_mem;
	per_thread_mem_t *per_thread_mem;
	odp_shm_t global_shm;
	uint32_t per_thread_mem_len;

	per_thread_mem_len = sizeof(per_thread_mem_t);
	per_thread_mem = malloc(per_thread_mem_len);
	memset(per_thread_mem, 0, per_thread_mem_len);

	per_thread_mem->delay_counter = 1;

	per_thread_mem->thread_id = odp_thread_id();
	per_thread_mem->thread_core = odp_cpu_id();

	global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
	global_mem = odp_shm_addr(global_shm);
	CU_ASSERT_PTR_NOT_NULL(global_mem);

	per_thread_mem->global_mem = global_mem;

	return per_thread_mem;
}
Example #14
0
int odp_pktio_term_global(void)
{
	pktio_entry_t *pktio_entry;
	int ret = 0;
	int id;
	int pktio_if;

	for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if)
		if (pktio_if_ops[pktio_if]->term)
			if (pktio_if_ops[pktio_if]->term())
				ODP_ERR("failed to terminate pktio type %d",
					pktio_if);

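	/* Destroy the default output queue of each pktio entry */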
	for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) {
		pktio_entry = &pktio_tbl->entries[id - 1];
		odp_queue_destroy(pktio_entry->s.outq_default);
	}

	ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries"));
	if (ret < 0)
		ODP_ERR("shm free failed for odp_pktio_entries");

	return ret;
}
Example #15
0
static void chaos_run(unsigned int qtype)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	int i, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.type        = ODP_QUEUE_TYPE_SCHED;
	qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qp.sched.group = ODP_SCHED_GROUP_ALL;

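	/* Create the test queues; qtype == num_sync mixes all sync types */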
	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);

		qp.sched.sync = sync[ndx];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[ndx]);

		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name, &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i), 0);
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	drain_queues();
	exit_schedule_loop();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Example #16
0
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

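	/* Create scheduled queues, cycling through the enabled sync types */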
	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->chaos_q[CHAOS_PTR_TO_NDX(
				       odp_queue_context(from))].name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Example #17
0
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static int run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_pktout_queue_t pktout;
	pkt_tx_stats_t *stats;
	odp_time_t cur_time, send_time_end, send_duration;
	odp_time_t burst_gap_end, burst_gap;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_packet_t tx_packet[BATCH_LEN_MAX];
	odp_time_t idle_start = ODP_TIME_NULL;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	if (odp_pktout_queue(globals->pktio_tx, &pktout, 1) != 1)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

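	/* Time between bursts of batch_len packets needed to sustain pps */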
	burst_gap = odp_time_local_from_ns(
			ODP_TIME_SEC_IN_NS / (targs->pps / targs->batch_len));
	send_duration =
		odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS);

	odp_barrier_wait(&globals->tx_barrier);

	cur_time     = odp_time_local();
	send_time_end = odp_time_sum(cur_time, send_duration);
	burst_gap_end = cur_time;
	while (odp_time_cmp(send_time_end, cur_time) > 0) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (odp_time_cmp(burst_gap_end, cur_time) > 0) {
			cur_time = odp_time_local();
			if (!odp_time_cmp(idle_start, ODP_TIME_NULL))
				idle_start = cur_time;
			continue;
		}

		if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
			odp_time_t diff = odp_time_diff(cur_time, idle_start);

			stats->s.idle_ticks =
				odp_time_sum(diff, stats->s.idle_ticks);

			idle_start = ODP_TIME_NULL;
		}

		burst_gap_end = odp_time_sum(burst_gap_end, burst_gap);

		alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_time = odp_time_local();
	}

	VPRINT(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
	       " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_to_ns(stats->s.idle_ticks) /
	       (uint64_t)ODP_TIME_MSEC_IN_NS);

	return 0;
}
Example #18
0
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static void *run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_queue_t outq;
	pkt_tx_stats_t *stats;
	uint64_t next_tx_cycles, end_cycles, cur_cycles;
	uint64_t burst_gap_cycles;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_event_t  tx_event[BATCH_LEN_MAX];
	uint64_t idle_start = 0;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	outq = odp_pktio_outq_getdef(globals->pktio_tx);
	if (outq == ODP_QUEUE_INVALID)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

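	/* Cycles between bursts of batch_len packets needed to sustain pps */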
	burst_gap_cycles = odp_time_ns_to_cycles((ODP_TIME_SEC * 999) /
				(1000 * targs->pps / (targs->batch_len)));

	odp_barrier_wait(&globals->tx_barrier);

	cur_cycles     = odp_time_cycles();
	next_tx_cycles = cur_cycles;
	end_cycles     = cur_cycles +
			 odp_time_ns_to_cycles(targs->duration * ODP_TIME_SEC);

	while (cur_cycles < end_cycles) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (cur_cycles < next_tx_cycles) {
			cur_cycles = odp_time_cycles();
			if (idle_start == 0)
				idle_start = cur_cycles;
			continue;
		}

		if (idle_start) {
			stats->s.idle_cycles += odp_time_diff_cycles(
							idle_start, cur_cycles);
			idle_start = 0;
		}

		next_tx_cycles += burst_gap_cycles;

		alloc_cnt = alloc_packets(tx_event, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(outq, tx_event, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_cycles = odp_time_cycles();
	}

	VPRINT(" %02d: TxPkts %-8"PRIu64" EnqFail %-6"PRIu64
	       " AllocFail %-6"PRIu64" Idle %"PRIu64"ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_cycles_to_ns(stats->s.idle_cycles)/1000/1000);

	return NULL;
}