Code example #1
File: odp_generator.c Project: kalray/odp-mppa
/**
 * Main receive function
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *gen_recv_thread(void *arg)
{
	int thr;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_packet_t pkt;
	odp_event_t ev;

	thr = odp_thread_id();
	thr_args = arg;

	pktio = odp_pktio_lookup(thr_args->pktio_dev);
	if (pktio == ODP_PKTIO_INVALID) {
		EXAMPLE_ERR("  [%02i] Error: lookup of pktio %s failed\n",
			    thr, thr_args->pktio_dev);
		return NULL;
	}

	printf("  [%02i] created mode: RECEIVE\n", thr);

	for (;;) {
		if (args->appl.number != -1 &&
		    odp_atomic_load_u64(&counters.icmp) >=
		    (unsigned int)args->appl.number) {
			break;
		}

		/* Use schedule to get an event from any input queue */
		ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		pkt = odp_packet_from_event(ev);
		/* Drop packets with errors */
		if (odp_unlikely(odp_packet_has_error(pkt))) {
			odp_packet_free(pkt);
			continue;
		}

		print_pkts(thr, &pkt, 1);

		odp_packet_free(pkt);
	}

	return arg;
}
Code example #2
File: odp_l2fwd.c Project: weixiaohui/packages
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_queue_t outq_def;
	odp_packet_t pkt;
	odp_event_t ev;
	thread_args_t *thr_args = arg;

	stats_t *stats = calloc(1, sizeof(stats_t));

	if (stats == NULL)
		return NULL;

	*thr_args->stats = stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		/* Use schedule to get an event from any input queue */
		ev  = odp_schedule(NULL, ODP_SCHED_WAIT);
		pkt = odp_packet_from_event(ev);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			stats->drops += 1;
			continue;
		}

		outq_def = lookup_dest_q(pkt);

		/* Enqueue the packet for output */
		if (odp_queue_enq(outq_def, ev)) {
			printf("  [%i] Queue enqueue failed.\n", thr);
			odp_packet_free(pkt);
			continue;
		}

		stats->packets += 1;
	}

	free(stats);
	return NULL;
}
Code example #3
File: synchronizers.c Project: kalray/odp-mppa
/* Initialise per-thread memory */
static per_thread_mem_t *thread_init(void)
{
	global_shared_mem_t *global_mem;
	per_thread_mem_t *per_thread_mem;
	odp_shm_t global_shm;
	uint32_t per_thread_mem_len;

	per_thread_mem_len = sizeof(per_thread_mem_t);
	per_thread_mem = malloc(per_thread_mem_len);
	CU_ASSERT_PTR_NOT_NULL_FATAL(per_thread_mem);
	memset(per_thread_mem, 0, per_thread_mem_len);

	per_thread_mem->delay_counter = 1;

	per_thread_mem->thread_id = odp_thread_id();
	per_thread_mem->thread_core = odp_cpu_id();

	global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
	global_mem = odp_shm_addr(global_shm);
	CU_ASSERT_PTR_NOT_NULL(global_mem);

	per_thread_mem->global_mem = global_mem;

	return per_thread_mem;
}
Code example #4
File: synchronizers.c Project: kalray/odp-mppa
static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
			     odp_bool_t no_barrier_test)
{
	global_shared_mem_t *global_mem;
	uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
	uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
	uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;

	thread_num = odp_thread_id();
	global_mem = per_thread_mem->global_mem;
	num_threads = global_mem->g_num_threads;
	iterations = BARRIER_ITERATIONS;

	barrier_errs = 0;
	lock_owner_delay = SLOW_BARRIER_DELAY;

	for (cnt = 1; cnt < iterations; cnt++) {
		/* Wait here until all of the threads reach this point */
		custom_barrier_wait(&global_mem->custom_barrier1[cnt]);

		barrier_cnt1 = LOAD_U32(global_mem->barrier_cnt1);
		barrier_cnt2 = LOAD_U32(global_mem->barrier_cnt2);

		if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
			printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
				   " %" PRIu32 " cnt=%" PRIu32 "\n",
			       thread_num, barrier_cnt1, barrier_cnt2, cnt);
			barrier_errs++;
		}

		/* Wait here until all of the threads reach this point */
		custom_barrier_wait(&global_mem->custom_barrier2[cnt]);

		slow_thread_num = LOAD_U32(global_mem->slow_thread_num);
		i_am_slow_thread = thread_num == slow_thread_num;
		next_slow_thread = slow_thread_num + 1;
		if (num_threads < next_slow_thread)
			next_slow_thread = 1;

		/*
		 * Now run the test, which involves having all but one thread
		 * immediately call odp_barrier_wait(), while one thread waits
		 * a moderate amount of time before calling odp_barrier_wait().
		 * The test fails if any thread in the first group did not wait
		 * for the "slow" thread. The "slow" thread is responsible for
		 * re-initializing the barrier for the next trial.
		 */
		if (i_am_slow_thread) {
			thread_delay(per_thread_mem, lock_owner_delay);
			lock_owner_delay += BASE_DELAY;
			if ((LOAD_U32(global_mem->barrier_cnt1) != cnt) ||
			    (LOAD_U32(global_mem->barrier_cnt2) != cnt) ||
			    (LOAD_U32(global_mem->slow_thread_num)
					!= slow_thread_num))
				barrier_errs++;
		}

		if (no_barrier_test == 0)
			odp_barrier_wait(&global_mem->test_barriers[cnt]);

		STORE_U32(global_mem->barrier_cnt1, cnt + 1);
		odp_mb_full();

		if (i_am_slow_thread) {
			STORE_U32(global_mem->slow_thread_num, next_slow_thread);
			STORE_U32(global_mem->barrier_cnt2, cnt + 1);
			odp_mb_full();
		} else {
			uint32_t cnt2 = LOAD_U32(global_mem->barrier_cnt2);
			while (cnt2 != (cnt + 1)) {
				thread_delay(per_thread_mem, BASE_DELAY);
				cnt2 = LOAD_U32(global_mem->barrier_cnt2);
			}
		}
	}

	if ((global_mem->g_verbose) && (barrier_errs != 0))
		printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
		       " barrier_errs in %" PRIu32 " iterations\n", thread_num,
		       per_thread_mem->thread_id,
		       per_thread_mem->thread_core, barrier_errs, iterations);

	return barrier_errs;
}
Code example #5
File: timer.c Project: nmorey/odp
			odp_atomic_inc_u32(&ndelivtoolate);
		}
	}

	if (ttp) {
		/* Internal error */
		CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
		ttp->ev = ev;
	}
}

/* @private Worker thread entrypoint which performs timer alloc/set/cancel/free
 * tests */
static int worker_entrypoint(void *arg TEST_UNUSED)
{
	int thr = odp_thread_id();
	uint32_t i, allocated;
	unsigned seed = thr;
	int rc;
	odp_queue_t queue;
	struct test_timer *tt;
	uint32_t nset;
	uint64_t tck;
	uint32_t nrcv;
	uint32_t nreset;
	uint32_t ncancel;
	uint32_t ntoolate;
	uint32_t ms;
	uint64_t prev_tick;
	odp_event_t ev;
	struct timespec ts;
Code example #6
File: odp_schedule.c Project: weixiaohui/packages
/*
 * Schedule queues
 *
 * TODO: SYNC_ORDERED not implemented yet
 */
static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
		    unsigned int max_num, unsigned int max_deq)
{
	int i, j;
	int thr;
	int ret;

	if (sched_local.num) {
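		/* Events are still stashed locally from a previous
		 * round; return them first */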
		ret = copy_events(out_ev, max_num);

		if (out_queue)
			*out_queue = queue_handle(sched_local.qe);

		return ret;
	}

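	/* Nothing stashed: release any held atomic context before
	 * scanning the priority queues for new work */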
	odp_schedule_release_atomic();

	if (odp_unlikely(sched_local.pause))
		return 0;

	thr = odp_thread_id();

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		int id;

		if (sched->pri_mask[i] == 0)
			continue;

		id = thr & (QUEUES_PER_PRIO-1);

		for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
			odp_queue_t  pri_q;
			odp_event_t  ev;
			odp_buffer_t buf;
			sched_cmd_t *sched_cmd;
			queue_entry_t *qe;
			int num;

			if (id >= QUEUES_PER_PRIO)
				id = 0;

			if (odp_unlikely((sched->pri_mask[i] & (1 << id)) == 0))
				continue;

			pri_q = sched->pri_queue[i][id];
			ev    = odp_queue_deq(pri_q);
			buf   = odp_buffer_from_event(ev);

			if (buf == ODP_BUFFER_INVALID)
				continue;

			sched_cmd = odp_buffer_addr(buf);

			if (sched_cmd->cmd == SCHED_CMD_POLL_PKTIN) {
				/* Poll packet input */
				if (pktin_poll(sched_cmd->pe)) {
					/* Stop scheduling the pktio */
					pri_clr_pktio(sched_cmd->pktio,
						      sched_cmd->prio);
					odp_buffer_free(buf);
				} else {
					/* Continue scheduling the pktio */
					if (odp_queue_enq(pri_q, ev))
						ODP_ABORT("schedule failed\n");
				}

				continue;
			}

			qe  = sched_cmd->qe;
			num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);

			if (num < 0) {
				/* Destroyed queue */
				queue_destroy_finalize(qe);
				continue;
			}

			if (num == 0) {
				/* Remove empty queue from scheduling */
				continue;
			}

			sched_local.num   = num;
			sched_local.index = 0;
			sched_local.qe    = qe;
			ret = copy_events(out_ev, max_num);

			if (queue_is_atomic(qe)) {
				/* Hold queue during atomic access */
				sched_local.pri_queue = pri_q;
				sched_local.cmd_ev    = ev;
			} else {
				/* Continue scheduling the queue */
				if (odp_queue_enq(pri_q, ev))
					ODP_ABORT("schedule failed\n");
			}

			/* Output the source queue handle */
			if (out_queue)
				*out_queue = queue_handle(qe);

			return ret;
		}
	}

	return 0;
}
Code example #7
File: thread.c Project: rajeshwari27/odp-mppa
static void thread_test_odp_thread_id(void)
{
	(void)odp_thread_id();
	CU_PASS();
}
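The test above only checks that odp_thread_id() can be called at all. In a standalone program the call is valid only after ODP has been initialized on the calling thread. Below is a minimal sketch, assuming the pre-1.7 style API used by most snippets on this page, where odp_init_global()/odp_init_local() take no instance handle:

#include <stdio.h>
#include <odp.h>

int main(void)
{
	/* Global init once per process, then local init on each thread,
	 * before any other ODP call such as odp_thread_id() */
	if (odp_init_global(NULL, NULL)) {
		fprintf(stderr, "ODP global init failed\n");
		return 1;
	}
	if (odp_init_local()) {
		fprintf(stderr, "ODP local init failed\n");
		return 1;
	}

	printf("running as ODP thread %d on cpu %d\n",
	       odp_thread_id(), odp_cpu_id());

	odp_term_local();
	odp_term_global();
	return 0;
}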
Code example #8
/**
 * Packet IO loopback worker thread using bursts from/to IO resources
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_ifburst_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	int pkts, pkts_ok;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	unsigned long tmp = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed.\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, burst mode\n",
	       thr, pktio);

	/* Loop packets */
	for (;;) {
		pkts = odp_pktio_recv(pktio, pkt_tbl, MAX_PKT_BURST);
		if (pkts > 0) {
			/* Drop packets with errors */
			pkts_ok = drop_err_pkts(pkt_tbl, pkts);
			if (pkts_ok > 0) {
				/* Swap Eth MACs and IP-addrs */
				swap_pkt_addrs(pkt_tbl, pkts_ok);
				odp_pktio_send(pktio, pkt_tbl, pkts_ok);
			}

			if (odp_unlikely(pkts_ok != pkts))
				ODP_ERR("Dropped frames:%u - err_cnt:%lu\n",
					pkts-pkts_ok, ++err_cnt);

			/* Print packet counts every once in a while */
			tmp += pkts_ok;
			if (odp_unlikely((tmp >= 100000) || /* OR first print: */
			    ((pkt_cnt == 0) && ((tmp-1) < MAX_PKT_BURST)))) {
				pkt_cnt += tmp;
				printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
				fflush(NULL);
				tmp = 0;
			}
		}
	}

/* unreachable */
}
Code example #9
File: odp_generator.c Project: kalray/odp-mppa
static void *gen_send_thread(void *arg)
{
	int thr;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_queue_t outq_def;
	odp_packet_t pkt[PKT_BURST_SZ];

	thr = odp_thread_id();
	thr_args = arg;

	pktio = odp_pktio_lookup(thr_args->pktio_dev);
	if (pktio == ODP_PKTIO_INVALID) {
		EXAMPLE_ERR("  [%02i] Error: lookup of pktio %s failed\n",
			    thr, thr_args->pktio_dev);
		return NULL;
	}

	outq_def = odp_pktio_outq_getdef(pktio);
	if (outq_def == ODP_QUEUE_INVALID) {
		EXAMPLE_ERR("  [%02i] Error: def output-Q query\n", thr);
		return NULL;
	}

	printf("  [%02i] created mode: SEND\n", thr);
	for (int i = 0; i < PKT_BURST_SZ; ++i) {
		if (args->appl.mode == APPL_MODE_UDP)
			pkt[i] = pack_udp_pkt(thr_args->pool);
		else if (args->appl.mode == APPL_MODE_PING)
			pkt[i] = pack_icmp_pkt(thr_args->pool);
		else
			pkt[i] = ODP_PACKET_INVALID;

		if (!odp_packet_is_valid(pkt[i])) {
			EXAMPLE_ERR("  [%2i] alloc_single failed\n", thr);
			return NULL;
		}
	}

	for (;;) {
		int err;

		/* Packet handles are reinterpreted as events; this relies on
		 * the two handle types being interchangeable in this
		 * implementation. */
		err = odp_queue_enq_multi(outq_def, (odp_event_t *)pkt,
					  PKT_BURST_SZ);
		if (err != PKT_BURST_SZ) {
			/* Partial enqueue is tolerated here; the same burst
			 * is resent on the next iteration. */
		}

		static uint64_t toto = 0;
		if (args->appl.interval != 0) {
			printf("  [%02i] send pkt no:%"PRIu64" seq %"PRIu64"\n",
			       thr,
			       odp_atomic_load_u64(&counters.seq),
				   toto++);
			usleep(args->appl.interval);
			/* millisleep(args->appl.interval, */
			/* 	   thr_args->tp, */
			/* 	   thr_args->tim, */
			/* 	   thr_args->tq, */
			/* 	   thr_args->tmo_ev); */

		}
	}
	printf("Done\n");
	_exit(0);
	/* receive number of reply pks until timeout */
	if (args->appl.mode == APPL_MODE_PING && args->appl.number > 0) {
		while (args->appl.timeout >= 0) {
			if (odp_atomic_load_u64(&counters.icmp) >=
			    (unsigned int)args->appl.number)
				break;
			millisleep(DEFAULT_PKT_INTERVAL,
				   thr_args->tp,
				   thr_args->tim,
				   thr_args->tq,
				   thr_args->tmo_ev);
			args->appl.timeout--;
		}
	}

	return arg;
}
Code example #10
File: odp_l2fwd.c Project: mitra/odp-mppa
/**
 * Packet IO worker thread accessing IO resources directly
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_direct_recv_thread(void *arg)
{
	int thr;
	int pkts;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int src_idx, dst_idx;
	odp_pktio_t pktio_src, pktio_dst;
	thread_args_t *thr_args = arg;
	stats_t *stats = thr_args->stats;

	thr = odp_thread_id();

	src_idx = thr_args->src_idx;
	dst_idx = gbl_args->dst_port[src_idx];
	pktio_src = gbl_args->pktios[src_idx];
	pktio_dst = gbl_args->pktios[dst_idx];

	printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
	       " dpktio:%02" PRIu64 " DIRECT RECV mode\n",
	       thr,
	       gbl_args->appl.if_names[src_idx],
	       gbl_args->appl.if_names[dst_idx],
	       odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
	odp_barrier_wait(&barrier);

	/* Loop packets */
	while (!exit_threads) {
		int sent, i;
		unsigned tx_drops;

		pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
		if (odp_unlikely(pkts <= 0))
			continue;

		if (gbl_args->appl.error_check) {
			int rx_drops;

			/* Drop packets with errors */
			rx_drops = drop_err_pkts(pkt_tbl, pkts);

			if (odp_unlikely(rx_drops)) {
				stats->s.rx_drops += rx_drops;
				if (pkts == rx_drops)
					continue;

				pkts -= rx_drops;
			}
		}

		fill_eth_addrs(pkt_tbl, pkts, dst_idx);

		sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);

		sent     = odp_unlikely(sent < 0) ? 0 : sent;
		tx_drops = pkts - sent;

		if (odp_unlikely(tx_drops)) {
			stats->s.tx_drops += tx_drops;

			/* Drop rejected packets */
			for (i = sent; i < pkts; i++)
				odp_packet_free(pkt_tbl[i]);
		}

		stats->s.packets += pkts;
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return NULL;
}
Code example #11
File: odp_l2fwd.c Project: mitra/odp-mppa
/**
 * Packet IO worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	odp_event_t  ev_tbl[MAX_PKT_BURST];
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	int pkts;
	int thr;
	uint64_t wait;
	int dst_idx;
	odp_pktio_t pktio_dst;
	thread_args_t *thr_args = arg;
	stats_t *stats = thr_args->stats;

	thr = odp_thread_id();

	printf("[%02i] QUEUE mode\n", thr);
	odp_barrier_wait(&barrier);

	wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * 100);

	/* Loop packets */
	while (!exit_threads) {
		int sent, i;
		unsigned tx_drops;

		pkts = odp_schedule_multi(NULL, wait, ev_tbl, MAX_PKT_BURST);

		if (pkts <= 0)
			continue;

		for (i = 0; i < pkts; i++)
			pkt_tbl[i] = odp_packet_from_event(ev_tbl[i]);

		if (gbl_args->appl.error_check) {
			int rx_drops;

			/* Drop packets with errors */
			rx_drops = drop_err_pkts(pkt_tbl, pkts);

			if (odp_unlikely(rx_drops)) {
				stats->s.rx_drops += rx_drops;
				if (pkts == rx_drops)
					continue;

				pkts -= rx_drops;
			}
		}

		/* packets from the same queue are from the same interface */
		dst_idx = lookup_dest_port(pkt_tbl[0]);
		fill_eth_addrs(pkt_tbl, pkts, dst_idx);
		pktio_dst = gbl_args->pktios[dst_idx];

		sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);

		sent     = odp_unlikely(sent < 0) ? 0 : sent;
		tx_drops = pkts - sent;

		if (odp_unlikely(tx_drops)) {
			stats->s.tx_drops += tx_drops;

			/* Drop rejected packets */
			for (i = sent; i < pkts; i++)
				odp_packet_free(pkt_tbl[i]);
		}

		stats->s.packets += pkts;
	}

	/* Make sure that latest stat writes are visible to other threads */
	odp_mb_full();

	return NULL;
}
Code example #12
File: scheduler.c Project: kalray/odp-mppa
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Code example #13
File: scheduler.c Project: kalray/odp-mppa
static void *chaos_thread(void *arg)
{
	uint64_t i, wait;
	int rc;
	chaos_buf *cbuf;
	odp_event_t ev;
	odp_queue_t from;
	thread_args_t *args = (thread_args_t *)arg;
	test_globals_t *globals = args->globals;
	int me = odp_thread_id();

	if (CHAOS_DEBUG)
		printf("Chaos thread %d starting...\n", me);

	/* Wait for all threads to start */
	odp_barrier_wait(&globals->barrier);

	/* Run the test */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		CU_ASSERT_FATAL(cbuf != NULL);
		INVALIDATE(cbuf);
		if (CHAOS_DEBUG)
			printf("Thread %d received event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s, sending to Q %s\n",
			       me, cbuf->evno, cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].name,
			       globals->
			       chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name);

		rc = odp_queue_enq(
			globals->
			chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle,
			ev);
		CU_ASSERT(rc == 0);
	}

	if (CHAOS_DEBUG)
		printf("Thread %d completed %d rounds...terminating\n",
		       odp_thread_id(), CHAOS_NUM_ROUNDS);

	/* Thread complete--drain locally cached scheduled events */
	odp_schedule_pause();

	while (odp_atomic_load_u32(&globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;
		odp_atomic_dec_u32(&globals->chaos_pending_event_count);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Thread %d drained event %" PRIu64
			       " seq %" PRIu64
			       " from Q %s\n",
			       odp_thread_id(), cbuf->evno, cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	return NULL;
}
Code example #14
File: scheduler.c Project: kalray/odp-mppa
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue_grp1, queue_grp2, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < num_sync; i++) {
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp1, ev) == 0)))
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp2, ev) == 0)))
			odp_buffer_free(buf);

		/* Scheduler should give us the event from Group 2 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp2);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC2);

		odp_buffer_free(buf);

		/* Scheduler should not return anything now since we're
		 * not in Group 1 and Queue 2 is empty.  Do this several
		 * times to confirm.
		 */

		for (j = 0; j < 10; j++) {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
		}

		/* Now join group 1 and verify we can get the event */
		rc = odp_schedule_group_join(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* Tell scheduler we're about to request an event.
		 * Not needed, but a convenient place to test this API.
		 */
		odp_schedule_prefetch(1);

		/* Now get the event from Queue 1 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp1);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC1);

		odp_buffer_free(buf);

		/* Leave group 1 for next pass */
		rc = odp_schedule_group_leave(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* We must release order before destroying queues */
		odp_schedule_release_ordered();

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Code example #15
File: main.c Project: FI-Lab/OCC-benchmarks
void* thread_fwd_routine(void *arg)
{
    odp_packet_t pkt_tbl[PACKET_IO_BURST];
    int rv_nb, sd_nb;
    int thr_id;
    int out_port;
    int tuple[5];
    int i;

    thr_id = odp_thread_id();
    printf("fwd thread %d starts (on cpu %d)\n", thr_id, odp_cpu_id());
    // map the worker thread id (starting at 1) to a port index (starting at 0)
    thr_id--;

    memset(&port_stat.stat[thr_id], 0, 3 * sizeof(uint64_t));
    for(;;)
    {
        rv_nb = odp_pktio_recv(thr_data.nic_hdl[thr_id], pkt_tbl,
                PACKET_IO_BURST);
        if(rv_nb < 0)
            continue;   // receive error; keep polling
        port_stat.stat[thr_id].recv += rv_nb;
#ifdef EXECUTE_CLASSIFICATION
        for(i = 0; i < rv_nb; i++)
        {
            if(extract_tuple(pkt_tbl[i], tuple) == 0)
            {
                int res;
                res = packet_classifier_search(tuple);
            }
        }
#endif
#ifdef EXECUTE_HASH_LOOKUP
        for(i = 0; i < rv_nb; i++)
        {
            if(extract_tuple(pkt_tbl[i], tuple) == 0)
            {
                int res;
                res = odph_hash_lookup(hs_tbl, (void*)tuple);
            }
        }
#endif
#ifdef EXECUTE_DPI
        unsigned char *payload;
        int payload_len;
        for(i = 0; i < rv_nb; i++)
        {
            if(get_payload(pkt_tbl[i], (unsigned char**)&payload, &payload_len) == 0)
            {
                int res;
                //printf("%d %d %s\n", thr_id, (int)strlen(payload), payload);
                res = sm_search(sm_hdl, payload, payload_len);
                //printf("search res: %d\n", res);
            }
        }
#endif
        if((thr_id & 1) == 1)
        {
            out_port = thr_id - 1;
        }
        else
        {
            out_port = thr_id + 1 == glb_param.nic.num ? thr_id : thr_id + 1;
        }
        sd_nb = odp_pktio_send(thr_data.nic_hdl[out_port], pkt_tbl, rv_nb);
        port_stat.stat[thr_id].send += sd_nb;
        while(sd_nb < rv_nb)
        {
            odp_packet_free(pkt_tbl[sd_nb++]);
            port_stat.stat[thr_id].drop++;
        }
    }
    return NULL;
}
Code example #16
File: scheduler.c Project: nmorey/odp
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_t queue_grp1, queue_grp2;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;
	odp_schedule_group_info_t info;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Info struct */
	memset(&info, 0, sizeof(odp_schedule_group_info_t));
	rc = odp_schedule_group_info(mygrp1, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		odp_queue_param_t qp;
		odp_queue_t queue, from;
		odp_schedule_group_t mygrp[NUM_GROUPS];
		odp_queue_t queue_grp[NUM_GROUPS];
		int num = NUM_GROUPS;

		odp_queue_param_init(&qp);
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Swap between two groups. Application should serve both
		 * groups to avoid potential head of line blocking in
		 * scheduler. */
		mygrp[0]     = mygrp1;
		mygrp[1]     = mygrp2;
		queue_grp[0] = queue_grp1;
		queue_grp[1] = queue_grp2;
		j = 0;

		/* Ensure that each test run starts from mygrp1 */
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);
		odp_schedule_group_join(mygrp1, &mymask);

		while (num) {
			queue = queue_grp[j];
			ev    = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID) {
				/* change group */
				rc = odp_schedule_group_leave(mygrp[j],
							      &mymask);
				CU_ASSERT_FATAL(rc == 0);

				j = (j + 1) % NUM_GROUPS;
				rc = odp_schedule_group_join(mygrp[j],
							     &mymask);
				CU_ASSERT_FATAL(rc == 0);
				continue;
			}

			CU_ASSERT_FATAL(from == queue);

			buf = odp_buffer_from_event(ev);
			u32 = odp_buffer_addr(buf);

			if (from == queue_grp1) {
				/* CU_ASSERT_FATAL needs these brackets */
				CU_ASSERT_FATAL(u32[0] == MAGIC1);
			} else {
				CU_ASSERT_FATAL(u32[0] == MAGIC2);
			}

			odp_buffer_free(buf);

			/* Tell scheduler we're about to request an event.
			 * Not needed, but a convenient place to test this API.
			 */
			odp_schedule_prefetch(1);

			num--;
		}

		/* Release scheduler context and leave groups */
		odp_schedule_group_join(mygrp1, &mymask);
		odp_schedule_group_join(mygrp2, &mymask);
		CU_ASSERT(exit_schedule_loop() == 0);
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
Code example #17
File: scheduler.c Project: nmorey/odp
static void chaos_run(unsigned int qtype)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	int i, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	const unsigned num_sync = (sizeof(sync) / sizeof(odp_schedule_sync_t));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.type        = ODP_QUEUE_TYPE_SCHED;
	qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qp.sched.group = ODP_SCHED_GROUP_ALL;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		uint32_t ndx = (qtype == num_sync ? i % num_sync : qtype);

		qp.sched.sync = sync[ndx];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[ndx]);

		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name, &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i), 0);
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	drain_queues();
	exit_schedule_loop();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
Code example #18
File: odp_pktio_perf.c Project: nmorey/odp
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static int run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_pktout_queue_t pktout;
	pkt_tx_stats_t *stats;
	odp_time_t cur_time, send_time_end, send_duration;
	odp_time_t burst_gap_end, burst_gap;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_packet_t tx_packet[BATCH_LEN_MAX];
	odp_time_t idle_start = ODP_TIME_NULL;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	if (odp_pktout_queue(globals->pktio_tx, &pktout, 1) != 1)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	burst_gap = odp_time_local_from_ns(
			ODP_TIME_SEC_IN_NS / (targs->pps / targs->batch_len));
	send_duration =
		odp_time_local_from_ns(targs->duration * ODP_TIME_SEC_IN_NS);

	odp_barrier_wait(&globals->tx_barrier);

	cur_time     = odp_time_local();
	send_time_end = odp_time_sum(cur_time, send_duration);
	burst_gap_end = cur_time;
	while (odp_time_cmp(send_time_end, cur_time) > 0) {
		unsigned alloc_cnt = 0, tx_cnt;

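		/* Not yet time for the next burst: spin, noting when the
		 * idle period began so it can be accounted below */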
		if (odp_time_cmp(burst_gap_end, cur_time) > 0) {
			cur_time = odp_time_local();
			if (!odp_time_cmp(idle_start, ODP_TIME_NULL))
				idle_start = cur_time;
			continue;
		}

		if (odp_time_cmp(idle_start, ODP_TIME_NULL) > 0) {
			odp_time_t diff = odp_time_diff(cur_time, idle_start);

			stats->s.idle_ticks =
				odp_time_sum(diff, stats->s.idle_ticks);

			idle_start = ODP_TIME_NULL;
		}

		burst_gap_end = odp_time_sum(burst_gap_end, burst_gap);

		alloc_cnt = alloc_packets(tx_packet, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(pktout, tx_packet, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_time = odp_time_local();
	}

	VPRINT(" %02d: TxPkts %-8" PRIu64 " EnqFail %-6" PRIu64
	       " AllocFail %-6" PRIu64 " Idle %" PRIu64 "ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_to_ns(stats->s.idle_ticks) /
	       (uint64_t)ODP_TIME_MSEC_IN_NS);

	return 0;
}
Code example #19
/**
 * Packet IO loopback worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_queue_t outq_def;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];
	odp_queue_param_t qparam;
	odp_packet_t pkt;
	odp_buffer_t buf;
	int ret;
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed\n", thr);
		return NULL;
	}

	/*
	 * Create and set the default INPUT queue associated with the 'pktio'
	 * resource
	 */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
	snprintf(inq_name, sizeof(inq_name), "%i-pktio_inq_def", (int)pktio);
	inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';

	inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
	if (inq_def == ODP_QUEUE_INVALID) {
		ODP_ERR("  [%02i] Error: pktio queue creation failed\n", thr);
		return NULL;
	}

	ret = odp_pktio_inq_setdef(pktio, inq_def);
	if (ret != 0) {
		ODP_ERR("  [%02i] Error: default input-Q setup\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, queue mode (ATOMIC queues)\n"
	       "          default pktio%02i-INPUT queue:%u\n",
		thr, pktio, pktio, inq_def);

	/* Loop packets */
	for (;;) {
		odp_pktio_t pktio_tmp;

#if 1
		/* Use schedule to get buf from any input queue */
		buf = odp_schedule(NULL, ODP_SCHED_WAIT);
#else
		/* Always dequeue from the same input queue */
		buf = odp_queue_deq(inq_def);
		if (!odp_buffer_is_valid(buf))
			continue;
#endif

		pkt = odp_packet_from_buffer(buf);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			ODP_ERR("Drop frame - err_cnt:%lu\n", ++err_cnt);
			continue;
		}

		pktio_tmp = odp_pktio_get_input(pkt);
		outq_def = odp_pktio_outq_getdef(pktio_tmp);

		if (outq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("  [%02i] Error: def output-Q query\n", thr);
			return NULL;
		}

		/* Swap Eth MACs and possibly IP-addrs before sending back */
		swap_pkt_addrs(&pkt, 1);

		/* Enqueue the packet for output */
		odp_queue_enq(outq_def, buf);

		/* Print packet counts every once in a while */
		if (odp_unlikely(pkt_cnt++ % 100000 == 0)) {
			printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
			fflush(NULL);
		}
	}

/* unreachable */
}
Code example #20
File: odp_pktio_perf.c Project: rahulgvf/odp-mppa
/*
 * Main packet transmit routine. Transmit packets at a fixed rate for
 * specified length of time.
 */
static void *run_thread_tx(void *arg)
{
	test_globals_t *globals;
	int thr_id;
	odp_queue_t outq;
	pkt_tx_stats_t *stats;
	uint64_t next_tx_cycles, end_cycles, cur_cycles;
	uint64_t burst_gap_cycles;
	uint32_t batch_len;
	int unsent_pkts = 0;
	odp_event_t  tx_event[BATCH_LEN_MAX];
	uint64_t idle_start = 0;

	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));
	stats = &globals->tx_stats[thr_id];

	outq = odp_pktio_outq_getdef(globals->pktio_tx);
	if (outq == ODP_QUEUE_INVALID)
		LOG_ABORT("Failed to get output queue for thread %d\n", thr_id);

	burst_gap_cycles = odp_time_ns_to_cycles(
		(ODP_TIME_SEC * 999) / (1000 * targs->pps / targs->batch_len));

	odp_barrier_wait(&globals->tx_barrier);

	cur_cycles     = odp_time_cycles();
	next_tx_cycles = cur_cycles;
	end_cycles     = cur_cycles +
			 odp_time_ns_to_cycles(targs->duration * ODP_TIME_SEC);

	while (cur_cycles < end_cycles) {
		unsigned alloc_cnt = 0, tx_cnt;

		if (cur_cycles < next_tx_cycles) {
			cur_cycles = odp_time_cycles();
			if (idle_start == 0)
				idle_start = cur_cycles;
			continue;
		}

		if (idle_start) {
			stats->s.idle_cycles += odp_time_diff_cycles(
							idle_start, cur_cycles);
			idle_start = 0;
		}

		next_tx_cycles += burst_gap_cycles;

		alloc_cnt = alloc_packets(tx_event, batch_len - unsent_pkts);
		if (alloc_cnt != batch_len)
			stats->s.alloc_failures++;

		tx_cnt = send_packets(outq, tx_event, alloc_cnt);
		unsent_pkts = alloc_cnt - tx_cnt;
		stats->s.enq_failures += unsent_pkts;
		stats->s.tx_cnt += tx_cnt;

		cur_cycles = odp_time_cycles();
	}

	VPRINT(" %02d: TxPkts %-8"PRIu64" EnqFail %-6"PRIu64
	       " AllocFail %-6"PRIu64" Idle %"PRIu64"ms\n",
	       thr_id, stats->s.tx_cnt,
	       stats->s.enq_failures, stats->s.alloc_failures,
	       odp_time_cycles_to_ns(stats->s.idle_cycles)/1000/1000);

	return NULL;
}
Code example #21
File: odp_pool.c Project: guanhe0/packages
int odp_pool_init_local(void)
{
	local_id = odp_thread_id();
	return 0;
}
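odp_pool_init_local() above simply caches the calling thread's id, so the pool's fast path can read a plain variable instead of calling into the thread layer each time. A hypothetical sketch of the same pattern (the names local_id, my_pool_init_local and my_local_id are illustrative, not ODP API):

#include <odp.h>

/* Per-thread cache of the ODP thread id (GCC __thread storage) */
static __thread int local_id;

int my_pool_init_local(void)
{
	local_id = odp_thread_id();
	return 0;
}

/* Fast-path accessor: no ODP call, just a thread-local read */
static inline int my_local_id(void)
{
	return local_id;
}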
Code example #22
static void *test_ring(void *arg)
{
	ring_arg_t *parg = (ring_arg_t *)arg;
	int thr;
	char ring_name[ODP_RING_NAMESIZE];
	odp_ring_t *r;
	int result = 0;

	thr = odp_thread_id();

	printf("Thread %i starts\n", thr);

	switch (parg->thrdarg.testcase) {
	case ODP_RING_TEST_BASIC:
		snprintf(ring_name, sizeof(ring_name), "test_ring_%i", thr);

		r = odp_ring_create(ring_name, RING_SIZE,
				    0 /* not used; alignment is handled
					 inside the function (todo) */);
		if (r == NULL) {
			ODP_ERR("ring create failed\n");
			result = -1;
			break;
		}
		/* lookup ring from its name */
		if (odp_ring_lookup(ring_name) != r) {
			ODP_ERR("ring lookup failed\n");
			result = -1;
			break;
		}

		/* basic operations */
		if (test_ring_basic(r) < 0) {
			ODP_ERR("ring basic enqueue/dequeu ops failed\n");
			result = -1;
		}

		/* dump ring stats */
		odp_ring_list_dump();

		break;

	case ODP_RING_TEST_STRESS:
		test_ring_stress(parg->stress_type);

		/* dump ring stats */
		odp_ring_list_dump();
		break;

	default:
		ODP_ERR("Invalid test case [%d]\n", parg->thrdarg.testcase);
		result = -1;
		break;
	}

	ODP_DBG("result = %d\n", result);
	if (result == 0)
		printf("test_ring Result:pass\n");
	else
		printf("test_ring Result:fail\n");

	fflush(stdout);

	return parg;
}