Example #1
static int test_init(void)
{
	odp_pool_param_t params;
	odp_queue_param_t qparam;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];

	memset(&params, 0, sizeof(params));
	params.pkt.len     = PKT_HDR_LEN + gbl_args->args.pkt_len;
	params.pkt.seg_len = params.pkt.len;
	params.pkt.num     = PKT_BUF_NUM;
	params.type        = ODP_POOL_PACKET;

	transmit_pkt_pool = odp_pool_create("pkt_pool_transmit",
						  ODP_SHM_NULL, &params);
	if (transmit_pkt_pool == ODP_POOL_INVALID)
		LOG_ABORT("Failed to create transmit pool\n");

	odp_atomic_init_u32(&ip_seq, 0);
	odp_atomic_init_u32(&shutdown, 0);

	/* create pktios and associate input/output queues */
	gbl_args->pktio_tx = create_pktio(gbl_args->args.ifaces[0]);
	if (gbl_args->args.num_ifaces > 1)
		gbl_args->pktio_rx = create_pktio(gbl_args->args.ifaces[1]);
	else
		gbl_args->pktio_rx = gbl_args->pktio_tx;

	if (gbl_args->pktio_rx == ODP_PKTIO_INVALID ||
	    gbl_args->pktio_tx == ODP_PKTIO_INVALID) {
		LOG_ERR("failed to open pktio\n");
		return -1;
	}

	/* create and associate an input queue for the RX side */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_NONE;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;

	snprintf(inq_name, sizeof(inq_name), "inq-pktio-%" PRIu64,
		 odp_pktio_to_u64(gbl_args->pktio_rx));
	inq_def = odp_queue_lookup(inq_name);
	if (inq_def == ODP_QUEUE_INVALID)
		inq_def = odp_queue_create(inq_name,
				ODP_QUEUE_TYPE_PKTIN, &qparam);

	if (inq_def == ODP_QUEUE_INVALID)
		return -1;

	if (odp_pktio_inq_setdef(gbl_args->pktio_rx, inq_def) != 0)
		return -1;

	return 0;
}
Example #2
int main(int argc, char *argv[])
{
	struct sigaction signal_action;
	struct rlimit    rlimit;
	uint32_t pkts_into_tm, pkts_from_tm;
	odp_instance_t instance;
	int rc;

	memset(&signal_action, 0, sizeof(signal_action));
	signal_action.sa_handler = signal_handler;
	sigfillset(&signal_action.sa_mask);
	sigaction(SIGILL,  &signal_action, NULL);
	sigaction(SIGFPE,  &signal_action, NULL);
	sigaction(SIGSEGV, &signal_action, NULL);
	sigaction(SIGTERM, &signal_action, NULL);
	sigaction(SIGBUS,  &signal_action, NULL);

	getrlimit(RLIMIT_CORE, &rlimit);
	rlimit.rlim_cur = rlimit.rlim_max;
	setrlimit(RLIMIT_CORE, &rlimit);

	rc = odp_init_global(&instance, &ODP_INIT_PARAMS, NULL);
	if (rc != 0) {
		printf("Error: odp_init_global() failed, rc = %d\n", rc);
		abort();
	}
	rc = odp_init_local(instance, ODP_THREAD_CONTROL);
	if (rc != 0) {
		printf("Error: odp_init_local() failed, rc = %d\n", rc);
		abort();
	}

	if (process_cmd_line_options(argc, argv) < 0)
		return -1;

	create_and_config_tm();

	odp_random_data(random_buf, RANDOM_BUF_LEN, 1);
	next_rand_byte = 0;

	odp_atomic_init_u32(&atomic_pkts_into_tm, 0);
	odp_atomic_init_u32(&atomic_pkts_from_tm, 0);

	traffic_generator(g_num_pkts_to_send);

	pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
	pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
	printf("pkts_into_tm=%u pkts_from_tm=%u\n", pkts_into_tm, pkts_from_tm);

	odp_tm_stats_print(odp_tm_test);
	return 0;
}
Example #3
int odp_pool_init_global(void)
{
	uint32_t i;
	odp_shm_t shm;

	shm = odp_shm_reserve(SHM_DEFAULT_NAME,
			      sizeof(pool_table_t),
			      sizeof(pool_entry_t), 0);

	pool_tbl = odp_shm_addr(shm);

	if (pool_tbl == NULL)
		return -1;

	memset(pool_tbl, 0, sizeof(pool_table_t));

	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		/* init locks */
		pool_entry_t *pool = &pool_tbl->pool[i];

		POOL_LOCK_INIT(&pool->s.lock);
		POOL_LOCK_INIT(&pool->s.buf_lock);
		POOL_LOCK_INIT(&pool->s.blk_lock);
		pool->s.pool_hdl = pool_index_to_handle(i);
		pool->s.pool_id = i;
		pool_entry_ptr[i] = pool;
		odp_atomic_init_u32(&pool->s.bufcount, 0);
		odp_atomic_init_u32(&pool->s.blkcount, 0);

		/* Initialize pool statistics counters */
		odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0);
		odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0);
		odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0);
		odp_atomic_init_u64(&pool->s.poolstats.high_wm_count, 0);
		odp_atomic_init_u64(&pool->s.poolstats.low_wm_count, 0);
	}

	ODP_DBG("\nPool init global\n");
	ODP_DBG(" pool_entry_s size     %zu\n", sizeof(struct pool_entry_s));
	ODP_DBG(" pool_entry_t size     %zu\n", sizeof(pool_entry_t));
	ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
	ODP_DBG("\n");
	return 0;
}
Example #4
int classification_suite_init(void)
{
	odp_pool_t pool;
	odp_pool_param_t param;
	odp_queue_t inq_def;
	odp_queue_param_t qparam;
	char queuename[ODP_QUEUE_NAME_LEN];
	int i;
	int ret;

	memset(&param, 0, sizeof(param));
	param.pkt.seg_len = SHM_PKT_BUF_SIZE;
	param.pkt.len     = SHM_PKT_BUF_SIZE;
	param.pkt.num     = SHM_PKT_NUM_BUFS;
	param.type        = ODP_POOL_PACKET;

	pool = odp_pool_create("classification_pool", &param);
	if (ODP_POOL_INVALID == pool) {
		fprintf(stderr, "Packet pool creation failed.\n");
		return -1;
	}

	pool_default = odp_pool_lookup("classification_pool");
	if (pool_default == ODP_POOL_INVALID)
		return -1;

	pktio_loop = odp_pktio_open("loop", pool_default);
	if (pktio_loop == ODP_PKTIO_INVALID) {
		ret = odp_pool_destroy(pool_default);
		if (ret)
			fprintf(stderr, "unable to destroy pool.\n");
		return -1;
	}
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;

	sprintf(queuename, "%s", "inq_loop");
	inq_def = odp_queue_create(queuename,
			ODP_QUEUE_TYPE_PKTIN, &qparam);
	odp_pktio_inq_setdef(pktio_loop, inq_def);

	for (i = 0; i < CLS_ENTRIES; i++)
		cos_list[i] = ODP_COS_INVALID;

	for (i = 0; i < CLS_ENTRIES; i++)
		pmr_list[i] = ODP_PMR_INVAL;

	for (i = 0; i < CLS_ENTRIES; i++)
		queue_list[i] = ODP_QUEUE_INVALID;

	odp_atomic_init_u32(&seq, 0);
	return 0;
}
Example #5
File: pktio.c  Project: kalray/odp-mppa
static int pktio_suite_init(void)
{
	int i;

	odp_atomic_init_u32(&ip_seq, 0);

	if (getenv("ODP_WAIT_FOR_NETWORK"))
		wait_for_network = true;

	iface_name[0] = getenv("ODP_PKTIO_IF0");
	iface_name[1] = getenv("ODP_PKTIO_IF1");
	num_ifaces = 1;

	if (!iface_name[0]) {
		printf("No interfaces specified, using default \"loop\".\n");
		iface_name[0] = "loop";
	} else if (!iface_name[1]) {
		printf("Using loopback interface: %s\n", iface_name[0]);
	} else {
		num_ifaces = 2;
		printf("Using paired interfaces: %s %s\n",
		       iface_name[0], iface_name[1]);
	}

	for (i = 0; i < num_ifaces; i++) {
		if (create_pool(iface_name[i], i) != 0)
			return -1;
	}

	if (default_pool_create() != 0) {
		fprintf(stderr, "error: failed to create default pool\n");
		return -1;
	}

	return 0;
}
Example #6
odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
{
	odp_pool_t pool_hdl = ODP_POOL_INVALID;
	pool_entry_t *pool;
	uint32_t i, headroom = 0, tailroom = 0;
	odp_shm_t shm;

	if (params == NULL)
		return ODP_POOL_INVALID;

	/* Default size and align for timeouts */
	if (params->type == ODP_POOL_TIMEOUT) {
		params->buf.size  = 0; /* tmo.__res1 */
		params->buf.align = 0; /* tmo.__res2 */
	}

	/* Default initialization parameters */
	uint32_t p_udata_size = 0;
	uint32_t udata_stride = 0;

	/* Restriction for v1.0: All non-packet buffers are unsegmented */
	int unseg = 1;

	/* Restriction for v1.0: No zeroization support */
	const int zeroized = 0;

	uint32_t blk_size, buf_stride, buf_num, seg_len = 0;
	uint32_t buf_align =
		params->type == ODP_POOL_BUFFER ? params->buf.align : 0;

	/* Validate requested buffer alignment */
	if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
	    buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align))
		return ODP_POOL_INVALID;

	/* Set correct alignment based on input request */
	if (buf_align == 0)
		buf_align = ODP_CACHE_LINE_SIZE;
	else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
		buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;

	/* Calculate space needed for buffer blocks and metadata */
	switch (params->type) {
	case ODP_POOL_BUFFER:
		buf_num  = params->buf.num;
		blk_size = params->buf.size;

		/* Optimize small raw buffers */
		if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0)
			blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align);

		buf_stride = sizeof(odp_buffer_hdr_stride);
		break;

	case ODP_POOL_PACKET:
		unseg = 0; /* Packets are always segmented */
		headroom = ODP_CONFIG_PACKET_HEADROOM;
		tailroom = ODP_CONFIG_PACKET_TAILROOM;

	buf_num = params->pkt.num + 1; /* one extra buffer for the pkt_ctx */


		seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ?
			ODP_CONFIG_PACKET_SEG_LEN_MIN :
			(params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ?
			 params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX);

		seg_len = ODP_ALIGN_ROUNDUP(
			headroom + seg_len + tailroom,
			ODP_CONFIG_BUFFER_ALIGN_MIN);

		blk_size = params->pkt.len <= seg_len ? seg_len :
			ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);

		/* Reject create if pkt.len needs too many segments */
		if (blk_size / seg_len > ODP_BUFFER_MAX_SEG)
			return ODP_POOL_INVALID;

		p_udata_size = params->pkt.uarea_size;
		udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size,
						 sizeof(uint64_t));

		buf_stride = sizeof(odp_packet_hdr_stride);
		break;

	case ODP_POOL_TIMEOUT:
		blk_size = 0;
		buf_num = params->tmo.num;
		buf_stride = sizeof(odp_timeout_hdr_stride);
		break;

	default:
		return ODP_POOL_INVALID;
	}

	/* Validate requested number of buffers against addressable limits */
	if (buf_num >
	    (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
		return ODP_POOL_INVALID;

	/* Find an unused buffer pool slot and initialize it as requested */
	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		pool = get_pool_entry(i);

		POOL_LOCK(&pool->s.lock);
		if (pool->s.pool_shm != ODP_SHM_INVALID) {
			POOL_UNLOCK(&pool->s.lock);
			continue;
		}

		/* found free pool */
		size_t block_size, pad_size, mdata_size, udata_size;

		pool->s.flags.all = 0;

		if (name == NULL) {
			pool->s.name[0] = 0;
		} else {
			strncpy(pool->s.name, name,
				ODP_POOL_NAME_LEN - 1);
			pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
			pool->s.flags.has_name = 1;
		}

		pool->s.params = *params;
		pool->s.buf_align = buf_align;

		/* Optimize for short buffers: Data stored in buffer hdr */
		if (blk_size <= ODP_MAX_INLINE_BUF) {
			block_size = 0;
			pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *);
		} else {
			/* extra space per block stores a back-pointer to the buffer hdr */
			block_size	 = buf_num * (blk_size +
				ODP_HDR_BACK_PTR_SIZE);
			pool->s.buf_align = buf_align;
		}

		pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
		mdata_size = buf_num * buf_stride;
		udata_size = buf_num * udata_stride;

		pool->s.buf_num  = buf_num;
		pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
							  pad_size +
							  mdata_size +
							  udata_size);

		shm = odp_shm_reserve(pool->s.name, pool->s.pool_size,
			ODP_PAGE_SIZE, ODP_SHM_MONOPOLIZE_CNTNUS_PHY);
		if (shm == ODP_SHM_INVALID) {
			POOL_UNLOCK(&pool->s.lock);
			return ODP_POOL_INVALID;
		}
		pool->s.pool_base_addr = odp_shm_addr(shm);
		pool->s.pool_shm = shm;

		/* Now safe to unlock since pool entry has been allocated */
		POOL_UNLOCK(&pool->s.lock);

		pool->s.flags.unsegmented = unseg;
		pool->s.flags.zeroized = zeroized;
		pool->s.seg_size = unseg ? blk_size : seg_len;
		pool->s.blk_size = blk_size;

		uint8_t *block_base_addr = pool->s.pool_base_addr;
		uint8_t *mdata_base_addr =
			block_base_addr + block_size + pad_size;
		uint8_t *udata_base_addr = mdata_base_addr + mdata_size;

		uint64_t pool_base_phy = odp_v2p(pool->s.pool_base_addr);

		pool->s.v_p_offset = (uint64_t)pool->s.pool_base_addr -
				pool_base_phy;

		/* Pool mdata addr is used for indexing buffer metadata */
		pool->s.pool_mdata_addr = mdata_base_addr;
		pool->s.udata_size = p_udata_size;

		pool->s.buf_stride = buf_stride;
		pool->s.buf_freelist = NULL;
		pool->s.blk_freelist = NULL;

		/* Initialization will increment these to their target vals */
		odp_atomic_store_u32(&pool->s.bufcount, 0);
		odp_atomic_store_u32(&pool->s.blkcount, 0);

		uint8_t *buf = udata_base_addr - buf_stride;
		uint8_t *udat = udata_stride == 0 ? NULL :
			udata_base_addr + udata_size - udata_stride;

		/* Init buffer common header and add to pool buffer freelist */
		do {
			odp_buffer_hdr_t *tmp =
				(odp_buffer_hdr_t *)(void *)buf;

			/* Initialize buffer metadata */
			tmp->allocator = ODP_FREEBUF;
			tmp->flags.all = 0;
			tmp->flags.zeroized = zeroized;
			tmp->size = 0;
			odp_atomic_init_u32(&tmp->ref_count, 0);
			tmp->type = params->type;
			tmp->event_type = params->type;
			tmp->pool_hdl = pool->s.pool_hdl;
			tmp->uarea_addr = (void *)udat;
			tmp->uarea_size = p_udata_size;
			tmp->segcount = 0;
			tmp->segsize = pool->s.seg_size;
			tmp->handle.handle = odp_buffer_encode_handle(tmp);

			/* Set 1st seg addr for zero-len buffers */
			tmp->addr[0] = NULL;

			/* Special case for short buffer data */
			if (blk_size <= ODP_MAX_INLINE_BUF) {
				tmp->flags.hdrdata = 1;
				if (blk_size > 0) {
					tmp->segcount = 1;
					tmp->addr[0] = &tmp->addr[1];
					tmp->size = blk_size;
				}
			}

			/* Push buffer onto pool's freelist */
			ret_buf(&pool->s, tmp);
			buf  -= buf_stride;
			udat -= udata_stride;
		} while (buf >= mdata_base_addr);

		/* Make sure the block area divides into segments aligned to
		 * 8 bytes, as odp_packet_seg_t refers to both an address and
		 * a segment count; round seg_size up to a multiple of 8.
		 */
		pool->s.seg_size = ODP_ALIGN_ROUNDUP(pool->s.seg_size,
							sizeof(uint64_t));
		/* Form block freelist for pool */
		uint8_t *blk =
			block_base_addr + block_size - pool->s.seg_size -
						ODP_HDR_BACK_PTR_SIZE;
		if (blk_size > ODP_MAX_INLINE_BUF)
			do {
				ret_blk(&pool->s, blk + ODP_HDR_BACK_PTR_SIZE);
				blk -= (pool->s.seg_size +
					ODP_HDR_BACK_PTR_SIZE);
			} while (blk >= block_base_addr);

		/* For packet pools, a pre-initialized packet header template
		 * is cached in the pool entry.
		 */
		if (params->type == ODP_POOL_PACKET) {
			odp_buffer_hdr_t *bh = get_buf(&pool->s);
			uint8_t *pkt_ctx = ((uint8_t *)bh +
				ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr));

			memset(pkt_ctx, 0,  sizeof(odp_packet_hdr_t) -
				ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr));
			((odp_packet_hdr_t *)bh)->l3_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->l4_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->payload_offset =
				ODP_PACKET_OFFSET_INVALID;
			((odp_packet_hdr_t *)bh)->headroom  = headroom;
			pool->s.cache_pkt_hdr = pkt_ctx;
			pool->s.buf_num -= 1;
		}

		/* Every pool type records a max_size, so alloc only needs to
		 * compare the requested size against it.
		 */
		if (!unseg)
			pool->s.max_size = pool->s.seg_size *
				ODP_BUFFER_MAX_SEG -
				headroom - tailroom;
		else
			pool->s.max_size = pool->s.seg_size;

		/* Initialize pool statistics counters */
		odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0);
		odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0);
		odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0);
		odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0);
		odp_atomic_store_u64(&pool->s.poolstats.high_wm_count, 0);
		odp_atomic_store_u64(&pool->s.poolstats.low_wm_count, 0);

		/* Reset other pool globals to initial state */
		pool->s.low_wm_assert = 0;
		pool->s.quiesced = 0;
		pool->s.headroom = headroom;
		pool->s.tailroom = tailroom;

		pool->s.room_size = headroom + tailroom;


		/* Watermarks are hard-coded for now to control caching */
		pool->s.high_wm = pool->s.buf_num / 2;
		pool->s.low_wm  = pool->s.buf_num / 4;
		pool_hdl = pool->s.pool_hdl;
		break;
	}

	return pool_hdl;
}
Example #7
void test_atomic_init(void)
{
	odp_atomic_init_u32(&a32u, 0);
	odp_atomic_init_u64(&a64u, 0);
}
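
The two counters initialized above are globals shared by the atomic validation tests. As a rough sketch of how a worker thread might exercise them after this init step (the function name and the rounds parameter are assumptions, not part of the original test), each thread could increment both counters, and the main thread would then compare odp_atomic_load_u32(&a32u) and odp_atomic_load_u64(&a64u) against the expected totals:

static void test_atomic_inc_both(uint32_t rounds)
{
	uint32_t i;

	/* Bump both counters; with N threads doing this concurrently the
	 * final values should read N * rounds. */
	for (i = 0; i < rounds; i++) {
		odp_atomic_inc_u32(&a32u);
		odp_atomic_inc_u64(&a64u);
	}
}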
Example #8
static void custom_barrier_init(custom_barrier_t *custom_barrier,
				uint32_t num_threads)
{
	odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
}
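
For context, here is one plausible wait counterpart to custom_barrier_init(), built on the same atomic API; the body is a minimal sketch rather than the validation test's actual code, which may add back-off logic. Each arriving thread decrements wait_cnt and spins until all num_threads threads have arrived:

static void custom_barrier_wait(custom_barrier_t *custom_barrier)
{
	/* Announce arrival */
	odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);

	/* Spin until all threads have checked in */
	while (odp_atomic_load_u32(&custom_barrier->wait_cnt) != 0)
		odp_cpu_pause();
}

Note that this sketch is single-shot: wait_cnt is not re-armed, matching the way the init above takes num_threads only once.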
Example #9
/**
 * Test main function
 */
int main(int argc, char *argv[])
{
	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
	int num_workers;
	odp_queue_t queue;
	uint64_t cycles, ns;
	odp_queue_param_t param;
	odp_pool_param_t params;
	odp_timer_pool_param_t tparams;
	odp_timer_pool_info_t tpinfo;
	odp_cpumask_t cpumask;
	char cpumaskstr[ODP_CPUMASK_STR_SIZE];
	odp_shm_t shm;
	test_globals_t	*gbls;

	printf("\nODP timer example starts\n");

	if (odp_init_global(NULL, NULL)) {
		printf("ODP global init failed.\n");
		return -1;
	}

	/* Init this thread. */
	if (odp_init_local()) {
		printf("ODP local init failed.\n");
		return -1;
	}

	printf("\n");
	printf("ODP system info\n");
	printf("---------------\n");
	printf("ODP API version: %s\n",        odp_version_api_str());
	printf("CPU model:       %s\n",        odp_sys_cpu_model_str());
	printf("CPU freq (hz):   %"PRIu64"\n", odp_sys_cpu_hz());
	printf("Cache line size: %i\n",        odp_sys_cache_line_size());
	printf("Max CPU count:   %i\n",        odp_cpu_count());

	printf("\n");

	/* Reserve memory for test_globals_t from shared mem */
	shm = odp_shm_reserve("shm_test_globals", sizeof(test_globals_t),
			      ODP_CACHE_LINE_SIZE, 0);
	if (ODP_SHM_INVALID == shm) {
		EXAMPLE_ERR("Error: shared mem reserve failed.\n");
		return -1;
	}

	gbls = odp_shm_addr(shm);
	if (NULL == gbls) {
		EXAMPLE_ERR("Error: shared mem alloc failed.\n");
		return -1;
	}
	memset(gbls, 0, sizeof(test_globals_t));

	parse_args(argc, argv, &gbls->args);

	memset(thread_tbl, 0, sizeof(thread_tbl));

	/* Default to system CPU count unless user specified */
	num_workers = MAX_WORKERS;
	if (gbls->args.cpu_count)
		num_workers = gbls->args.cpu_count;

	/*
	 * By default CPU #0 runs Linux kernel background tasks.
	 * Start mapping thread from CPU #1
	 */
	num_workers = odph_linux_cpumask_default(&cpumask, num_workers);
	(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));

	printf("num worker threads: %i\n", num_workers);
	printf("first CPU:          %i\n", odp_cpumask_first(&cpumask));
	printf("cpu mask:           %s\n", cpumaskstr);

	printf("resolution:         %i usec\n", gbls->args.resolution_us);
	printf("min timeout:        %i usec\n", gbls->args.min_us);
	printf("max timeout:        %i usec\n", gbls->args.max_us);
	printf("period:             %i usec\n", gbls->args.period_us);
	printf("timeouts:           %i\n", gbls->args.tmo_count);

	/*
	 * Create pool for timeouts
	 */
	params.tmo.num   = NUM_TMOS;
	params.type      = ODP_POOL_TIMEOUT;

	gbls->pool = odp_pool_create("msg_pool", ODP_SHM_NULL, &params);

	if (gbls->pool == ODP_POOL_INVALID) {
		EXAMPLE_ERR("Pool create failed.\n");
		return -1;
	}

	tparams.res_ns = gbls->args.resolution_us*ODP_TIME_USEC;
	tparams.min_tmo = gbls->args.min_us*ODP_TIME_USEC;
	tparams.max_tmo = gbls->args.max_us*ODP_TIME_USEC;
	tparams.num_timers = num_workers; /* One timer per worker */
	tparams.priv = 0; /* Shared */
	tparams.clk_src = ODP_CLOCK_CPU;
	gbls->tp = odp_timer_pool_create("timer_pool", &tparams);
	if (gbls->tp == ODP_TIMER_POOL_INVALID) {
		EXAMPLE_ERR("Timer pool create failed.\n");
		return -1;
	}
	odp_timer_pool_start();

	odp_shm_print_all();
	(void)odp_timer_pool_info(gbls->tp, &tpinfo);
	printf("Timer pool\n");
	printf("----------\n");
	printf("  name: %s\n", tpinfo.name);
	printf("  resolution: %"PRIu64" ns\n", tpinfo.param.res_ns);
	printf("  min tmo: %"PRIu64" ticks\n", tpinfo.param.min_tmo);
	printf("  max tmo: %"PRIu64" ticks\n", tpinfo.param.max_tmo);
	printf("\n");

	/*
	 * Create a queue for timer test
	 */
	memset(&param, 0, sizeof(param));
	param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	param.sched.sync  = ODP_SCHED_SYNC_NONE;
	param.sched.group = ODP_SCHED_GROUP_DEFAULT;

	queue = odp_queue_create("timer_queue", ODP_QUEUE_TYPE_SCHED, &param);

	if (queue == ODP_QUEUE_INVALID) {
		EXAMPLE_ERR("Timer queue create failed.\n");
		return -1;
	}

	printf("CPU freq %"PRIu64" Hz\n", odp_sys_cpu_hz());
	printf("Cycles vs nanoseconds:\n");
	ns = 0;
	cycles = odp_time_ns_to_cycles(ns);

	printf("  %12"PRIu64" ns      ->  %12"PRIu64" cycles\n", ns, cycles);
	printf("  %12"PRIu64" cycles  ->  %12"PRIu64" ns\n", cycles,
	       odp_time_cycles_to_ns(cycles));

	for (ns = 1; ns <= 100*ODP_TIME_SEC; ns *= 10) {
		cycles = odp_time_ns_to_cycles(ns);

		printf("  %12"PRIu64" ns      ->  %12"PRIu64" cycles\n", ns,
		       cycles);
		printf("  %12"PRIu64" cycles  ->  %12"PRIu64" ns\n", cycles,
		       odp_time_cycles_to_ns(cycles));
	}

	printf("\n");

	/* Initialize number of timeouts to receive */
	odp_atomic_init_u32(&gbls->remain, gbls->args.tmo_count * num_workers);

	/* Barrier to sync test case execution */
	odp_barrier_init(&gbls->test_barrier, num_workers);

	/* Create and launch worker threads */
	odph_linux_pthread_create(thread_tbl, &cpumask,
				  run_thread, gbls);

	/* Wait for worker threads to exit */
	odph_linux_pthread_join(thread_tbl, num_workers);

	printf("ODP timer test complete\n\n");

	return 0;
}
Example #10
static int test_init(void)
{
	odp_pool_param_t params;
	const char *iface;
	int schedule;

	odp_pool_param_init(&params);
	params.pkt.len     = PKT_HDR_LEN + gbl_args->args.pkt_len;
	params.pkt.seg_len = params.pkt.len;
	params.pkt.num     = PKT_BUF_NUM;
	params.type        = ODP_POOL_PACKET;

	transmit_pkt_pool = odp_pool_create("pkt_pool_transmit", &params);
	if (transmit_pkt_pool == ODP_POOL_INVALID)
		LOG_ABORT("Failed to create transmit pool\n");

	odp_atomic_init_u32(&ip_seq, 0);
	odp_atomic_init_u32(&shutdown, 0);

	iface    = gbl_args->args.ifaces[0];
	schedule = gbl_args->args.schedule;

	/* create pktios and associate input/output queues */
	gbl_args->pktio_tx = create_pktio(iface, schedule);
	if (gbl_args->args.num_ifaces > 1) {
		iface = gbl_args->args.ifaces[1];
		gbl_args->pktio_rx = create_pktio(iface, schedule);
	} else {
		gbl_args->pktio_rx = gbl_args->pktio_tx;
	}

	odp_pktio_mac_addr(gbl_args->pktio_tx, gbl_args->src_mac,
			   ODPH_ETHADDR_LEN);
	odp_pktio_mac_addr(gbl_args->pktio_rx, gbl_args->dst_mac,
			   ODPH_ETHADDR_LEN);

	if (gbl_args->pktio_rx == ODP_PKTIO_INVALID ||
	    gbl_args->pktio_tx == ODP_PKTIO_INVALID) {
		LOG_ERR("failed to open pktio\n");
		return -1;
	}

	/* Create single queue with default parameters */
	if (odp_pktout_queue_config(gbl_args->pktio_tx, NULL)) {
		LOG_ERR("failed to configure pktio_tx queue\n");
		return -1;
	}

	/* Configure also input side (with defaults) */
	if (odp_pktin_queue_config(gbl_args->pktio_tx, NULL)) {
		LOG_ERR("failed to configure pktio_tx queue\n");
		return -1;
	}

	if (gbl_args->args.num_ifaces > 1) {
		if (odp_pktout_queue_config(gbl_args->pktio_rx, NULL)) {
			LOG_ERR("failed to configure pktio_rx queue\n");
			return -1;
		}

		if (odp_pktin_queue_config(gbl_args->pktio_rx, NULL)) {
			LOG_ERR("failed to configure pktio_rx queue\n");
			return -1;
		}
	}

	if (odp_pktio_start(gbl_args->pktio_tx) != 0)
		return -1;
	if (gbl_args->args.num_ifaces > 1 &&
	    odp_pktio_start(gbl_args->pktio_rx))
		return -1;

	return 0;
}
Example #11
void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
{
	odp_atomic_init_u32(&ticketlock->next_ticket, 0);
	odp_atomic_init_u32(&ticketlock->cur_ticket, 0);
}
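
The two counters form the classic ticket lock: acquire draws a ticket from next_ticket and spins until cur_ticket catches up, and release advances cur_ticket. A simplified sketch using only the plain atomic calls shown in these examples (the helper names are mine, and a production implementation would use the acquire/release variants of these operations):

static void ticketlock_acquire(odp_ticketlock_t *tkl)
{
	/* Draw the next ticket number */
	uint32_t ticket = odp_atomic_fetch_inc_u32(&tkl->next_ticket);

	/* Spin until our ticket is being served */
	while (ticket != odp_atomic_load_u32(&tkl->cur_ticket))
		odp_cpu_pause();
}

static void ticketlock_release(odp_ticketlock_t *tkl)
{
	/* Serve the next waiter in FIFO order */
	odp_atomic_inc_u32(&tkl->cur_ticket);
}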
Example #12
File: odp_rwlock.c  Project: leitao/odp
void odp_rwlock_init(odp_rwlock_t *rwlock)
{
	odp_atomic_init_u32(&rwlock->cnt, 0);
}
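
A single atomic counter can back a reader/writer lock if readers increment cnt and a writer parks a sentinel in it. Purely as a hedged sketch of the reader side (the negative-sentinel convention and the CAS loop are assumptions, not taken from the leitao/odp sources):

static void rwlock_read_acquire(odp_rwlock_t *rwlock)
{
	uint32_t cnt;

	for (;;) {
		cnt = odp_atomic_load_u32(&rwlock->cnt);
		/* Assume a writer marks the lock by making cnt "negative" */
		if ((int32_t)cnt < 0) {
			odp_cpu_pause();
			continue;
		}
		/* Try to register one more reader */
		if (odp_atomic_cas_u32(&rwlock->cnt, &cnt, cnt + 1))
			break;
	}
}

The matching read release would then be a single odp_atomic_sub_u32(&rwlock->cnt, 1).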
Example #13
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}