Example #1
int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
{
	odp_cpumask_t overlap;
	int cpu, i;

	/*
	 * If the user supplied no number, or it is too large, then
	 * attempt to use all CPUs
	 */
	cpu = odp_cpumask_count(&odp_global_data.worker_cpus);
	if (0 == num || cpu < num)
		num = cpu;

	/* build the mask, allocating down from highest numbered CPU */
	odp_cpumask_zero(mask);
	for (cpu = 0, i = CPU_SETSIZE - 1; i >= 0 && cpu < num; --i) {
		if (odp_cpumask_isset(&odp_global_data.worker_cpus, i)) {
			odp_cpumask_set(mask, i);
			cpu++;
		}
	}

	odp_cpumask_and(&overlap, mask, &odp_global_data.control_cpus);
	if (odp_cpumask_count(&overlap))
		ODP_DBG("\n\tWorker CPUs overlap with control CPUs...\n"
			"\tthis will likely have a performance impact on the worker threads.\n");

	return cpu;
}
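A minimal caller sketch (illustrative, not part of the example above): pass 0 to request all available workers, then format the resulting mask for logging with the standard helpers odp_cpumask_to_str() and ODP_CPUMASK_STR_SIZE.

	odp_cpumask_t workers;
	char str[ODP_CPUMASK_STR_SIZE];
	int num;

	num = odp_cpumask_default_worker(&workers, 0); /* 0 = use all */
	if (num < 1)
		return -1; /* no worker CPUs configured */

	odp_cpumask_to_str(&workers, str, sizeof(str));
	printf("num workers: %d, mask: %s\n", num, str);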
Example #2
void cpumask_test_odp_cpumask_def(void)
{
	unsigned mask_count;
	unsigned num_worker;
	unsigned num_control;
	unsigned max_cpus = mask_capacity();
	unsigned available_cpus = odp_cpu_count();
	unsigned requested_cpus;
	odp_cpumask_t mask;

	CU_ASSERT(available_cpus <= max_cpus);

	if (available_cpus > 1)
		requested_cpus = available_cpus - 1;
	else
		requested_cpus = available_cpus;
	num_worker = odp_cpumask_def_worker(&mask, requested_cpus);
	mask_count = odp_cpumask_count(&mask);
	CU_ASSERT(mask_count == num_worker);

	num_control = odp_cpumask_def_control(&mask, 1);
	mask_count = odp_cpumask_count(&mask);
	CU_ASSERT(mask_count == num_control);

	CU_ASSERT(num_control == 1);
	CU_ASSERT(num_worker <= available_cpus);
	CU_ASSERT(num_worker > 0);
}
Example #3
/*
 * Run a single instance of the throughput test. When attempting to determine
 * the maximum packet rate this will be invoked multiple times with the only
 * difference between runs being the target PPS rate.
 */
static int run_test_single(odp_cpumask_t *thd_mask_tx,
			   odp_cpumask_t *thd_mask_rx,
			   test_status_t *status)
{
	odph_odpthread_t thd_tbl[MAX_WORKERS];
	thread_args_t args_tx, args_rx;
	uint64_t expected_tx_cnt;
	int num_tx_workers, num_rx_workers;
	odph_odpthread_params_t thr_params;

	memset(&thr_params, 0, sizeof(thr_params));
	thr_params.thr_type = ODP_THREAD_WORKER;
	thr_params.instance = gbl_args->instance;

	odp_atomic_store_u32(&shutdown, 0);

	memset(thd_tbl, 0, sizeof(thd_tbl));
	memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
	memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size);

	expected_tx_cnt = status->pps_curr * gbl_args->args.duration;

	/* start receiver threads first */
	thr_params.start  = run_thread_rx;
	thr_params.arg    = &args_rx;
	args_rx.batch_len = gbl_args->args.rx_batch_len;
	odph_odpthreads_create(&thd_tbl[0], thd_mask_rx, &thr_params);
	odp_barrier_wait(&gbl_args->rx_barrier);
	num_rx_workers = odp_cpumask_count(thd_mask_rx);

	/* then start transmitters */
	thr_params.start  = run_thread_tx;
	thr_params.arg    = &args_tx;
	num_tx_workers    = odp_cpumask_count(thd_mask_tx);
	args_tx.pps       = status->pps_curr / num_tx_workers;
	args_tx.duration  = gbl_args->args.duration;
	args_tx.batch_len = gbl_args->args.tx_batch_len;
	odph_odpthreads_create(&thd_tbl[num_rx_workers], thd_mask_tx,
			       &thr_params);
	odp_barrier_wait(&gbl_args->tx_barrier);

	/* wait for transmitter threads to terminate */
	odph_odpthreads_join(&thd_tbl[num_rx_workers]);

	/* delay to allow transmitted packets to reach the receivers */
	odp_time_wait_ns(SHUTDOWN_DELAY_NS);

	/* indicate to the receivers to exit */
	odp_atomic_store_u32(&shutdown, 1);

	/* wait for receivers */
	odph_odpthreads_join(&thd_tbl[0]);

	if (!status->warmup)
		return process_results(expected_tx_cnt, status);

	return 1;
}
Example #4
static int run_test(void)
{
	int ret = 1;
	int i;
	odp_cpumask_t txmask, rxmask;
	test_status_t status = {
		.pps_curr = gbl_args->args.pps,
		.pps_pass = 0,
		.pps_fail = 0,
	};

	if (setup_txrx_masks(&txmask, &rxmask) != 0)
		return -1;

	printf("Starting test with params:\n");
	printf("\tTransmit workers:     \t%d\n", odp_cpumask_count(&txmask));
	printf("\tReceive workers:      \t%d\n", odp_cpumask_count(&rxmask));
	printf("\tDuration (seconds):   \t%d\n", gbl_args->args.duration);
	printf("\tTransmit batch length:\t%" PRIu32 "\n",
	       gbl_args->args.tx_batch_len);
	printf("\tReceive batch length: \t%" PRIu32 "\n",
	       gbl_args->args.rx_batch_len);
	printf("\tPacket receive method:\t%s\n",
	       gbl_args->args.schedule ? "schedule" : "poll");
	printf("\tInterface(s):         \t");
	for (i = 0; i < gbl_args->args.num_ifaces; ++i)
		printf("%s ", gbl_args->args.ifaces[i]);
	printf("\n");

	while (ret > 0)
		ret = run_test_single(&txmask, &rxmask, &status);

	return ret;
}

static odp_pktio_t create_pktio(const char *iface)
{
	odp_pool_t pool;
	odp_pktio_t pktio;
	char pool_name[ODP_POOL_NAME_LEN];
	odp_pool_param_t params;

	memset(&params, 0, sizeof(params));
	params.pkt.len     = PKT_HDR_LEN + gbl_args->args.pkt_len;
	params.pkt.seg_len = params.pkt.len;
	params.pkt.num     = PKT_BUF_NUM;
	params.type        = ODP_POOL_PACKET;

	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
	if (pool == ODP_POOL_INVALID)
		return ODP_PKTIO_INVALID;

	pktio = odp_pktio_open(iface, pool);

	return pktio;
}
Example #5
/*
 * Run a single instance of the throughput test. When attempting to determine
 * the maximum packet rate this will be invoked multiple times with the only
 * difference between runs being the target PPS rate.
 */
static int run_test_single(odp_cpumask_t *thd_mask_tx,
			   odp_cpumask_t *thd_mask_rx,
			   test_status_t *status)
{
	odph_linux_pthread_t thd_tbl[MAX_WORKERS];
	thread_args_t args_tx, args_rx;
	uint64_t expected_tx_cnt;
	int num_tx_workers, num_rx_workers;

	odp_atomic_store_u32(&shutdown, 0);

	memset(thd_tbl, 0, sizeof(thd_tbl));
	memset(&gbl_args->rx_stats, 0, sizeof(gbl_args->rx_stats));
	memset(&gbl_args->tx_stats, 0, sizeof(gbl_args->tx_stats));

	expected_tx_cnt = status->pps_curr * gbl_args->args.duration;

	/* start receiver threads first */
	args_rx.batch_len = gbl_args->args.rx_batch_len;
	odph_linux_pthread_create(&thd_tbl[0], thd_mask_rx,
				  run_thread_rx, &args_rx);
	odp_barrier_wait(&gbl_args->rx_barrier);
	num_rx_workers = odp_cpumask_count(thd_mask_rx);

	/* then start transmitters */
	num_tx_workers    = odp_cpumask_count(thd_mask_tx);
	args_tx.pps       = status->pps_curr / num_tx_workers;
	args_tx.duration  = gbl_args->args.duration;
	args_tx.batch_len = gbl_args->args.tx_batch_len;
	odph_linux_pthread_create(&thd_tbl[num_rx_workers], thd_mask_tx,
				  run_thread_tx, &args_tx);
	odp_barrier_wait(&gbl_args->tx_barrier);

	/* wait for transmitter threads to terminate */
	odph_linux_pthread_join(&thd_tbl[num_rx_workers],
				num_tx_workers);

	/* delay to allow transmitted packets to reach the receivers */
	busy_loop_ns(SHUTDOWN_DELAY_NS);

	/* indicate to the receivers to exit */
	odp_atomic_store_u32(&shutdown, 1);

	/* wait for receivers */
	odph_linux_pthread_join(&thd_tbl[0], num_rx_workers);

	return process_results(expected_tx_cnt, status);
}
Example #6
int odp_cpumask_all_available(odp_cpumask_t *mask)
{
	odp_cpumask_or(mask, &odp_global_data.worker_cpus,
		       &odp_global_data.control_cpus);

	return odp_cpumask_count(mask);
}
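Because the available mask is built as the union of the worker and control sets, the default worker mask is always contained in it. A small sanity-check sketch (assumes ODP has been initialised):

	odp_cpumask_t all, workers, common;

	odp_cpumask_all_available(&all);
	odp_cpumask_default_worker(&workers, 0);

	/* every default worker CPU must appear in the available set */
	odp_cpumask_and(&common, &all, &workers);
	if (odp_cpumask_count(&common) != odp_cpumask_count(&workers))
		printf("unexpected: worker mask not within available mask\n");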
Example #7
File: linux.c Project: guanhe0/packages
int odph_linux_pthread_create(odph_linux_pthread_t *thread_tbl,
			      const odp_cpumask_t *mask,
			      void *(*start_routine)(void *), void *arg,
			      odp_thread_type_t thr_type)
{
	int i;
	int num;
	int cpu_count;
	int cpu;
	int ret;

	num = odp_cpumask_count(mask);

	memset(thread_tbl, 0, num * sizeof(odph_linux_pthread_t));

	cpu_count = odp_cpu_count();

	if (num < 1 || num > cpu_count) {
		ODPH_ERR("Invalid number of threads:%d (%d cores available)\n",
			 num, cpu_count);
		return 0;
	}

	cpu = odp_cpumask_first(mask);
	for (i = 0; i < num; i++) {
		cpu_set_t cpu_set;

		CPU_ZERO(&cpu_set);
		CPU_SET(cpu, &cpu_set);

		pthread_attr_init(&thread_tbl[i].attr);

		thread_tbl[i].cpu = cpu;

		pthread_attr_setaffinity_np(&thread_tbl[i].attr,
					    sizeof(cpu_set_t), &cpu_set);

		thread_tbl[i].start_args = malloc(sizeof(odp_start_args_t));
		if (thread_tbl[i].start_args == NULL)
			ODPH_ABORT("Malloc failed");

		thread_tbl[i].start_args->start_routine = start_routine;
		thread_tbl[i].start_args->arg           = arg;
		thread_tbl[i].start_args->thr_type      = thr_type;

		ret = pthread_create(&thread_tbl[i].thread,
				     &thread_tbl[i].attr,
				     odp_run_start_routine,
				     thread_tbl[i].start_args);
		if (ret != 0) {
			ODPH_ERR("Failed to start thread on cpu #%d\n", cpu);
			free(thread_tbl[i].start_args);
			break;
		}

		cpu = odp_cpumask_next(mask, cpu);
	}

	return i;
}
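A hedged usage sketch for this helper, assuming a hypothetical thread entry point worker_fn of type void *(*)(void *): the return value is the number of threads actually started, and the matching odph_linux_pthread_join() takes that same count.

	odph_linux_pthread_t thd_tbl[MAX_WORKERS]; /* MAX_WORKERS: assumed defined */
	odp_cpumask_t mask;
	int num;

	odp_cpumask_default_worker(&mask, 0);
	num = odph_linux_pthread_create(thd_tbl, &mask, worker_fn, NULL,
					ODP_THREAD_WORKER);
	odph_linux_pthread_join(thd_tbl, num);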
Example #8
static int setup_txrx_masks(odp_cpumask_t *thd_mask_tx,
			    odp_cpumask_t *thd_mask_rx)
{
	odp_cpumask_t cpumask;
	int num_workers, num_tx_workers, num_rx_workers;
	int i, cpu;

	num_workers =
		odp_cpumask_default_worker(&cpumask,
					   gbl_args->args.cpu_count);
	if (num_workers < 2) {
		LOG_ERR("Need at least two cores\n");
		return -1;
	}

	if (gbl_args->args.num_tx_workers) {
		if (gbl_args->args.num_tx_workers > (num_workers - 1)) {
			LOG_ERR("Invalid TX worker count\n");
			return -1;
		}
		num_tx_workers = gbl_args->args.num_tx_workers;
	} else {
		/* default is to split the available cores evenly between TX
		 * and RX workers, favouring TX for an odd core count */
		num_tx_workers = (num_workers + 1) / 2;
	}

	num_rx_workers = odp_cpumask_count(&cpumask) - num_tx_workers;
	odp_cpumask_zero(thd_mask_tx);
	odp_cpumask_zero(thd_mask_rx);

	cpu = odp_cpumask_first(&cpumask);
	for (i = 0; i < num_workers; ++i) {
		if (i < num_rx_workers)
			odp_cpumask_set(thd_mask_rx, cpu);
		else
			odp_cpumask_set(thd_mask_tx, cpu);
		cpu = odp_cpumask_next(&cpumask, cpu);
	}

	num_rx_workers = odp_cpumask_count(thd_mask_rx);

	odp_barrier_init(&gbl_args->rx_barrier, num_rx_workers + 1);
	odp_barrier_init(&gbl_args->tx_barrier, num_tx_workers + 1);

	return 0;
}
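For instance, with five available workers and no explicit TX count, num_tx_workers = (5 + 1) / 2 = 3 and num_rx_workers = 5 - 3 = 2, matching the favour-TX default described in the comment above.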
Example #9
void cpumask_test_odp_cpumask_def_worker(void)
{
	unsigned num;
	unsigned mask_count;
	unsigned max_cpus = mask_capacity();
	odp_cpumask_t mask;

	num = odp_cpumask_def_worker(&mask, ALL_AVAILABLE);
	mask_count = odp_cpumask_count(&mask);

	CU_ASSERT(mask_count == num);
	CU_ASSERT(num > 0);
	CU_ASSERT(num <= max_cpus);
}
Example #10
int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
{
	odp_cpumask_t overlap;
	int cpu, i;

	/*
	 * If there is no user-supplied number, default to one control CPU.
	 */
	if (0 == num) {
		num = 1;
	} else {
		/*
		 * If the user-supplied number is too large, attempt
		 * to use all installed control CPUs
		 */
		cpu = odp_cpumask_count(&odp_global_data.control_cpus);
		if (cpu < num)
			num = cpu;
	}

	/* build the mask, allocating upwards from lowest numbered CPU */
	odp_cpumask_zero(mask);
	for (cpu = 0, i = 0; i < CPU_SETSIZE && cpu < num; i++) {
		if (odp_cpumask_isset(&odp_global_data.control_cpus, i)) {
			odp_cpumask_set(mask, i);
			cpu++;
		}
	}

	odp_cpumask_and(&overlap, mask, &odp_global_data.worker_cpus);
	if (odp_cpumask_count(&overlap))
		ODP_DBG("\n\tControl CPUs overlap with worker CPUs...\n"
			"\tthis will likely have a performance impact on the worker threads.\n");

	return cpu;
}
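By symmetry with Example #1, a short sketch: passing 0 requests the default single control CPU, allocated upwards from the lowest-numbered CPU, while workers are allocated downwards from the highest, which keeps the two sets disjoint when enough CPUs are installed.

	odp_cpumask_t control;
	int num;

	num = odp_cpumask_default_control(&control, 0); /* 0 = default of 1 */
	printf("control CPUs: %d, first: %d\n",
	       num, odp_cpumask_first(&control));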
Example #11
static int run_test(void)
{
	int ret;
	int i;
	odp_cpumask_t txmask, rxmask;
	test_status_t status = {
		.pps_curr = gbl_args->args.pps,
		.pps_pass = 0,
		.pps_fail = 0,
		.warmup = 1,
	};

	ret = setup_txrx_masks(&txmask, &rxmask);
	if (ret)
		return ret;

	printf("Starting test with params:\n");
	printf("\tTransmit workers:     \t%d\n", odp_cpumask_count(&txmask));
	printf("\tReceive workers:      \t%d\n", odp_cpumask_count(&rxmask));
	printf("\tDuration (seconds):   \t%d\n", gbl_args->args.duration);
	printf("\tTransmit batch length:\t%" PRIu32 "\n",
	       gbl_args->args.tx_batch_len);
	printf("\tReceive batch length: \t%" PRIu32 "\n",
	       gbl_args->args.rx_batch_len);
	printf("\tPacket receive method:\t%s\n",
	       gbl_args->args.schedule ? "schedule" : "plain");
	printf("\tInterface(s):         \t");
	for (i = 0; i < gbl_args->args.num_ifaces; ++i)
		printf("%s ", gbl_args->args.ifaces[i]);
	printf("\n");

	/* first time just run the test but throw away the results */
	run_test_single(&txmask, &rxmask, &status);
	status.warmup = 0;

	while (1) {
		ret = run_test_single(&txmask, &rxmask, &status);
		if (ret <= 0)
			break;
	}

	return ret;
}

static odp_pktio_t create_pktio(const char *iface, int schedule)
{
	odp_pool_t pool;
	odp_pktio_t pktio;
	char pool_name[ODP_POOL_NAME_LEN];
	odp_pool_param_t params;
	odp_pktio_param_t pktio_param;

	odp_pool_param_init(&params);
	params.pkt.len     = PKT_HDR_LEN + gbl_args->args.pkt_len;
	params.pkt.seg_len = params.pkt.len;
	params.pkt.num     = PKT_BUF_NUM;
	params.type        = ODP_POOL_PACKET;

	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
	pool = odp_pool_create(pool_name, &params);
	if (pool == ODP_POOL_INVALID)
		return ODP_PKTIO_INVALID;

	odp_pktio_param_init(&pktio_param);

	if (schedule)
		pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
	else
		pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;

	pktio = odp_pktio_open(iface, pool, &pktio_param);

	return pktio;
}
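Callers of this newer create_pktio() variant are expected to check the handle and start the interface before traffic can flow; a hedged sketch (the interface name is illustrative, odp_pktio_start() is the standard ODP call):

	odp_pktio_t pktio = create_pktio("eth0", 1 /* schedule */);

	if (pktio == ODP_PKTIO_INVALID) {
		LOG_ERR("failed to open pktio\n");
		return -1;
	}
	if (odp_pktio_start(pktio)) {
		LOG_ERR("failed to start pktio\n");
		return -1;
	}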
Example #12
/**
 * Parse and store the command line arguments
 *
 * @param argc       argument count
 * @param argv[]     argument vector
 * @param appl_args  Store application arguments here
 */
static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{
	int opt;
	int long_index;
	char *token;
	size_t len;
	odp_cpumask_t cpumask, cpumask_args, cpumask_and;
	int i, num_workers;
	int remove_header_size = 0;
	static struct option longopts[] = {
		{"interface", required_argument, NULL, 'I'},
		{"workers", required_argument, NULL, 'w'},
		{"cpumask", required_argument, NULL, 'c'},
		{"srcmac", required_argument, NULL, 'a'},
		{"dstmac", required_argument, NULL, 'b'},
		{"srcip", required_argument, NULL, 's'},
		{"dstip", required_argument, NULL, 'd'},
		{"packetsize", required_argument, NULL, 'p'},
		{"mode", required_argument, NULL, 'm'},
		{"count", required_argument, NULL, 'n'},
		{"timeout", required_argument, NULL, 't'},
		{"interval", required_argument, NULL, 'i'},
		{"help", no_argument, NULL, 'h'},
		{NULL, 0, NULL, 0}
	};

	appl_args->mode = -1; /* Invalid, must be changed by parsing */
	appl_args->number = -1;
	appl_args->payload = 56;
	appl_args->timeout = -1;

	while (1) {
		opt = getopt_long(argc, argv, "+I:a:b:s:d:p:P:i:m:n:t:w:c:h",
				  longopts, &long_index);
		if (opt == -1)
			break;	/* No more options */

		switch (opt) {
		case 'w':
			appl_args->cpu_count = atoi(optarg);
			break;
		case 'c':
			appl_args->mask = optarg;
			odp_cpumask_from_str(&cpumask_args, appl_args->mask);
			num_workers = odp_cpumask_default_worker(&cpumask, 0);
			odp_cpumask_and(&cpumask_and, &cpumask_args, &cpumask);
			if (odp_cpumask_count(&cpumask_and) <
			    odp_cpumask_count(&cpumask_args)) {
				EXAMPLE_ERR("Wrong cpu mask, max cpu's:%d\n",
					    num_workers);
				exit(EXIT_FAILURE);
			}
			break;
		/* parse packet-io interface names */
		case 'I':
			len = strlen(optarg);
			if (len == 0) {
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}
			len += 1;	/* add room for '\0' */

			appl_args->if_str = malloc(len);
			if (appl_args->if_str == NULL) {
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}

			/* count the number of tokens separated by ',' */
			strcpy(appl_args->if_str, optarg);
			for (token = strtok(appl_args->if_str, ","), i = 0;
			     token != NULL;
			     token = strtok(NULL, ","), i++)
				;

			appl_args->if_count = i;

			if (appl_args->if_count == 0) {
				usage(argv[0]);
				exit(EXIT_FAILURE);
			}

			/* allocate storage for the if names */
			appl_args->if_names =
			    calloc(appl_args->if_count, sizeof(char *));

			/* store the if names (reset names string) */
			strcpy(appl_args->if_str, optarg);
			for (token = strtok(appl_args->if_str, ","), i = 0;
			     token != NULL; token = strtok(NULL, ","), i++) {
				appl_args->if_names[i] = token;
			}
			break;

		case 'm':
			if (optarg[0] == 'u') {
				appl_args->mode = APPL_MODE_UDP;
			} else if (optarg[0] == 'p') {
				appl_args->mode = APPL_MODE_PING;
			} else if (optarg[0] == 'r') {
				appl_args->mode = APPL_MODE_RCV;
			} else {
				EXAMPLE_ERR("wrong mode!\n");
				exit(EXIT_FAILURE);
			}
			break;

		case 'a':
			if (scan_mac(optarg, &appl_args->srcmac) != 1) {
				EXAMPLE_ERR("wrong src mac:%s\n", optarg);
				exit(EXIT_FAILURE);
			}
			break;

		case 'b':
			if (scan_mac(optarg, &appl_args->dstmac) != 1) {
				EXAMPLE_ERR("wrong dst mac:%s\n", optarg);
				exit(EXIT_FAILURE);
			}
			break;

		case 's':
			if (scan_ip(optarg, &appl_args->srcip) != 1) {
				EXAMPLE_ERR("wrong src ip:%s\n", optarg);
				exit(EXIT_FAILURE);
			}
			break;

		case 'd':
			if (scan_ip(optarg, &appl_args->dstip) != 1) {
				EXAMPLE_ERR("wrong dst ip:%s\n", optarg);
				exit(EXIT_FAILURE);
			}
			break;

		case 'p':
			appl_args->payload = atoi(optarg);
			break;

		case 'P':
			appl_args->payload = atoi(optarg);
			remove_header_size = 1;
			break;

		case 'n':
			appl_args->number = atoi(optarg);
			break;

		case 't':
			appl_args->timeout = atoi(optarg);
			break;

		case 'i':
			appl_args->interval = atoi(optarg);
			break;

		case 'h':
			usage(argv[0]);
			exit(EXIT_SUCCESS);
			break;

		default:
			break;
		}
	}

	if (remove_header_size) {
		int header_size = 0;

		switch (appl_args->mode) {
		case APPL_MODE_UDP:
			header_size = ODPH_UDPHDR_LEN + ODPH_IPV4HDR_LEN +
				      ODPH_ETHHDR_LEN;
			break;
		case APPL_MODE_PING:
			header_size = ODPH_ICMPHDR_LEN + ODPH_IPV4HDR_LEN +
				      ODPH_ETHHDR_LEN;
			break;
		case APPL_MODE_RCV:
			break;
		}
		appl_args->payload -= header_size;
	}

	printf("PAYLOAD = %i\n", appl_args->payload);
	if (appl_args->if_count == 0 || appl_args->mode == -1) {
		usage(argv[0]);
		exit(EXIT_FAILURE);
	}

	optind = 1;		/* reset 'extern optind' from the getopt lib */
}
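As a worked check of the -P adjustment above (assuming the usual helper header sizes ODPH_ETHHDR_LEN = 14, ODPH_IPV4HDR_LEN = 20, ODPH_UDPHDR_LEN = 8): -P 1024 -m u yields payload = 1024 - (8 + 20 + 14) = 982 bytes of UDP payload, whereas -p 1024 leaves the payload at 1024.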
Example #13
/**
 * ODP packet example main function
 */
int main(int argc, char *argv[])
{
	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
	odp_pool_t pool;
	int num_workers;
	int i;
	odp_shm_t shm;
	odp_cpumask_t cpumask;
	char cpumaskstr[ODP_CPUMASK_STR_SIZE];
	odp_pool_param_t params;
	odp_timer_pool_param_t tparams;
	odp_timer_pool_t tp;
	odp_pool_t tmop;

	/* Init ODP before calling anything else */
	if (odp_init_global(NULL, NULL)) {
		EXAMPLE_ERR("Error: ODP global init failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_init_local(ODP_THREAD_CONTROL)) {
		EXAMPLE_ERR("Error: ODP local init failed.\n");
		exit(EXIT_FAILURE);
	}
	my_sleep(1 + __k1_get_cluster_id() / 4);

	/* init counters */
	odp_atomic_init_u64(&counters.seq, 0);
	odp_atomic_init_u64(&counters.ip, 0);
	odp_atomic_init_u64(&counters.udp, 0);
	odp_atomic_init_u64(&counters.icmp, 0);
	odp_atomic_init_u64(&counters.cnt, 0);

	/* Reserve memory for args from shared mem */
	shm = odp_shm_reserve("shm_args", sizeof(args_t),
			      ODP_CACHE_LINE_SIZE, 0);
	args = odp_shm_addr(shm);

	if (args == NULL) {
		EXAMPLE_ERR("Error: shared mem alloc failed.\n");
		exit(EXIT_FAILURE);
	}
	memset(args, 0, sizeof(*args));

	/* Parse and store the application arguments */
	parse_args(argc, argv, &args->appl);

	/* Print both system and application information */
	print_info(NO_PATH(argv[0]), &args->appl);

	/* Default to system CPU count unless user specified */
	num_workers = MAX_WORKERS;
	if (args->appl.cpu_count)
		num_workers = args->appl.cpu_count;

	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
	if (args->appl.mask) {
		odp_cpumask_from_str(&cpumask, args->appl.mask);
		num_workers = odp_cpumask_count(&cpumask);
	}

	(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));

	printf("num worker threads: %i\n", num_workers);
	printf("first CPU:          %i\n", odp_cpumask_first(&cpumask));
	printf("cpu mask:           %s\n", cpumaskstr);

	/* ping mode needs two workers */
	if (args->appl.mode == APPL_MODE_PING) {
		if (num_workers < 2) {
			EXAMPLE_ERR("Need at least two worker threads\n");
			exit(EXIT_FAILURE);
		} else {
			num_workers = 2;
		}
	}

	/* Create packet pool */
	odp_pool_param_init(&params);
	params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.len     = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.num     = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
	params.type        = ODP_POOL_PACKET;

	pool = odp_pool_create("packet_pool", &params);

	if (pool == ODP_POOL_INVALID) {
		EXAMPLE_ERR("Error: packet pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	odp_pool_print(pool);

	/* Create timer pool */
	tparams.res_ns = 1 * ODP_TIME_MSEC_IN_NS;
	tparams.min_tmo = 0;
	tparams.max_tmo = 10000 * ODP_TIME_SEC_IN_NS;
	tparams.num_timers = num_workers; /* One timer per worker */
	tparams.priv = 0; /* Shared */
	tparams.clk_src = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("timer_pool", &tparams);
	if (tp == ODP_TIMER_POOL_INVALID) {
		EXAMPLE_ERR("Timer pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	odp_timer_pool_start();

	/* Create timeout pool */
	memset(&params, 0, sizeof(params));
	params.tmo.num     = tparams.num_timers; /* One timeout per timer */
	params.type	   = ODP_POOL_TIMEOUT;

	tmop = odp_pool_create("timeout_pool", &params);

	if (tmop == ODP_POOL_INVALID) {
		EXAMPLE_ERR("Error: timeout pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	for (i = 0; i < args->appl.if_count; ++i)
		create_pktio(args->appl.if_names[i], pool);

	/* Create and init worker threads */
	memset(thread_tbl, 0, sizeof(thread_tbl));

	if (args->appl.mode == APPL_MODE_PING) {
		odp_cpumask_t cpu_mask;
		odp_queue_t tq;
		int cpu_first, cpu_next;

		odp_cpumask_zero(&cpu_mask);
		cpu_first = odp_cpumask_first(&cpumask);
		odp_cpumask_set(&cpu_mask, cpu_first);

		tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
		if (tq == ODP_QUEUE_INVALID)
			abort();
		args->thread[1].pktio_dev = args->appl.if_names[0];
		args->thread[1].pool = pool;
		args->thread[1].tp = tp;
		args->thread[1].tq = tq;
		args->thread[1].tim = odp_timer_alloc(tp, tq, NULL);
		if (args->thread[1].tim == ODP_TIMER_INVALID)
			abort();
		args->thread[1].tmo_ev = odp_timeout_alloc(tmop);
		if (args->thread[1].tmo_ev == ODP_TIMEOUT_INVALID)
			abort();
		args->thread[1].mode = args->appl.mode;
		odph_linux_pthread_create(&thread_tbl[1], &cpu_mask,
					  gen_recv_thread, &args->thread[1],
					  ODP_THREAD_WORKER);

		tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
		if (tq == ODP_QUEUE_INVALID)
			abort();
		args->thread[0].pktio_dev = args->appl.if_names[0];
		args->thread[0].pool = pool;
		args->thread[0].tp = tp;
		args->thread[0].tq = tq;
		args->thread[0].tim = odp_timer_alloc(tp, tq, NULL);
		if (args->thread[0].tim == ODP_TIMER_INVALID)
			abort();
		args->thread[0].tmo_ev = odp_timeout_alloc(tmop);
		if (args->thread[0].tmo_ev == ODP_TIMEOUT_INVALID)
			abort();
		args->thread[0].mode = args->appl.mode;
		cpu_next = odp_cpumask_next(&cpumask, cpu_first);
		odp_cpumask_zero(&cpu_mask);
		odp_cpumask_set(&cpu_mask, cpu_next);
		odph_linux_pthread_create(&thread_tbl[0], &cpu_mask,
					  gen_send_thread, &args->thread[0],
					  ODP_THREAD_WORKER);

	} else {
		int cpu = odp_cpumask_first(&cpumask);

		for (i = 0; i < num_workers; ++i) {
			odp_cpumask_t thd_mask;
			void *(*thr_run_func) (void *);
			int if_idx;
			odp_queue_t tq;

			if_idx = i % args->appl.if_count;

			args->thread[i].pktio_dev = args->appl.if_names[if_idx];
			tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
			if (tq == ODP_QUEUE_INVALID)
				abort();
			args->thread[i].pool = pool;
			args->thread[i].tp = tp;
			args->thread[i].tq = tq;
			args->thread[i].tim = odp_timer_alloc(tp, tq, NULL);
			if (args->thread[i].tim == ODP_TIMER_INVALID)
				abort();
			args->thread[i].tmo_ev = odp_timeout_alloc(tmop);
			if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID)
				abort();
			args->thread[i].mode = args->appl.mode;

			if (args->appl.mode == APPL_MODE_UDP) {
				thr_run_func = gen_send_thread;
			} else if (args->appl.mode == APPL_MODE_RCV) {
				thr_run_func = gen_recv_thread;
			} else {
				EXAMPLE_ERR("ERR MODE\n");
				exit(EXIT_FAILURE);
			}
			/*
			 * Create threads one-by-one instead of all-at-once,
			 * because each thread might get different arguments.
			 * Calls odp_thread_create(cpu) for each thread
			 */
			odp_cpumask_zero(&thd_mask);
			odp_cpumask_set(&thd_mask, cpu);
			odph_linux_pthread_create(&thread_tbl[i],
						  &thd_mask,
						  thr_run_func,
						  &args->thread[i],
						  ODP_THREAD_WORKER);
			cpu = odp_cpumask_next(&cpumask, cpu);

		}
	}

	print_global_stats(num_workers);

	/* Master thread waits for other threads to exit */
	odph_linux_pthread_join(thread_tbl, num_workers);

	free(args->appl.if_names);
	free(args->appl.if_str);
	printf("Exit\n\n");

	return 0;
}
Example #14
File: linux.c Project: guanhe0/packages
int odph_linux_process_fork_n(odph_linux_process_t *proc_tbl,
			      const odp_cpumask_t *mask)
{
	pid_t pid;
	int num;
	int cpu_count;
	int cpu;
	int i;

	num = odp_cpumask_count(mask);

	memset(proc_tbl, 0, num * sizeof(odph_linux_process_t));

	cpu_count = odp_cpu_count();

	if (num < 1 || num > cpu_count) {
		ODPH_ERR("Bad num\n");
		return -1;
	}

	cpu = odp_cpumask_first(mask);
	for (i = 0; i < num; i++) {
		cpu_set_t cpu_set;

		CPU_ZERO(&cpu_set);
		CPU_SET(cpu, &cpu_set);

		pid = fork();

		if (pid < 0) {
			ODPH_ERR("fork() failed\n");
			return -1;
		}

		/* Parent continues to fork */
		if (pid > 0) {
			proc_tbl[i].pid  = pid;
			proc_tbl[i].cpu = cpu;

			cpu = odp_cpumask_next(mask, cpu);
			continue;
		}

		/* Child process */

		/* Request SIGTERM if parent dies */
		prctl(PR_SET_PDEATHSIG, SIGTERM);
		/* Parent died already? */
		if (getppid() == 1)
			kill(getpid(), SIGTERM);

		if (sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set)) {
			ODPH_ERR("sched_setaffinity() failed\n");
			return -2;
		}

		if (odp_init_local(ODP_THREAD_WORKER)) {
			ODPH_ERR("Local init failed\n");
			return -2;
		}

		return 0;
	}

	return 1;
}
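A parent-side sketch for the fork helper (hedged: odph_linux_process_wait_n() is assumed to be the matching reap helper from the same linux.c): fork one process per CPU in the default worker mask, let children do their work on the 0 return, and have the parent wait for them all.

	odph_linux_process_t proc_tbl[MAX_WORKERS]; /* MAX_WORKERS: assumed defined */
	odp_cpumask_t mask;
	int num, ret;

	num = odp_cpumask_default_worker(&mask, 0);
	ret = odph_linux_process_fork_n(proc_tbl, &mask);
	if (ret < 0)
		return -1; /* fork or affinity failure */
	if (ret == 0) {
		/* child: do the work, then odp_term_local() and exit */
	} else {
		/* parent (ret == 1): wait for all children */
		odph_linux_process_wait_n(proc_tbl, num);
	}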