Example #1
phys_addr_t odp_mem_virt2phy(const void *virtaddr)
{
	int fd;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		ODP_ERR("cannot open /proc/self/pagemap: %s\n",
			strerror(errno));
		return ODP_BAD_PHYS_ADDR;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t)-1) {
		ODP_ERR("seek error in /proc/self/pagemap: %s\n",
			strerror(errno));
		close(fd);
		return ODP_BAD_PHYS_ADDR;
	}

	/* a short read would leave 'page' partly uninitialized, so demand
	 * the full entry, not just a non-negative return */
	if (read(fd, &page, sizeof(uint64_t)) != sizeof(uint64_t)) {
		ODP_ERR("cannot read /proc/self/pagemap: %s\n",
			strerror(errno));
		close(fd);
		return ODP_BAD_PHYS_ADDR;
	}

	/* bits 0-54 of a pagemap entry hold the page frame number */
	physaddr = ((page & 0x7fffffffffffffULL) * page_size) +
		   ((unsigned long)virtaddr % page_size);

	close(fd);
	return physaddr;
}
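As a standalone illustration of the pagemap walk above, a minimal sketch follows (assumptions: Linux only; since kernel 4.0 the PFN field reads back as zero unless the caller has CAP_SYS_ADMIN):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int dummy = 0;	/* object whose address we translate */
	uint64_t entry;
	long page_size = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;

	/* /proc/self/pagemap holds one 64-bit entry per virtual page */
	off_t off = (off_t)((uintptr_t)&dummy / page_size) * sizeof(entry);

	if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
		close(fd);
		return 1;
	}
	close(fd);

	if (!(entry & (1ULL << 63))) {	/* bit 63: page present */
		printf("page not present\n");
		return 0;
	}

	/* bits 0-54: page frame number */
	uint64_t pfn = entry & ((1ULL << 55) - 1);

	printf("phys: 0x%llx\n", (unsigned long long)
	       (pfn * page_size + (uintptr_t)&dummy % page_size));
	return 0;
}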
Example #2
static int sock_mmap_close(pktio_entry_t *entry)
{
	pkt_sock_mmap_t *const pkt_sock = &entry->s.pkt_sock_mmap;

	mmap_unmap_sock(pkt_sock);
	if (pkt_sock->sockfd != -1 && close(pkt_sock->sockfd) != 0) {
		__odp_errno = errno;
		ODP_ERR("close(sockfd): %s\n", strerror(errno));
		return -1;
	}

	return 0;
}
Example #3
static int netmap_do_ioctl(pktio_entry_t *pktio_entry, unsigned long cmd,
			   int subcmd)
{
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;

	struct ethtool_value eval;
	struct ifreq ifr;
	int err;
	int fd = pkt_nm->sockfd;

	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
		 pktio_entry->s.name);

	switch (cmd) {
	case SIOCSIFFLAGS:
		ifr.ifr_flags = pkt_nm->if_flags & 0xffff;
		break;
	case SIOCETHTOOL:
		eval.cmd  = subcmd;
		eval.data = 0;
		ifr.ifr_data = (caddr_t)&eval;
		break;
	default:
		break;
	}

	err = ioctl(fd, cmd, &ifr);
	if (err)
		goto done;

	switch (cmd) {
	case SIOCGIFFLAGS:
		pkt_nm->if_flags = (ifr.ifr_flags << 16) |
				   (0xffff & ifr.ifr_flags);
		break;
	case SIOCETHTOOL:
		if (subcmd == ETHTOOL_GLINK)
			return eval.data;

		break;
	default:
		break;
	}

done:
	if (err)
		ODP_ERR("ioctl err %d %lu: %s\n", err, cmd, strerror(errno));

	return err;
}
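The SIOCETHTOOL/ETHTOOL_GLINK path above also works standalone; a minimal sketch (the device name "eth0" is only an example):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "eth0"); /* example */
	ifr.ifr_data = (void *)&eval;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link is %s\n", eval.data ? "up" : "down");
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}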
Example #4
static int netmap_close(pktio_entry_t *pktio_entry)
{
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;

	netmap_close_descriptors(pktio_entry);

	if (pkt_nm->sockfd != -1 && close(pkt_nm->sockfd) != 0) {
		__odp_errno = errno;
		ODP_ERR("close(sockfd): %s\n", strerror(errno));
		return -1;
	}

	return 0;
}
Example #5
int odp_pktio_term_global(void)
{
	pktio_entry_t *pktio_entry;
	int ret = 0;
	int id;
	int pktio_if;

	for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if)
		if (pktio_if_ops[pktio_if]->term)
			if (pktio_if_ops[pktio_if]->term())
				ODP_ERR("failed to terminate pktio type %d",
					pktio_if);

	for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) {
		pktio_entry = &pktio_tbl->entries[id - 1];
		odp_queue_destroy(pktio_entry->s.outq_default);
	}

	ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries"));
	if (ret < 0)
		ODP_ERR("shm free failed for odp_pktio_entries");

	return ret;
}
Example #6
int odp_thread_term_local(void)
{
	int num;
	int id = this_thread->thr;

	odp_spinlock_lock(&thread_globals->lock);
	num = free_id(id);
	odp_spinlock_unlock(&thread_globals->lock);

	if (num < 0) {
		ODP_ERR("failed to free thread id %i", id);
		return -1;
	}

	return num; /* return the number of threads left */
}
Example #7
int odp_pktio_restart(odp_pktio_t id)
{
	pktio_entry_t *entry;
	uint8_t port_id;
	int ret;

	entry = get_pktio_entry(id);
	if (entry == NULL) {
		ODP_DBG("pktio entry %d does not exist\n",
			id->unused_dummy_var);
		return -1;
	}

	if (odp_unlikely(is_free(entry))) {
		ODP_DBG("already freed pktio\n");
		return -1;
	}

	if (odp_pktio_is_not_hns_eth(entry)) {
		ODP_DBG("pktio entry %d is not ODP UMD pktio\n",
			id->unused_dummy_var);
		return -1;
	}

	port_id = entry->s.pkt_odp.portid;

	if (!odp_eth_dev_is_valid_port(port_id)) {
		ODP_DBG("pktio entry %d ODP UMD Invalid port_id=%d\n",
			id->unused_dummy_var, port_id);
		return -1;
	}

	/* Stop device */
	odp_eth_dev_stop(port_id);

	/* Start device */
	ret = odp_eth_dev_start(port_id);
	if (ret < 0) {
		ODP_ERR("odp_eth_dev_start:err=%d, port=%u\n",
			ret, (unsigned)port_id);
		return -1;
	}

	ODP_DBG("odp pmd restart done\n\n");

	return 0;
}
Example #8
odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
			     odp_queue_param_t *param)
{
	uint32_t i;
	queue_entry_t *queue;
	odp_queue_t handle = ODP_QUEUE_INVALID;

	for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
		queue = &queue_tbl->queue[i];

		if (queue->s.status != QUEUE_STATUS_FREE)
			continue;

		LOCK(&queue->s.lock);
		if (queue->s.status == QUEUE_STATUS_FREE) {
			queue_init(queue, name, type, param);

			if (type == ODP_QUEUE_TYPE_SCHED ||
			    type == ODP_QUEUE_TYPE_PKTIN)
				queue->s.status = QUEUE_STATUS_NOTSCHED;
			else
				queue->s.status = QUEUE_STATUS_READY;

			handle = queue->s.handle;
			UNLOCK(&queue->s.lock);
			break;
		}
		UNLOCK(&queue->s.lock);
	}

	if (handle != ODP_QUEUE_INVALID &&
	    (type == ODP_QUEUE_TYPE_SCHED || type == ODP_QUEUE_TYPE_PKTIN)) {
		odp_buffer_t buf;

		buf = odp_schedule_buffer_alloc(handle);
		if (buf == ODP_BUFFER_INVALID) {
			ODP_ERR("queue_init: sched buf alloc failed\n");
			return ODP_QUEUE_INVALID;
		}

		queue->s.sched_buf = buf;
		odp_schedule_mask_set(handle, queue->s.param.sched.prio);
	}

	return handle;
}
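Note the locking idiom in the slot scan above: the unlocked status read is only a cheap filter, so the status must be re-checked once the lock is held. A generic pthread sketch of the same "check, lock, re-check" pattern (illustrative only, not ODP code):

#include <pthread.h>

enum { SLOT_FREE, SLOT_USED };

struct slot {
	pthread_mutex_t lock;
	int status;
};

/* Claim a slot: skip busy slots with an unlocked read, then re-test
 * under the lock, since the status may have changed in between. */
static int slot_claim(struct slot *s)
{
	if (s->status != SLOT_FREE)	/* unlocked read: a hint only */
		return -1;

	pthread_mutex_lock(&s->lock);
	if (s->status != SLOT_FREE) {	/* authoritative re-check */
		pthread_mutex_unlock(&s->lock);
		return -1;
	}
	s->status = SLOT_USED;
	pthread_mutex_unlock(&s->lock);
	return 0;
}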
Example #9
int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
{
	pktio_entry_t *pktio_entry = get_pktio_entry(id);
	queue_entry_t *qentry;

	if (pktio_entry == NULL || queue == ODP_QUEUE_INVALID)
		return -1;

	qentry = queue_to_qentry(queue);

	if (qentry->s.type != ODP_QUEUE_TYPE_PKTIN)
		return -1;

	lock_entry(pktio_entry);
	pktio_entry->s.inq_default = queue;
	unlock_entry(pktio_entry);

	switch (qentry->s.type) {
	/* Change to ODP_QUEUE_TYPE_POLL when ODP_QUEUE_TYPE_PKTIN is removed */
	case ODP_QUEUE_TYPE_PKTIN:

		/* User polls the input queue */
		queue_lock(qentry);
		qentry->s.pktin = id;
		queue_unlock(qentry);

		/* Uncomment when ODP_QUEUE_TYPE_PKTIN is removed
		        break;
		   case ODP_QUEUE_TYPE_SCHED:
		 */

		/* Packet input through the scheduler */
		if (schedule_pktio_start(id, ODP_SCHED_PRIO_LOWEST)) {
			ODP_ERR("Schedule pktio start failed\n");
			return -1;
		}

		break;
	default:
		ODP_ABORT("Bad queue type\n");
	}

	return 0;
}
Example #10
static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
				    int sock_group_idx)
{
	int sockfd = pkt_sock->sockfd;
	int val;
	int err;
	uint16_t fanout_group;

	fanout_group = (uint16_t)(sock_group_idx & 0xffff);
	val = (PACKET_FANOUT_HASH << 16) | fanout_group;

	err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
	if (err != 0) {
		__odp_errno = errno;
		ODP_ERR("setsockopt(PACKET_FANOUT): %s\n", strerror(errno));
		return -1;
	}
	return 0;
}
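For reference, PACKET_FANOUT can be exercised on a bare AF_PACKET socket; a minimal standalone sketch (requires CAP_NET_RAW; the group id 42 is arbitrary):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* group id in the low 16 bits, fanout mode in the high 16 bits */
	int group = 42;
	int val = (PACKET_FANOUT_HASH << 16) | (group & 0xffff);

	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)))
		perror("setsockopt(PACKET_FANOUT)");

	close(fd);
	return 0;
}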
Example #11
static int mmap_sock(pkt_sock_mmap_t *pkt_sock)
{
	int i;
	int sock = pkt_sock->sockfd;

	/* map rx + tx buffers to userspace; they are laid out in this order */
	pkt_sock->mmap_len =
		pkt_sock->rx_ring.req.tp_block_size *
		pkt_sock->rx_ring.req.tp_block_nr +
		pkt_sock->tx_ring.req.tp_block_size *
		pkt_sock->tx_ring.req.tp_block_nr;

	pkt_sock->mmap_base =
		mmap(NULL, pkt_sock->mmap_len, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);

	if (pkt_sock->mmap_base == MAP_FAILED) {
		__odp_errno = errno;
		ODP_ERR("mmap rx&tx buffer failed: %s\n", strerror(errno));
		return -1;
	}

	pkt_sock->rx_ring.mm_space = pkt_sock->mmap_base;
	memset(pkt_sock->rx_ring.rd, 0, pkt_sock->rx_ring.rd_len);
	for (i = 0; i < pkt_sock->rx_ring.rd_num; ++i) {
		pkt_sock->rx_ring.rd[i].iov_base =
			pkt_sock->rx_ring.mm_space
			+ (i * pkt_sock->rx_ring.flen);
		pkt_sock->rx_ring.rd[i].iov_len = pkt_sock->rx_ring.flen;
	}

	pkt_sock->tx_ring.mm_space =
		pkt_sock->mmap_base + pkt_sock->rx_ring.mm_len;
	memset(pkt_sock->tx_ring.rd, 0, pkt_sock->tx_ring.rd_len);
	for (i = 0; i < pkt_sock->tx_ring.rd_num; ++i) {
		pkt_sock->tx_ring.rd[i].iov_base =
			pkt_sock->tx_ring.mm_space
			+ (i * pkt_sock->tx_ring.flen);
		pkt_sock->tx_ring.rd[i].iov_len = pkt_sock->tx_ring.flen;
	}

	return 0;
}
Example #12
odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
			     odp_queue_param_t *param)
{
	uint32_t i;
	queue_entry_t *queue;
	odp_queue_t handle = ODP_QUEUE_INVALID;

	for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
		queue = &queue_tbl->queue[i];

		if (LOAD_S32(queue->s.status) != QUEUE_STATUS_FREE)
			continue;

		LOCK(queue);
		INVALIDATE(queue);
		if (queue->s.status == QUEUE_STATUS_FREE) {
			queue_init(queue, name, type, param);

			if (type == ODP_QUEUE_TYPE_SCHED ||
			    type == ODP_QUEUE_TYPE_PKTIN)
				queue->s.status = QUEUE_STATUS_NOTSCHED;
			else
				queue->s.status = QUEUE_STATUS_READY;

			handle = queue->s.handle;
			UNLOCK(queue);
			break;
		}
		UNLOCK(queue);
	}

	if (handle != ODP_QUEUE_INVALID &&
	    (type == ODP_QUEUE_TYPE_SCHED || type == ODP_QUEUE_TYPE_PKTIN)) {
		if (schedule_queue_init(queue)) {
			ODP_ERR("schedule queue init failed\n");
			return ODP_QUEUE_INVALID;
		}
	}

	return handle;
}
Example #13
/* Stress func for Multi producer only */
static int producer_fn(void)
{
	unsigned i;

	void **src = NULL;

	/* alloc dummy object pointers */
	src = malloc(MAX_BULK*2*sizeof(void *));
	if (src == NULL) {
		ODP_ERR("failed to allocate producer memory.\n");
		return -1;
	}
	for (i = 0; i < MAX_BULK; i++)
		src[i] = (void *)(unsigned long)i;

	do {
		i = odp_ring_mp_enqueue_bulk(r_stress, src, MAX_BULK);
		if (i == 0) {
			free(src);	/* don't leak the dummy pointers */
			return 0;
		}
	} while (1);
}
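The matching consumer for this stress test is not shown; a plausible sketch, assuming the same ring API also offers a multi-consumer odp_ring_mc_dequeue_bulk() with the zero-on-success convention used above:

/* Stress func for Multi consumer only (sketch) */
static int consumer_fn(void)
{
	void **src = malloc(MAX_BULK * sizeof(void *));
	unsigned i;

	if (src == NULL) {
		ODP_ERR("failed to allocate consumer memory.\n");
		return -1;
	}

	do {
		i = odp_ring_mc_dequeue_bulk(r_stress, src, MAX_BULK);
		if (i == 0) {
			free(src);
			return 0;
		}
	} while (1);
}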
Example #14
static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, const char *netdev)
{
	int ret;

	pkt_sock->ll.sll_family = PF_PACKET;
	pkt_sock->ll.sll_protocol = htons(ETH_P_ALL);
	pkt_sock->ll.sll_ifindex = if_nametoindex(netdev);
	pkt_sock->ll.sll_hatype = 0;
	pkt_sock->ll.sll_pkttype = 0;
	pkt_sock->ll.sll_halen = 0;

	ret = bind(pkt_sock->sockfd, (struct sockaddr *)&pkt_sock->ll,
		   sizeof(pkt_sock->ll));
	if (ret == -1) {
		__odp_errno = errno;
		ODP_ERR("bind(to IF): %s\n", strerror(errno));
		return -1;
	}

	return 0;
}
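Outside ODP the same bind takes only a few lines; a minimal standalone sketch (requires CAP_NET_RAW; "eth0" is an example device):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_ll ll;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ll, 0, sizeof(ll));
	ll.sll_family = AF_PACKET;
	ll.sll_protocol = htons(ETH_P_ALL);
	ll.sll_ifindex = if_nametoindex("eth0"); /* example device */

	if (ll.sll_ifindex == 0 ||
	    bind(fd, (struct sockaddr *)&ll, sizeof(ll)) != 0)
		perror("bind");

	close(fd);
	return 0;
}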
Example #15
int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	int sched = 0;
	int i;
	odp_buffer_hdr_t *tail;

	for (i = 0; i < num - 1; i++)
		buf_hdr[i]->next = buf_hdr[i+1];

	tail = buf_hdr[num-1];
	buf_hdr[num-1]->next = NULL;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);
	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		UNLOCK(queue);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	/* Empty queue */
	if (LOAD_PTR(queue->s.head) == NULL)
		STORE_PTR(queue->s.head, buf_hdr[0]);
	else
		STORE_PTR(((typeof(queue->s.tail))LOAD_PTR(queue->s.tail))->next, buf_hdr[0]);

	STORE_PTR(queue->s.tail, tail);

	if (status == QUEUE_STATUS_NOTSCHED) {
		STORE_PTR(queue->s.status, QUEUE_STATUS_SCHED);
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(queue);

	/* Add queue to scheduling */
	if (sched)
		schedule_queue(queue);

	return num; /* All events enqueued */
}
Example #16
int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	int sched = 0;
	int i;
	odp_buffer_hdr_t *tail;

	for (i = 0; i < num - 1; i++)
		buf_hdr[i]->next = buf_hdr[i+1];

	tail = buf_hdr[num-1];
	buf_hdr[num-1]->next = NULL;

	LOCK(&queue->s.lock);
	if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
		UNLOCK(&queue->s.lock);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	/* Empty queue */
	if (queue->s.head == NULL)
		queue->s.head = buf_hdr[0];
	else
		queue->s.tail->next = buf_hdr[0];

	queue->s.tail = tail;

	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
		queue->s.status = QUEUE_STATUS_SCHED;
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(&queue->s.lock);

	/* Add queue to scheduling */
	if (sched && schedule_queue(queue))
		ODP_ABORT("schedule_queue failed\n");

	return num; /* All events enqueued */
}
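Both queue_enq_multi() variants use the same batch-append idiom: chain the buffers to each other before entering the critical section, then splice the whole chain in with two pointer writes. A generic sketch of just that list manipulation:

struct node {
	struct node *next;
};

struct list {
	struct node *head;
	struct node *tail;
};

/* Append a batch of num nodes with an O(1) splice: pre-chain the
 * nodes, then attach the chain at the tail (or at the head if the
 * list is empty) and advance the tail pointer. */
static void list_append_batch(struct list *l, struct node *n[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++)
		n[i]->next = n[i + 1];
	n[num - 1]->next = NULL;

	if (l->head == NULL)
		l->head = n[0];
	else
		l->tail->next = n[0];

	l->tail = n[num - 1];
}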
Example #17
static void init_pktio_entry(pktio_entry_t *entry, odp_pktio_params_t *params)
{
	set_taken(entry);
	entry->s.inq_default = ODP_QUEUE_INVALID;
	switch (params->type) {
	case ODP_PKTIO_TYPE_SOCKET_BASIC:
	case ODP_PKTIO_TYPE_SOCKET_MMSG:
	case ODP_PKTIO_TYPE_SOCKET_MMAP:
		memset(&entry->s.pkt_sock, 0, sizeof(entry->s.pkt_sock));
		memset(&entry->s.pkt_sock_mmap, 0,
		      sizeof(entry->s.pkt_sock_mmap));
		break;
#ifdef ODP_HAVE_NETMAP
	case ODP_PKTIO_TYPE_NETMAP:
		memset(&entry->s.pkt_nm, 0, sizeof(entry->s.pkt_nm));
		break;
#endif
	default:
		ODP_ERR("Packet I/O type not supported. Please recompile\n");
		break;
	}
	/* Save pktio parameters, type is the most useful */
	memcpy(&entry->s.params, params, sizeof(*params));
}
Example #18
/**
 * Packet IO loopback worker thread using bursts from/to IO resources
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_ifburst_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	int pkts, pkts_ok;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	unsigned long tmp = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed.\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, burst mode\n",
	       thr, pktio);

	/* Loop packets */
	for (;;) {
		pkts = odp_pktio_recv(pktio, pkt_tbl, MAX_PKT_BURST);
		if (pkts > 0) {
			/* Drop packets with errors */
			pkts_ok = drop_err_pkts(pkt_tbl, pkts);
			if (pkts_ok > 0) {
				/* Swap Eth MACs and IP-addrs */
				swap_pkt_addrs(pkt_tbl, pkts_ok);
				odp_pktio_send(pktio, pkt_tbl, pkts_ok);
			}

			if (odp_unlikely(pkts_ok != pkts))
				ODP_ERR("Dropped frames:%u - err_cnt:%lu\n",
					pkts-pkts_ok, ++err_cnt);

			/* Print packet counts every once in a while */
			tmp += pkts_ok;
			if (odp_unlikely((tmp >= 100000) || /* OR first print:*/
			    ((pkt_cnt == 0) && ((tmp-1) < MAX_PKT_BURST)))) {
				pkt_cnt += tmp;
				printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
				fflush(NULL);
				tmp = 0;
			}
		}
	}

/* unreachable */
}
Example #19
static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
			  pktio_entry_t *pktio_entry,
			  const char *netdev, odp_pool_t pool)
{
	int if_idx;
	int ret = 0;
	odp_pktio_stats_t cur_stats;

	if (disable_pktio)
		return -1;

	pkt_sock_mmap_t *const pkt_sock = &pktio_entry->s.pkt_sock_mmap;
	int fanout = 1;

	/* Init pktio entry */
	memset(pkt_sock, 0, sizeof(*pkt_sock));
	/* set sockfd to -1, because a valid socket might be initialized to 0 */
	pkt_sock->sockfd = -1;

	if (pool == ODP_POOL_INVALID)
		return -1;

	/* Store eth buffer offset for pkt buffers from this pool */
	pkt_sock->frame_offset = 0;

	pkt_sock->pool = pool;
	pkt_sock->sockfd = mmap_pkt_socket();
	if (pkt_sock->sockfd == -1)
		goto error;

	ret = mmap_bind_sock(pkt_sock, netdev);
	if (ret != 0)
		goto error;

	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
			      PACKET_TX_RING, pool, fanout);
	if (ret != 0)
		goto error;

	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
			      PACKET_RX_RING, pool, fanout);
	if (ret != 0)
		goto error;

	ret = mmap_sock(pkt_sock);
	if (ret != 0)
		goto error;

	ret = mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
	if (ret != 0)
		goto error;

	if_idx = if_nametoindex(netdev);
	if (if_idx == 0) {
		__odp_errno = errno;
		ODP_ERR("if_nametoindex(): %s\n", strerror(errno));
		goto error;
	}

	pkt_sock->fanout = fanout;
	if (fanout) {
		ret = set_pkt_sock_fanout_mmap(pkt_sock, if_idx);
		if (ret != 0)
			goto error;
	}

	ret = ethtool_stats_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
				   pktio_entry->s.name,
				   &cur_stats);
	if (ret != 0) {
		ret = sysfs_stats(pktio_entry, &cur_stats);
		if (ret != 0) {
			pktio_entry->s.stats_type = STATS_UNSUPPORTED;
			ODP_DBG("pktio: %s unsupported stats\n",
				pktio_entry->s.name);
		} else {
			pktio_entry->s.stats_type = STATS_SYSFS;
		}
	} else {
		pktio_entry->s.stats_type = STATS_ETHTOOL;
	}

	ret = sock_stats_reset_fd(pktio_entry,
				  pktio_entry->s.pkt_sock_mmap.sockfd);
	if (ret != 0)
		goto error;

	return 0;

error:
	sock_mmap_close(pktio_entry);
	return -1;
}
Example #20
File: ethtool.c Project: nmorey/odp
static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
{
	struct ethtool_gstrings *strings;
	struct ethtool_stats *estats;
	unsigned int n_stats, i;
	int err;
	int cnts;

	strings = get_stringset(fd, ifr);
	if (!strings)
		return -1;

	n_stats = strings->len;
	if (n_stats < 1) {
		ODP_ERR("no stats available\n");
		free(strings);
		return -1;
	}

	estats = calloc(1, n_stats * sizeof(uint64_t) +
			sizeof(struct ethtool_stats));
	if (!estats) {
		free(strings);
		return -1;
	}

	estats->cmd = ETHTOOL_GSTATS;
	estats->n_stats = n_stats;
	ifr->ifr_data = estats;
	err = ioctl(fd, SIOCETHTOOL, ifr);
	if (err < 0) {
		__odp_errno = errno;
		free(strings);
		free(estats);
		return -1;
	}

	cnts = 0;
	for (i = 0; i < n_stats; i++) {
		char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
		uint64_t val = estats->data[i];

		if (!strcmp(cnt, "rx_octets")) {
			stats->in_octets = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_ucast_packets")) {
			stats->in_ucast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_discards")) {
			stats->in_discards = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_errors")) {
			stats->in_errors = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_octets")) {
			stats->out_octets = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_ucast_packets")) {
			stats->out_ucast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_discards")) {
			stats->out_discards = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_errors")) {
			stats->out_errors = val;
			cnts++;
		}
	}

	free(strings);
	free(estats);

	/* The ethtool strings come from the kernel driver, and their names
	 * are not universal. This function needs to be updated if your
	 * driver uses different names for these counters. */
	if (cnts < 8)
		return -1;

	return 0;
}
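get_stringset() is referenced above but not shown. A plausible sketch using the standard ethtool string-set ioctls (ETHTOOL_GSSET_INFO to size the set, then ETHTOOL_GSTRINGS to fetch it; the caller frees the result):

static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
{
	struct {
		struct ethtool_sset_info hdr;
		uint32_t buf[1];
	} sset_info;
	struct ethtool_gstrings *strings;
	uint32_t len;

	sset_info.hdr.cmd = ETHTOOL_GSSET_INFO;
	sset_info.hdr.reserved = 0;
	sset_info.hdr.sset_mask = 1ULL << ETH_SS_STATS;
	ifr->ifr_data = (void *)&sset_info;
	if (ioctl(fd, SIOCETHTOOL, ifr) != 0)
		return NULL;

	/* on return, sset_mask tells which sets exist; data[0] is the len */
	len = sset_info.hdr.sset_mask ? sset_info.hdr.data[0] : 0;
	if (len == 0)
		return NULL;

	strings = calloc(1, sizeof(*strings) + len * ETH_GSTRING_LEN);
	if (!strings)
		return NULL;

	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = len;
	ifr->ifr_data = (void *)strings;
	if (ioctl(fd, SIOCETHTOOL, ifr) != 0) {
		free(strings);
		return NULL;
	}

	return strings;
}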
Example #21
static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
				      const odp_packet_t pkt_table[],
				      unsigned len)
{
	union frame_map ppd;
	uint32_t pkt_len;
	unsigned first_frame_num, frame_num, frame_count;
	int ret;
	uint8_t *buf;
	unsigned n, i = 0;
	unsigned nb_tx = 0;
	int send_errno;
	int total_len = 0;

	first_frame_num = ring->frame_num;
	frame_num = first_frame_num;
	frame_count = ring->rd_num;

	while (i < len) {
		ppd.raw = ring->rd[frame_num].iov_base;
		if (odp_unlikely(!mmap_tx_kernel_ready(ppd.raw)))
			break;

		pkt_len = odp_packet_len(pkt_table[i]);
		ppd.v2->tp_h.tp_snaplen = pkt_len;
		ppd.v2->tp_h.tp_len = pkt_len;
		total_len += pkt_len;

		buf = (uint8_t *)ppd.raw + TPACKET2_HDRLEN -
		       sizeof(struct sockaddr_ll);
		odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, buf);

		mmap_tx_user_ready(ppd.raw);

		if (++frame_num >= frame_count)
			frame_num = 0;

		i++;
	}

	ret = sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0);
	send_errno = errno;

	/* On success, the return value indicates the number of bytes sent. On
	 * failure a value of -1 is returned, even if the failure occurred
	 * after some of the packets in the ring have already been sent, so we
	 * need to inspect the packet status to determine which were sent. */
	if (odp_likely(ret == total_len)) {
		nb_tx = i;
		ring->frame_num = frame_num;
	} else if (ret == -1) {
		for (frame_num = first_frame_num, n = 0; n < i; ++n) {
			struct tpacket2_hdr *hdr = ring->rd[frame_num].iov_base;

			if (odp_likely(hdr->tp_status == TP_STATUS_AVAILABLE ||
				       hdr->tp_status == TP_STATUS_SENDING)) {
				nb_tx++;
			} else {
				/* The remaining frames weren't sent, clear
				 * their status to indicate we're not waiting
				 * for the kernel to process them. */
				hdr->tp_status = TP_STATUS_AVAILABLE;
			}

			if (++frame_num >= frame_count)
				frame_num = 0;
		}

		ring->frame_num = (first_frame_num + nb_tx) % frame_count;

		if (nb_tx == 0 && SOCK_ERR_REPORT(send_errno)) {
			__odp_errno = send_errno;
			/* ENOBUFS indicates that the transmit queue is full,
			 * which happens regularly under overload, so don't
			 * print it */
			if (send_errno != ENOBUFS)
				ODP_ERR("sendto(pkt mmap): %s\n",
					strerror(send_errno));
			return -1;
		}
	} else {
		/* Short send, return value is number of bytes sent so use this
		 * to determine number of complete frames sent. */
		for (n = 0; n < i && ret > 0; ++n) {
			ret -= odp_packet_len(pkt_table[n]);
			if (ret >= 0)
				nb_tx++;
		}

		ring->frame_num = (first_frame_num + nb_tx) % frame_count;
	}

	for (i = 0; i < nb_tx; ++i)
		odp_packet_free(pkt_table[i]);

	return nb_tx;
}
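The helpers mmap_tx_kernel_ready() and mmap_tx_user_ready() are not shown above; plausible definitions for TPACKET_V2, where a frame slot is handed to the kernel by setting TP_STATUS_SEND_REQUEST and becomes reusable once the kernel sets it back to TP_STATUS_AVAILABLE:

static inline int mmap_tx_kernel_ready(void *raw)
{
	struct tpacket2_hdr *hdr = raw;

	/* free for userspace when neither queued nor in flight */
	return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
}

static inline void mmap_tx_user_ready(void *raw)
{
	struct tpacket2_hdr *hdr = raw;

	__sync_synchronize(); /* publish the frame data before the status */
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
}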
Example #22
static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
		       const char *netdev, odp_pool_t pool)
{
	int i;
	int err;
	int sockfd;
	int mtu;
	uint32_t buf_size;
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
	struct nm_desc *desc;
	struct netmap_ring *ring;
	odp_pktin_hash_proto_t hash_proto;
	odp_pktio_stats_t   cur_stats;

	if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
		return -1;

	if (pool == ODP_POOL_INVALID)
		return -1;

	/* Init pktio entry */
	memset(pkt_nm, 0, sizeof(*pkt_nm));
	pkt_nm->sockfd = -1;
	pkt_nm->pool = pool;

	/* max frame len taking into account the l2-offset */
	pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX -
				odp_buffer_pool_headroom(pool) -
				odp_buffer_pool_tailroom(pool);

	snprintf(pktio_entry->s.name, sizeof(pktio_entry->s.name), "%s",
		 netdev);
	snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
		 netdev);

	/* Dummy open here to check if netmap module is available and to read
	 * capability info. */
	desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
	if (desc == NULL) {
		ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
		goto error;
	}

	if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all rx rings\n");
		nm_close(desc);
		goto error;
	}

	pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
	pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;

	if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all tx rings\n");
		nm_close(desc);
		goto error;
	}

	pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
	pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;

	ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
	buf_size = ring->nr_buf_size;
	nm_close(desc);

	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd == -1) {
		ODP_ERR("Cannot get device control socket\n");
		goto error;
	}

	pkt_nm->sockfd = sockfd;

	/* Use either interface MTU (+ ethernet header length) or netmap buffer
	 * size as MTU, whichever is smaller. */
	/* check the return value before adding the header length, so a
	 * -1 error from mtu_get_fd() is not masked */
	mtu = mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pktio_entry->s.name);
	if (mtu < 0) {
		ODP_ERR("Unable to read interface MTU\n");
		goto error;
	}
	mtu += ODPH_ETHHDR_LEN;

	pkt_nm->mtu = ((uint32_t)mtu < buf_size) ? (uint32_t)mtu : buf_size;

	/* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
	if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
		ODP_DBG("RSS not supported\n");
		pkt_nm->capa.max_input_queues = 1;
	}

	err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
	if (err)
		goto error;

	if ((pkt_nm->if_flags & IFF_UP) == 0)
		ODP_DBG("%s is down\n", pktio_entry->s.name);

	err = mac_addr_get_fd(sockfd, netdev, pkt_nm->if_mac);
	if (err)
		goto error;

	for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
		odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
		odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
	}

	/* netmap uses only ethtool to get statistics counters */
	err = ethtool_stats_get_fd(pktio_entry->s.pkt_nm.sockfd,
				   pktio_entry->s.name,
				   &cur_stats);
	if (err) {
		ODP_ERR(
			"netmap pktio %s does not support statistics counters\n",
			pktio_entry->s.name);
		pktio_entry->s.stats_type = STATS_UNSUPPORTED;
	} else {
		pktio_entry->s.stats_type = STATS_ETHTOOL;
	}

	(void)netmap_stats_reset(pktio_entry);

	return 0;

error:
	netmap_close(pktio_entry);
	return -1;
}
Example #23
static int netmap_start(pktio_entry_t *pktio_entry)
{
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
	netmap_ring_t *desc_ring;
	struct nm_desc base_desc;
	unsigned i;
	unsigned j;
	unsigned num_rx_desc = 0;
	uint64_t flags;
	odp_pktin_mode_t in_mode = pktio_entry->s.param.in_mode;
	odp_pktout_mode_t out_mode = pktio_entry->s.param.out_mode;

	/* If no pktin/pktout queues have been configured, configure one
	 * for each direction. */
	if (!pktio_entry->s.num_in_queue &&
	    in_mode != ODP_PKTIN_MODE_DISABLED) {
		odp_pktin_queue_param_t param;

		odp_pktin_queue_param_init(&param);
		param.num_queues = 1;
		if (odp_pktin_queue_config(pktio_entry->s.handle, &param))
			return -1;
	}

	if (!pktio_entry->s.num_out_queue &&
	    out_mode == ODP_PKTOUT_MODE_DIRECT) {
		odp_pktout_queue_param_t param;

		odp_pktout_queue_param_init(&param);
		param.num_queues = 1;
		if (odp_pktout_queue_config(pktio_entry->s.handle, &param))
			return -1;
	}

	if (pkt_nm->num_rx_desc_rings == pktio_entry->s.num_in_queue &&
	    pkt_nm->num_tx_desc_rings == pktio_entry->s.num_out_queue)
		return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;

	netmap_close_descriptors(pktio_entry);

	/* Map pktin/pktout queues to netmap rings */
	if (pktio_entry->s.num_in_queue) {
		/* In single queue case only one netmap descriptor is
		 * required. */
		num_rx_desc = (pktio_entry->s.num_in_queue == 1) ? 1 :
			      pkt_nm->num_rx_rings;

		map_netmap_rings(pkt_nm->rx_desc_ring,
				 pktio_entry->s.num_in_queue, num_rx_desc);
	}

	if (pktio_entry->s.num_out_queue)
		/* Enough to map only one netmap tx ring per pktout queue */
		map_netmap_rings(pkt_nm->tx_desc_ring,
				 pktio_entry->s.num_out_queue,
				 pktio_entry->s.num_out_queue);

	base_desc.self = &base_desc;
	base_desc.mem  = NULL;
	memcpy(base_desc.req.nr_name, pktio_entry->s.name,
	       sizeof(pktio_entry->s.name));
	base_desc.req.nr_flags &= ~NR_REG_MASK;

	if (num_rx_desc == 1)
		base_desc.req.nr_flags |= NR_REG_ALL_NIC;
	else
		base_desc.req.nr_flags |= NR_REG_ONE_NIC;

	base_desc.req.nr_ringid = 0;

	/* Only the first rx descriptor does mmap */
	desc_ring = pkt_nm->rx_desc_ring;
	flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL;
	desc_ring[0].s.desc[0] = nm_open(pkt_nm->nm_name, NULL, flags,
					 &base_desc);
	if (desc_ring[0].s.desc[0] == NULL) {
		ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
		goto error;
	}

	/* Open rest of the rx descriptors (one per netmap ring) */
	flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL | NM_OPEN_NO_MMAP;
	for (i = 0; i < pktio_entry->s.num_in_queue; i++)
		for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
			if (i == 0 && j == 0) { /* First already opened */
				if (num_rx_desc > 1)
					continue;
				else
					break;
			}

			base_desc.req.nr_ringid = j;
			desc_ring[i].s.desc[j]	= nm_open(pkt_nm->nm_name, NULL,
							  flags, &base_desc);
			if (desc_ring[i].s.desc[j] == NULL) {
				ODP_ERR("nm_start(%s) failed\n",
					pkt_nm->nm_name);
				goto error;
			}
		}

	/* Open tx descriptors */
	desc_ring = pkt_nm->tx_desc_ring;
	flags = NM_OPEN_IFNAME | NM_OPEN_NO_MMAP;
	base_desc.req.nr_flags &= ~NR_REG_MASK;
	base_desc.req.nr_flags |= NR_REG_ONE_NIC;
	for (i = 0; i < pktio_entry->s.num_out_queue; i++)
		for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
			base_desc.req.nr_ringid = j;
			desc_ring[i].s.desc[j]	= nm_open(pkt_nm->nm_name, NULL,
							  flags, &base_desc);
			if (desc_ring[i].s.desc[j] == NULL) {
				ODP_ERR("nm_start(%s) failed\n",
					pkt_nm->nm_name);
				goto error;
			}
		}

	pkt_nm->num_rx_desc_rings = pktio_entry->s.num_in_queue;
	pkt_nm->num_tx_desc_rings = pktio_entry->s.num_out_queue;

	/* Wait for the link to come up */
	return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;

error:
	netmap_close_descriptors(pktio_entry);
	return -1;
}
Example #24
odp_pktio_t odp_pktio_open(const char *dev, odp_buffer_pool_t pool,
			   odp_pktio_params_t *params)
{
	odp_pktio_t id;
	pktio_entry_t *pktio_entry;
	int res;

	if (params == NULL) {
		ODP_ERR("Invalid pktio params\n");
		return ODP_PKTIO_INVALID;
	}

	switch (params->type) {
	case ODP_PKTIO_TYPE_SOCKET_BASIC:
	case ODP_PKTIO_TYPE_SOCKET_MMSG:
	case ODP_PKTIO_TYPE_SOCKET_MMAP:
		ODP_DBG("Allocating socket pktio\n");
		break;
#ifdef ODP_HAVE_NETMAP
	case ODP_PKTIO_TYPE_NETMAP:
		ODP_DBG("Allocating netmap pktio\n");
		break;
#endif
	default:
		ODP_ERR("Invalid pktio type: %02x\n", params->type);
		return ODP_PKTIO_INVALID;
	}

	id = alloc_lock_pktio_entry(params);
	if (id == ODP_PKTIO_INVALID) {
		ODP_ERR("No resources available.\n");
		return ODP_PKTIO_INVALID;
	}
	/* if successful, alloc_lock_pktio_entry() returns with the entry locked */

	pktio_entry = get_entry(id);

	switch (params->type) {
	case ODP_PKTIO_TYPE_SOCKET_BASIC:
	case ODP_PKTIO_TYPE_SOCKET_MMSG:
		res = setup_pkt_sock(&pktio_entry->s.pkt_sock, dev, pool);
		if (res == -1) {
			close_pkt_sock(&pktio_entry->s.pkt_sock);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;
	case ODP_PKTIO_TYPE_SOCKET_MMAP:
		res = setup_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap, dev,
				pool, params->sock_params.fanout);
		if (res == -1) {
			close_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;
#ifdef ODP_HAVE_NETMAP
	case ODP_PKTIO_TYPE_NETMAP:

		res = setup_pkt_netmap(&pktio_entry->s.pkt_nm, dev,
				pool, &params->nm_params);
		if (res == -1) {
			close_pkt_netmap(&pktio_entry->s.pkt_nm);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;
#endif
	default:
		free_pktio_entry(id);
		id = ODP_PKTIO_INVALID;
		ODP_ERR("This type of I/O is not supported. Please recompile.\n");
		break;
	}

	unlock_entry(pktio_entry);
	return id;
}
Example #25
int odp_hugepage_info_init(void)
{
	unsigned i, num_sizes = 0;
	struct dirent *dirent = NULL;
	DIR *dir = NULL;
	struct odp_hugepage_type *hpt = NULL;

	dir = opendir(sys_dir_path);
	if (!dir) {
		ODP_PRINT("Cannot open directory %s to "
			  "read system hugepage info\n", sys_dir_path);
		return -1;
	}

	dirent = readdir(dir);

	/* loop over the different hugepage sizes in the
	 * system hugepage directory */
	while (dirent) {
		if (strncmp(dirent->d_name, "hugepages-",
			    ODP_SYS_HGPG_STR_LEN) == 0) {
			hpt = &local_config.odp_hugepage_type[num_sizes];

			hpt->hugepage_sz =
				odp_str_to_size(&dirent->d_name[
							ODP_SYS_HGPG_STR_LEN]);
			hpt->hugedir = get_hugepage_dir(hpt->hugepage_sz);

			/* first, check if we have a mountpoint */
			if (!hpt->hugedir) {
				int32_t num_pages;

				num_pages = get_num_hugepages(dirent->d_name);
				if (num_pages > 0) {
					ODP_PRINT("%d hugepages of"
						  " size %lu reserved, ",
						  num_pages,
						  hpt->hugepage_sz);
					ODP_PRINT("but no mounted hugetlbfs"
						  " found for that size\n");
				}
			} else {
				/* open the hugepage directory so it can
				 * be locked */
				hpt->lock_descriptor = open(hpt->hugedir,
							    O_RDONLY);

				/* if blocking lock failed */
				if (flock(hpt->lock_descriptor,
					  LOCK_EX) == -1) {
					ODP_ERR("Failed to lock hugepage"
						" directory!\n");
					closedir(dir);
					return -1;
				}

				/* clear out the hugepages
				* dir from unused pages */
				if (clear_hugedir(hpt->hugedir) == -1) {
					closedir(dir);
					return -1;
				}

				/* for now, put all pages into socket 0,
				 * later they will be sorted */
				hpt->num_pages[0] =
					get_num_hugepages(dirent->d_name);
				hpt->num_pages[0] =
					ODP_MIN(hpt->num_pages[0],
						ODP_PAGE_MEMORY_MAX /
						hpt->hugepage_sz);
				num_sizes++;
			}
		}

		dirent = readdir(dir);
	}

	closedir(dir);
	local_config.num_hugepage_types = num_sizes;

	/* sort the page directory entries by size, largest to smallest */
	for (i = 0; i < num_sizes; i++) {
		unsigned int j;

		for (j = i + 1; j < num_sizes; j++)
			if (local_config.odp_hugepage_type[j - 1].hugepage_sz <
			    local_config.odp_hugepage_type[j].hugepage_sz)
				swap_hpt(
					&local_config.odp_hugepage_type[j - 1],
					&local_config.odp_hugepage_type[j]);
	}

	/* now we have all info, check we have at least one valid size */
	for (i = 0; i < num_sizes; i++)
		if (local_config.odp_hugepage_type[i].hugedir &&
		    (local_config.odp_hugepage_type[i].num_pages[0] > 0))
			return 0;

	/* no valid hugepage mounts available, return error */
	return -1;
}
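get_num_hugepages() is referenced above but not shown; a plausible sketch that reads the per-size page count from the standard sysfs layout (the real helper may also account for reserved pages):

static int32_t get_num_hugepages(const char *subdir)
{
	char path[256];
	long num = -1;
	FILE *f;

	/* e.g. /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages */
	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/hugepages/%s/nr_hugepages", subdir);

	f = fopen(path, "r");
	if (!f)
		return -1;

	if (fscanf(f, "%ld", &num) != 1)
		num = -1;

	fclose(f);
	return (int32_t)num;
}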
Example #26
static int find_hpt_numasocket(struct hugepage_file	*hugepg_tbl,
			       struct odp_hugepage_type *hpt)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[ODP_BUFF_SIZE];
	char hugedir_str[ODP_PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (!f)
		return 0;

	snprintf(hugedir_str, sizeof(hugedir_str), "%s/%s",
		 hpt->hugedir, HUGEFILE_PREFIX_DEFAULT);

	while (fgets(buf, sizeof(buf), f)) {
		if (!strstr(buf, " huge ") && !strstr(buf, hugedir_str))
			continue;

		virt_addr = strtoull(buf, &end, 16);
		if ((virt_addr == 0) || (end == buf)) {
			ODP_ERR("error in numa_maps parsing\n");
			goto error;
		}

		nodestr = strstr(buf, " N");
		if (!nodestr) {
			ODP_ERR("error in numa_maps parsing\n");
			goto error;
		}

		nodestr += 2;
		end = strstr(nodestr, "=");
		if (!end) {
			ODP_ERR("error in numa_maps parsing\n");
			goto error;
		}

		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			ODP_ERR("error in numa_maps parsing\n");
			goto error;
		}

		for (i = 0; i < hpt->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;

			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
			}
		}
	}

	if (hp_count < hpt->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
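For reference, the /proc/self/numa_maps lines parsed above look like this illustrative example (path and counts vary by system):

/*
 *   7f2b40000000 default file=/mnt/huge/map_0 huge dirty=1 N1=1
 *
 * The leading hex field is the mapping's virtual address; the "N<node>="
 * token names the NUMA node backing the pages, which the parser extracts
 * into socket_id.
 */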
Example #27
/**
 * ODP packet example main function
 */
int main(int argc, char *argv[])
{
	odp_linux_pthread_t thread_tbl[MAX_WORKERS];
	odp_buffer_pool_t pool;
	int thr_id;
	int num_workers;
	void *pool_base;
	int i;
	int first_core;
	int core_count;

	/* Init ODP before calling anything else */
	if (odp_init_global()) {
		ODP_ERR("Error: ODP global init failed.\n");
		exit(EXIT_FAILURE);
	}

	/* Reserve memory for args from shared mem */
	args = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE);
	if (args == NULL) {
		ODP_ERR("Error: shared mem alloc failed.\n");
		exit(EXIT_FAILURE);
	}
	memset(args, 0, sizeof(*args));

	/* Parse and store the application arguments */
	parse_args(argc, argv, &args->appl);

	/* Print both system and application information */
	print_info(NO_PATH(argv[0]), &args->appl);

	core_count  = odp_sys_core_count();
	num_workers = core_count;

	if (args->appl.core_count)
		num_workers = args->appl.core_count;

	if (num_workers > MAX_WORKERS)
		num_workers = MAX_WORKERS;

	printf("Num worker threads: %i\n", num_workers);

	/*
	 * By default core #0 runs Linux kernel background tasks.
	 * Start mapping thread from core #1
	 */
	first_core = 1;

	if (core_count == 1)
		first_core = 0;

	printf("First core:         %i\n\n", first_core);

	/* Init this thread */
	thr_id = odp_thread_create(0);
	odp_init_local(thr_id);

	/* Create packet pool */
	pool_base = odp_shm_reserve("shm_packet_pool",
				    SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE);
	if (pool_base == NULL) {
		ODP_ERR("Error: packet pool mem alloc failed.\n");
		exit(EXIT_FAILURE);
	}

	pool = odp_buffer_pool_create("packet_pool", pool_base,
				      SHM_PKT_POOL_SIZE,
				      SHM_PKT_POOL_BUF_SIZE,
				      ODP_CACHE_LINE_SIZE,
				      ODP_BUFFER_TYPE_PACKET);
	if (pool == ODP_BUFFER_POOL_INVALID) {
		ODP_ERR("Error: packet pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	odp_buffer_pool_print(pool);

	/* Create and init worker threads */
	memset(thread_tbl, 0, sizeof(thread_tbl));
	for (i = 0; i < num_workers; ++i) {
		void *(*thr_run_func) (void *);
		int core;
		int if_idx;

		core = (first_core + i) % core_count;

		if_idx = i % args->appl.if_count;

		args->thread[i].pktio_dev = args->appl.if_names[if_idx];
		args->thread[i].pool = pool;
		args->thread[i].mode = args->appl.mode;
		args->thread[i].type = args->appl.type;
		args->thread[i].fanout = args->appl.fanout;

		if (args->appl.mode == APPL_MODE_PKT_BURST)
			thr_run_func = pktio_ifburst_thread;
		else /* APPL_MODE_PKT_QUEUE */
			thr_run_func = pktio_queue_thread;
		/*
		 * Create threads one-by-one instead of all-at-once,
		 * because each thread might get different arguments.
		 * Calls odp_thread_create(cpu) for each thread
		 */
		odp_linux_pthread_create(thread_tbl, 1, core, thr_run_func,
					 &args->thread[i]);
	}

	/* Master thread waits for other threads to exit */
	odp_linux_pthread_join(thread_tbl, num_workers);

	printf("Exit\n\n");

	return 0;
}
Example #28
static const struct odp_mm_district *mm_district_reserve_aligned(
	const char *name, const char *orig_name,
	size_t len,
	int socket_id, unsigned flags,
	unsigned align,
	unsigned bound)
{
	struct odp_sys_layout *mcfg;
	unsigned i = 0;
	int mmfrag_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t	 requested_len;
	size_t	 mmfrag_len = 0;
	phys_addr_t mmfrag_physaddr;
	void *mmfrag_addr;
	struct odp_mm_district *md = NULL;

	/* get pointer to global configuration */
	mcfg = odp_get_configuration()->sys_layout;

	/* no more room in config */
	if (mcfg->mm_district_idx >= ODP_MAX_MM_DISTRICT) {
		ODP_ERR("%s: No more room in config\n", name);
		odp_err = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if (mm_district_lookup(name)) {
		ODP_ERR("mm_district <%s> already exists\n", name);
		odp_err = EEXIST;
		return NULL;
	}

	if (!orig_name) {
		ODP_ERR("Invalid param: orig_name\n");
		odp_err = EINVAL;
		return NULL;
	}

	md = free_mm_district_lookup(orig_name);
	if (md)
		if (len <= md->len) {
			free_mm_district_fetch(md);
			return md;
		}

	/* if alignment is not a power of two */
	if (align && !odp_is_power_of_2(align)) {
		ODP_ERR("Invalid alignment: %u\n", align);
		odp_err = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < ODP_CACHE_LINE_SIZE)
		align = ODP_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > MEM_SIZE_MAX - ODP_CACHE_LINE_MASK) {
		odp_err = EINVAL; /* requested size too big */
		return NULL;
	}

	len += ODP_CACHE_LINE_MASK;
	len &= ~((size_t)ODP_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = ODP_MAX((size_t)ODP_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if ((bound != 0) && ((requested_len > bound) ||
			     !odp_is_power_of_2(bound))) {
		odp_err = EINVAL;
		return NULL;
	}

	/* find the smallest segment matching requirements */
	for (i = 0; i < ODP_MAX_MMFRAG; i++) {
		/* last segment */
		if (!free_mmfrag[i].addr)
			break;

		/* empty segment, skip it */
		if (free_mmfrag[i].len == 0)
			continue;

		/* bad socket ID */
		if ((socket_id != SOCKET_ID_ANY) &&
		    (free_mmfrag[i].socket_id != SOCKET_ID_ANY) &&
		    (socket_id != free_mmfrag[i].socket_id))
			continue;

		/*
		 * calculate offset to closest alignment that
		 * meets boundary conditions.
		 */
		addr_offset = align_phys_boundary(free_mmfrag + i,
						  requested_len, align, bound);

		/* check len */
		if ((requested_len + addr_offset) > free_mmfrag[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & ODP_MEMZONE_2MB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_1G))
			continue;

		if ((flags & ODP_MEMZONE_1GB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_2M))
			continue;

		if ((flags & ODP_MEMZONE_16MB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_16G))
			continue;

		if ((flags & ODP_MEMZONE_16GB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_16M))
			continue;

		/* this segment is the best until now */
		if (mmfrag_idx == -1) {
			mmfrag_idx = i;
			mmfrag_len = free_mmfrag[i].len;
			seg_offset = addr_offset;
		}

		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_mmfrag[i].len > mmfrag_len) {
				mmfrag_idx = i;
				mmfrag_len = free_mmfrag[i].len;
				seg_offset = addr_offset;
			}
		}

		/*
		 * find the smallest segment (we already checked that the
		 * current zone length is > len)
		 */
		else if ((free_mmfrag[i].len + align < mmfrag_len) ||
			 ((free_mmfrag[i].len <= mmfrag_len + align) &&
			  (addr_offset < seg_offset))) {
			mmfrag_idx = i;
			mmfrag_len = free_mmfrag[i].len;
			seg_offset = addr_offset;
		}
	}

	/* no segment found */
	if (mmfrag_idx == -1) {
		/*
		 * If the ODP_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the size parameter,
		 * otherwise fail.
		 */
		if ((flags & ODP_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & ODP_MEMZONE_1GB) ||
		     (flags & ODP_MEMZONE_2MB) ||
		     (flags & ODP_MEMZONE_16MB) ||
		     (flags & ODP_MEMZONE_16GB)))
			return mm_district_reserve_aligned(name, orig_name,
							   len, socket_id, 0,
							   align, bound);

		odp_err = ENOMEM;
		return NULL;
	}

	/* save aligned physical and virtual addresses */
	mmfrag_physaddr = free_mmfrag[mmfrag_idx].phys_addr + seg_offset;
	mmfrag_addr = ODP_PTR_ADD(free_mmfrag[mmfrag_idx].addr,
				  (uintptr_t)seg_offset);

	/* if we are looking for the biggest mm_district */
	if (len == 0) {
		if (bound == 0)
			requested_len = mmfrag_len - seg_offset;
		else
			requested_len =
				ODP_ALIGN_CEIL(mmfrag_physaddr + 1, bound)
				- mmfrag_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_mmfrag[mmfrag_idx].len -= len;
	free_mmfrag[mmfrag_idx].phys_addr += len;
	free_mmfrag[mmfrag_idx].addr =
		(char *)free_mmfrag[mmfrag_idx].addr + len;

	/* fill the zone in config */
	struct odp_mm_district *mz =
		&mcfg->mm_district[mcfg->mm_district_idx++];

	snprintf(mz->orig_name, sizeof(mz->orig_name), "%s", orig_name);
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = mmfrag_physaddr;
	mz->phys_addr_end  = mmfrag_physaddr + requested_len;
	mz->excursion_addr = mmfrag_addr - mmfrag_physaddr;
	mz->addr = mmfrag_addr;
	mz->len	 = requested_len;
	mz->hugepage_sz = free_mmfrag[mmfrag_idx].hugepage_sz;
	mz->socket_id = free_mmfrag[mmfrag_idx].socket_id;
	mz->flags = 0;
	mz->mmfrag_id = mmfrag_idx;

	return mz;
}
Example #29
/**
 * Packet IO loopback worker thread using ODP queues
 *
 * @param arg  thread arguments of type 'thread_args_t *'
 */
static void *pktio_queue_thread(void *arg)
{
	int thr;
	odp_buffer_pool_t pkt_pool;
	odp_pktio_t pktio;
	thread_args_t *thr_args;
	odp_queue_t outq_def;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];
	odp_queue_param_t qparam;
	odp_packet_t pkt;
	odp_buffer_t buf;
	int ret;
	unsigned long pkt_cnt = 0;
	unsigned long err_cnt = 0;
	odp_pktio_params_t params;
	socket_params_t *sock_params = &params.sock_params;

	thr = odp_thread_id();
	thr_args = arg;

	printf("Pktio thread [%02i] starts, pktio_dev:%s\n", thr,
	       thr_args->pktio_dev);

	/* Lookup the packet pool */
	pkt_pool = odp_buffer_pool_lookup("packet_pool");
	if (pkt_pool == ODP_BUFFER_POOL_INVALID || pkt_pool != thr_args->pool) {
		ODP_ERR("  [%02i] Error: pkt_pool not found\n", thr);
		return NULL;
	}

	/* Open a packet IO instance for this thread */
	sock_params->type = thr_args->type;
	sock_params->fanout = thr_args->fanout;
	pktio = odp_pktio_open(thr_args->pktio_dev, pkt_pool, &params);
	if (pktio == ODP_PKTIO_INVALID) {
		ODP_ERR("  [%02i] Error: pktio create failed\n", thr);
		return NULL;
	}

	/*
	 * Create and set the default INPUT queue associated with the 'pktio'
	 * resource
	 */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
	snprintf(inq_name, sizeof(inq_name), "%i-pktio_inq_def", (int)pktio);
	inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';

	inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
	if (inq_def == ODP_QUEUE_INVALID) {
		ODP_ERR("  [%02i] Error: pktio queue creation failed\n", thr);
		return NULL;
	}

	ret = odp_pktio_inq_setdef(pktio, inq_def);
	if (ret != 0) {
		ODP_ERR("  [%02i] Error: default input-Q setup\n", thr);
		return NULL;
	}

	printf("  [%02i] created pktio:%02i, queue mode (ATOMIC queues)\n"
	       "          default pktio%02i-INPUT queue:%u\n",
		thr, pktio, pktio, inq_def);

	/* Loop packets */
	for (;;) {
		odp_pktio_t pktio_tmp;

#if 1
		/* Use schedule to get buf from any input queue */
		buf = odp_schedule(NULL, ODP_SCHED_WAIT);
#else
		/* Always dequeue from the same input queue */
		buf = odp_queue_deq(inq_def);
		if (!odp_buffer_is_valid(buf))
			continue;
#endif

		pkt = odp_packet_from_buffer(buf);

		/* Drop packets with errors */
		if (odp_unlikely(drop_err_pkts(&pkt, 1) == 0)) {
			ODP_ERR("Drop frame - err_cnt:%lu\n", ++err_cnt);
			continue;
		}

		pktio_tmp = odp_pktio_get_input(pkt);
		outq_def = odp_pktio_outq_getdef(pktio_tmp);

		if (outq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("  [%02i] Error: def output-Q query\n", thr);
			return NULL;
		}

		/* Swap Eth MACs and possibly IP-addrs before sending back */
		swap_pkt_addrs(&pkt, 1);

		/* Enqueue the packet for output */
		odp_queue_enq(outq_def, buf);

		/* Print packet counts every once in a while */
		if (odp_unlikely(pkt_cnt++ % 100000 == 0)) {
			printf("  [%02i] pkt_cnt:%lu\n", thr, pkt_cnt);
			fflush(NULL);
		}
	}

/* unreachable */
}
Example #30
int setup_pkt_odp(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
		  const char *netdev, odp_pool_t pool)
{
	uint8_t	portid = 0;
	uint16_t nbrxq = 0;
	uint16_t nbtxq = 0;
	int ret, i;
	struct odp_eth_dev *dev;

	pkt_odp_t *const pkt_odp = &pktio_entry->s.pkt_odp;

	if (!_odp_netdev_is_valid(netdev)) {
		ODP_ERR("netdev %s, format err! should be "
			"pktio_x! x is digital number\n", netdev);
		return -1;
	}

	portid = atoi(netdev + strlen(PKTIO_DEV_NAME));
	pkt_odp->portid = portid;
	pkt_odp->pool = pool;
	pkt_odp->queueid = 0;

	/* On init, set up only one RX and one TX queue. */

	dev = odp_eth_dev_allocated_id(portid);
	if (!dev) {
		ODP_ERR("netdev %s, odp_eth_dev_allocated_id err!\n", netdev);
		return -1;
	}

	nbtxq = dev->q_num;
	nbrxq = dev->q_num;

	ret = odp_eth_dev_configure(portid, nbrxq, nbtxq, &port_conf);
	if (ret < 0) {
		ODP_ERR("Cannot configure device: err=%d, port=%u\n",
			ret, (unsigned int)portid);
		return -1;
	}

	/* init one RX queue on each port */
	for (i = 0; i < nbrxq; i++) {
		ret = odp_eth_rx_queue_setup(portid, i, nb_rxd,
					     0, NULL/*&rx_conf*/, (void *)pool);
		if (ret < 0) {
			ODP_ERR("rxq:err=%d, port=%u\n",
				ret, (unsigned int)portid);
			return -1;
		}
	}

	/* init one TX queue on each port */
	for (i = 0; i < nbtxq; i++) {
		ret = odp_eth_tx_queue_setup(portid, i, nb_txd,
					     0, NULL/*&tx_conf*/, (void *)pool);
		if (ret < 0) {
			ODP_ERR("txq:err=%d, port=%u\n",
				ret, (unsigned int)portid);
			return -1;
		}
	}

	/* Start device */
	ret = odp_eth_dev_start(portid);
	if (ret < 0) {
		ODP_ERR("odp_eth_dev_start:err=%d, port=%u\n",
			ret, (unsigned int)portid);
		return -1;
	}

	return 0;
}