Exemplo n.º 1
0
/*
 * Fill the CPU-related fields of the system info structure.
 *
 * CPU count and huge page size are queried from the system; cache line
 * size, max frequency and model string are placeholders
 * (see https://bugs.linaro.org/show_bug.cgi?id=1870).
 *
 * Returns 0 on success, -1 if the CPU count cannot be determined.
 */
static int systemcpu(odp_system_info_t *sysinfo)
{
	int cpu_idx;
	int count = sysconf_cpu_count();

	if (count == 0) {
		ODP_ERR("sysconf_cpu_count failed.\n");
		return -1;
	}

	sysinfo->cpu_count      = count;
	sysinfo->huge_page_size = huge_page_size();

	/* Dummy values */
	sysinfo->cache_line_size = 64;

	ODP_DBG("Warning: use dummy values for freq and model string\n");
	ODP_DBG("Refer to https://bugs.linaro.org/show_bug.cgi?id=1870\n");

	for (cpu_idx = 0; cpu_idx < MAX_CPU_NUMBER; cpu_idx++) {
		sysinfo->cpu_hz_max[cpu_idx] = 1400000000;
		strcpy(sysinfo->model_str[cpu_idx], "UNKNOWN");
	}

	return 0;
}
Exemplo n.º 2
0
static void timer_init(void)
{
	struct sigevent   sigev;
	struct itimerspec ispec;

	ODP_DBG("Timer thread starts\n");

	memset(&sigev, 0, sizeof(sigev));
	memset(&ispec, 0, sizeof(ispec));

	sigev.sigev_notify          = SIGEV_THREAD;
	sigev.sigev_notify_function = notify_function;

	if (timer_create(CLOCK_MONOTONIC, &sigev,
			 &odp_timer.timer[0].timerid)) {
		ODP_DBG("Timer create failed\n");
		return;
	}

	ispec.it_interval.tv_sec  = 0;
	ispec.it_interval.tv_nsec = RESOLUTION_NS;
	ispec.it_value.tv_sec     = 0;
	ispec.it_value.tv_nsec    = RESOLUTION_NS;

	if (timer_settime(odp_timer.timer[0].timerid, 0, &ispec, NULL)) {
		ODP_DBG("Timer set failed\n");
		return;
	}

	return;
}
Exemplo n.º 3
0
/**
 * Open a HW packet IO instance.
 *
 * Allocates a pktio entry (returned locked by the allocator), resolves
 * the HW device by name and creates a default output queue bound to the
 * device's TX HW queue.
 *
 * @param dev     Device name passed to _odp_pktio_dev_lookup()
 * @param pool    Buffer pool stored as the entry's default input pool
 * @param params  Packet IO parameters; must not be NULL
 *
 * @return New pktio handle, or ODP_PKTIO_INVALID on any failure
 */
odp_pktio_t odp_pktio_open(const char *dev, odp_buffer_pool_t pool,
			   odp_pktio_params_t *params)
{
	odp_pktio_t id;
	pktio_entry_t *pktio_entry;
	char name[ODP_QUEUE_NAME_LEN];
	queue_entry_t *queue_entry;
	odp_queue_t qid = ODP_QUEUE_INVALID;

	if (params == NULL) {
		ODP_ERR("Invalid pktio params\n");
		return ODP_PKTIO_INVALID;
	}

	ODP_DBG("Allocating HW pktio\n");

	id = alloc_lock_pktio_entry(params);
	if (id == ODP_PKTIO_INVALID) {
		ODP_ERR("No resources available.\n");
		return ODP_PKTIO_INVALID;
	}
	/* if successful, alloc_pktio_entry() returns with the entry locked */

	pktio_entry = get_entry(id);

	/* Create a default output queue for each pktio resource */
	snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id);
	name[ODP_QUEUE_NAME_LEN-1] = '\0';

	pktio_entry->s.dev = _odp_pktio_dev_lookup(dev);
	if (!pktio_entry->s.dev) {
		/* Unknown device: release the entry, but still unlock it */
		free_pktio_entry(id);
		id = ODP_PKTIO_INVALID;
		goto unlock;
	}

	qid = _odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL,
				pktio_entry->s.dev->tx_hw_queue);
	ODP_DBG("Created queue %u for hw queue %d\n", (uint32_t)qid,
		pktio_entry->s.dev->tx_hw_queue);
	if (qid == ODP_QUEUE_INVALID) {
		free_pktio_entry(id);
		id = ODP_PKTIO_INVALID;
		goto unlock;
	}
	pktio_entry->s.in_pool = pool;
	pktio_entry->s.outq_default = qid;

	/* Link the new queue back to this pktio for TX dispatch */
	queue_entry = queue_to_qentry(qid);
	queue_entry->s.pktout = id;
	queue_entry->s.out_port_id = pktio_entry->s.dev->port_id;
unlock:
	unlock_entry(pktio_entry);
	return id;
}
Exemplo n.º 4
0
/* Dump usage counters of a sorted-list pool to the debug log. */
void _odp_sorted_list_stats_print(_odp_int_sorted_pool_t sorted_pool)
{
	sorted_pool_t *sp = (sorted_pool_t *)(uintptr_t)sorted_pool;

	ODP_DBG("sorted_pool=0x%" PRIX64 "\n", sorted_pool);
	ODP_DBG("  max_sorted_lists=%u next_list_idx=%u\n",
		sp->max_sorted_lists, sp->next_list_idx);
	ODP_DBG("  total_inserts=%" PRIu64 " total_deletes=%" PRIu64
		" total_removes=%" PRIu64 "\n", sp->total_inserts,
		sp->total_deletes, sp->total_removes);
}
Exemplo n.º 5
0
/**
 * Arm a timeout that expires at an absolute tick value.
 *
 * @param timer     Timer handle (1-based; index is timer - 1)
 * @param tmo_tick  Absolute tick at which the timeout should expire;
 *                  must be in the future and within MAX_TICKS of now
 * @param queue     Destination queue for the expiration event
 * @param buf       User buffer delivered on expiry, or
 *                  ODP_BUFFER_INVALID to deliver the timeout buffer
 *
 * @return Timeout handle (the allocated buffer), or
 *         ODP_TIMER_TMO_INVALID on range error or allocation failure
 */
odp_timer_tmo_t odp_timer_absolute_tmo(odp_timer_t timer, uint64_t tmo_tick,
				       odp_queue_t queue, odp_buffer_t buf)
{
	int id;
	uint64_t tick;
	uint64_t cur_tick;
	timeout_t *new_tmo;
	odp_buffer_t tmo_buf;
	odp_timeout_hdr_t *tmo_hdr;

	/* Timer handles are 1-based; convert to array index */
	id = timer - 1;

	cur_tick = odp_timer.timer[id].cur_tick;
	if (tmo_tick <= cur_tick) {
		ODP_DBG("timeout too close\n");
		return ODP_TIMER_TMO_INVALID;
	}

	tick = tmo_tick - cur_tick;
	if (tick > MAX_TICKS) {
		ODP_DBG("timeout too far\n");
		return ODP_TIMER_TMO_INVALID;
	}

	/* Slot in the tick wheel: absolute tick modulo wheel size */
	tick = (cur_tick + tick) % MAX_TICKS;

	tmo_buf = odp_buffer_alloc(odp_timer.timer[id].pool);
	if (tmo_buf == ODP_BUFFER_INVALID) {
		ODP_DBG("alloc failed\n");
		return ODP_TIMER_TMO_INVALID;
	}

	/* Timeout metadata lives in the buffer's timeout header */
	tmo_hdr = odp_timeout_hdr((odp_timeout_t) tmo_buf);
	new_tmo = &tmo_hdr->meta;

	new_tmo->timer_id = id;
	new_tmo->tick     = (int)tick;
	new_tmo->tmo_tick = tmo_tick;
	new_tmo->queue    = queue;
	new_tmo->tmo_buf  = tmo_buf;

	/* If the caller gave no payload buffer, deliver the tmo buffer */
	if (buf != ODP_BUFFER_INVALID)
		new_tmo->buf = buf;
	else
		new_tmo->buf = tmo_buf;

	add_tmo(&odp_timer.timer[id].tick[tick], new_tmo);

	return tmo_buf;
}
Exemplo n.º 6
0
/*
 * Build the default worker CPU mask with up to 'num' CPUs, allocating
 * from the highest-numbered installed worker CPU downwards. A zero or
 * oversized request selects all worker CPUs.
 *
 * Returns the number of CPUs actually placed in the mask.
 */
int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
{
	odp_cpumask_t overlap;
	int avail, picked, idx;

	/* No user supplied number, or too large: use all worker CPUs */
	avail = odp_cpumask_count(&odp_global_data.worker_cpus);
	if (num == 0 || num > avail)
		num = avail;

	/* Allocate downwards from the highest numbered CPU */
	odp_cpumask_zero(mask);
	picked = 0;
	for (idx = CPU_SETSIZE - 1; idx >= 0 && picked < num; --idx) {
		if (!odp_cpumask_isset(&odp_global_data.worker_cpus, idx))
			continue;
		odp_cpumask_set(mask, idx);
		picked++;
	}

	/* Warn if workers share CPUs with the control plane */
	odp_cpumask_and(&overlap, mask, &odp_global_data.control_cpus);
	if (odp_cpumask_count(&overlap))
		ODP_DBG("\n\tWorker CPUs overlap with control CPUs...\n"
			"\tthis will likely have a performance impact on the worker threads.\n");

	return picked;
}
Exemplo n.º 7
0
/**
 * Map netmap rings to pktin/pktout queues
 *
 * @param rings          Array of netmap descriptor rings
 * @param num_queues     Number of pktin/pktout queues
 * @param num_rings      Number of matching netmap rings
 */
static inline void map_netmap_rings(netmap_ring_t *rings,
				    unsigned num_queues, unsigned num_rings)
{
	struct netmap_ring_t *desc_ring;
	unsigned rings_per_queue;
	unsigned remainder;
	unsigned mapped_rings;
	unsigned i;
	unsigned desc_id = 0;

	/* Guard: the divisions below are undefined for zero queues */
	if (num_queues == 0)
		return;

	rings_per_queue = num_rings / num_queues;
	remainder = num_rings % num_queues;

	if (remainder)
		ODP_DBG("WARNING: Netmap rings mapped unevenly to queues\n");

	for (i = 0; i < num_queues; i++) {
		desc_ring = &rings[i].s;
		/* The first 'remainder' queues get one extra ring each */
		if (i < remainder)
			mapped_rings = rings_per_queue + 1;
		else
			mapped_rings = rings_per_queue;

		/* Assign a contiguous [first, last] descriptor range */
		desc_ring->first = desc_id;
		desc_ring->cur	= desc_id;
		desc_ring->last = desc_ring->first + mapped_rings - 1;
		desc_ring->num	= mapped_rings;

		desc_id = desc_ring->last + 1;
	}
}
Exemplo n.º 8
0
/*
 * Cancel a pending timeout: locate it on its tick list and unlink it.
 * Returns 0 on success, -1 when the timeout is not on the list.
 */
int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo)
{
	timeout_t *tmo_meta;
	tick_t *tick_entry;
	int timer_idx;

	/* Timer handles are 1-based */
	timer_idx = timer - 1;

	/* The timeout metadata records which tick list it is queued on */
	tmo_meta = (timeout_t *)odp_buffer_addr(tmo);
	tick_entry = &odp_timer.timer[timer_idx].tick[tmo_meta->tick];

	odp_spinlock_lock(&tick_entry->lock);
	if (find_and_del_tmo(&tick_entry->list, tmo) != 0) {
		odp_spinlock_unlock(&tick_entry->lock);
		ODP_DBG("Couldn't find the tmo (%d) in tick list\n", (int)tmo);
		return -1;
	}
	odp_spinlock_unlock(&tick_entry->lock);

	return 0;
}
Exemplo n.º 9
0
/*
 * Restart (stop then start) the ethernet device behind a pktio handle.
 * Returns 0 on success, -1 on any validation or restart failure.
 */
int odp_pktio_restart(odp_pktio_t id)
{
	pktio_entry_t *entry = get_pktio_entry(id);
	uint8_t port_id;
	int ret;

	if (entry == NULL) {
		ODP_DBG("pktio entry %d does not exist\n",
			id->unused_dummy_var);
		return -1;
	}

	if (odp_unlikely(is_free(entry))) {
		ODP_DBG("already freed pktio\n");
		return -1;
	}

	/* Only ODP UMD (hns) devices can be restarted this way */
	if (odp_pktio_is_not_hns_eth(entry)) {
		ODP_DBG("pktio entry %d is not ODP UMD pktio\n",
			id->unused_dummy_var);
		return -1;
	}

	port_id = entry->s.pkt_odp.portid;
	if (!odp_eth_dev_is_valid_port(port_id)) {
		ODP_DBG("pktio entry %d ODP UMD Invalid port_id=%d\n",
			id->unused_dummy_var, port_id);
		return -1;
	}

	/* Stop device */
	odp_eth_dev_stop(port_id);

	/* Start device */
	ret = odp_eth_dev_start(port_id);
	if (ret < 0) {
		ODP_ERR("odp_eth_dev_start:err=%d, port=%u\n",
			ret, (unsigned)port_id);
		return -1;
	}

	ODP_DBG("odp pmd restart done\n\n");
	return 0;
}
Exemplo n.º 10
0
int odp_pool_init_global(void)
{
	uint32_t i;
	odp_shm_t shm;

	shm = odp_shm_reserve(SHM_DEFAULT_NAME,
			      sizeof(pool_table_t),
			      sizeof(pool_entry_t), 0);

	pool_tbl = odp_shm_addr(shm);

	if (pool_tbl == NULL)
		return -1;

	memset(pool_tbl, 0, sizeof(pool_table_t));

	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
		/* init locks */
		pool_entry_t *pool = &pool_tbl->pool[i];

		POOL_LOCK_INIT(&pool->s.lock);
		POOL_LOCK_INIT(&pool->s.buf_lock);
		POOL_LOCK_INIT(&pool->s.blk_lock);
		pool->s.pool_hdl = pool_index_to_handle(i);
		pool->s.pool_id = i;
		pool_entry_ptr[i] = pool;
		odp_atomic_init_u32(&pool->s.bufcount, 0);
		odp_atomic_init_u32(&pool->s.blkcount, 0);

		/* Initialize pool statistics counters */
		odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0);
		odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0);
		odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0);
		odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0);
		odp_atomic_init_u64(&pool->s.poolstats.high_wm_count, 0);
		odp_atomic_init_u64(&pool->s.poolstats.low_wm_count, 0);
	}

	ODP_DBG("\nPool init global\n");
	ODP_DBG(" pool_entry_s size     %zu\n", sizeof(struct pool_entry_s));
	ODP_DBG(" pool_entry_t size     %zu\n", sizeof(pool_entry_t));
	ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
	ODP_DBG("\n");
	return 0;
}
Exemplo n.º 11
0
/* Dump usage counters of a packet-queue pool to the debug log. */
void _odp_pkt_queue_stats_print(_odp_int_queue_pool_t queue_pool)
{
	queue_pool_t *qp = (queue_pool_t *)(uintptr_t)queue_pool;

	ODP_DBG("pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", queue_pool);
	ODP_DBG("  max_queue_num=%u max_queued_pkts=%u next_queue_num=%u\n",
		qp->max_queue_num, qp->max_queued_pkts,
		qp->next_queue_num);
	ODP_DBG("  total pkt appends=%" PRIu64 " total pkt removes=%" PRIu64
		" bad removes=%" PRIu64 "\n",
		qp->total_pkt_appends, qp->total_pkt_removes,
		qp->total_bad_removes);
	ODP_DBG("  free_list size=%u min size=%u peak size=%u\n",
		qp->free_list_size, qp->min_free_list_size,
		qp->peak_free_list_size);
}
Exemplo n.º 12
0
/**
 * Collect information on every valid ethernet port.
 *
 * Fills one pktio_info slot per valid port, packed contiguously from
 * index 0 regardless of gaps in the physical port numbering.
 *
 * @param[out] pktio_info  Array of at least ODP_MAX_ETHPORTS entries
 * @param[out] num         Number of valid ports found
 *
 * @return 0 on success, -1 if a parameter is NULL
 */
int odp_pktio_dev_get(struct odp_pktio_info *pktio_info, uint8_t *num)
{
	uint8_t port_num = 0;
	uint8_t i;
	struct odp_eth_dev_info dev_info;

	if (!pktio_info || !num) {
		ODP_DBG("parameter pktio_info or num is NULL!\n");
		return -1;
	}

	for (i = 0; i < ODP_MAX_ETHPORTS; i++) {
		struct odp_pktio_info *info;

		if (!odp_eth_dev_is_valid_port(i))
			continue;

		info = &pktio_info[port_num];

		/* BUGFIX: original code zeroed only
		 * sizeof(PACKET_IO_NAME_LENGTH_MAX) (== sizeof(int))
		 * bytes and wrote the name into pktio_info[i] while all
		 * other fields went to pktio_info[port_num]. Zero the
		 * whole name buffer and keep everything in one slot;
		 * snprintf bounds the write. */
		memset(info->name, 0, PACKET_IO_NAME_LENGTH_MAX);
		snprintf(info->name, PACKET_IO_NAME_LENGTH_MAX,
			 "pktio_%d", i);

		odp_eth_dev_info_get(i, &dev_info);
		if (dev_info.pci_dev) { /* pci netif */
			info->if_type = ODP_PKITIO_DEV_TYPE_PCI;
			info->info.pci_info.addr.domain
				= dev_info.pci_dev->addr.domain;
			info->info.pci_info.addr.bus
				= dev_info.pci_dev->addr.bus;
			info->info.pci_info.addr.devid
				= dev_info.pci_dev->addr.devid;
			info->info.pci_info.addr.function
				= dev_info.pci_dev->addr.function;
			info->info.pci_info.id.vendor_id
				= dev_info.pci_dev->id.vendor_id;
			info->info.pci_info.id.device_id
				= dev_info.pci_dev->id.device_id;
			info->info.pci_info.id.subsystem_vendor_id
				= dev_info.pci_dev->id.subsystem_vendor_id;
			info->info.pci_info.id.subsystem_device_id
				= dev_info.pci_dev->id.subsystem_device_id;
			info->info.pci_info.numa_node
				= dev_info.pci_dev->numa_node;
		} else { /* soc netif */
			info->if_type = ODP_PKITIO_DEV_TYPE_SOC;
			info->info.soc_info.if_idx = dev_info.if_index;
			/* soc_info.numa_node intentionally left as-is
			 * (not used now) */
		}
		port_num++;
	}

	*num = port_num;
	return 0;
}
Exemplo n.º 13
0
/*
 * Reserve the global queue table in shared memory and initialize every
 * queue entry's lock and handle.
 *
 * Returns 0 on success, -1 if the shared memory reservation fails.
 */
int odp_queue_init_global(void)
{
	uint32_t idx;
	odp_shm_t shm;

	ODP_DBG("Queue init ... ");

	shm = odp_shm_reserve("odp_queues",
			      sizeof(queue_table_t),
			      sizeof(queue_entry_t), 0);
	queue_tbl = odp_shm_addr(shm);
	if (queue_tbl == NULL)
		return -1;

	memset(queue_tbl, 0, sizeof(queue_table_t));

	for (idx = 0; idx < ODP_CONFIG_QUEUES; idx++) {
		queue_entry_t *queue = get_qentry(idx);

		/* init locks and handle */
		LOCK_INIT(queue);
		queue->s.handle = queue_from_id(idx);
	}

	ODP_DBG("done\n");
	ODP_DBG("Queue init global\n");
	ODP_DBG("  struct queue_entry_s size %zu\n",
		sizeof(struct queue_entry_s));
	ODP_DBG("  queue_entry_t size        %zu\n",
		sizeof(queue_entry_t));
	ODP_DBG("\n");

	/* Make the initialized table visible to other cores */
	__k1_wmb();
	return 0;
}
Exemplo n.º 14
0
/**
 * Receive up to 'len' packets from a pktio's default input queue.
 *
 * Lazily creates the default input queue on first use.
 *
 * @param id         Packet IO handle
 * @param pkt_table  Output array of at least 'len' packet handles
 * @param len        Maximum number of packets to receive
 *
 * @return Number of packets received (0 is valid), or -1 when the
 *         pktio entry does not exist
 */
int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
{
	pktio_entry_t *pktio_entry = get_entry(id);
	unsigned pkts = 0;
	odp_buffer_t buf;

	if (pktio_entry == NULL)
		return -1;

	lock_entry(pktio_entry);

	if (pktio_entry->s.inq_default == ODP_QUEUE_INVALID) {
		char name[ODP_QUEUE_NAME_LEN];
		odp_queue_param_t qparam;
		odp_queue_t inq_def;
		/*
		 * Create a default input queue.
		 * FIXME: IT is a kind of WA for current ODP API usage.
		 * It should be revised.
		 */
		ODP_DBG("Creating default input queue\n");
		/* BUGFIX: zero the whole param struct; the original set
		 * only the sched fields and passed the rest of the
		 * stack garbage to odp_queue_create(). */
		memset(&qparam, 0, sizeof(qparam));
		qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qparam.sched.sync  = ODP_SCHED_SYNC_NONE;
		qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;
		snprintf(name, sizeof(name), "%i-pktio_inq_default", (int)id);
		name[ODP_QUEUE_NAME_LEN-1] = '\0';
		inq_def = odp_queue_create(name, ODP_QUEUE_TYPE_PKTIN, &qparam);
		if (inq_def == ODP_QUEUE_INVALID) {
			ODP_ERR("pktio queue creation failed\n");
			goto unlock;
		}

		if (odp_pktio_inq_setdef(id, inq_def)) {
			ODP_ERR("default input-Q setup\n");
			goto unlock;
		}
	}

	/* Drain up to 'len' buffers from the default input queue */
	for (pkts = 0; pkts < len; pkts++) {
		buf = odp_queue_deq(pktio_entry->s.inq_default);
		if (!odp_buffer_is_valid(buf))
			break;

		pkt_table[pkts] = odp_packet_from_buffer(buf);
	}
unlock:
	/* NOTE: queue-setup failures fall through and return 0 packets,
	 * not -1 — callers rely on this best-effort behavior. */
	unlock_entry(pktio_entry);
	return pkts;
}
Exemplo n.º 15
0
/*
 * Query the MTU of the ethernet port behind a pktio entry.
 * Returns the MTU in bytes, or -1 on invalid port or query failure.
 */
int odp_eth_mtu_get(pktio_entry_t *pktio_entry)
{
	uint16_t mtu;
	uint8_t port_id = pktio_entry->s.pkt_odp.portid;

	if (!odp_eth_dev_is_valid_port(port_id))
		return -1;

	if (odp_eth_dev_get_mtu(port_id, &mtu)) {
		ODP_DBG("port_id %d get mtu failed!\n", port_id);
		return -1;
	}

	return (int)mtu;
}
Exemplo n.º 16
0
/**
 * Set the default input queue of a pktio and route HW RX traffic to it.
 *
 * Re-opens the CPPI RX flow so received packets land on the queue's HW
 * queue, then marks the queue as scheduled.
 *
 * @param id     Packet IO handle
 * @param queue  Queue of type ODP_QUEUE_TYPE_PKTIN
 *
 * @return 0 on success, -1 on invalid pktio/queue or wrong queue type
 */
int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
{
	pktio_entry_t *pktio_entry = get_entry(id);
	queue_entry_t *qentry = queue_to_qentry(queue);

	if (pktio_entry == NULL || qentry == NULL)
		return -1;

	if (qentry->s.type != ODP_QUEUE_TYPE_PKTIN)
		return -1;

	pktio_entry->s.inq_default = queue;
	{
		uint32_t free_queue =
			_odp_pool_get_free_queue(pktio_entry->s.in_pool);
		/* Close the RX channel before re-opening the flow; the
		 * channel must be down while the flow is reconfigured. */
		ti_em_osal_cppi_rx_channel_close(Cppi_CpDma_PASS_CPDMA,
					pktio_entry->s.dev->rx_channel);
		ti_em_osal_cppi_rx_flow_open(Cppi_CpDma_PASS_CPDMA,
					     pktio_entry->s.dev->rx_flow,
					     qentry->s.hw_queue,
					     free_queue,
					     0);
		ti_em_osal_cppi_rx_channel_open(Cppi_CpDma_PASS_CPDMA,
						pktio_entry->s.dev->rx_channel);
		ODP_DBG("%s: Opened rx flow %u with dest queue: %u and free queue: %u\n",
			__func__,
			pktio_entry->s.dev->rx_flow,
			qentry->s.hw_queue,
			free_queue);
	}

	/* Bind the queue to this pktio and make it schedulable */
	queue_lock(qentry);
	qentry->s.pktin = id;
	qentry->s.status = QUEUE_STATUS_SCHED;
	queue_unlock(qentry);

	odp_schedule_queue(queue, qentry->s.param.sched.prio);

	return 0;
}
Exemplo n.º 17
0
/*
 * Create and arm the POSIX timer backing a timer pool. Only a single
 * timer pool may exist at a time; a second call aborts. timer_create /
 * timer_settime failures also abort.
 */
void _odp_timer_init(odp_timer_pool *tp)
{
	struct sigevent   sigev;
	struct itimerspec ispec;
	uint64_t period_ns, whole_sec, frac_ns;

	if (_odp_timer_pool_global != NULL)
		ODP_ABORT("Cannot have more than one timer at once");

	ODP_DBG("Creating POSIX timer for timer pool %s, period %"
		PRIu64" ns\n", tp->name, tp->param.res_ns);

	memset(&sigev, 0, sizeof(sigev));
	memset(&ispec, 0, sizeof(ispec));

	_odp_timer_pool_global = tp;
	sigev.sigev_notify          = SIGEV_CALLBACK;
	sigev.sigev_notify_function = timer_notify;
	sigev.sigev_value.sival_ptr = tp;

	if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid))
		ODP_ABORT("timer_create() returned error %s\n",
			  strerror(errno));

	/* Split the resolution into whole seconds plus leftover ns */
	period_ns = tp->param.res_ns;
	whole_sec = period_ns / ODP_TIME_SEC_IN_NS;
	frac_ns   = period_ns - whole_sec * ODP_TIME_SEC_IN_NS;

	ispec.it_interval.tv_sec  = (time_t)whole_sec;
	ispec.it_interval.tv_nsec = (long)frac_ns;
	ispec.it_value.tv_sec     = (time_t)whole_sec;
	ispec.it_value.tv_nsec    = (long)frac_ns;

	if (timer_settime(tp->timerid, 0, &ispec, NULL))
		ODP_ABORT("timer_settime() returned error %s\n",
			  strerror(errno));
}
Exemplo n.º 18
0
/**
 * Wait for netmap link to come up
 *
 * @param pktio_entry    Packet IO entry
 *
 * @retval  1 link is up
 * @retval  0 link is down
 * @retval <0 on failure
 */
static inline int netmap_wait_for_link(pktio_entry_t *pktio_entry)
{
	int attempt;

	/* Poll link status until it comes up or the timeout expires */
	for (attempt = 0; attempt <= NM_WAIT_TIMEOUT; attempt++) {
		int status = netmap_link_status(pktio_entry);

		if (status == -1)
			return -1;

		/* nm_open() causes the physical link to reset. When using a
		 * direct attached loopback cable there may be a small delay
		 * until the opposing end's interface comes back up again. In
		 * this case without the additional sleep pktio validation
		 * tests fail. (Sleep happens even when the link is already
		 * up, before returning — deliberately.) */
		sleep(1);
		if (status == 1)
			return 1;
	}

	ODP_DBG("%s link is down\n", pktio_entry->s.name);
	return 0;
}
Exemplo n.º 19
0
/*
 * Build the default control CPU mask with up to 'num' CPUs, allocating
 * upwards from the lowest-numbered installed control CPU. A zero
 * request defaults to one CPU; an oversized request is clamped.
 *
 * Returns the number of CPUs actually placed in the mask.
 */
int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
{
	odp_cpumask_t overlap;
	int avail, picked, idx;

	if (num == 0) {
		/* No user supplied number: one control CPU by default */
		num = 1;
	} else {
		/* Clamp an oversized request to the installed count */
		avail = odp_cpumask_count(&odp_global_data.control_cpus);
		if (num > avail)
			num = avail;
	}

	/* Allocate upwards from the lowest numbered CPU */
	odp_cpumask_zero(mask);
	picked = 0;
	for (idx = 0; idx < CPU_SETSIZE && picked < num; idx++) {
		if (!odp_cpumask_isset(&odp_global_data.control_cpus, idx))
			continue;
		odp_cpumask_set(mask, idx);
		picked++;
	}

	/* Warn if control CPUs share cores with the workers */
	odp_cpumask_and(&overlap, mask, &odp_global_data.worker_cpus);
	if (odp_cpumask_count(&overlap))
		ODP_DBG("\n\tControl CPUs overlap with worker CPUs...\n"
			"\tthis will likely have a performance impact on the worker threads.\n");

	return picked;
}
Exemplo n.º 20
0
/*
 * Ring test worker thread: runs the test case selected in the argument
 * struct and prints pass/fail. Returns its argument pointer.
 */
static void *test_ring(void *arg)
{
	ring_arg_t *targ = (ring_arg_t *)arg;
	char ring_name[ODP_RING_NAMESIZE];
	odp_ring_t *ring;
	int status = 0;
	int thr = odp_thread_id();

	printf("Thread %i starts\n", thr);

	switch (targ->thrdarg.testcase) {
	case ODP_RING_TEST_BASIC:
		snprintf(ring_name, sizeof(ring_name), "test_ring_%i", thr);

		/* flag arg unused; alignment handled inside the func */
		ring = odp_ring_create(ring_name, RING_SIZE, 0);
		if (ring == NULL) {
			ODP_ERR("ring create failed\n");
			status = -1;
			break;
		}

		/* lookup ring from its name */
		if (odp_ring_lookup(ring_name) != ring) {
			ODP_ERR("ring lookup failed\n");
			status = -1;
			break;
		}

		/* basic operations */
		if (test_ring_basic(ring) < 0) {
			ODP_ERR("ring basic enqueue/dequeu ops failed\n");
			status = -1;
		}

		/* dump ring stats */
		odp_ring_list_dump();
		break;

	case ODP_RING_TEST_STRESS:
		test_ring_stress(targ->stress_type);

		/* dump ring stats */
		odp_ring_list_dump();
		break;

	default:
		ODP_ERR("Invalid test case [%d]\n", targ->thrdarg.testcase);
		status = -1;
		break;
	}

	ODP_DBG("result = %d\n", status);
	if (status == 0)
		printf("test_ring Result:pass\n");
	else
		printf("test_ring Result:fail\n");

	fflush(stdout);

	return targ;
}
Exemplo n.º 21
0
/**
 * Free a shared memory block.
 *
 * Handles the four reservation flavors symmetrically with
 * odp_shm_reserve(): per-process memzone (MONOPOLIZE), shared memzone
 * (SHARE), plain/huge mmap and optional ODP_SHM_PROC shm file.
 *
 * @param shm  Block handle returned by odp_shm_reserve()
 *
 * @return 0 on success (freeing an already-free block is a no-op),
 *         -1 on bad handle or unmap/unlink failure
 */
int odp_shm_free(odp_shm_t shm)
{
	uint32_t i;
	int ret;
	odp_shm_block_t *block;
	char name[ODP_SHM_NAME_LEN + 8];

	if (shm == ODP_SHM_INVALID) {
		ODP_DBG("odp_shm_free: Invalid handle\n");
		return -1;
	}

	i = from_handle(shm);

	if (i >= ODP_CONFIG_SHM_BLOCKS) {
		ODP_DBG("odp_shm_free: Bad handle\n");
		return -1;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	block = &odp_shm_tbl->block[i];

	if (block->addr == NULL) {
		ODP_DBG("odp_shm_free: Free block\n");
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	/* right now, for this tpye of memory, we do nothing as free */
	if (block->flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		/* Rebuild the pid-suffixed district name used at reserve */
		snprintf(name, sizeof(name), "%s_%d", block->name, pid);
		odp_mm_district_unreserve(name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	if (block->flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		/* BUGFIX: the original passed the uninitialized local
		 * 'name' buffer here; shared districts were reserved
		 * under the plain block name, so unreserve that. */
		odp_mm_district_unreserve(block->name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	ret = munmap(block->addr_orig, block->alloc_size);
	if (0 != ret) {
		ODP_DBG("odp_shm_free: munmap failed: %s, id %u, addr %p\n",
			strerror(errno), i, block->addr_orig);
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return -1;
	}

	if (block->flags & ODP_SHM_PROC) {
		/* Also remove the backing /dev/shm file */
		ret = shm_unlink(block->name);
		if (0 != ret) {
			ODP_DBG("odp_shm_free: shm_unlink failed\n");
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			return -1;
		}
	}

	memset(block, 0, sizeof(odp_shm_block_t));
	odp_spinlock_unlock(&odp_shm_tbl->lock);
	return 0;
}
Exemplo n.º 22
0
/**
 * Reserve a named shared memory block.
 *
 * Four backing strategies depending on flags:
 *  - ODP_SHM_PROC: file in /dev/shm mapped MAP_SHARED
 *  - ODP_SHM_MONOPOLIZE_CNTNUS_PHY: per-process (pid-suffixed) memzone
 *  - ODP_SHM_SHARE_CNTNUS_PHY: shared memzone under the plain name
 *  - otherwise: anonymous mmap (huge pages tried first when available)
 *
 * @param name   Unique block name (lookups/free use it)
 * @param size   Requested usable size in bytes
 * @param align  Alignment applied to the returned address
 * @param flags  ODP_SHM_* flags selecting the backing strategy
 *
 * @return Block handle, or ODP_SHM_INVALID on failure
 */
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
			  uint32_t flags)
{
	uint32_t i;
	odp_shm_block_t *block;
	void *addr;
	int   fd = -1;
	int   map_flag = MAP_SHARED;

	/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
	int oflag = O_RDWR | O_CREAT | O_TRUNC;
	uint64_t alloc_size;
	uint64_t page_sz;

#ifdef MAP_HUGETLB
	uint64_t huge_sz;
	int need_huge_page = 0;
	uint64_t alloc_hp_size;
#endif

	const struct odp_mm_district *zone = NULL;
	char memdistrict_name[ODP_SHM_NAME_LEN + 8];

	page_sz = odp_sys_page_size();
	/* Over-allocate by 'align' so the start can be rounded up */
	alloc_size = size + align;

#ifdef MAP_HUGETLB
	huge_sz = odp_sys_huge_page_size();
	need_huge_page = (huge_sz && alloc_size > page_sz);

	/* munmap for huge pages requires sizes round up by page */
	alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
#endif

	if (flags & ODP_SHM_PROC) {
		/* Creates a file to /dev/shm */
		fd = shm_open(name, oflag,
			      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
		if (fd == -1) {
			ODP_DBG("%s: shm_open failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		/* Per-process zone: suffix the name with the pid */
		snprintf(memdistrict_name, sizeof(memdistrict_name),
			 "%s_%d", name, pid);
		zone = odp_mm_district_reserve(memdistrict_name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		zone = odp_mm_district_reserve(name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else {
		map_flag |= MAP_ANONYMOUS;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	if (find_block(name, NULL)) {
		/* Found a block with the same name */
		/* NOTE(review): fd from shm_open leaks on this and the
		 * following error paths — TODO confirm and close(fd). */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("name %s already used.\n", name);
		return ODP_SHM_INVALID;
	}

	for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++)
		if (odp_shm_tbl->block[i].addr == NULL)
			/* Found free block */
			break;

	if (i > ODP_CONFIG_SHM_BLOCKS - 1) {
		/* Table full */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("%s: no more blocks.\n", name);
		return ODP_SHM_INVALID;
	}

	block = &odp_shm_tbl->block[i];

	block->hdl = to_handle(i);
	addr = MAP_FAILED;

#ifdef MAP_HUGETLB

	/* Try first huge pages */
	if (need_huge_page) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_hp_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s: ftruncate huge pages failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_hp_size, PROT_READ | PROT_WRITE,
			    map_flag | MAP_HUGETLB, fd, 0);
		if (addr == MAP_FAILED) {
			ODP_DBG(" %s: No huge pages, fall back to normal pages,"
				"check: /proc/sys/vm/nr_hugepages.\n",
				name);
		} else {
			block->alloc_size = alloc_hp_size;
			block->huge = 1;
			block->page_sz = huge_sz;
		}
	}
#endif

	/* Memzone-backed blocks use the zone's address directly */
	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY)
		addr = zone->addr;

	/* Use normal pages for small or failed huge page allocations */
	if (addr == MAP_FAILED) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_ERR("%s: ftruncate failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
			    map_flag, fd, 0);
		if (addr == MAP_FAILED) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s mmap failed.\n", name);
			return ODP_SHM_INVALID;
		}

		block->alloc_size = alloc_size;
		block->huge = 0;
		block->page_sz = page_sz;
	}

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		block->alloc_size = alloc_size;
		block->huge = 1;
		block->page_sz = ODP_MEMZONE_2MB;
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(zone->addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		/* Memzone blocks keep no fd */
		block->fd = -1;
		block->addr = addr;
	} else {
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = fd;
		block->addr = addr;
	}

	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return block->hdl;
}
Exemplo n.º 23
0
/**
 * Print detailed information and statistics of a pool to the debug log.
 *
 * @param pool_hdl  Pool handle to print
 */
void odp_pool_print(odp_pool_t pool_hdl)
{
	pool_entry_t *pool;
	uint32_t pool_id;

	pool_id = pool_handle_to_index(pool_hdl);
	pool    = get_pool_entry(pool_id);

	/* Snapshot the counters once so the printout is consistent */
	uint32_t bufcount  = odp_atomic_load_u32(&pool->s.bufcount);
	uint32_t blkcount  = odp_atomic_load_u32(&pool->s.blkcount);
	uint64_t bufallocs = odp_atomic_load_u64(&pool->s.poolstats.bufallocs);
	uint64_t buffrees  = odp_atomic_load_u64(&pool->s.poolstats.buffrees);
	uint64_t blkallocs = odp_atomic_load_u64(&pool->s.poolstats.blkallocs);
	uint64_t blkfrees  = odp_atomic_load_u64(&pool->s.poolstats.blkfrees);
	uint64_t bufempty  = odp_atomic_load_u64(&pool->s.poolstats.bufempty);
	uint64_t blkempty  = odp_atomic_load_u64(&pool->s.poolstats.blkempty);
	uint64_t hiwmct    =
		odp_atomic_load_u64(&pool->s.poolstats.high_wm_count);
	uint64_t lowmct    =
		odp_atomic_load_u64(&pool->s.poolstats.low_wm_count);

	ODP_DBG("Pool info\n");
	ODP_DBG("---------\n");
	ODP_DBG(" pool            %" PRIu64 "\n",
		odp_pool_to_u64(pool->s.pool_hdl));
	ODP_DBG(" name            %s\n",
		pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
	ODP_DBG(" pool type       %s\n",
		pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
	       (pool->s.params.type == ODP_POOL_PACKET ? "packet" :
	       (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
		"unknown")));
	ODP_DBG(" pool storage    ODP managed shm handle %" PRIu64 "\n",
		odp_shm_to_u64(pool->s.pool_shm));
	ODP_DBG(" pool status     %s\n",
		pool->s.quiesced ? "quiesced" : "active");
	ODP_DBG(" pool opts       %s, %s, %s\n",
		pool->s.flags.unsegmented ? "unsegmented" : "segmented",
		pool->s.flags.zeroized ? "zeroized" : "non-zeroized",
		pool->s.flags.predefined  ? "predefined" : "created");
	ODP_DBG(" pool base       %p\n",  pool->s.pool_base_addr);
	ODP_DBG(" pool size       %" PRIu64 "(k)\n",
		(uint64_t)(pool->s.pool_size / 1024));
	ODP_DBG(" pool mdata base %p\n",  pool->s.pool_mdata_addr);
	ODP_DBG(" udata size      %u\n", pool->s.udata_size);
	ODP_DBG(" headroom        %u\n",  pool->s.headroom);
	ODP_DBG(" tailroom        %u\n",  pool->s.tailroom);
	if (pool->s.params.type == ODP_POOL_BUFFER) {
		ODP_DBG(" buf size        %1u\n", pool->s.params.buf.size);
		ODP_DBG(" buf align       %u requested, %u used\n",
			pool->s.params.buf.align, pool->s.buf_align);
	} else if (pool->s.params.type == ODP_POOL_PACKET) {
		ODP_DBG(" seg length      %u requested, %u used\n",
			pool->s.params.pkt.seg_len, pool->s.seg_size);
		ODP_DBG(" pkt length      %u requested, %u used\n",
			pool->s.params.pkt.len, pool->s.blk_size);
	}
	ODP_DBG(" num bufs        %u\n",  pool->s.buf_num);
	ODP_DBG(" bufs available  %u %s\n", bufcount,
		pool->s.low_wm_assert ? " **low wm asserted**" : "");
	ODP_DBG(" bufs in use     %u\n",  pool->s.buf_num - bufcount);
	/* BUGFIX: uint64_t counters were printed with %lu, which is
	 * wrong on ILP32/LLP64 targets; use PRIu64 like the rest of
	 * this file. */
	ODP_DBG(" buf allocs      %" PRIu64 "\n", bufallocs);
	ODP_DBG(" buf frees       %" PRIu64 "\n", buffrees);
	ODP_DBG(" buf empty       %" PRIu64 "\n", bufempty);
	ODP_DBG(" blk size        %1u\n",
		pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
	ODP_DBG(" blks available  %u\n",  blkcount);
	ODP_DBG(" blk allocs      %" PRIu64 "\n", blkallocs);
	ODP_DBG(" blk frees       %" PRIu64 "\n", blkfrees);
	ODP_DBG(" blk empty       %" PRIu64 "\n", blkempty);
	ODP_DBG(" high wm value   %u\n", pool->s.high_wm);
	ODP_DBG(" high wm count   %" PRIu64 "\n", hiwmct);
	ODP_DBG(" low wm value    %u\n", pool->s.low_wm);
	ODP_DBG(" low wm count    %" PRIu64 "\n", lowmct);
}
Exemplo n.º 24
0
/**
 * Open a PACKET_MMAP socket pktio on the given network device.
 *
 * Acquires, in order: packet socket, device binding, TX ring, RX ring,
 * mmap of the rings, MAC address and optional fanout. Any failure
 * unwinds via sock_mmap_close(). Also probes which statistics source
 * (ethtool / sysfs / none) the device supports.
 *
 * @param id           Unused pktio handle
 * @param pktio_entry  Entry to initialize
 * @param netdev       Network interface name
 * @param pool         Packet pool; must not be ODP_POOL_INVALID
 *
 * @return 0 on success, -1 on failure
 */
static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
			  pktio_entry_t *pktio_entry,
			  const char *netdev, odp_pool_t pool)
{
	int if_idx;
	int ret = 0;
	odp_pktio_stats_t cur_stats;

	if (disable_pktio)
		return -1;

	pkt_sock_mmap_t *const pkt_sock = &pktio_entry->s.pkt_sock_mmap;
	int fanout = 1;

	/* Init pktio entry */
	memset(pkt_sock, 0, sizeof(*pkt_sock));
	/* set sockfd to -1, because a valid socked might be initialized to 0 */
	pkt_sock->sockfd = -1;

	if (pool == ODP_POOL_INVALID)
		return -1;

	/* Store eth buffer offset for pkt buffers from this pool */
	pkt_sock->frame_offset = 0;

	pkt_sock->pool = pool;
	pkt_sock->sockfd = mmap_pkt_socket();
	if (pkt_sock->sockfd == -1)
		goto error;

	ret = mmap_bind_sock(pkt_sock, netdev);
	if (ret != 0)
		goto error;

	/* TX ring must be set up before the RX ring */
	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
			      PACKET_TX_RING, pool, fanout);
	if (ret != 0)
		goto error;

	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
			      PACKET_RX_RING, pool, fanout);
	if (ret != 0)
		goto error;

	/* Map both rings into user space */
	ret = mmap_sock(pkt_sock);
	if (ret != 0)
		goto error;

	ret = mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
	if (ret != 0)
		goto error;

	if_idx = if_nametoindex(netdev);
	if (if_idx == 0) {
		__odp_errno = errno;
		ODP_ERR("if_nametoindex(): %s\n", strerror(errno));
		goto error;
	}

	pkt_sock->fanout = fanout;
	if (fanout) {
		ret = set_pkt_sock_fanout_mmap(pkt_sock, if_idx);
		if (ret != 0)
			goto error;
	}

	/* Probe the stats source: prefer ethtool, fall back to sysfs,
	 * otherwise mark stats unsupported */
	ret = ethtool_stats_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
				   pktio_entry->s.name,
				   &cur_stats);
	if (ret != 0) {
		ret = sysfs_stats(pktio_entry, &cur_stats);
		if (ret != 0) {
			pktio_entry->s.stats_type = STATS_UNSUPPORTED;
			ODP_DBG("pktio: %s unsupported stats\n",
				pktio_entry->s.name);
		} else {
			pktio_entry->s.stats_type = STATS_SYSFS;
		}
	} else {
		pktio_entry->s.stats_type = STATS_ETHTOOL;
	}

	ret = sock_stats_reset_fd(pktio_entry,
				  pktio_entry->s.pkt_sock_mmap.sockfd);
	if (ret != 0)
		goto error;

	return 0;

error:
	sock_mmap_close(pktio_entry);
	return -1;
}
Exemplo n.º 25
0
/*
 * Open a packet IO instance of the type selected in params (socket
 * basic/mmsg/mmap, or netmap when compiled in). On setup failure the
 * underlying transport is closed and the entry is released.
 *
 * Returns the new pktio handle, or ODP_PKTIO_INVALID on failure.
 */
odp_pktio_t odp_pktio_open(const char *dev, odp_buffer_pool_t pool,
			   odp_pktio_params_t *params)
{
	odp_pktio_t id;
	pktio_entry_t *pktio_entry;
	int rc;

	if (params == NULL) {
		ODP_ERR("Invalid pktio params\n");
		return ODP_PKTIO_INVALID;
	}

	/* Validate the requested type before allocating anything */
	switch (params->type) {
	case ODP_PKTIO_TYPE_SOCKET_BASIC:
	case ODP_PKTIO_TYPE_SOCKET_MMSG:
	case ODP_PKTIO_TYPE_SOCKET_MMAP:
		ODP_DBG("Allocating socket pktio\n");
		break;
#ifdef ODP_HAVE_NETMAP
	case ODP_PKTIO_TYPE_NETMAP:
		ODP_DBG("Allocating netmap pktio\n");
		break;
#endif
	default:
		ODP_ERR("Invalid pktio type: %02x\n", params->type);
		return ODP_PKTIO_INVALID;
	}

	id = alloc_lock_pktio_entry(params);
	if (id == ODP_PKTIO_INVALID) {
		ODP_ERR("No resources available.\n");
		return ODP_PKTIO_INVALID;
	}
	/* if successful, alloc_pktio_entry() returns with the entry locked */

	pktio_entry = get_entry(id);

	/* Per-type transport setup; undo everything on failure */
	switch (params->type) {
	case ODP_PKTIO_TYPE_SOCKET_BASIC:
	case ODP_PKTIO_TYPE_SOCKET_MMSG:
		rc = setup_pkt_sock(&pktio_entry->s.pkt_sock, dev, pool);
		if (rc == -1) {
			close_pkt_sock(&pktio_entry->s.pkt_sock);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;

	case ODP_PKTIO_TYPE_SOCKET_MMAP:
		rc = setup_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap, dev,
				pool, params->sock_params.fanout);
		if (rc == -1) {
			close_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;

#ifdef ODP_HAVE_NETMAP
	case ODP_PKTIO_TYPE_NETMAP:
		rc = setup_pkt_netmap(&pktio_entry->s.pkt_nm, dev,
				pool, &params->nm_params);
		if (rc == -1) {
			close_pkt_netmap(&pktio_entry->s.pkt_nm);
			free_pktio_entry(id);
			id = ODP_PKTIO_INVALID;
		}
		break;
#endif

	default:
		free_pktio_entry(id);
		id = ODP_PKTIO_INVALID;
		ODP_ERR("This type of I/O is not supported. Please recompile.\n");
		break;
	}

	unlock_entry(pktio_entry);
	return id;
}
Exemplo n.º 26
0
/**
 * Open a netmap pktio on interface 'netdev' backed by 'pool'.
 *
 * Probes the netmap module via a dummy nm_open() to read ring counts and the
 * netmap buffer size, opens a control socket, derives the effective MTU,
 * detects RSS support, reads interface flags and MAC address, initializes the
 * per-queue locks and selects a statistics backend (ethtool or none).
 *
 * Returns 0 on success, -1 on failure (all acquired resources are released
 * through netmap_close()).
 */
static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
		       const char *netdev, odp_pool_t pool)
{
	int i;
	int err;
	int sockfd;
	int mtu;
	uint32_t buf_size;
	pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
	struct nm_desc *desc;
	struct netmap_ring *ring;
	odp_pktin_hash_proto_t hash_proto;
	odp_pktio_stats_t   cur_stats;

	/* Escape hatch: let users disable this pktio type at runtime. */
	if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
		return -1;

	if (pool == ODP_POOL_INVALID)
		return -1;

	/* Init pktio entry */
	memset(pkt_nm, 0, sizeof(*pkt_nm));
	pkt_nm->sockfd = -1;
	pkt_nm->pool = pool;

	/* max frame len taking into account the l2-offset */
	pkt_nm->max_frame_len = ODP_CONFIG_PACKET_BUF_LEN_MAX -
				odp_buffer_pool_headroom(pool) -
				odp_buffer_pool_tailroom(pool);

	snprintf(pktio_entry->s.name, sizeof(pktio_entry->s.name), "%s",
		 netdev);
	snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
		 netdev);

	/* Dummy open here to check if netmap module is available and to read
	 * capability info. */
	desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
	if (desc == NULL) {
		ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
		goto error;
	}

	if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all rx rings\n");
		nm_close(desc);
		goto error;
	}

	pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
	pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;

	if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
		ODP_ERR("Unable to store all tx rings\n");
		nm_close(desc);
		goto error;
	}

	pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
	pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
	if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
		pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;

	ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
	buf_size = ring->nr_buf_size;
	nm_close(desc);

	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
	if (sockfd == -1) {
		ODP_ERR("Cannot get device control socket\n");
		goto error;
	}

	pkt_nm->sockfd = sockfd;

	/* BUG FIX: check the raw mtu_get_fd() return value for failure
	 * BEFORE adding ODPH_ETHHDR_LEN. Previously the header length was
	 * added first, so an error return of -1 became a small positive
	 * value and the failure was never detected. */
	mtu = mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pktio_entry->s.name);
	if (mtu < 0) {
		ODP_ERR("Unable to read interface MTU\n");
		goto error;
	}

	/* Use either interface MTU (+ ethernet header length) or netmap buffer
	 * size as MTU, whichever is smaller. */
	mtu += ODPH_ETHHDR_LEN;
	pkt_nm->mtu = ((uint32_t)mtu < buf_size) ? (uint32_t)mtu : buf_size;

	/* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
	if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
		ODP_DBG("RSS not supported\n");
		pkt_nm->capa.max_input_queues = 1;
	}

	err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
	if (err)
		goto error;

	if ((pkt_nm->if_flags & IFF_UP) == 0)
		ODP_DBG("%s is down\n", pktio_entry->s.name);

	err = mac_addr_get_fd(sockfd, netdev, pkt_nm->if_mac);
	if (err)
		goto error;

	for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
		odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
		odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
	}

	/* netmap uses only ethtool to get statistics counters */
	err = ethtool_stats_get_fd(pktio_entry->s.pkt_nm.sockfd,
				   pktio_entry->s.name,
				   &cur_stats);
	if (err) {
		ODP_ERR(
			"netmap pktio %s does not support statistics counters\n",
			pktio_entry->s.name);
		pktio_entry->s.stats_type = STATS_UNSUPPORTED;
	} else {
		pktio_entry->s.stats_type = STATS_ETHTOOL;
	}

	/* Best effort; a reset failure is not fatal for open. */
	(void)netmap_stats_reset(pktio_entry);

	return 0;

error:
	netmap_close(pktio_entry);
	return -1;
}
Exemplo n.º 27
0
/**
 * Close hook for the pkt_odp pktio type.
 *
 * Nothing is released here; the function only traces the port id of the
 * entry being closed and reports success.
 */
int close_pkt_odp(pktio_entry_t *entry)
{
	ODP_DBG("close pkt_odp, %u\n", entry->s.pkt_odp.portid);

	return 0;
}