Example #1
static int
order_queue_worker(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}
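
Note: the worker above keeps polling only while packets are still in flight; an atomic counter of outstanding packets (outstand_pkts) is decremented elsewhere as packets complete, and an idle worker reads it to decide when to exit. Below is a minimal sketch of that shutdown pattern; the names outstanding_pkts, mark_pkt_done and worker_should_exit are hypothetical, not part of the test.

#include <stdbool.h>
#include <rte_atomic.h>
#include <rte_pause.h>

static rte_atomic64_t outstanding_pkts;	/* set to the total packet count before workers start */

/* Consumer side: account for one packet leaving the pipeline. */
static inline void
mark_pkt_done(void)
{
	rte_atomic64_dec(&outstanding_pkts);
}

/* Worker side: called when a dequeue returned nothing. */
static inline bool
worker_should_exit(void)
{
	if (rte_atomic64_read(&outstanding_pkts) <= 0)
		return true;	/* all packets accounted for, stop polling */
	rte_pause();		/* yield the core briefly and retry */
	return false;
}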
Example #2
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
			   struct rte_eth_stats *bnxt_stats)
{
	int rc = 0;
	unsigned int i;
	struct bnxt *bp = eth_dev->data->dev_private;

	memset(bnxt_stats, 0, sizeof(*bnxt_stats));

	/* per-RX-queue counters come from each completion ring's HW stats context */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

		rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
				     bnxt_stats, 1);
		if (unlikely(rc))
			return rc;
	}

	/* per-TX-queue counters, gathered the same way */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
				     bnxt_stats, 0);
		if (unlikely(rc))
			return rc;
	}
	/* device-wide counters for this function, then the mbuf alloc-failure count */
	rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
	if (unlikely(rc))
		return rc;
	bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
	return rc;
}
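
The only atomic in this callback is the final read of bp->rx_mbuf_alloc_fail: the counter is presumably bumped on the RX path whenever mbuf allocation fails, and the stats callback reads it lock-free. A minimal sketch of that pattern follows, with a hypothetical my_adapter structure standing in for struct bnxt.

#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

struct my_adapter {
	struct rte_mempool *mb_pool;
	rte_atomic64_t rx_mbuf_alloc_fail;
};

/* RX refill path: count allocation failures atomically and carry on. */
static struct rte_mbuf *
my_rx_alloc(struct my_adapter *ad)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(ad->mb_pool);

	if (m == NULL)
		rte_atomic64_inc(&ad->rx_mbuf_alloc_fail);
	return m;
}

/* Stats callback: a single atomic read, no queue locking required. */
static void
my_stats_fill(struct my_adapter *ad, struct rte_eth_stats *stats)
{
	stats->rx_nombuf = rte_atomic64_read(&ad->rx_mbuf_alloc_fail);
}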
Example #3
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if it is not owned by the calling thread */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
Example #4
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread in the blocked queue
		 * we defer this to after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
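
Examples #3 and #4 pair the atomic count with an rte_event64_cmpset-style hand-off: rte_atomic64_cmpset() on the owner word either gives the caller the lock or tells it to block. Below is a much smaller spinning variant of the same hand-off, without the blocked queue and scheduler suspend; my_mutex, my_mutex_lock and my_mutex_unlock are hypothetical names.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_pause.h>

struct my_mutex {
	volatile uint64_t owner;	/* 0 == unlocked, otherwise an owner id */
};

static inline void
my_mutex_lock(struct my_mutex *m, uint64_t self)
{
	/* rte_atomic64_cmpset() returns non-zero only for the caller that
	 * swaps 0 -> self; everyone else spins and retries.
	 */
	while (!rte_atomic64_cmpset(&m->owner, 0, self))
		rte_pause();
}

static inline void
my_mutex_unlock(struct my_mutex *m, uint64_t self)
{
	/* release only if we really own it; the cmpset keeps it atomic */
	rte_atomic64_cmpset(&m->owner, self, 0);
}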
Example #5
static int
order_queue_worker_burst(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) { /* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}
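
The enqueue at the end of the loop may accept only part of the burst, so the worker retries with the remainder until everything is in. A minimal sketch of that retry loop as a helper (enqueue_all is a hypothetical name):

#include <stdint.h>
#include <rte_eventdev.h>
#include <rte_pause.h>

/* Keep enqueuing until every event in ev[0..nb) has been accepted. */
static inline void
enqueue_all(uint8_t dev_id, uint8_t port, struct rte_event *ev, uint16_t nb)
{
	uint16_t sent = 0;

	while (sent < nb) {
		uint16_t n = rte_event_enqueue_burst(dev_id, port,
						     ev + sent, nb - sent);
		if (n == 0)
			rte_pause();	/* port is full, back off briefly */
		sent += n;
	}
}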
Example #6
File: lio_23xx_vf.c  Project: Leon555/dpdk
int
cn23xx_pfvf_handshake(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;
	struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
	uint32_t q_no, count = 0;
	rte_atomic64_t status;
	uint32_t pfmajor;
	uint32_t vfmajor;
	uint32_t ret;

	PMD_INIT_FUNC_TRACE();

	/* Sending VF_ACTIVE indication to the PF driver */
	lio_dev_dbg(lio_dev, "requesting info from PF\n");

	mbox_cmd.msg.mbox_msg64 = 0;
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	lio_ver->major = LIO_BASE_MAJOR_VERSION;
	lio_ver->minor = LIO_BASE_MINOR_VERSION;
	lio_ver->micro = LIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
	mbox_cmd.fn_arg = (void *)&status;

	if (lio_mbox_write(lio_dev, &mbox_cmd)) {
		lio_dev_err(lio_dev, "Write to mailbox failed\n");
		return -1;
	}

	rte_atomic64_set(&status, 0);

	do {
		rte_delay_ms(1);
	} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));

	ret = rte_atomic64_read(&status);
	if (ret == 0) {
		lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
		return -1;
	}

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
		lio_dev->instr_queue[q_no]->txpciq.s.pkind =
						lio_dev->pfvf_hsword.pkind;

	vfmajor = LIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		lio_dev_err(lio_dev,
			    "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = -EPERM;
	} else {
		lio_dev_dbg(lio_dev,
			    "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = 0;
	}

	lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
		    lio_dev->pfvf_hsword.pkind);

	return ret;
}
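
The handshake turns an asynchronous mailbox reply into a synchronous call: the callback publishes a non-zero status word and the caller polls it on a millisecond tick until it changes or a timeout expires. A minimal sketch of that wait (wait_for_status is a hypothetical helper):

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_cycles.h>

/* The completion callback is expected to do rte_atomic64_set(status, v)
 * with a non-zero v. Returns the published status, or 0 on timeout.
 */
static int64_t
wait_for_status(rte_atomic64_t *status, uint32_t max_ms)
{
	uint32_t waited = 0;

	while (rte_atomic64_read(status) == 0 && waited++ < max_ms)
		rte_delay_ms(1);

	return rte_atomic64_read(status);
}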
Example #7
static inline int
poll_burst(void *args)
{
#define MAX_IDLE           (10000)
	unsigned lcore_id;
	struct rte_mbuf **pkts_burst;
	uint64_t diff_tsc, cur_tsc;
	uint16_t next[RTE_MAX_ETHPORTS];
	struct lcore_conf *conf;
	uint32_t pkt_per_port = *((uint32_t *)args);
	unsigned i, portid, nb_rx = 0;
	uint64_t total;
	uint64_t timeout = MAX_IDLE;

	lcore_id = rte_lcore_id();
	conf = &lcore_conf[lcore_id];
	if (conf->status != LCORE_USED)
		return 0;

	total = pkt_per_port * conf->nb_ports;
	printf("start to receive total expect %"PRIu64"\n", total);

	pkts_burst = (struct rte_mbuf **)
		rte_calloc_socket("poll_burst",
				  total, sizeof(void *),
				  RTE_CACHE_LINE_SIZE, conf->socketid);
	if (!pkts_burst)
		return -1;

	for (i = 0; i < conf->nb_ports; i++) {
		portid = conf->portlist[i];
		next[portid] = i * pkt_per_port;
	}

	while (!rte_atomic64_read(&start))
		;

	cur_tsc = rte_rdtsc();
	while (total) {
		for (i = 0; i < conf->nb_ports; i++) {
			portid = conf->portlist[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						 &pkts_burst[next[portid]],
						 MAX_PKT_BURST);
			if (unlikely(nb_rx == 0)) {
				timeout--;
				if (unlikely(timeout == 0))
					goto timeout;
				continue;
			}
			next[portid] += nb_rx;
			total -= nb_rx;
		}
	}
timeout:
	diff_tsc = rte_rdtsc() - cur_tsc;

	printf("%"PRIu64" packets lost, IDLE %"PRIu64" times\n",
	       total, MAX_IDLE - timeout);

	/* clean up */
	total = pkt_per_port * conf->nb_ports - total;
	for (i = 0; i < total; i++)
		rte_pktmbuf_free(pkts_burst[i]);

	rte_free(pkts_burst);

	if (total > 0)
		return diff_tsc / total;
	else
		return -1;
}
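
poll_burst() spins on the start flag until another lcore (presumably the transmit side) sets it, so the receive timestamp does not include setup time. A minimal sketch of that two-sided handshake, with hypothetical wait_for_start and signal_start helpers:

#include <rte_atomic.h>
#include <rte_pause.h>

static rte_atomic64_t start;	/* zero until the transmit side is ready */

/* Polling lcore: block until the transmit side releases us. */
static inline void
wait_for_start(void)
{
	while (rte_atomic64_read(&start) == 0)
		rte_pause();
}

/* Transmit lcore: flip the flag once the packets are on the wire. */
static inline void
signal_start(void)
{
	rte_atomic64_set(&start, 1);
}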