Example no. 1
static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}
Example no. 2
static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}
Example no. 3
static uint16_t
eth_xenvirt_tx(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *txvq = tx_queue;
	struct rte_mbuf *txm;
	uint16_t nb_used, nb_tx, num, i;
	int error;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *snd_pkts[VIRTIO_MBUF_BURST_SZ];
	struct pmd_internals *pi = txvq->internals;

	nb_tx = 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(txvq);

	rte_smp_rmb();

	num = (uint16_t)(likely(nb_used <= VIRTIO_MBUF_BURST_SZ) ?
			 nb_used : VIRTIO_MBUF_BURST_SZ);
	num = virtqueue_dequeue_burst(txvq, snd_pkts, len, num);

	for (i = 0; i < num; i++) {
		/* mergeable buffers not supported, one segment only */
		rte_pktmbuf_free_seg(snd_pkts[i]);
	}

	while (nb_tx < nb_pkts) {
		if (likely(!virtqueue_full(txvq))) {
			/* TODO: drop the packet if it contains multiple segments */
			txm = tx_pkts[nb_tx];
			error = virtqueue_enqueue_xmit(txvq, txm);
			if (unlikely(error)) {
				if (error == ENOSPC)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0\n");
				else if (error == EMSGSIZE)
					PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1\n");
				else
					PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d\n", error);
				break;
			}
			nb_tx++;
		} else {
			PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
			/* virtqueue_notify not needed in our para-virt solution */
			break;
		}
	}
	pi->eth_stats.opackets += nb_tx;
	return nb_tx;
}
Example no. 4
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	rte_smp_rmb();

	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}
Example no. 5
/*
 * When we write to the ring buffer, check if the host needs to be
 * signaled.
 *
 * The contract:
 * - The host guarantees that while it is draining the TX bufring,
 *   it will set the br_imask to indicate it does not need to be
 *   interrupted when new data are added.
 * - The host guarantees that it will completely drain the TX bufring
 *   before exiting the read loop.  Further, once the TX bufring is
 *   empty, it will clear the br_imask and re-check to see if new
 *   data have arrived.
 */
static inline bool
vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex)
{
	rte_smp_mb();
	if (tbr->vbr->imask)
		return false;

	rte_smp_rmb();

	/*
	 * The only case where we need to signal is when the
	 * ring transitions from empty to non-empty.
	 */
	return old_windex == tbr->vbr->rindex;
}
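The contract above implies a writer-side flow along these lines; a minimal sketch assuming the same vmbus headers as the driver code, where copy_to_txbr() is a hypothetical helper (not the actual DPDK vmbus bus API) that copies the payload into the ring and advances windex:

/* Writer-side sketch implied by the contract above.
 * copy_to_txbr() is a hypothetical helper, not a real API.
 */
static void
txbr_write_and_signal(struct vmbus_channel *chan, struct vmbus_br *tbr,
		      const void *data, size_t dlen)
{
	/* Snapshot the write index before publishing the new data. */
	uint32_t old_windex = tbr->vbr->windex;

	/* Copy the payload into the ring and advance windex. */
	copy_to_txbr(tbr, data, dlen);

	/* Interrupt the host only on an empty -> non-empty transition,
	 * and never while it has interrupts masked (imask set).
	 */
	if (vmbus_txbr_need_signal(tbr, old_windex))
		vmbus_set_event(chan->device, chan);
}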
Example no. 6
static uint16_t
eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtqueue *rxvq = q;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	uint32_t i;
	struct pmd_internals *pi = rxvq->internals;

	nb_used = VIRTQUEUE_NUSED(rxvq);

	rte_smp_rmb();
	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (unlikely(num == 0))
		return 0;

	num = virtqueue_dequeue_burst(rxvq, rx_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
	for (i = 0; i < num; i++) {
		rxm = rx_pkts[i];
		PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
		rxm->next = NULL;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
		rxm->nb_segs = 1;
		rxm->port = pi->port_id;
		rxm->pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
	}
	/* allocate new mbufs to refill the used descriptors */
	while (likely(!virtqueue_full(rxvq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			break;
		}
		if (unlikely(virtqueue_enqueue_recv_refill(rxvq, new_mbuf))) {
			rte_pktmbuf_free_seg(new_mbuf);
			break;
		}
	}
	pi->eth_stats.ipackets += num;
	return num;
}
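For reference, the two xenvirt burst functions above are installed as the PMD's receive and transmit callbacks and are normally reached through the generic ethdev API; a minimal polling-loop sketch, with port and queue ids chosen purely for illustration:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Minimal forwarding loop, assuming port 0 has already been configured
 * and started; rte_eth_rx_burst()/rte_eth_tx_burst() dispatch to the
 * PMD rx/tx callbacks such as the ones shown above.
 */
static void
simple_forward_loop(void)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb_rx, nb_tx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(0, 0, bufs, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		nb_tx = rte_eth_tx_burst(0, 0, bufs, nb_rx);

		/* Free any packets the TX path could not queue. */
		for (i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(bufs[i]);
	}
}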
Example no. 7
static int vmbus_read_and_signal(struct vmbus_channel *chan,
				 void *data, size_t dlen, size_t skip)
{
	struct vmbus_br *rbr = &chan->rxbr;
	uint32_t write_sz, pending_sz, bytes_read;
	int error;

	/* Record where the host was when we started the read (for debugging) */
	rbr->windex = rbr->vbr->windex;

	/* Read data and skip packet header */
	error = vmbus_rxbr_read(rbr, data, dlen, skip);
	if (error)
		return error;

	/* No need for signaling on older versions */
	if (!rbr->vbr->feature_bits.feat_pending_send_sz)
		return 0;

	/* Make sure the read of pending_send happens after the new read index */
	rte_mb();

	pending_sz = rbr->vbr->pending_send;
	if (!pending_sz)
		return 0;

	rte_smp_rmb();
	write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
	bytes_read = dlen + skip + sizeof(uint64_t);

	/* If there was enough space before our read, the host was not blocked */
	if (write_sz - bytes_read > pending_sz)
		return 0;

	/* If the pending write still will not fit, do not signal yet */
	if (write_sz <= pending_sz)
		return 0;

	vmbus_set_event(chan->device, chan);
	return 0;
}
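To make the two flow-control checks concrete with illustrative numbers (not taken from the source): suppose the host set pending_send = 2048, our read freed bytes_read = 512, and the writable space afterwards is write_sz = 2304. The space before the read was 2304 - 512 = 1792, which does not exceed 2048, so the host may have been blocked; and 2304 > 2048, so the pending write now fits, hence the event is signalled. Had write_sz been 4096, the pre-read space of 3584 would already exceed pending_send and no signal would be sent; had it been 1900, the pending write still would not fit and signalling would be premature.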
Example no. 8
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored; it does not affect the control flow here */
			service_run(i, cs, service_mask);
		}

		rte_smp_rmb();
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}
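A service callback reaches this runner loop only after it has been registered, enabled and mapped to a service lcore; a minimal sketch, assuming a DPDK version where the component API lives in rte_service_component.h (my_service_cb and launch_my_service are hypothetical names):

#include <rte_common.h>
#include <rte_service.h>
#include <rte_service_component.h>

/* Hypothetical user callback executed by rte_service_runner_func(). */
static int32_t
my_service_cb(void *userdata)
{
	RTE_SET_USED(userdata);
	/* one iteration of work */
	return 0;
}

/* Register the service, enable both runstates, map it to a service
 * lcore and start that lcore so the runner loop above picks it up.
 */
static int
launch_my_service(uint32_t service_lcore)
{
	struct rte_service_spec spec = {
		.name = "my_service",
		.callback = my_service_cb,
	};
	uint32_t id;
	int ret;

	ret = rte_service_component_register(&spec, &id);
	if (ret)
		return ret;

	rte_service_component_runstate_set(id, 1);
	rte_service_runstate_set(id, 1);

	ret = rte_service_lcore_add(service_lcore);
	if (ret < 0)
		return ret;
	rte_service_map_lcore_set(id, service_lcore, 1);

	return rte_service_lcore_start(service_lcore);
}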