void *
if_netmap_rxslot(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t *len, uint32_t *index)
{
	struct netmap_ring *rxr = ctx->hw_rx_ring;
	uint32_t cur = *slotno;

	*slotno = NETMAP_RING_NEXT(rxr, cur); 
	*len = rxr->slot[cur].len;
	*index = rxr->slot[cur].buf_idx;
	return (NETMAP_BUF(rxr, rxr->slot[cur].buf_idx));
}
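As a usage sketch, a caller drains filled RX slots by invoking the helper once per slot (handle_packet() and the slot-count bookkeeping are hypothetical stand-ins for the surrounding libuinet glue):

static void
drain_rx(struct if_netmap_host_context *ctx, uint32_t slotno, uint32_t navail)
{
	uint32_t len, index;
	void *buf;

	/* One call per filled slot; if_netmap_rxslot() advances slotno. */
	while (navail-- > 0) {
		buf = if_netmap_rxslot(ctx, &slotno, &len, &index);
		handle_packet(buf, len, index);	/* hypothetical consumer */
	}
}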
Example #2
/*
 * create and enqueue a batch of packets on a ring.
 * On the last one set NS_REPORT to tell the driver to generate
 * an interrupt when done.
 */
static int
send_packets(struct netmap_ring *ring, struct pkt *pkt, 
		int size, u_int count, int options)
{
	u_int sent, cur = ring->cur;

	if (ring->avail < count)
		count = ring->avail;

#if 0
	if (options & (OPT_COPY | OPT_PREFETCH) ) {
		for (sent = 0; sent < count; sent++) {
			struct netmap_slot *slot = &ring->slot[cur];
			char *p = NETMAP_BUF(ring, slot->buf_idx);

			prefetch(p);
			cur = NETMAP_RING_NEXT(ring, cur);
		}
		cur = ring->cur;
	}
#endif
	for (sent = 0; sent < count; sent++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (options & OPT_COPY)
			pkt_copy(pkt, p, size);
		else if (options & OPT_MEMCPY)
			memcpy(p, pkt, size);
		else if (options & OPT_PREFETCH)
			prefetch(p);
		slot->len = size;
		if (sent == count - 1)
			slot->flags |= NS_REPORT;
		cur = NETMAP_RING_NEXT(ring, cur);
	}
	ring->avail -= sent;
	ring->cur = cur;

	return (sent);
}
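This variant uses the original netmap slot API: ring->avail and NETMAP_RING_NEXT() were later replaced by the head/cur/tail cursors with nm_ring_space() and nm_ring_next(). For comparison, a sketch of the same batching logic against the current API (compare test_send() in Example #10 below):

static int
send_batch(struct netmap_ring *ring, const void *frame, int size, u_int count)
{
	u_int sent, cur = ring->cur;
	u_int n = nm_ring_space(ring);	/* free slots between cur and tail */

	if (n < count)
		count = n;
	for (sent = 0; sent < count; sent++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		nm_pkt_copy(frame, p, size);
		slot->len = size;
		if (sent == count - 1)
			slot->flags |= NS_REPORT;	/* interrupt on completion */
		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;	/* hand the slots to the kernel */
	return (sent);
}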
Example #3
void *
if_netmap_txslot(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t len)
{
	struct netmap_ring *txr = ctx->hw_tx_ring;
	uint32_t cur = *slotno;
		
	assert(len <= txr->nr_buf_size);

	txr->slot[cur].len = len;
	*slotno = NETMAP_RING_NEXT(txr, cur); 
	return (NETMAP_BUF(txr, txr->slot[cur].buf_idx));
}
Example #4
static struct peak_netmap *
_peak_netmap_claim(void)
{
	struct _peak_netmap *packet;
	struct netmap_ring *ring;
	struct my_ring *me;
	unsigned int j, si;

	for (j = 0; j < NETMAP_COUNT(); ++j) {
		me = self->me[j];

		for (si = me->begin; si < me->end; ++si) {
			unsigned int i, idx;

			ring = NETMAP_RXRING(me->nifp, si);
			if (!ring->avail) {
				continue;
			}

			packet = NETPKT_GET();
			if (!packet) {
				alert("netmap packet pool empty\n");
				return (NULL);
			}

			bzero(packet, sizeof(*packet));

			i = ring->cur;
			idx = ring->slot[i].buf_idx;
			if (idx < 2) {
				panic("%s bugus RX index %d at offset %d\n",
				    me->nifp->ni_name, idx, i);
			}

			/* volatile internals */
			packet->ring = ring;
			packet->i = i;

			/* external stuff */
			packet->data.buf = NETMAP_BUF(ring, idx);
			packet->data.len = ring->slot[i].len;
			packet->data.ll = LINKTYPE_ETHERNET;
			packet->data.ts_ms = (int64_t)ring->ts.tv_sec *
			    1000 + (int64_t)ring->ts.tv_usec / 1000;
			packet->data.ts_unix = ring->ts.tv_sec;
			packet->data.ifname = me->ifname;

			return (NETPKT_TO_USER(packet));
		}
	}

	return (NULL);
}
Example #5
static void receiver(lua_State *L, int cb_ref, struct nm_desc *d, unsigned int ring_id) {
    struct pollfd fds;
    struct netmap_ring *ring;
    unsigned int i, len;
    char *buf;
    time_t now;
    int pps;

    now = time(NULL);
    pps = 0;

    while (1) {
        fds.fd     = d->fd;
        fds.events = POLLIN;

        int r = poll(&fds, 1, 1000);
        if (r < 0) {
            if (errno != EINTR) {
                perror("poll()");
                exit(3);
            }
        }

        if (time(NULL) > now) {
            printf("[+] receiving %d pps\n", pps);
            pps = 0;
            now = time(NULL);
        }

        ring = NETMAP_RXRING(d->nifp, ring_id);

        while (!nm_ring_empty(ring)) {
            i   = ring->cur;
            buf = NETMAP_BUF(ring, ring->slot[i].buf_idx);
            len = ring->slot[i].len;

            pps++;

            if (filter_packet(L, cb_ref, buf, len)) {
                // forward packet to kernel
                ring->flags         |= NR_FORWARD;
                ring->slot[i].flags |= NS_FORWARD;
                printf("+++ PASS\n");
            } else {
                // drop packet
                printf("--- DROP\n");
            }

            ring->head = ring->cur = nm_ring_next(ring, i);
        }
    }
}
Example #6
/**
 * Given a Netmap ring and a slot index for that ring, construct a dpdk mbuf
 * from the data held in the buffer associated with the slot.
 * Allocation/deallocation of the dpdk mbuf is the responsibility of the
 * caller.
 * Note that mbuf chains are not supported.
 */
static void
slot_to_mbuf(struct netmap_ring *r, uint32_t index, struct rte_mbuf *mbuf)
{
	char *data;
	uint16_t length;

	rte_pktmbuf_reset(mbuf);
	length = r->slot[index].len;
	data = rte_pktmbuf_append(mbuf, length);

	if (data != NULL)
	    rte_memcpy(data, NETMAP_BUF(r, r->slot[index].buf_idx), length);
}
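A receive path would pair this with mbuf allocation roughly as in the sketch below (the mempool is assumed to have been created elsewhere, e.g. with rte_pktmbuf_pool_create()):

static struct rte_mbuf *
rx_slot_to_mbuf(struct netmap_ring *r, struct rte_mempool *mp)
{
	/* Allocate an mbuf from the caller's pool and fill it from the
	 * slot at r->cur; advancing the ring cursors is left to the caller. */
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m != NULL)
		slot_to_mbuf(r, r->cur, m);
	return (m);
}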
Example #7
/*
 * Note: this thread is the only one pulling packets off of any
 * given netmap instance
 */
static void *
receiver(void *arg)
{
	struct virtif_user *viu = arg;
	struct iovec iov;
	struct netmap_if *nifp = viu->nm_nifp;
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
	struct netmap_slot *slot;
	struct pollfd pfd;
	int prv;

	rumpuser_component_kthread();

	for (;;) {
		pfd.fd = viu->viu_fd;
		pfd.events = POLLIN;

		if (viu->viu_dying) {
			break;
		}

		prv = 0;
		while (nm_ring_empty(ring) && prv == 0) {
			DPRINTF(("receive pkt via netmap\n"));
			prv = poll(&pfd, 1, 1000);
			if (prv > 0 || (prv < 0 && errno != EAGAIN))
				break;
		}
#if 0
		/* XXX: report non-transient errors */
		if (ring->avail == 0) {
			rv = errno;
			break;
		}
#endif
		slot = &ring->slot[ring->cur];
		DPRINTF(("got pkt of size %d\n", slot->len));
		iov.iov_base = NETMAP_BUF(ring, slot->buf_idx);
		iov.iov_len = slot->len;

		/* XXX: allow batch processing */
		rumpuser_component_schedule(NULL);
		VIF_DELIVERPKT(viu->viu_virtifsc, &iov, 1);
		rumpuser_component_unschedule();

		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
	}

	rumpuser_component_kthread_release();
	return NULL;
}
Example #8
/**
 * Given a dpdk mbuf, fill in the Netmap slot in ring r and its associated
 * buffer with the data held by the mbuf.
 * Note that mbuf chains are not supported.
 */
static void
mbuf_to_slot(struct rte_mbuf *mbuf, struct netmap_ring *r, uint32_t index)
{
	char *data;
	uint16_t length;

	data   = rte_pktmbuf_mtod(mbuf, char *);
	length = rte_pktmbuf_data_len(mbuf);

	if (length > r->nr_buf_size)
		length = 0;

	r->slot[index].len = length;
	rte_memcpy(NETMAP_BUF(r, r->slot[index].buf_idx), data, length);
}
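The transmit direction is symmetric; a minimal sketch of pushing one mbuf through a netmap TX ring with this helper (the NIOCTXSYNC is left to the caller's batching policy):

static int
tx_mbuf(struct netmap_ring *r, struct rte_mbuf *m)
{
	uint32_t cur = r->cur;

	if (nm_ring_space(r) == 0)
		return (-1);	/* TX ring full */
	mbuf_to_slot(m, r, cur);
	/* release the slot to the kernel */
	r->head = r->cur = nm_ring_next(r, cur);
	return (0);
}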
Example #9
void
VIFHYPER_SEND(struct virtif_user *viu, struct iovec *iov, size_t iovlen)
{
	void *cookie = NULL; /* XXXgcc */
	struct netmap_if *nifp = viu->nm_nifp;
	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
	char *p;
	int retries;
	int unscheduled = 0;
	unsigned n;

	DPRINTF(("sending pkt via netmap len %d\n", (int)iovlen));
	for (retries = 10; !(n = nm_ring_space(ring)) && retries > 0; retries--) {
		struct pollfd pfd;

		if (!unscheduled) {
			cookie = rumpuser_component_unschedule();
			unscheduled = 1;
		}
		pfd.fd = viu->viu_fd;
		pfd.events = POLLOUT;
		DPRINTF(("cannot send on netmap, ring full\n"));
		(void)poll(&pfd, 1, 500 /* ms */);
	}
	if (n > 0) {
		int i, totlen = 0;
		struct netmap_slot *slot = &ring->slot[ring->cur];
#define MAX_BUF_SIZE 1900
		p = NETMAP_BUF(ring, slot->buf_idx);
		for (i = 0; totlen < MAX_BUF_SIZE && i < iovlen; i++) {
			int n = iov[i].iov_len;
			if (totlen + n > MAX_BUF_SIZE) {
				n = MAX_BUF_SIZE - totlen;
				DPRINTF(("truncating long pkt"));
			}
			memcpy(p + totlen, iov[i].iov_base, n);
			totlen += n;
		}
#undef MAX_BUF_SIZE
		slot->len = totlen;
		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		if (ioctl(viu->viu_fd, NIOCTXSYNC, NULL) < 0)
			perror("NIOCTXSYNC");
	}

	if (unscheduled)
		rumpuser_component_schedule(cookie);
}
Example #10
static int
test_send(struct netmap_ring *ring, usn_mbuf_t *m,  u_int count)
{
   u_int n, sent, cur = ring->cur;

   n = nm_ring_space(ring);
   if (n < count)
      count = n;
   for (sent = 0; sent < count; sent++) {
      struct netmap_slot *slot = &ring->slot[cur];
      char *p = NETMAP_BUF(ring, slot->buf_idx);
      nm_pkt_copy(m->head, p, m->mlen);
      slot->len = m->mlen;
      cur = nm_ring_next(ring, cur);
   }
   ring->head = ring->cur = cur;
   return (sent);
}
Example #11
int
pcap_dispatch(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
{
	struct pcap_ring *pme = p;
	struct my_ring *me = &pme->me;
	int got = 0;
	u_int si;

	ND("cnt %d", cnt);
	if (cnt == 0)
		cnt = -1;
	/* scan all rings */
	for (si = me->begin; si < me->end; si++) {
		struct netmap_ring *ring = NETMAP_RXRING(me->nifp, si);
		ND("ring has %d pkts", ring->avail);
		if (ring->avail == 0)
			continue;
		pme->hdr.ts = ring->ts;
		/*
		 * XXX a proper prefetch should be done as
		 *	prefetch(i); callback(i-1); ...
		 */
		while ((cnt == -1 || cnt != got) && ring->avail > 0) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			if (idx < 2) {
				D("%s bogus RX index %d at offset %d",
					me->nifp->ni_name, idx, i);
				sleep(2);
			}
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);
			prefetch(buf);
			pme->hdr.len = pme->hdr.caplen = ring->slot[i].len;
			// D("call %p len %d", p, me->hdr.len);
			callback(user, &pme->hdr, buf);
			ring->cur = NETMAP_RING_NEXT(ring, i);
			ring->avail--;
			got++;
		}
	}
	pme->st.ps_recv += got;
	return got;
}
Example #12
static __inline int
pci_vtnet_netmap_readv(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int len = 0;
	int i = 0;
	int r;

	for (r = nmd->cur_rx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_RXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;
		size_t left;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_rx_ring)
				r = nmd->first_rx_ring;
			if (r == nmd->cur_rx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);
		left = ring->slot[cur].len;

		for (i = 0; i < iovcnt && left > 0; i++) {
			if (iov[i].iov_len > left)
				iov[i].iov_len = left;
			memcpy(iov[i].iov_base, &buf[len], iov[i].iov_len);
			len += iov[i].iov_len;
			left -= iov[i].iov_len;
		}
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_rx_ring = r;
		ioctl(nmd->fd, NIOCRXSYNC, NULL);
		break;
	}
	for (; i < iovcnt; i++)
		iov[i].iov_len = 0;

	return (len);
}
Example #13
int receive_packets(struct netmap_ring *ring) {
    u_int cur, rx, n;

    cur = ring->cur;
    n = nm_ring_space(ring);
    
    for (rx = 0; rx < n; rx++) {
        struct netmap_slot *slot = &ring->slot[cur];
        char *p = NETMAP_BUF(ring, slot->buf_idx);

        // process data
        consume_pkt((u_char*)p, slot->len);

        cur = nm_ring_next(ring, cur);
    }

    ring->head = ring->cur = cur;
    return (rx);
}
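In context, this function typically sits inside a poll()-driven loop such as the sketch below (d comes from nm_open(); the single-ring case is assumed):

static void
rx_loop(struct nm_desc *d, unsigned int ring_id)
{
    struct pollfd pfd = { .fd = d->fd, .events = POLLIN };
    struct netmap_ring *ring = NETMAP_RXRING(d->nifp, ring_id);

    for (;;) {
        /* block until the kernel has filled some RX slots */
        if (poll(&pfd, 1, 1000) <= 0)
            continue;   /* timeout or transient error */
        receive_packets(ring);
    }
}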
Example #14
static void netmap_send(void *opaque)
{
    NetmapState *s = opaque;
    struct netmap_ring *ring = s->rx;

    /* Keep sending while there are available packets in the netmap
       RX ring and the forwarding path towards the peer is open. */
    while (!nm_ring_empty(ring)) {
        uint32_t i;
        uint32_t idx;
        bool morefrag;
        int iovcnt = 0;
        int iovsize;

        do {
            i = ring->cur;
            idx = ring->slot[i].buf_idx;
            morefrag = (ring->slot[i].flags & NS_MOREFRAG);
            s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
            s->iov[iovcnt].iov_len = ring->slot[i].len;
            iovcnt++;

            ring->cur = ring->head = nm_ring_next(ring, i);
        } while (!nm_ring_empty(ring) && morefrag);

        if (unlikely(nm_ring_empty(ring) && morefrag)) {
            RD(5, "[netmap_send] ran out of slots, with a pending"
                   "incomplete packet\n");
        }

        iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                            netmap_send_completed);

        if (iovsize == 0) {
            /* The peer is not receiving any more packets. The packet
             * has been queued; stop reading from the backend until
             * netmap_send_completed() is invoked.
             */
            netmap_read_poll(s, false);
            break;
        }
    }
}
Example #15
static int
receive_packets(struct netmap_ring *ring, u_int limit, int skip_payload)
{
	u_int cur, rx;

	cur = ring->cur;
	if (ring->avail < limit)
		limit = ring->avail;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (!skip_payload)
			check_payload(p, slot->len);

		cur = NETMAP_RING_NEXT(ring, cur);
	}
	ring->avail -= rx;
	ring->cur = cur;

	return (rx);
}
Example #16
static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);

		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (rx);
}
Example #17
static __inline int
pci_vtnet_netmap_writev(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int r, i;
	int len = 0;

	for (r = nmd->cur_tx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_TXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_tx_ring)
				r = nmd->first_tx_ring;
			if (r == nmd->cur_tx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);

		for (i = 0; i < iovcnt; i++) {
			if (len + iov[i].iov_len > 2048)
				break;
			memcpy(&buf[len], iov[i].iov_base, iov[i].iov_len);
			len += iov[i].iov_len;
		}
		ring->slot[cur].len = len;
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_tx_ring = r;
		ioctl(nmd->fd, NIOCTXSYNC, NULL);
		break;
	}

	return (len);
}
Example #18
static ssize_t netmap_receive(NetClientState *nc,
      const uint8_t *buf, size_t size)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t i;
    uint32_t idx;
    uint8_t *dst;

    if (unlikely(!ring)) {
        /* Drop. */
        return size;
    }

    if (unlikely(size > ring->nr_buf_size)) {
        RD(5, "[netmap_receive] drop packet of size %d > %d\n",
                                    (int)size, ring->nr_buf_size);
        return size;
    }

    if (nm_ring_empty(ring)) {
        /* No available slots in the netmap TX ring. */
        netmap_write_poll(s, true);
        return 0;
    }

    i = ring->cur;
    idx = ring->slot[i].buf_idx;
    dst = (uint8_t *)NETMAP_BUF(ring, idx);

    ring->slot[i].len = size;
    ring->slot[i].flags = 0;
    pkt_copy(buf, dst, size);
    ring->cur = ring->head = nm_ring_next(ring, i);
    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return size;
}
Example #19
/*----------------------------------------------------------------------------*/
int32_t
netmap_recv_pkts(struct mtcp_thread_context *ctxt, int ifidx)
{
    struct netmap_private_context *npc;
    struct nm_desc *d;
    npc = (struct netmap_private_context *)ctxt->io_private_context;
    d = npc->local_nmd[ifidx];

    int p = 0;
    int c, got = 0, ri = d->cur_rx_ring;
    int n = d->last_rx_ring - d->first_rx_ring + 1;
    int cnt = MAX_PKT_BURST;



    for (c = 0; c < n && cnt != got && npc->dev_poll_flag[ifidx]; c++) {
        /* compute current ring to use */
        struct netmap_ring *ring;

        ri = d->cur_rx_ring + c;
        if (ri > d->last_rx_ring)
            ri = d->first_rx_ring;
        ring = NETMAP_RXRING(d->nifp, ri);
        for ( ; !nm_ring_empty(ring) && cnt != got; got++) {
            u_int i = ring->cur;
            u_int idx = ring->slot[i].buf_idx;
            npc->rcv_pktbuf[p] = (u_char *)NETMAP_BUF(ring, idx);
            npc->rcv_pkt_len[p] = ring->slot[i].len;
            p++;
            ring->head = ring->cur = nm_ring_next(ring, i);
        }
    }
    d->cur_rx_ring = ri;

    npc->dev_poll_flag[ifidx] = 0;

    return p;
}
Example #20
struct sin_pkt *
sin_pkt_ctor(struct sin_pkt_zone *my_zone, int zone_idx,
 struct netmap_ring *my_ring, int *e)
{
    struct sin_pkt *pkt;

    pkt = malloc(sizeof(struct sin_pkt));
    if (pkt == NULL) {
        _SET_ERR(e, ENOMEM);
        return (NULL);
    }
    memset(pkt, '\0', sizeof(struct sin_pkt));
    SIN_TYPE_SET(pkt, _SIN_TYPE_PKT);
    pkt->ts = malloc(sizeof(struct timeval));
    if (pkt->ts == NULL) {
        free(pkt);
        _SET_ERR(e, ENOMEM);
        return (NULL);
    }
    memset(pkt->ts, '\0', sizeof(struct timeval));
    pkt->my_zone = my_zone;
    pkt->my_ring = my_ring;
    pkt->zone_idx = zone_idx;
    pkt->my_slot = &(my_ring->slot[zone_idx]);
    pkt->buf = NETMAP_BUF(my_ring, pkt->my_slot->buf_idx);

    return (pkt);
}
Example #21
int sendpacket_send_netmap(void *p, const u_char *data, size_t len)
{
    sendpacket_t *sp = p;
    struct netmap_ring *txring;
    struct netmap_slot *slot;
    char *pkt;
    uint32_t cur, avail;

    if (sp->abort)
        return 0;

    txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    while ((avail = nm_ring_space(txring)) == 0) {
        /* out of space on current TX queue - go to next */
        ++sp->cur_tx_ring;
        if (sp->cur_tx_ring > sp->last_tx_ring) {
            /*
             * out of space on all queues
             *
             * we have looped through all configured TX queues
             * so we have to reset to the first queue and
             * wait for available space
             */
            sp->cur_tx_ring = sp->first_tx_ring;

            /* send TX interrupt signal
             *
             * On Linux this makes one slot free on the
             * ring, which increases speed by about 10Mbps.
             *
             * But it will never free up all the slots. For
             * that we must poll and call again.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);

            /* loop again */
            return -2;
        }

        txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    }

    /*
     * send
     */
    cur = txring->cur;
    slot = &txring->slot[cur];
    slot->flags = 0;
    pkt = NETMAP_BUF(txring, slot->buf_idx);
    memcpy(pkt, data, min(len, txring->nr_buf_size));
    slot->len = len;

    if (avail <= 1)
        slot->flags = NS_REPORT;

    dbgx(3, "netmap cur=%d slot index=%d flags=0x%x empty=%d avail=%u bufsize=%d\n",
            cur, slot->buf_idx, slot->flags, NETMAP_TX_RING_EMPTY(txring),
            nm_ring_space(txring), txring->nr_buf_size);

    /* let kernel know that packet is available */
    cur = NETMAP_RING_NEXT(txring, cur);
#ifdef HAVE_NETMAP_RING_HEAD_TAIL
    txring->head = cur;
#else
    txring->avail--;
#endif
    txring->cur = cur;

    return len;
}
Example #22
int main(int argc, char **argv)
{
	int ch;
	uint32_t i;
	int rv;
	unsigned int iter = 0;

	glob_arg.ifname[0] = '\0';
	glob_arg.output_rings = DEF_OUT_PIPES;
	glob_arg.batch = DEF_BATCH;
	glob_arg.syslog_interval = DEF_SYSLOG_INT;

	while ( (ch = getopt(argc, argv, "i:p:b:B:s:")) != -1) {
		switch (ch) {
		case 'i':
			D("interface is %s", optarg);
			if (strlen(optarg) > MAX_IFNAMELEN - 8) {
				D("ifname too long %s", optarg);
				return 1;
			}
			if (strncmp(optarg, "netmap:", 7) && strncmp(optarg, "vale", 4)) {
				sprintf(glob_arg.ifname, "netmap:%s", optarg);
			} else {
				strcpy(glob_arg.ifname, optarg);
			}
			break;

		case 'p':
			glob_arg.output_rings = atoi(optarg);
			if (glob_arg.output_rings < 1) {
				D("you must output to at least one pipe");
				usage();
				return 1;
			}
			break;

		case 'B':
			glob_arg.extra_bufs = atoi(optarg);
			D("requested %d extra buffers", glob_arg.extra_bufs);
			break;

		case 'b':
			glob_arg.batch = atoi(optarg);
			D("batch is %d", glob_arg.batch);
			break;

		case 's':
			glob_arg.syslog_interval = atoi(optarg);
			D("syslog interval is %d", glob_arg.syslog_interval);
			break;

		default:
			D("bad option %c %s", ch, optarg);
			usage();
			return 1;

		}
	}

	if (glob_arg.ifname[0] == '\0') {
		D("missing interface name");
		usage();
		return 1;
	}

	setlogmask(LOG_UPTO(LOG_INFO));
	openlog("lb", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);

	uint32_t npipes = glob_arg.output_rings;

	struct overflow_queue *freeq = NULL;

	pthread_t stat_thread;

	ports = calloc(npipes + 1, sizeof(struct port_des));
	if (!ports) {
		D("failed to allocate the stats array");
		return 1;
	}
	struct port_des *rxport = &ports[npipes];

	if (pthread_create(&stat_thread, NULL, print_stats, NULL) == -1) {
		D("unable to create the stats thread: %s", strerror(errno));
		return 1;
	}


	/* we need base_req to specify pipes and extra bufs */
	struct nmreq base_req;
	memset(&base_req, 0, sizeof(base_req));

	base_req.nr_arg1 = npipes;
	base_req.nr_arg3 = glob_arg.extra_bufs;

	rxport->nmd = nm_open(glob_arg.ifname, &base_req, 0, NULL);

	if (rxport->nmd == NULL) {
		D("cannot open %s", glob_arg.ifname);
		return (1);
	} else {
		D("successfully opened %s (tx rings: %u)", glob_arg.ifname,
		  rxport->nmd->req.nr_tx_slots);
	}

	uint32_t extra_bufs = rxport->nmd->req.nr_arg3;
	struct overflow_queue *oq = NULL;
	/* reference ring to access the buffers */
	rxport->ring = NETMAP_RXRING(rxport->nmd->nifp, 0);

	if (!glob_arg.extra_bufs)
		goto run;

	D("obtained %d extra buffers", extra_bufs);
	if (!extra_bufs)
		goto run;

	/* one overflow queue for each output pipe, plus one for the
	 * free extra buffers
	 */
	oq = calloc(npipes + 1, sizeof(struct overflow_queue));
	if (!oq) {
		D("failed to allocated overflow queues descriptors");
		goto run;
	}

	freeq = &oq[npipes];
	rxport->oq = freeq;

	freeq->slots = calloc(extra_bufs, sizeof(struct netmap_slot));
	if (!freeq->slots) {
		D("failed to allocate the free list");
		return 1;
	}
	freeq->size = extra_bufs;
	snprintf(freeq->name, MAX_IFNAMELEN, "free queue");

	/*
	 * the list of buffers uses the first uint32_t in each buffer
	 * as the index of the next buffer.
	 */
	uint32_t scan;
	for (scan = rxport->nmd->nifp->ni_bufs_head;
	     scan;
	     scan = *(uint32_t *)NETMAP_BUF(rxport->ring, scan))
	{
		struct netmap_slot s;
		s.buf_idx = scan;
		ND("freeq <- %d", s.buf_idx);
		oq_enq(freeq, &s);
	}

	atexit(free_buffers);

	if (freeq->n != extra_bufs) {
		D("something went wrong: netmap reported %d extra_bufs, but the free list contained %d",
				extra_bufs, freeq->n);
		return 1;
	}
	rxport->nmd->nifp->ni_bufs_head = 0;

run:
	for (i = 0; i < npipes; ++i) {
		char interface[25];
		sprintf(interface, "%s{%d", glob_arg.ifname, i);
		D("opening pipe named %s", interface);

		//ports[i].nmd = nm_open(interface, NULL, NM_OPEN_NO_MMAP | NM_OPEN_ARG3 | NM_OPEN_RING_CFG, rxport->nmd);
		ports[i].nmd = nm_open(interface, NULL, 0, rxport->nmd);

		if (ports[i].nmd == NULL) {
			D("cannot open %s", interface);
			return (1);
		} else {
			D("successfully opened pipe #%d %s (tx slots: %d)",
			  i + 1, interface, ports[i].nmd->req.nr_tx_slots);
			ports[i].ring = NETMAP_TXRING(ports[i].nmd->nifp, 0);
		}
		D("zerocopy %s",
		  (rxport->nmd->mem == ports[i].nmd->mem) ? "enabled" : "disabled");

		if (extra_bufs) {
			struct overflow_queue *q = &oq[i];
			q->slots = calloc(extra_bufs, sizeof(struct netmap_slot));
			if (!q->slots) {
				D("failed to allocate overflow queue for pipe %d", i);
				/* make all overflow queue management fail */
				extra_bufs = 0;
			}
			q->size = extra_bufs;
			snprintf(q->name, MAX_IFNAMELEN, "oq %d", i);
			ports[i].oq = q;
		}
	}

	if (glob_arg.extra_bufs && !extra_bufs) {
		if (oq) {
			for (i = 0; i < npipes + 1; i++) {
				free(oq[i].slots);
				oq[i].slots = NULL;
			}
			free(oq);
			oq = NULL;
		}
		D("*** overflow queues disabled ***");
	}

	sleep(2);

	struct pollfd pollfd[npipes + 1];
	memset(&pollfd, 0, sizeof(pollfd));

	signal(SIGINT, sigint_h);
	while (!do_abort) {
		u_int polli = 0;
		iter++;

		for (i = 0; i < npipes; ++i) {
			struct netmap_ring *ring = ports[i].ring;
			if (nm_ring_next(ring, ring->tail) == ring->cur) {
				/* no need to poll, there are no packets pending */
				continue;
			}
			pollfd[polli].fd = ports[i].nmd->fd;
			pollfd[polli].events = POLLOUT;
			pollfd[polli].revents = 0;
			++polli;
		}

		pollfd[polli].fd = rxport->nmd->fd;
		pollfd[polli].events = POLLIN;
		pollfd[polli].revents = 0;
		++polli;

		//RD(5, "polling %d file descriptors", polli+1);
		rv = poll(pollfd, polli, 10);
		if (rv <= 0) {
			if (rv < 0 && errno != EAGAIN && errno != EINTR)
				RD(1, "poll error %s", strerror(errno));
			continue;
		}

		if (oq) {
			/* try to push packets from the overflow queues
			 * to the corresponding pipes
			 */
			for (i = 0; i < npipes; i++) {
				struct port_des *p = &ports[i];
				struct overflow_queue *q = p->oq;
				uint32_t j, lim;
				struct netmap_ring *ring;
				struct netmap_slot *slot;

				if (!q->n)
					continue;
				ring = p->ring;
				lim = nm_ring_space(ring);
				if (!lim)
					continue;
				if (q->n < lim)
					lim = q->n;
				for (j = 0; j < lim; j++) {
					struct netmap_slot s = oq_deq(q);
					slot = &ring->slot[ring->cur];
					oq_enq(freeq, slot);
					*slot = s;
					slot->flags |= NS_BUF_CHANGED;
					ring->cur = nm_ring_next(ring, ring->cur);
				}
				ring->head = ring->cur;
				forwarded += lim;
				p->ctr.pkts += lim;
			}
		}

		int batch = 0;
		for (i = rxport->nmd->first_rx_ring; i <= rxport->nmd->last_rx_ring; i++) {
			struct netmap_ring *rxring = NETMAP_RXRING(rxport->nmd->nifp, i);

			//D("prepare to scan rings");
			int next_cur = rxring->cur;
			struct netmap_slot *next_slot = &rxring->slot[next_cur];
			const char *next_buf = NETMAP_BUF(rxring, next_slot->buf_idx);
			while (!nm_ring_empty(rxring)) {
				struct overflow_queue *q;
				struct netmap_slot *rs = next_slot;

				// CHOOSE THE CORRECT OUTPUT PIPE
				uint32_t hash = pkt_hdr_hash((const unsigned char *)next_buf, 4, 'B');
				if (hash == 0)
					non_ip++; // XXX ??
				// prefetch the buffer for the next round
				next_cur = nm_ring_next(rxring, next_cur);
				next_slot = &rxring->slot[next_cur];
				next_buf = NETMAP_BUF(rxring, next_slot->buf_idx);
				__builtin_prefetch(next_buf);
				// 'B' is just a hashing seed
				uint32_t output_port = hash % glob_arg.output_rings;
				struct port_des *port = &ports[output_port];
				struct netmap_ring *ring = port->ring;
				uint32_t free_buf;

				// Move the packet to the output pipe.
				if (nm_ring_space(ring)) {
					struct netmap_slot *ts = &ring->slot[ring->cur];
					free_buf = ts->buf_idx;
					ts->buf_idx = rs->buf_idx;
					ts->len = rs->len;
					ts->flags |= NS_BUF_CHANGED;
					ring->head = ring->cur = nm_ring_next(ring, ring->cur);
					port->ctr.pkts++;
					forwarded++;
					goto forward;
				}

				/* use the overflow queue, if available */
				if (!oq) {
					dropped++;
					port->ctr.drop++;
					goto next;
				}

				q = &oq[output_port];

				if (!freeq->n) {
					/* revoke some buffers from the longest overflow queue */
					uint32_t j;
					struct port_des *lp = &ports[0];
					uint32_t max = lp->oq->n;

					for (j = 1; j < npipes; j++) {
						struct port_des *cp = &ports[j];
						if (cp->oq->n > max) {
							lp = cp;
							max = cp->oq->n;
						}
					}

					// XXX optimize this cycle
					for (j = 0; lp->oq->n && j < BUF_REVOKE; j++) {
						struct netmap_slot tmp = oq_deq(lp->oq);
						oq_enq(freeq, &tmp);
					}

					ND(1, "revoked %d buffers from %s", j, lq->name);
					lp->ctr.drop += j;
					dropped += j;
				}

				free_buf = oq_deq(freeq).buf_idx;
				oq_enq(q, rs);

			forward:
				rs->buf_idx = free_buf;
				rs->flags |= NS_BUF_CHANGED;
			next:
				rxring->head = rxring->cur = next_cur;

				batch++;
				if (unlikely(batch >= glob_arg.batch)) {
					ioctl(rxport->nmd->fd, NIOCRXSYNC, NULL);
					batch = 0;
				}
				ND(1,
				   "Forwarded Packets: %"PRIu64" Dropped packets: %"PRIu64"   Percent: %.2f",
				   forwarded, dropped,
				   ((float)dropped / (float)forwarded * 100));
			}

		}
	}

	pthread_join(stat_thread, NULL);

	printf("%"PRIu64" packets forwarded.  %"PRIu64" packets dropped. Total %"PRIu64"\n", forwarded,
	       dropped, forwarded + dropped);
	return 0;
}
Example #23
void *dispatcher(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  int rv;
  struct netmap_ring *rxring;
  struct ethernet_pkt *etherpkt;
  struct pollfd pfd;
  struct dispatcher_data *data;
  uint32_t *slots_used, *open_transactions;
  uint32_t i, arpd_idx, num_threads;
  char *buf;
  struct msg_hdr *msg;
  struct ether_addr *mac;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  arpd_idx = context->shared->arpd_idx;
  mac = &context->shared->if_info->mac;
  num_threads = context->shared->num_threads;

  struct transaction *transactions[num_threads];
  uint64_t dropped[num_threads];
  for (i=0; i < num_threads; i++) {
    transactions[i] = NULL;
    dropped[i] = 0;
  }

  rv = dispatcher_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  rxring = NETMAP_RXRING(data->nifp, 0);
  slots_used = bitmap_new(rxring->num_slots);
  if (!slots_used)
    pthread_exit(NULL);

  open_transactions = bitmap_new(num_threads);
  if (!open_transactions)
    pthread_exit(NULL);

  pfd.fd = data->fd;
  pfd.events = (POLLIN);

  printf("dispatcher[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  for (;;) {
    rv = poll(&pfd, 1, POLL_TIMEOUT);

    // read all packets from the ring
    if (rv > 0) {
      for (; rxring->avail > 0; rxring->avail--) {
        i = rxring->cur;
        rxring->cur = NETMAP_RING_NEXT(rxring, i);
        rxring->reserved++;
        buf = NETMAP_BUF(rxring, rxring->slot[i].buf_idx);
        etherpkt = (struct ethernet_pkt *)(void *)buf;

        // TODO: consider pushing this check to the workers
        if (!ethernet_is_valid(etherpkt, mac)) {
          if (rxring->reserved == 1)
            rxring->reserved = 0;
          continue;
        }

        // TODO: dispatch to n workers instead of just 0
        switch (etherpkt->h.ether_type) {
          case IP4_ETHERTYPE:
            rv = tqueue_insert(contexts[0].pkt_recv_q,
                               &transactions[0], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                bitmap_set(slots_used, i);
                bitmap_set(open_transactions, 0);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                bitmap_clear(open_transactions, 0);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[0]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          case ARP_ETHERTYPE:
            rv = tqueue_insert(contexts[arpd_idx].pkt_recv_q,
                               &transactions[arpd_idx], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                tqueue_publish_transaction(contexts[arpd_idx].pkt_recv_q,
                                            &transactions[arpd_idx]);
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[arpd_idx]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          default:
            printf("dispatcher[%d]: unknown/unsupported ethertype %hu\n",
                    context->thread_id, etherpkt->h.ether_type);
            if (rxring->reserved == 1)
              rxring->reserved = 0;
        } // switch (ethertype)
      } // for (rxring)

      // publish any open transactions so that the worker can start on it
      for (i=0; i < num_threads; i++) {
        if (bitmap_get(open_transactions, i))
          tqueue_publish_transaction(contexts[i].pkt_recv_q, &transactions[i]);
      }
      bitmap_clearall(open_transactions, num_threads);
    } // if (packets)

    // read the message queue
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;
    while ((msg = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg->msg_type) {
        case MSG_TRANSACTION_UPDATE:
          update_slots_used(context->msg_q, slots_used, rxring);
          break;
        case MSG_TRANSACTION_UPDATE_SINGLE:
          update_slots_used_single((void *)msg, slots_used, rxring);
          break;
        default:
          printf("dispatcher: unknown message %hu\n", msg->msg_type);
      }
    }
    squeue_exit(context->msg_q);

  } // for(;;)

  pthread_exit(NULL);
}
Example #24
/*
 * move up to 'limit' pkts from rxring to txring swapping buffers.
 */
static int
process_rings(struct netmap_ring *rxring, struct netmap_ring *txring,
              u_int limit, const char *msg)
{
    u_int j, k, m = 0;

    /* print a warning if any of the ring flags is set (e.g. NM_REINIT) */
    if (rxring->flags || txring->flags)
        D("%s rxflags %x txflags %x",
          msg, rxring->flags, txring->flags);
    j = rxring->cur; /* RX */
    k = txring->cur; /* TX */
    m = nm_ring_space(rxring);
    if (m < limit)
        limit = m;
    m = nm_ring_space(txring);
    if (m < limit)
        limit = m;
    m = limit;
    while (limit-- > 0) {
        struct netmap_slot *rs = &rxring->slot[j];
        struct netmap_slot *ts = &txring->slot[k];
#ifdef NO_SWAP
        char *rxbuf = NETMAP_BUF(rxring, rs->buf_idx);
        char *txbuf = NETMAP_BUF(txring, ts->buf_idx);
#else
        uint32_t pkt;
#endif

        /* swap packets */
        if (ts->buf_idx < 2 || rs->buf_idx < 2) {
            D("wrong index rx[%d] = %d  -> tx[%d] = %d",
              j, rs->buf_idx, k, ts->buf_idx);
            sleep(2);
        }
#ifndef NO_SWAP
        pkt = ts->buf_idx;
        ts->buf_idx = rs->buf_idx;
        rs->buf_idx = pkt;
#endif
        /* copy the packet length. */
        if (rs->len < 14 || rs->len > 2048)
            D("wrong len %d rx[%d] -> tx[%d]", rs->len, j, k);
        else if (verbose > 1)
            D("%s send len %d rx[%d] -> tx[%d]", msg, rs->len, j, k);
        ts->len = rs->len;
#ifdef NO_SWAP
        pkt_copy(rxbuf, txbuf, ts->len);
#else
        /* report the buffer change. */
        ts->flags |= NS_BUF_CHANGED;
        rs->flags |= NS_BUF_CHANGED;
#endif /* NO_SWAP */
        j = nm_ring_next(rxring, j);
        k = nm_ring_next(txring, k);
    }
    rxring->head = rxring->cur = j;
    txring->head = txring->cur = k;
    if (verbose && m > 0)
        D("%s sent %d packets to %p", msg, m, txring);

    return (m);
}
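This is the core of the netmap bridge example; a sketch of one forwarding pass driving it for a pair of single-ring ports (pa and pb are nm_desc handles obtained from nm_open()):

static void
bridge_pass(struct nm_desc *pa, struct nm_desc *pb, u_int burst)
{
    /* forward pa -> pb, then pb -> pa, swapping buffers both ways */
    process_rings(NETMAP_RXRING(pa->nifp, 0),
        NETMAP_TXRING(pb->nifp, 0), burst, "a->b");
    process_rings(NETMAP_RXRING(pb->nifp, 0),
        NETMAP_TXRING(pa->nifp, 0), burst, "b->a");
}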
Example #25
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;
	uint32_t sent = 0;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
	    for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
		slot = &ring->slot[ring->cur];
		slot->len = size;
		p = NETMAP_BUF(ring, slot->buf_idx);

		if (nm_ring_empty(ring)) {
			D("-- ouch, cannot send");
		} else {
			struct tstamp *tp;
			nm_pkt_copy(frame, p, size);
			clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
			bcopy(&sent, p+42, sizeof(sent));
			tp = (struct tstamp *)(p+46);
			tp->sec = (uint32_t)ts.tv_sec;
			tp->nsec = (uint32_t)ts.tv_nsec;
			sent++;
			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		}
	    }
		/* should use a parameter to decide how often to send */
		if (poll(&pfd, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring;
			i <= targ->nmd->last_rx_ring; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				struct tstamp *tp;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				tp = (struct tstamp *)(p+46);
				ts.tv_sec = (time_t)tp->sec;
				ts.tv_nsec = (long)tp->nsec;
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d",
				count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}


/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCRXSYNC, NULL);
#else
		if (poll(&pfd, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
				    txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				nm_pkt_copy(src, dst, slot->len);
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}

static __inline int
timespec_ge(const struct timespec *a, const struct timespec *b)
{

	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}

static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}


static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}


/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
	struct netmap_if *nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads;
	int64_t sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	if (targ->frame == NULL) {
		frame = pkt;
		frame += sizeof(pkt->vh) - targ->g->virt_header;
		size = targ->g->pkt_size + targ->g->virt_header;
	} else {
		frame = targ->frame;
		size = targ->g->pkt_size;
	}
	
	D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
        if (targ->g->dev_type == DEV_TAP) {
	    D("writing to file desc %d", targ->g->main_fd);

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (write(targ->g->main_fd, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
#ifndef NO_PCAP
    } else if (targ->g->dev_type == DEV_PCAP) {
	    pcap_t *p = targ->g->p;

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (pcap_inject(p, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
#endif /* NO_PCAP */
    } else {
	int tosend = 0;
	int frags = targ->g->frags;

        nifp = targ->nmd->nifp;
	while (!targ->cancel && (n == 0 || sent < n)) {

		if (rate_limit && tosend <= 0) {
			tosend = targ->g->burst;
			nexttime = timespec_add(nexttime, targ->g->tx_period);
			wait_time(nexttime);
		}

		/*
		 * wait for available room in the send queue(s)
		 */
		if (poll(&pfd, 1, 2000) <= 0) {
			if (targ->cancel)
				break;
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			// goto quit;
		}
		if (pfd.revents & POLLERR) {
			D("poll error");
			goto quit;
		}
		/*
		 * scan our queues and send on those with room
		 */
		if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
			D("drop copy");
			options &= ~OPT_COPY;
		}
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			int m, limit = rate_limit ?  tosend : targ->g->burst;
			if (n > 0 && n - sent < limit)
				limit = n - sent;
			txring = NETMAP_TXRING(nifp, i);
			if (nm_ring_empty(txring))
				continue;
			if (frags > 1)
				limit = ((limit + frags - 1) / frags) * frags;

			m = send_packets(txring, pkt, frame, size, targ->g,
					 limit, options, frags);
			ND("limit %d tail %d frags %d m %d",
				limit, txring->tail, frags, m);
			sent += m;
			targ->count = sent;
			if (rate_limit) {
				tosend -= m;
				if (tosend <= 0)
					break;
			}
		}
	}
	/* flush any remaining packets */
	D("flush tail %d head %d on thread %p",
		txring->tail, txring->head,
		pthread_self());
	ioctl(pfd.fd, NIOCTXSYNC, NULL);

	/* final part: wait all the TX queues to be empty. */
	for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
		txring = NETMAP_TXRING(nifp, i);
		while (nm_tx_pending(txring)) {
			RD(5, "pending tx tail %d head %d on ring %d",
				txring->tail, txring->head, i);
			ioctl(pfd.fd, NIOCTXSYNC, NULL);
			usleep(1); /* wait 1 tick */
		}
	}
    } /* end DEV_NETMAP */

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}


#ifndef NO_PCAP
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
#endif /* !NO_PCAP */

static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);

		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (rx);
}

static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	D("reading from %s fd %d main_fd %d",
		targ->g->ifname, targ->fd, targ->g->main_fd);
	/* unbounded wait for the first packet. */
	for (;!targ->cancel;) {
		i = poll(&pfd, 1, 1000);
		if (i > 0 && !(pfd.revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d",
			i, pfd.revents);
	}
	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
    if (targ->g->dev_type == DEV_TAP) {
	while (!targ->cancel) {
		char buf[MAX_BODYSIZE];
		/* XXX should we poll ? */
		if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
			targ->count++;
	}
#ifndef NO_PCAP
    } else if (targ->g->dev_type == DEV_PCAP) {
	while (!targ->cancel) {
		/* XXX should we poll ? */
		pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
			(u_char *)&targ->count);
	}
#endif /* !NO_PCAP */
    } else {
	int dump = targ->g->options & OPT_DUMP;

        nifp = targ->nmd->nifp;
	while (!targ->cancel) {
		/* Once we have started to receive packets, wait at most
		   1 second before quitting. */
		if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
			clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
			targ->toc.tv_sec -= 1; /* Subtract timeout time. */
			goto out;
		}

		if (pfd.revents & POLLERR) {
			D("poll err");
			goto quit;
		}

		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			int m;

			rxring = NETMAP_RXRING(nifp, i);
			if (nm_ring_empty(rxring))
				continue;

			m = receive_packets(rxring, targ->g->burst, dump);
			received += m;
		}
		targ->count = received;
	}
    }

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);

out:
	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}

/* very crude code to print a number in normalized form.
 * Caller has to make sure that the buffer is large enough.
 */
static const char *
norm(char *buf, double val)
{
	char *units[] = { "", "K", "M", "G", "T" };
	u_int i;

	for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
		val /= 1000;
	sprintf(buf, "%.2f %s", val, units[i]);
	return buf;
}

static void
tx_output(uint64_t sent, int size, double delta)
{
	double bw, raw_bw, pps;
	char b1[40], b2[80], b3[80];

	printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
	       (unsigned long long)sent, size, delta);
	if (delta == 0)
		delta = 1e-6;
	if (size < 60)		/* correct for min packet size */
		size = 60;
	pps = sent / delta;
	bw = (8.0 * size * sent) / delta;
	/* raw packets have 4 bytes CRC + 20 bytes framing */
	raw_bw = (8.0 * (size + 24) * sent) / delta;

	printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
		norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
}


static void
rx_output(uint64_t received, double delta)
{
	double pps;
	char b1[40];

	printf("Received %llu packets, in %.2f seconds.\n",
		(unsigned long long) received, delta);

	if (delta == 0)
		delta = 1e-6;
	pps = received / delta;
	printf("Speed: %spps\n", norm(b1, pps));
}
Example #26
/*
 * create and enqueue a batch of packets on a ring.
 * On the last one set NS_REPORT to tell the driver to generate
 * an interrupt when done.
 */
static int
send_packets(struct netmap_ring *ring, struct pkt *pkt, void *frame,
		int size, struct glob_arg *g, u_int count, int options,
		u_int nfrags)
{
	u_int n, sent, cur = ring->cur;
	u_int fcnt;

	n = nm_ring_space(ring);
	if (n < count)
		count = n;
	if (count < nfrags) {
		D("truncating packet, no room for frags %d %d",
				count, nfrags);
	}
#if 0
	if (options & (OPT_COPY | OPT_PREFETCH) ) {
		for (sent = 0; sent < count; sent++) {
			struct netmap_slot *slot = &ring->slot[cur];
			char *p = NETMAP_BUF(ring, slot->buf_idx);

			__builtin_prefetch(p);
			cur = nm_ring_next(ring, cur);
		}
		cur = ring->cur;
	}
#endif
	for (fcnt = nfrags, sent = 0; sent < count; sent++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		slot->flags = 0;
		if (options & OPT_INDIRECT) {
			slot->flags |= NS_INDIRECT;
			slot->ptr = (uint64_t)frame;
		} else if (options & OPT_COPY) {
			nm_pkt_copy(frame, p, size);
			if (fcnt == nfrags)
				update_addresses(pkt, g);
		} else if (options & OPT_MEMCPY) {
			memcpy(p, frame, size);
			if (fcnt == nfrags)
				update_addresses(pkt, g);
		} else if (options & OPT_PREFETCH) {
			__builtin_prefetch(p);
		}
		if (options & OPT_DUMP)
			dump_payload(p, size, ring, cur);
		slot->len = size;
		if (--fcnt > 0)
			slot->flags |= NS_MOREFRAG;
		else
			fcnt = nfrags;
		if (sent == count - 1) {
			slot->flags &= ~NS_MOREFRAG;
			slot->flags |= NS_REPORT;
		}
		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (sent);
}
Example #27
int sendpacket_send_netmap(void *p, const u_char *data, size_t len)
{
    int retcode = 0;
    sendpacket_t *sp = p;
    struct netmap_ring *txring;
    struct netmap_slot *slot;
    char *pkt;
    uint32_t cur, avail;

    if (sp->abort)
        return retcode;

    txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    while ((avail = nm_ring_space(txring)) == 0) {
        /* out of space on current TX queue - go to next */
        ++sp->cur_tx_ring;
        if (sp->cur_tx_ring > sp->last_tx_ring) {
            /*
             * out of space on all queues
             *
             * we have looped through all configured TX queues
             * so we have to reset to the first queue and
             * wait for available space
             */
            struct pollfd pfd;

            sp->cur_tx_ring = sp->first_tx_ring;

            /* send TX interrupt signal
             *
             * On Linux this makes one slot free on the
             * ring, which increases speed by about 10Mbps.
             *
             * But it will never free up all the slots. For
             * that we must poll and call again.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);

            pfd.fd = sp->handle.fd;
            pfd.events = POLLOUT;
            pfd.revents = 0;
            if (poll(&pfd, 1, 1000) <= 0) {
                if (++sp->tx_timeouts == NETMAP_TX_TIMEOUT_SEC) {
                    return -1;
                }
                return -2;
            }

            sp->tx_timeouts = 0;

            /*
             * Do not remove this even though it looks redundant.
             * Overall performance is increased with this restart
             * of the TX queue.
             *
             * This call increases the number of available slots from
             * 1 to all that are truly available.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);
        }

        txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    }

    /*
     * send
     */
    cur = txring->cur;
    slot = &txring->slot[cur];
    slot->flags = 0;
    pkt = NETMAP_BUF(txring, slot->buf_idx);
    memcpy(pkt, data, min(len, txring->nr_buf_size));
    slot->len = len;

    if (avail <= 1)
        slot->flags = NS_REPORT;

    dbgx(3, "netmap cur=%d slot index=%d flags=0x%x empty=%d avail=%u bufsize=%d\n",
            cur, slot->buf_idx, slot->flags, NETMAP_TX_RING_EMPTY(txring),
            nm_ring_space(txring), txring->nr_buf_size);

    /* let kernel know that packet is available */
    cur = NETMAP_RING_NEXT(txring, cur);
#ifdef HAVE_NETMAP_RING_HEAD_TAIL
    txring->head = cur;
#else
    txring->avail--;
#endif
    txring->cur = cur;
    retcode = len;

    return retcode;
}
Example #28
// public function definitions
void *arpd(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  struct arpd_data *data;
  struct in_addr *my_addr;
  int rv;

  struct transaction *transaction = NULL;
  struct ethernet_pkt *etherpkt;
  struct arp_pkt *arp;
  struct netmap_ring *rxring;
  void *ring_idx;
  uint32_t dispatcher_idx;
  struct msg_hdr *msg_hdr;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  rxring = data->rxring;
  dispatcher_idx = context->shared->dispatcher_idx;
  my_addr = &context->shared->inet_info->addr;

  rv = arpd_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  printf("arpd[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  // main event loop
  for (;;) {
    // read all the incoming packets
    while (tqueue_remove(context->pkt_recv_q, &transaction, 
            &ring_idx) > 0) {
      etherpkt = (struct ethernet_pkt *) NETMAP_BUF(rxring, 
                                    rxring->slot[(uint32_t)ring_idx].buf_idx);
      arp = (struct arp_pkt*) etherpkt->data;

      if (!arp_is_valid(arp)) {
        send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                            (uint32_t)ring_idx);
        continue;
      }

      if (arp->arp_h.ar_op == ARP_OP_REQUEST) {
        if (arp->tpa.s_addr != my_addr->s_addr) {
          send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                              (uint32_t) ring_idx);
          continue;
        }

        printf("R)");
        arp_print_line(arp);

        // send_pkt_arp_reply could fail when xmit queue is full,
        // however, the sender should just resend a request
        send_pkt_arp_reply(context->pkt_xmit_q, &arp->spa, &arp->sha);
      } else {  // ARP_OP_REPLY
        if (!arp_reply_filter(arp, my_addr)) {
          send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                              (uint32_t) ring_idx);
          continue;
        }

        printf("R)");
        arp_print_line(arp);

        // TODO: also check against a list of my outstanding arp requests
        // prior to insertion in the arp cache
        recv_pkt_arp_reply(arp, data->arp_cache, contexts);
      }

      send_msg_transaction_update_single(&contexts[dispatcher_idx],
                                          (uint32_t) ring_idx);
    } // while (packets)

    // resend outstanding requests and refresh expiring entries
    update_arp_cache(data->arp_cache, contexts, context->pkt_xmit_q);

    // TODO: read all the messages
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;

    while ((msg_hdr = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg_hdr->msg_type) {
        case MSG_ARPD_GET_MAC:
          recv_msg_get_mac((void *)msg_hdr, data->arp_cache,
                            contexts, context->pkt_xmit_q);
          break;
        default:
          printf("arpd: unknown message %hu\n", msg_hdr->msg_type);
      }
    }
    squeue_exit(context->msg_q);

    usleep(ARP_CACHE_RETRY_INTERVAL);
  } // for (;;)

  pthread_exit(NULL);
}
Example #29
/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;
	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(fds[0].fd, NIOCRXSYNC, NULL);
#else
		if (poll(fds, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
				    txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				pkt_copy(src, dst, slot->len);
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(fds[0].fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}
Example #30
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;

	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);
	static uint32_t sent;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
	    for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
		slot = &ring->slot[ring->cur];
		slot->len = size;
		p = NETMAP_BUF(ring, slot->buf_idx);

		if (nm_ring_empty(ring)) {
			D("-- ouch, cannot send");
		} else {
			pkt_copy(frame, p, size);
			clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
			bcopy(&sent, p+42, sizeof(sent));
			bcopy(&ts, p+46, sizeof(ts));
			sent++;
			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		}
	    }
		/* should use a parameter to decide how often to send */
		if (poll(fds, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
		/* see what we got back */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				bcopy(p+46, &ts, sizeof(ts));
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d",
				count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}