Code Example #1
File: usnet_core.c  Project: carriercomm/libusnet
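/* Block until every TX ring bound to the global descriptor g_nmd has drained,
 * kicking the kernel with NIOCTXSYNC while packets remain. */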
void 
netmap_wait()
{
   int i;
   struct pollfd pfd = { .fd = g_nmd->fd, .events = POLLOUT };

   for (i = g_nmd->first_tx_ring; i <= g_nmd->last_tx_ring; i++) {
      struct netmap_ring *txring = NETMAP_TXRING(g_nmd->nifp, i);
      while (nm_tx_pending(txring)) {
         ioctl(pfd.fd, NIOCTXSYNC, NULL);
         usleep(1); // wait 1 tick
      }    
   }
}

/* sysctl wrapper to return the number of active CPUs */
int
system_ncpus(void)
{
   int ncpus;
#if defined (__FreeBSD__)
   int mib[2] = { CTL_HW, HW_NCPU };
   size_t len = sizeof(ncpus);  /* in/out: size of the result buffer */
   sysctl(mib, 2, &ncpus, &len, NULL, 0);
#elif defined(linux)
   ncpus = sysconf(_SC_NPROCESSORS_ONLN);
#else /* others */
   ncpus = 1;
#endif /* others */
   return (ncpus);
}
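
For context, here is a minimal standalone sketch of the same drain pattern written against the public netmap helper API (net/netmap_user.h). The port name netmap:em0 and the 60-byte dummy frame are placeholders, not part of libusnet.

#include <unistd.h>
#include <sys/ioctl.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

int
main(void)
{
   /* placeholder port name; any netmap-capable interface works */
   struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
   char frame[60] = { 0 };   /* dummy minimum-size Ethernet frame */
   int i;

   if (d == NULL)
      return 1;

   nm_inject(d, frame, sizeof(frame));   /* queue one frame for TX */

   /* same drain loop as netmap_wait(): sync and spin until empty */
   for (i = d->first_tx_ring; i <= d->last_tx_ring; i++) {
      struct netmap_ring *txring = NETMAP_TXRING(d->nifp, i);
      while (nm_tx_pending(txring)) {
         ioctl(d->fd, NIOCTXSYNC, NULL);
         usleep(1);
      }
   }
   nm_close(d);
   return 0;
}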
Code Example #2
File: usnet_core.c  Project: carriercomm/libusnet
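/* Kick a TX sync on g_nmd and then wait for all of its TX rings to empty. */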
void 
usnet_netmap_flush()
{
   int i;
   ioctl(g_nmd->fd, NIOCTXSYNC, NULL);
   /* final part: wait for all the TX queues to be empty. */
   for (i = g_nmd->first_tx_ring; i <= g_nmd->last_tx_ring; i++) {
      struct netmap_ring *txring = NETMAP_TXRING(g_nmd->nifp, i);
      while (nm_tx_pending(txring)) {
         ioctl(g_nmd->fd, NIOCTXSYNC, NULL);
         usleep(1); /* wait 1 tick */
      }
   }

}
Code Example #3
File: pkt-gen.c  Project: 2asoft/freebsd
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;
	uint32_t sent = 0;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
	    for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
		slot = &ring->slot[ring->cur];
		slot->len = size;
		p = NETMAP_BUF(ring, slot->buf_idx);

		if (nm_ring_empty(ring)) {
			D("-- ouch, cannot send");
		} else {
			struct tstamp *tp;
			nm_pkt_copy(frame, p, size);
			clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
			bcopy(&sent, p+42, sizeof(sent));
			tp = (struct tstamp *)(p+46);
			tp->sec = (uint32_t)ts.tv_sec;
			tp->nsec = (uint32_t)ts.tv_nsec;
			sent++;
			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		}
	    }
		/* should use a parameter to decide how often to send */
		if (poll(&pfd, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring;
			i <= targ->nmd->last_rx_ring; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				struct tstamp *tp;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				tp = (struct tstamp *)(p+46);
				ts.tv_sec = (time_t)tp->sec;
				ts.tv_nsec = (long)tp->nsec;
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d",
				count, min, av/count);
			count = 0;
			av = 0;
			min = 1000000000;
			last_print = now;
		}
	}
	return NULL;
}


/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCRXSYNC, NULL);
#else
		if (poll(&pfd, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
				    txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				nm_pkt_copy(src, dst, slot->len);
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}

static __inline int
timespec_ge(const struct timespec *a, const struct timespec *b)
{

	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}

static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}


static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}


/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}
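
/*
 * Note (not part of the original file): sender_body() below paces output by
 * adding a fixed tx_period before each burst and sleeping in wait_time()
 * until that deadline. The helper here is only an illustrative sketch of how
 * such a period could be derived from a target rate in packets per second;
 * the tx_rate/burst/tx_period names mirror the g->... fields used below.
 */
static struct timespec
burst_period(int tx_rate, int burst)
{
	struct timespec tp = { 0, 0 };
	unsigned long long ns;

	if (tx_rate <= 0 || burst <= 0)
		return tp;	/* no rate limiting requested */
	/* nanoseconds needed to emit one burst at the desired packet rate */
	ns = 1000000000ULL * (unsigned long long)burst / (unsigned long long)tx_rate;
	tp.tv_sec = ns / 1000000000ULL;
	tp.tv_nsec = ns % 1000000000ULL;
	return tp;
}
/*
 * Typical use (sketch): g->tx_period = burst_period(g->tx_rate, g->burst);
 * each loop iteration then advances the deadline with
 * nexttime = timespec_add(nexttime, g->tx_period); wait_time(nexttime);
 */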

static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
	struct netmap_if *nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads;
	int64_t sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	if (targ->frame == NULL) {
		frame = pkt;
		frame += sizeof(pkt->vh) - targ->g->virt_header;
		size = targ->g->pkt_size + targ->g->virt_header;
	} else {
		frame = targ->frame;
		size = targ->g->pkt_size;
	}
	
	D("start, fd %d main_fd %d", targ->fd, targ->g->main_fd);
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
        if (targ->g->dev_type == DEV_TAP) {
	    D("writing to file desc %d", targ->g->main_fd);

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (write(targ->g->main_fd, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
#ifndef NO_PCAP
    } else if (targ->g->dev_type == DEV_PCAP) {
	    pcap_t *p = targ->g->p;

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (pcap_inject(p, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
#endif /* NO_PCAP */
    } else {
	int tosend = 0;
	int frags = targ->g->frags;

        nifp = targ->nmd->nifp;
	while (!targ->cancel && (n == 0 || sent < n)) {

		if (rate_limit && tosend <= 0) {
			tosend = targ->g->burst;
			nexttime = timespec_add(nexttime, targ->g->tx_period);
			wait_time(nexttime);
		}

		/*
		 * wait for available room in the send queue(s)
		 */
		if (poll(&pfd, 1, 2000) <= 0) {
			if (targ->cancel)
				break;
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			// goto quit;
		}
		if (pfd.revents & POLLERR) {
			D("poll error");
			goto quit;
		}
		/*
		 * scan our queues and send on those with room
		 */
		if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
			D("drop copy");
			options &= ~OPT_COPY;
		}
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			int m, limit = rate_limit ?  tosend : targ->g->burst;
			if (n > 0 && n - sent < limit)
				limit = n - sent;
			txring = NETMAP_TXRING(nifp, i);
			if (nm_ring_empty(txring))
				continue;
			if (frags > 1)
				limit = ((limit + frags - 1) / frags) * frags;

			m = send_packets(txring, pkt, frame, size, targ->g,
					 limit, options, frags);
			ND("limit %d tail %d frags %d m %d",
				limit, txring->tail, frags, m);
			sent += m;
			targ->count = sent;
			if (rate_limit) {
				tosend -= m;
				if (tosend <= 0)
					break;
			}
		}
	}
	/* flush any remaining packets */
	D("flush tail %d head %d on thread %p",
		txring->tail, txring->head,
		pthread_self());
	ioctl(pfd.fd, NIOCTXSYNC, NULL);

	/* final part: wait for all the TX queues to be empty. */
	for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
		txring = NETMAP_TXRING(nifp, i);
		while (nm_tx_pending(txring)) {
			RD(5, "pending tx tail %d head %d on ring %d",
				txring->tail, txring->head, i);
			ioctl(pfd.fd, NIOCTXSYNC, NULL);
			usleep(1); /* wait 1 tick */
		}
	}
    } /* end DEV_NETMAP */

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}


#ifndef NO_PCAP
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
#endif /* !NO_PCAP */

static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);

		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;

	return (rx);
}

static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	D("reading from %s fd %d main_fd %d",
		targ->g->ifname, targ->fd, targ->g->main_fd);
	/* unbounded wait for the first packet. */
	for (;!targ->cancel;) {
		i = poll(&pfd, 1, 1000);
		if (i > 0 && !(pfd.revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d",
			i, pfd.revents);
	}
	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
    if (targ->g->dev_type == DEV_TAP) {
	while (!targ->cancel) {
		char buf[MAX_BODYSIZE];
		/* XXX should we poll ? */
		if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
			targ->count++;
	}
#ifndef NO_PCAP
    } else if (targ->g->dev_type == DEV_PCAP) {
	while (!targ->cancel) {
		/* XXX should we poll ? */
		pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap,
			(u_char *)&targ->count);
	}
#endif /* !NO_PCAP */
    } else {
	int dump = targ->g->options & OPT_DUMP;

        nifp = targ->nmd->nifp;
	while (!targ->cancel) {
		/* Once we have started to receive packets, wait at most 1 second
		   before quitting. */
		if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
			clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
			targ->toc.tv_sec -= 1; /* Subtract timeout time. */
			goto out;
		}

		if (pfd.revents & POLLERR) {
			D("poll err");
			goto quit;
		}

		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			int m;

			rxring = NETMAP_RXRING(nifp, i);
			if (nm_ring_empty(rxring))
				continue;

			m = receive_packets(rxring, targ->g->burst, dump);
			received += m;
		}
		targ->count = received;
	}
    }

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);

out:
	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}

/* very crude code to print a number in normalized form.
 * Caller has to make sure that the buffer is large enough.
 */
static const char *
norm(char *buf, double val)
{
	char *units[] = { "", "K", "M", "G", "T" };
	u_int i;

	for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
		val /= 1000;
	sprintf(buf, "%.2f %s", val, units[i]);
	return buf;
}

static void
tx_output(uint64_t sent, int size, double delta)
{
	double bw, raw_bw, pps;
	char b1[40], b2[80], b3[80];

	printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
	       (unsigned long long)sent, size, delta);
	if (delta == 0)
		delta = 1e-6;
	if (size < 60)		/* correct for min packet size */
		size = 60;
	pps = sent / delta;
	bw = (8.0 * size * sent) / delta;
	/* raw packets have 4 bytes CRC + 20 bytes framing */
	raw_bw = (8.0 * (size + 24) * sent) / delta;

	printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
		norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
}


static void
rx_output(uint64_t received, double delta)
{
	double pps;
	char b1[40];

	printf("Received %llu packets, in %.2f seconds.\n",
		(unsigned long long) received, delta);

	if (delta == 0)
		delta = 1e-6;
	pps = received / delta;
	printf("Speed: %spps\n", norm(b1, pps));
}
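
A small, purely illustrative caller for the reporting helpers above; the packet counts and timing are made-up numbers, not measurements from the project.

/* illustrative only: report made-up figures (10M 60-byte packets in 0.85 s) */
static void
report_example(void)
{
	char b1[40];

	tx_output(10000000ULL, 60, 0.85);
	rx_output(9999873ULL, 0.85);
	printf("normalized rate: %spps\n", norm(b1, 10000000.0 / 0.85));
}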
Code Example #4
static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}


static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}


/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1);
	}
}

static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;

	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads, sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	frame = pkt;
	frame += sizeof(pkt->vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;

	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;
	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLOUT);

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
    if (targ->g->dev_type == DEV_PCAP) {
	    pcap_t *p = targ->g->p;

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (pcap_inject(p, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
    } else if (targ->g->dev_type == DEV_TAP) { /* tap */
	    D("writing to file desc %d", targ->g->main_fd);

	    for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
		if (write(targ->g->main_fd, frame, size) != -1)
			sent++;
		update_addresses(pkt, targ->g);
		if (i > 10000) {
			targ->count = sent;
			i = 0;
		}
	    }
    } else {
	int tosend = 0;
	int frags = targ->g->frags;

	while (!targ->cancel && (n == 0 || sent < n)) {

		if (rate_limit && tosend <= 0) {
			tosend = targ->g->burst;
			nexttime = timespec_add(nexttime, targ->g->tx_period);
			wait_time(nexttime);
		}

		/*
		 * wait for available room in the send queue(s)
		 */
		if (poll(fds, 1, 2000) <= 0) {
			if (targ->cancel)
				break;
			D("poll error/timeout on queue %d: %s", targ->me,
				strerror(errno));
			goto quit;
		}
		if (fds[0].revents & POLLERR) {
			D("poll error");
			goto quit;
		}
		/*
		 * scan our queues and send on those with room
		 */
		if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
			D("drop copy");
			options &= ~OPT_COPY;
		}
		for (i = targ->qfirst; i < targ->qlast; i++) {
			int m, limit = rate_limit ?  tosend : targ->g->burst;
			if (n > 0 && n - sent < limit)
				limit = n - sent;
			txring = NETMAP_TXRING(nifp, i);
			if (nm_ring_empty(txring))
				continue;
			if (frags > 1)
				limit = ((limit + frags - 1) / frags) * frags;
				
			m = send_packets(txring, pkt, frame, size, targ->g,
					 limit, options, frags);
			ND("limit %d avail %d frags %d m %d", 
				limit, txring->avail, frags, m);
			sent += m;
			targ->count = sent;
			if (rate_limit) {
				tosend -= m;
				if (tosend <= 0)
					break;
			}
		}
	}
	/* flush any remaining packets */
	ioctl(fds[0].fd, NIOCTXSYNC, NULL);

	/* final part: wait for all the TX queues to be empty. */
	for (i = targ->qfirst; i < targ->qlast; i++) {
		txring = NETMAP_TXRING(nifp, i);
		while (nm_tx_pending(txring)) {
			ioctl(fds[0].fd, NIOCTXSYNC, NULL);
			usleep(1); /* wait 1 tick */
		}
	}
    }

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;

	return (NULL);
}


static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
Code Example #5
File: usnet_core.c  Project: carriercomm/libusnet
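/* TX smoke test: re-open g_interface (ring 0 only), send 10000 copies of
 * mbuf m in bursts via test_send(), print the elapsed time, then drain the
 * TX rings. */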
void test_netmap(usn_mbuf_t *m)
{
   int tosend = 0;
   int n, i;
   int rate_limit = 0;
   int sent = 0;
   struct pollfd pfd = { .fd = g_nmd->fd, .events = POLLOUT };
   struct netmap_if *nifp = g_nmd->nifp;
   struct timeval stime, etime;
   struct nm_desc nmd = *g_nmd;
   struct nm_desc *t_nmd;
   uint64_t nmd_flags = 0;

   // re-open netmap device.
   nmd.req.nr_flags = NR_REG_ONE_NIC;
   nmd.req.nr_ringid = 0;  
   printf("interface name:%s,len=%d\n",g_interface, m->mlen);
   t_nmd = nm_open(g_interface, NULL, nmd_flags 
                   | NM_OPEN_IFNAME | NM_OPEN_NO_MMAP, &nmd);

   if (t_nmd == NULL) {
      printf("Unable to open %s: %s", g_interface, strerror(errno));
      return;
   }
   nifp =t_nmd->nifp;
   pfd.fd =t_nmd->fd;
   pfd.events = POLLOUT;

   n = 10000;
   sent = 0;
   g_config.burst = 512;
   printf("g_config.burst=%d\n", g_config.burst);
   gettimeofday(&stime, 0);
   while ( sent < n ) {
      /*
       * wait for available room in the send queue(s)
       */
      if (poll(&pfd, 1, 1000) <= 0) {
         D("poll error/timeout on queue: %s", strerror(errno));
         // goto quit;
      }
      if (pfd.revents & POLLERR) {
         D("poll error");
         goto quit;
      }
      for (i = g_nmd->first_tx_ring; i <= g_nmd->last_tx_ring; i++) {
         int limit = rate_limit ?  tosend : g_config.burst;
         int cnt = 0;
         if (n > 0 && n - sent < limit)
            limit = n - sent;
         struct netmap_ring *txring = NETMAP_TXRING(nifp, i);
         if (nm_ring_empty(txring))
            continue;

         cnt = test_send(txring, m, limit);
         DEBUG("limit %d tail %d  cnt %d",
            limit, txring->tail, cnt);
         sent += cnt;
      }
   }
   // print info stats
   gettimeofday(&etime, 0);
   timersub(&etime,&stime,&etime);
   printf("num of sent pkts: %d\n", n);
   printf("total time: %lu (seconds) %lu (microseconds) \n", 
              etime.tv_sec, etime.tv_usec);

   /* flush any remaining packets */
   ioctl(pfd.fd, NIOCTXSYNC, NULL);

   /* final part: wait for all the TX queues to be empty. */
   for (i = g_nmd->first_tx_ring; i <= g_nmd->last_tx_ring; i++) {
      struct netmap_ring *txring = NETMAP_TXRING(nifp, i);
      while (nm_tx_pending(txring)) {
         ioctl(pfd.fd, NIOCTXSYNC, NULL);
         usleep(1); /* wait 1 tick */
      }
   }

quit:
  return;
}

/* set the thread affinity. */
int
setaffinity( int i)
{
   cpuset_t cpumask;

   if (i == -1)
      return 0;

   /* Set thread affinity. */
   CPU_ZERO(&cpumask);
   CPU_SET(i, &cpumask);

   if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, 
          -1, sizeof(cpuset_t), &cpumask) != 0) { 
      DEBUG("Unable to set affinity: %s", strerror(errno));
      return 1;
   }
   return 0;
}
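
setaffinity() above uses the FreeBSD cpuset(2) API. For comparison only, here is a minimal sketch of the same idea on Linux with pthread_setaffinity_np; it is not part of libusnet.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Pin the calling thread to CPU i; returns 0 on success, 1 on error. */
int
setaffinity_linux(int i)
{
   cpu_set_t cpumask;

   if (i == -1)
      return 0;

   CPU_ZERO(&cpumask);
   CPU_SET(i, &cpumask);

   if (pthread_setaffinity_np(pthread_self(), sizeof(cpumask), &cpumask) != 0) {
      fprintf(stderr, "Unable to set affinity: %s\n", strerror(errno));
      return 1;
   }
   return 0;
}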
Code Example #6
File: usnet_core.c  Project: carriercomm/libusnet
int32
usnet_send_frame(usn_mbuf_t *m)
{
   struct pollfd       fds;
   struct netmap_if   *nifp;
   int32               ret, error;
   u_int               size;
   u_char             *buf;
   int                 attemps = 0;
   int i, j;

   // TODO: put a check here
   fds.fd = g_nmd->fd;
   nifp = g_nmd->nifp;

   if ( m == 0 )
      return -USN_ENULLPTR;
   buf = m->head;
   size = m->mlen;

resend:
   if ( attemps==3 )
      return -USN_EBUSY;

   if(g_config.npkts >= g_config.burst ){
      fds.events = POLLOUT;
      fds.revents = 0;
      g_config.npkts = 0;

      ret = poll(&fds, 1, 2000);
      if (ret <= 0 ) {
         // XXX: save pending packets? 
         //      as it is easy to reach line rate.
         goto fail;
      }
      if (fds.revents & POLLERR) {
         struct netmap_ring *tx = NETMAP_TXRING(nifp, g_nmd->cur_tx_ring);
         (void)tx;
         DEBUG("poll error on %s, tx ring [%d,%d,%d]",
            g_interface, tx->head, tx->cur, tx->tail);
         error = -USN_EFDPOLL;
         goto fail;
      }
       
      if (fds.revents & POLLOUT) {
         goto send;
      }
      goto flush;
   }
send:
   for (j = g_nmd->first_tx_ring; 
            j <= g_nmd->last_tx_ring; j++) {
      struct netmap_ring *ring;
      uint32_t i, idx;
      ring = NETMAP_TXRING(nifp, j);

      if (nm_ring_empty(ring)) {
         continue;
      }

      i = ring->cur;
      idx = ring->slot[i].buf_idx;
      ring->slot[i].flags = 0;
      ring->slot[i].len = size;
      nm_pkt_copy(buf, NETMAP_BUF(ring, idx), size);
      g_nmd->cur_tx_ring = j;
      ring->head = ring->cur = nm_ring_next(ring, i); 

      g_config.npkts++;

      return size;
   }

flush:   
   /* flush any remaining packets */
   //printf("flush \n");
   ioctl(fds.fd, NIOCTXSYNC, NULL);
   /* final part: wait for all the TX queues to be empty. */
   for (i = g_nmd->first_tx_ring; i <= g_nmd->last_tx_ring; i++) {
      struct netmap_ring *txring = NETMAP_TXRING(nifp, i);
      while (nm_tx_pending(txring)) {
         ioctl(fds.fd, NIOCTXSYNC, NULL);
         usleep(1); /* wait 1 tick */
      }
   }
   attemps++; 
   goto resend;

fail:
   printf("send_packet: failed to send\n");
   return error;
}
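
A hedged caller sketch for usnet_send_frame(), relying only on the head and mlen fields read above; real usn_mbuf_t allocation in libusnet may involve more setup than shown here.

/* Illustrative only: wrap a prebuilt Ethernet frame in a stack usn_mbuf_t
 * and hand it to usnet_send_frame(). Only the head/mlen fields used by the
 * function above are filled in; other mbuf fields are left zeroed. */
static int32
send_frame_example(u_char *frame, u_int len)
{
   usn_mbuf_t m;
   int32      ret;

   memset(&m, 0, sizeof(m));
   m.head = frame;
   m.mlen = len;

   ret = usnet_send_frame(&m);
   if (ret < 0)
      printf("send_frame_example: error %d\n", ret);
   return ret;
}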