Example 1
/*
 * move up to 'limit' pkts from rxring to txring swapping buffers.
 */
static int
process_rings(struct netmap_ring *rxring, struct netmap_ring *txring,
	      u_int limit, const char *msg)
{
	u_int j, k, m = 0;

	/* print a warning if any of the ring flags is set (e.g. NM_REINIT) */
	if (rxring->flags || txring->flags)
		D("%s rxflags %x txflags %x",
			msg, rxring->flags, txring->flags);
	j = rxring->cur; /* RX */
	k = txring->cur; /* TX */
	if (rxring->avail < limit)
		limit = rxring->avail;
	if (txring->avail < limit)
		limit = txring->avail;
	m = limit;
	while (limit-- > 0) {
		struct netmap_slot *rs = &rxring->slot[j];
		struct netmap_slot *ts = &txring->slot[k];
		uint32_t pkt;

		/* swap packets */
		if (ts->buf_idx < 2 || rs->buf_idx < 2) {
			D("wrong index rx[%d] = %d  -> tx[%d] = %d",
				j, rs->buf_idx, k, ts->buf_idx);
			sleep(2);
		}
		pkt = ts->buf_idx;
		ts->buf_idx = rs->buf_idx;
		rs->buf_idx = pkt;

		/* copy the packet length. */
		if (rs->len < 14 || rs->len > 2048)
			D("wrong len %d rx[%d] -> tx[%d]", rs->len, j, k);
		else if (verbose > 1)
			D("%s send len %d rx[%d] -> tx[%d]", msg, rs->len, j, k);
		ts->len = rs->len;

		/* report the buffer change. */
		ts->flags |= NS_BUF_CHANGED;
		rs->flags |= NS_BUF_CHANGED;
		j = NETMAP_RING_NEXT(rxring, j);
		k = NETMAP_RING_NEXT(txring, k);
	}
	rxring->avail -= m;
	txring->avail -= m;
	rxring->cur = j;
	txring->cur = k;
	if (verbose && m > 0)
		D("%s sent %d packets to %p", msg, m, txring);

	return (m);
}
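A minimal sketch of the poll loop that typically drives process_rings(), modeled on netmap's bridge example; the file descriptors, netmap_if pointers, and single ring pair are illustrative assumptions, not part of the original.

#include <poll.h>
#include <net/netmap_user.h>

static void
bridge_loop(int fd_a, int fd_b, struct netmap_if *nifp_a,
    struct netmap_if *nifp_b, u_int burst)
{
	struct pollfd pollfd[2] = {
		{ .fd = fd_a, .events = POLLIN },
		{ .fd = fd_b, .events = POLLIN },
	};

	for (;;) {
		/* wait until at least one direction has packets pending */
		if (poll(pollfd, 2, 2500) <= 0)
			continue;
		/* forward in both directions on ring pair 0 */
		process_rings(NETMAP_RXRING(nifp_a, 0),
		    NETMAP_TXRING(nifp_b, 0), burst, "a->b");
		process_rings(NETMAP_RXRING(nifp_b, 0),
		    NETMAP_TXRING(nifp_a, 0), burst, "b->a");
	}
}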
Example 2
int
pcap_inject(pcap_t *p, const void *buf, size_t size)
{
	struct my_ring *me = p;
	u_int si;

	ND("size %d", (int)size);
	/* scan all rings */
	for (si = me->begin; si < me->end; si++) {
		struct netmap_ring *ring = NETMAP_TXRING(me->nifp, si);

		ND("ring has %d pkts", ring->avail);
		if (ring->avail == 0)
			continue;
		u_int i = ring->cur;
		u_int idx = ring->slot[i].buf_idx;
		if (idx < 2) {
			D("%s bogus TX index %d at offset %d",
				me->nifp->ni_name, idx, i);
			sleep(2);
		}
		u_char *dst = (u_char *)NETMAP_BUF(ring, idx);
		ring->slot[i].len = size;
		pkt_copy(buf, dst, size);
		ring->cur = NETMAP_RING_NEXT(ring, i);
		ring->avail--;
		// if (ring->avail == 0) ioctl(me->fd, NIOCTXSYNC, NULL);
		return size;
	}
	errno = ENOBUFS;
	return -1;
}
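A usage sketch (an assumption, suggested by the commented-out ioctl above): when pcap_inject() fails with ENOBUFS, sync the TX rings and retry.

#include <errno.h>
#include <sys/ioctl.h>

static int
inject_retry(pcap_t *p, struct my_ring *me, const void *buf, size_t size)
{
	int n;

	while ((n = pcap_inject(p, buf, size)) < 0 && errno == ENOBUFS)
		ioctl(me->fd, NIOCTXSYNC, NULL);	/* flush TX, free slots */
	return (n);
}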
Example 3
void
ether_bridge(struct nm_if *nmif, int ring, char *inbuf, int len)
{
	char *buf;
	struct netmap_if *ifp;
	struct netmap_ring *nring;
	struct nm_if *parentif;

	parentif = NETMAP_PARENTIF(nmif);
	ifp = parentif->nm_if_ifp;
	if (NETMAP_HOST_RING(parentif, ring))
		nring = netmap_hw_tx_ring(ifp);	/* from host ring to hardware */
	else
		nring = NETMAP_TXRING(ifp, ifp->ni_tx_rings);	/* from hardware to the host ring */

	buf = NETMAP_GET_BUF(nring);
	if (buf == NULL) {
		DPRINTF("%s: no available buffer for tx (%s).\n",
		    __func__, nmif->nm_if_name);
		parentif->nm_if_txsync = 1;
		pktcnt.tx_drop++;
		return;
	}
	/* Copy the payload. */
	memcpy(buf, inbuf, len);

	NETMAP_UPDATE_LEN(nring, len);

	/* Update the current ring slot. */
	NETMAP_RING_NEXT(nring);

	pktcnt.tx_pkts++;
	parentif->nm_if_txsync = 1;
}
Example 4
static int fio_fill_dns_pkt(struct netmap_ring *ring, struct fio_txdata *pkt, u_int count)
{
    u_int sent, cur = ring->cur;
    struct netmap_slot *slot;
    char *p;
    static uint16_t hid = 2; 

    if (ring->avail < count)
        count = ring->avail;

    for (sent = 0; sent < count; sent++)
    {
        slot = &ring->slot[cur];
        p = NETMAP_BUF(ring, slot->buf_idx);

        memcpy(pkt->pdata, &hid, 2);

        struct pktudp *ppkt = (struct pktudp*)pkt->pbuf;
        ppkt->udp.uh_sum = 0;
        fixCheckSumUDP(&ppkt->udp, (struct iphdr*)(&ppkt->ip), ppkt->body);

        memcpy(p, pkt->pbuf, pkt->size);
        // hid++;

        slot->len = pkt->size;
        //if (sent == count - 1)
        //    slot->flags |= NS_REPORT;
        cur = NETMAP_RING_NEXT(ring, cur);
    }

    ring->avail -= sent;
    ring->cur = cur;

    return (sent);
}
Example 5
static void
netmap_read(evutil_socket_t fd, short event, void *data)
{
	char *buf;
	int err, i, pkts, rx_rings;
	struct netmap_if *ifp;
	struct netmap_ring *nring;
	struct nm_if *nmif;

	nmif = (struct nm_if *)data;
	ifp = nmif->nm_if_ifp;
	rx_rings = ifp->ni_rx_rings;
	if (!nohostring && !nmif->nm_if_vale)
		rx_rings++;	/* also scan the host ring */
	pkts = 0;
	for (i = 0; i < rx_rings; i++) {
		nring = NETMAP_RXRING(ifp, i);
		while (!nm_ring_empty(nring)) {
			buf = NETMAP_GET_BUF(nring);
			err = ether_input(nmif, i, buf, NETMAP_SLOT_LEN(nring));
			/* Send the packet to hw <-> host bridge. */
			if (!nohostring && err == 1)
				err = ether_bridge(nmif, i, buf,
				    NETMAP_SLOT_LEN(nring));
			NETMAP_RING_NEXT(nring);
			if (err < 0 || ++pkts == burst)
				goto done;
		}
	}
done:
	if_netmap_txsync();
}
Example 6
void
if_netmap_rxsetslot(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t index)
{
	struct netmap_ring *rxr = ctx->hw_rx_ring;
	uint32_t cur = *slotno;

	rxr->slot[cur].buf_idx = index;
	rxr->slot[cur].flags |= NS_BUF_CHANGED;
	*slotno = NETMAP_RING_NEXT(rxr, cur);
}
Example 7
static void move(int n, struct netmap_ring *rx, struct netmap_ring *tx)
{
	uint32_t tmp;

	while (n-- > 0) {
		tmp = tx->slot[tx->cur].buf_idx;

		tx->slot[tx->cur].buf_idx = rx->slot[rx->cur].buf_idx;
		tx->slot[tx->cur].len     = rx->slot[rx->cur].len;
		tx->slot[tx->cur].flags  |= NS_BUF_CHANGED;
		tx->cur = NETMAP_RING_NEXT(tx, tx->cur);
		tx->avail--;

		rx->slot[rx->cur].buf_idx = tmp;
		rx->slot[rx->cur].flags  |= NS_BUF_CHANGED;
		rx->cur = NETMAP_RING_NEXT(rx, rx->cur);
		rx->avail--;
	}
}
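A hedged usage sketch (not from the original): forward as many packets as both rings can take in one pass; a subsequent poll() or NIOCTXSYNC/NIOCRXSYNC on the netmap fd publishes the updated cur/avail values.

static void
forward_once(struct netmap_ring *rx, struct netmap_ring *tx)
{
	/* bounded by whichever ring has fewer slots available */
	u_int n = rx->avail < tx->avail ? rx->avail : tx->avail;

	if (n > 0)
		move(n, rx, tx);
}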
Example 8
void *
if_netmap_rxslot(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t *len, uint32_t *index)
{
	struct netmap_ring *rxr = ctx->hw_rx_ring;
	uint32_t cur = *slotno;

	*slotno = NETMAP_RING_NEXT(rxr, cur); 
	*len = rxr->slot[cur].len;
	*index = rxr->slot[cur].buf_idx;
	return (NETMAP_BUF(rxr, rxr->slot[cur].buf_idx));
}
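An illustrative consumption loop for if_netmap_rxslot(); handle_packet and the caller-supplied avail count are hypothetical stand-ins for the driver's own bookkeeping, not part of the original API.

static void
drain_rx(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t avail)
{
	uint32_t len, index;
	void *pkt;

	while (avail-- > 0) {
		/* advances *slotno and returns the current slot's buffer */
		pkt = if_netmap_rxslot(ctx, slotno, &len, &index);
		handle_packet(pkt, len);	/* hypothetical consumer */
	}
}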
Example 9
/*
 * create and enqueue a batch of packets on a ring.
 * On the last one set NS_REPORT to tell the driver to generate
 * an interrupt when done.
 */
static int
send_packets(struct netmap_ring *ring, struct pkt *pkt, 
		int size, u_int count, int options)
{
	u_int sent, cur = ring->cur;

	if (ring->avail < count)
		count = ring->avail;

#if 0
	if (options & (OPT_COPY | OPT_PREFETCH) ) {
		for (sent = 0; sent < count; sent++) {
			struct netmap_slot *slot = &ring->slot[cur];
			char *p = NETMAP_BUF(ring, slot->buf_idx);

			prefetch(p);
			cur = NETMAP_RING_NEXT(ring, cur);
		}
		cur = ring->cur;
	}
#endif
	for (sent = 0; sent < count; sent++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (options & OPT_COPY)
			pkt_copy(pkt, p, size);
		else if (options & OPT_MEMCPY)
			memcpy(p, pkt, size);
		else if (options & OPT_PREFETCH)
			prefetch(p);
		slot->len = size;
		if (sent == count - 1)
			slot->flags |= NS_REPORT;
		cur = NETMAP_RING_NEXT(ring, cur);
	}
	ring->avail -= sent;
	ring->cur = cur;

	return (sent);
}
Example 10
void *
if_netmap_txslot(struct if_netmap_host_context *ctx, uint32_t *slotno, uint32_t len)
{
	struct netmap_ring *txr = ctx->hw_tx_ring;
	uint32_t cur = *slotno;
		
	assert(len <= txr->nr_buf_size);

	txr->slot[cur].len = len;
	*slotno = NETMAP_RING_NEXT(txr, cur); 
	return (NETMAP_BUF(txr, txr->slot[cur].buf_idx));
}
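An illustrative TX-side counterpart (an assumption): copy each frame into the buffer returned by if_netmap_txslot(), which records the length in the current slot and advances the cursor.

#include <string.h>

static void
send_frames(struct if_netmap_host_context *ctx, uint32_t *slotno,
    const void *frame, uint32_t len, uint32_t avail)
{
	while (avail-- > 0)
		memcpy(if_netmap_txslot(ctx, slotno, len), frame, len);
}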
Example 11
static inline void
peak_netmap_drop(struct _peak_netmap *packet)
{
	struct netmap_ring *source = packet->ring;

	/* drop kernel reference */
	source->cur = NETMAP_RING_NEXT(source, packet->i);
	--source->avail;

	/* drop userland reference */
	NETPKT_PUT(packet);
}
Example 12
/**
 * A call to tx_sync_ring will try to empty a Netmap TX ring by converting its
 * buffers into rte_mbufs and sending them out on the rings's dpdk port.
 */
static int
tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	struct rte_mempool *pool, uint16_t max_burst)
{
	uint32_t i, n_tx;
	uint16_t burst_size;
	uint32_t cur_slot, n_used_slots;
	struct rte_mbuf *tx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_used_slots = ring->num_slots - ring->avail;
	n_used_slots = RTE_MIN(n_used_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

	while (n_used_slots) {
		burst_size = (uint16_t)RTE_MIN(n_used_slots, RTE_DIM(tx_mbufs));

		for (i = 0; i < burst_size; i++) {
			tx_mbufs[i] = rte_pktmbuf_alloc(pool);
			if (tx_mbufs[i] == NULL)
				goto err;

			slot_to_mbuf(ring, cur_slot, tx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		n_tx = rte_eth_tx_burst(port, ring_number, tx_mbufs,
			burst_size);

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_tx;
		n_used_slots -= n_tx;

		/* Return the mbufs that failed to transmit to their pool */
		if (unlikely(n_tx != burst_size)) {
			for (i = n_tx; i < burst_size; i++)
				rte_pktmbuf_free(tx_mbufs[i]);
			break;
		}
	}

	return 0;

err:
	/* free the mbufs allocated so far (indices 0 .. i-1) */
	while (i-- > 0)
		rte_pktmbuf_free(tx_mbufs[i]);

	RTE_LOG(ERR, USER1,
		"Couldn't get mbuf from mempool; is the mempool too small?\n");
	return -1;
}
Example 13
int
pcap_dispatch(pcap_t *p, int cnt, pcap_handler callback, u_char *user)
{
	struct pcap_ring *pme = p;
	struct my_ring *me = &pme->me;
	int got = 0;
	u_int si;

	ND("cnt %d", cnt);
	if (cnt == 0)
		cnt = -1;
	/* scan all rings */
	for (si = me->begin; si < me->end; si++) {
		struct netmap_ring *ring = NETMAP_RXRING(me->nifp, si);
		ND("ring has %d pkts", ring->avail);
		if (ring->avail == 0)
			continue;
		pme->hdr.ts = ring->ts;
		/*
		 * XXX a proper prefetch should be done as
		 *	prefetch(i); callback(i-1); ...
		 */
		while ((cnt == -1 || cnt != got) && ring->avail > 0) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			if (idx < 2) {
				D("%s bogus RX index %d at offset %d",
					me->nifp->ni_name, idx, i);
				sleep(2);
			}
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);
			prefetch(buf);
			pme->hdr.len = pme->hdr.caplen = ring->slot[i].len;
			// D("call %p len %d", p, me->hdr.len);
			callback(user, &pme->hdr, buf);
			ring->cur = NETMAP_RING_NEXT(ring, i);
			ring->avail--;
			got++;
		}
	}
	pme->st.ps_recv += got;
	return got;
}
Example 14
/**
 * A call to rx_sync_ring will try to fill a Netmap RX ring with as many
 * packets as it can hold coming from its dpdk port.
 */
static inline int
rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
	uint16_t max_burst)
{
	int32_t i, n_rx;
	uint16_t burst_size;
	uint32_t cur_slot, n_free_slots;
	struct rte_mbuf *rx_mbufs[COMPAT_NETMAP_MAX_BURST];

	n_free_slots = ring->num_slots - (ring->avail + ring->reserved);
	n_free_slots = RTE_MIN(n_free_slots, max_burst);
	cur_slot = (ring->cur + ring->avail) & (ring->num_slots - 1);

	while (n_free_slots) {
		burst_size = (uint16_t)RTE_MIN(n_free_slots, RTE_DIM(rx_mbufs));

		/* receive up to burst_size packets from the NIC's queue */
		n_rx = rte_eth_rx_burst(port, ring_number, rx_mbufs,
			burst_size);

		if (n_rx == 0)
			return 0;
		if (unlikely(n_rx < 0))
			return -1;

		/* Put those n_rx packets in the Netmap structures */
		for (i = 0; i < n_rx ; i++) {
			mbuf_to_slot(rx_mbufs[i], ring, cur_slot);
			rte_pktmbuf_free(rx_mbufs[i]);
			cur_slot = NETMAP_RING_NEXT(ring, cur_slot);
		}

		/* Update the Netmap ring structure to reflect the change */
		ring->avail += n_rx;
		n_free_slots -= n_rx;
	}

	return 0;
}
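A simplified sketch of how the two sync routines pair up per ring (an assumption; the real DPDK netmap_compat example drives them from its NIOCRXSYNC/NIOCTXSYNC ioctl emulation).

static void
sync_all_rings(struct netmap_if *nifp, uint8_t port,
	struct rte_mempool *pool, uint16_t max_burst)
{
	uint16_t i;

	for (i = 0; i < nifp->ni_rx_rings; i++)
		rx_sync_ring(NETMAP_RXRING(nifp, i), port, i, max_burst);
	for (i = 0; i < nifp->ni_tx_rings; i++)
		tx_sync_ring(NETMAP_TXRING(nifp, i), port, i, pool, max_burst);
}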
Example 15
static int
receive_packets(struct netmap_ring *ring, u_int limit, int skip_payload)
{
	u_int cur, rx;

	cur = ring->cur;
	if (ring->avail < limit)
		limit = ring->avail;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (!skip_payload)
			check_payload(p, slot->len);

		cur = NETMAP_RING_NEXT(ring, cur);
	}
	ring->avail -= rx;
	ring->cur = cur;

	return (rx);
}
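A sketch of a pkt-gen style receiver loop around receive_packets(); the names and ring bounds are illustrative assumptions.

#include <poll.h>
#include <net/netmap_user.h>

static void
receiver_loop(int fd, struct netmap_if *nifp, u_int nrings, u_int burst)
{
	struct pollfd fds = { .fd = fd, .events = POLLIN };
	u_int i;

	for (;;) {
		if (poll(&fds, 1, 1000) <= 0)
			continue;	/* timeout or error, retry */
		for (i = 0; i < nrings; i++) {
			struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
			if (ring->avail > 0)
				receive_packets(ring, burst, 0 /* verify payload */);
		}
	}
}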
Example 16
int sendpacket_send_netmap(void *p, const u_char *data, size_t len)
{
    sendpacket_t *sp = p;
    struct netmap_ring *txring;
    struct netmap_slot *slot;
    char *pkt;
    uint32_t cur, avail;

    if (sp->abort)
        return 0;

    txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    while ((avail = nm_ring_space(txring)) == 0) {
        /* out of space on current TX queue - go to next */
        ++sp->cur_tx_ring;
        if (sp->cur_tx_ring > sp->last_tx_ring) {
            /*
             * out of space on all queues
             *
             * we have looped through all configured TX queues
             * so we have to reset to the first queue and
             * wait for available space
             */
            sp->cur_tx_ring = sp->first_tx_ring;

            /* send TX interrupt signal
             *
             * On Linux this makes one slot free on the
             * ring, which increases speed by about 10Mbps.
             *
             * But it will never free up all the slots. For
             * that we must poll and call again.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);

            /* loop again */
            return -2;
        }

        txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    }

    /*
     * send
     */
    cur = txring->cur;
    slot = &txring->slot[cur];
    slot->flags = 0;
    pkt = NETMAP_BUF(txring, slot->buf_idx);
    memcpy(pkt, data, min(len, txring->nr_buf_size));
    slot->len = len;

    if (avail <= 1)
        slot->flags = NS_REPORT;

    dbgx(3, "netmap cur=%d slot index=%d flags=0x%x empty=%d avail=%u bufsize=%d\n",
            cur, slot->buf_idx, slot->flags, NETMAP_TX_RING_EMPTY(txring),
            nm_ring_space(txring), txring->nr_buf_size);

    /* let kernel know that packet is available */
    cur = NETMAP_RING_NEXT(txring, cur);
#ifdef HAVE_NETMAP_RING_HEAD_TAIL
    txring->head = cur;
#else
    txring->avail--;
#endif
    txring->cur = cur;

    return len;
}
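Usage sketch (illustrative): per the comments above, a return of -2 means every TX queue was full and has been synced, so the caller simply loops until the packet is accepted.

static int
send_retry(sendpacket_t *sp, const u_char *data, size_t len)
{
	int n;

	do {
		n = sendpacket_send_netmap(sp, data, len);
	} while (n == -2);	/* rings were synced inside; try again */
	return (n);
}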
Example 17
void *dispatcher(void *threadarg) {
  assert(threadarg);

  struct thread_context *context;
  struct thread_context *contexts;
  int rv;
  struct netmap_ring *rxring;
  struct ethernet_pkt *etherpkt;
  struct pollfd pfd;
  struct dispatcher_data *data;
  uint32_t *slots_used, *open_transactions;
  uint32_t i, arpd_idx, num_threads;
  char *buf;
  struct msg_hdr *msg;
  struct ether_addr *mac;

  context = (struct thread_context *)threadarg;
  contexts = context->shared->contexts;
  data = context->data;
  arpd_idx = context->shared->arpd_idx;
  mac = &context->shared->if_info->mac;
  num_threads = context->shared->num_threads;

  struct transaction *transactions[num_threads];
  uint64_t dropped[num_threads];
  for (i=0; i < num_threads; i++) {
    transactions[i] = NULL;
    dropped[i] = 0;
  }

  rv = dispatcher_init(context);
  if (!rv) {
    pthread_exit(NULL);
  }

  rxring = NETMAP_RXRING(data->nifp, 0);
  slots_used = bitmap_new(rxring->num_slots);
  if (!slots_used)
    pthread_exit(NULL);

  open_transactions = bitmap_new(num_threads);
  if (!open_transactions)
    pthread_exit(NULL);

  pfd.fd = data->fd;
  pfd.events = (POLLIN);

  printf("dispatcher[%d]: initialized\n", context->thread_id);
  // signal to main() that we are initialized
  atomic_store_explicit(&context->initialized, 1, memory_order_release);

  for (;;) {
    rv = poll(&pfd, 1, POLL_TIMEOUT);

    // read all packets from the ring
    if (rv > 0) {
      for (; rxring->avail > 0; rxring->avail--) {
        i = rxring->cur;
        rxring->cur = NETMAP_RING_NEXT(rxring, i);
        rxring->reserved++;
        buf = NETMAP_BUF(rxring, rxring->slot[i].buf_idx);
        etherpkt = (struct ethernet_pkt *)(void *)buf;

        // TODO: consider pushing this check to the workers
        if (!ethernet_is_valid(etherpkt, mac)) {
          if (rxring->reserved == 1)
            rxring->reserved = 0;
          continue;
        }

        // TODO: dispatch to n workers instead of just 0
        switch (etherpkt->h.ether_type) {
          case IP4_ETHERTYPE:
            rv = tqueue_insert(contexts[0].pkt_recv_q,
                               &transactions[0], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                bitmap_set(slots_used, i);
                bitmap_set(open_transactions, 0);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                bitmap_clear(open_transactions, 0);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[0]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          case ARP_ETHERTYPE:
            rv = tqueue_insert(contexts[arpd_idx].pkt_recv_q,
                               &transactions[arpd_idx], (char *) NULL + i);
            switch (rv) {
              case TQUEUE_SUCCESS:
                tqueue_publish_transaction(contexts[arpd_idx].pkt_recv_q,
                                            &transactions[arpd_idx]);
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_TRANSACTION_FULL:
                bitmap_set(slots_used, i);
                break;
              case TQUEUE_FULL:
                // just drop packet and do accounting
                dropped[arpd_idx]++;
                if (rxring->reserved == 1)
                  rxring->reserved = 0;
                break;
            }
            break;
          default:
            printf("dispatcher[%d]: unknown/unsupported ethertype %hu\n",
                    context->thread_id, etherpkt->h.ether_type);
            if (rxring->reserved == 1)
              rxring->reserved = 0;
        } // switch (ethertype)
      } // for (rxring)

      // publish any open transactions so that the worker can start on it
      for (i=0; i < num_threads; i++) {
        if (bitmap_get(open_transactions, i))
          tqueue_publish_transaction(contexts[i].pkt_recv_q, &transactions[i]);
      }
      bitmap_clearall(open_transactions, num_threads);
    } // if (packets)

    // read the message queue
    rv = squeue_enter(context->msg_q, 1);
    if (!rv)
      continue;
    while ((msg = squeue_get_next_pop_slot(context->msg_q)) != NULL) {
      switch (msg->msg_type) {
        case MSG_TRANSACTION_UPDATE:
          update_slots_used(context->msg_q, slots_used, rxring);
          break;
        case MSG_TRANSACTION_UPDATE_SINGLE:
          update_slots_used_single((void *)msg, slots_used, rxring);
          break;
        default:
          printf("dispatcher: unknown message %hu\n", msg->msg_type);
      }
    }
    squeue_exit(context->msg_q);

  } // for(;;)

  pthread_exit(NULL);
}
Example 18
int
ether_output(struct nm_if *nmif, struct in_addr *dst, struct ether_addr *lladdr,
	unsigned short ether_type, char *inbuf, int inlen)
{
	char *buf;
	int err, len;
	struct arp *arp;
	struct ether_header *eh;
	struct ether_vlan_header *evl;
	struct netmap_ring *ring;
	struct nm_if *parentif;

	if (lladdr == NULL) {
		err = arp_search_if(nmif, dst, &arp);
		if (err != 0)
			return (err);
	}

	parentif = NETMAP_PARENTIF(nmif);
	ring = netmap_hw_tx_ring(parentif->nm_if_ifp);
	if (ring == NULL) {
		DPRINTF("%s: no available ring for tx (%s).\n",
		    __func__, parentif->nm_if_name);
		parentif->nm_if_txsync = 1;
		pktcnt.tx_drop++;
		return (-1);
	}
	if (inlen + ETHER_HDR_LEN > ring->nr_buf_size) {
		DPRINTF("%s: buffer too big, cannot tx.\n", __func__);
		pktcnt.tx_drop++;
		return (-1);
	}
	buf = NETMAP_GET_BUF(ring);
	if (buf == NULL) {
		DPRINTF("%s: no available buffer for tx (%s).\n",
		    __func__, parentif->nm_if_name);
		parentif->nm_if_txsync = 1;
		pktcnt.tx_drop++;
		return (-1);
	}

	if (NETMAP_VLANIF(nmif)) {
		/* Copy the ethernet vlan header. */
		evl = (struct ether_vlan_header *)buf;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(nmif->nm_if_vtag);
		evl->evl_proto = htons(ether_type);
		if (lladdr != NULL)
			memcpy(evl->evl_dhost, lladdr, sizeof(evl->evl_dhost));
		else
			memcpy(evl->evl_dhost, &arp->lladdr,
			    sizeof(evl->evl_dhost));
		memcpy(evl->evl_shost, LLADDR(&nmif->nm_if_dl),
		    sizeof(evl->evl_shost));
		len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		/* Copy the ethernet header. */
		eh = (struct ether_header *)buf;
		eh->ether_type = htons(ether_type);
		if (lladdr != NULL)
			memcpy(eh->ether_dhost, lladdr,
			    sizeof(eh->ether_dhost));
		else
			memcpy(eh->ether_dhost, &arp->lladdr,
			    sizeof(eh->ether_dhost));
		memcpy(eh->ether_shost, LLADDR(&nmif->nm_if_dl),
		    sizeof(eh->ether_shost));
		len = ETHER_HDR_LEN;
	}

	/* Copy the payload. */
	memcpy(buf + len, inbuf, inlen);
	len += inlen;

	NETMAP_UPDATE_LEN(ring, len);

//DPRINTF("%s: len: %d\n", __func__, len);
//if (verbose) hexdump(buf, len, NULL, 0);

	/* Update the current ring slot. */
	NETMAP_RING_NEXT(ring);

	pktcnt.tx_pkts++;
	parentif->nm_if_txsync = 1;

	return (0);
}
Example 19
int sendpacket_send_netmap(void *p, const u_char *data, size_t len)
{
    int retcode = 0;
    sendpacket_t *sp = p;
    struct netmap_ring *txring;
    struct netmap_slot *slot;
    char *pkt;
    uint32_t cur, avail;

    if (sp->abort)
        return retcode;

    txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    while ((avail = nm_ring_space(txring)) == 0) {
        /* out of space on current TX queue - go to next */
        ++sp->cur_tx_ring;
        if (sp->cur_tx_ring > sp->last_tx_ring) {
            /*
             * out of space on all queues
             *
             * we have looped through all configured TX queues
             * so we have to reset to the first queue and
             * wait for available space
             */
            struct pollfd pfd;

            sp->cur_tx_ring = sp->first_tx_ring;

            /* send TX interrupt signal
             *
             * On Linux this makes one slot free on the
             * ring, which increases speed by about 10Mbps.
             *
             * But it will never free up all the slots. For
             * that we must poll and call again.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);

            pfd.fd = sp->handle.fd;
            pfd.events = POLLOUT;
            pfd.revents = 0;
            if (poll(&pfd, 1, 1000) <= 0) {
                if (++sp->tx_timeouts == NETMAP_TX_TIMEOUT_SEC) {
                    return -1;
                }
                return -2;
            }

            sp->tx_timeouts = 0;

            /*
             * Do not remove this even though it looks redundant.
             * Overall performance is increased with this restart
             * of the TX queue.
             *
             * This call increases the number of available slots from
             * 1 to all that are truly available.
             */
            ioctl(sp->handle.fd, NIOCTXSYNC, NULL);
        }

        txring = NETMAP_TXRING(sp->nm_if, sp->cur_tx_ring);
    }

    /*
     * send
     */
    cur = txring->cur;
    slot = &txring->slot[cur];
    slot->flags = 0;
    pkt = NETMAP_BUF(txring, slot->buf_idx);
    memcpy(pkt, data, min(len, txring->nr_buf_size));
    slot->len = len;

    if (avail <= 1)
        slot->flags = NS_REPORT;

    dbgx(3, "netmap cur=%d slot index=%d flags=0x%x empty=%d avail=%u bufsize=%d\n",
            cur, slot->buf_idx, slot->flags, NETMAP_TX_RING_EMPTY(txring),
            nm_ring_space(txring), txring->nr_buf_size);

    /* let kernel know that packet is available */
    cur = NETMAP_RING_NEXT(txring, cur);
#ifdef HAVE_NETMAP_RING_HEAD_TAIL
    txring->head = cur;
#else
    txring->avail--;
#endif
    txring->cur = cur;
    retcode = len;

    return retcode;
}
Example 20
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	int i, rx = 0, n = targ->g->npackets;

	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);
	static uint32_t sent;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000, av = 0;

	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
	    for (i = 0; i < 1; i++) {
		slot = &ring->slot[ring->cur];
		slot->len = targ->g->pkt_size;
		p = NETMAP_BUF(ring, slot->buf_idx);

		if (ring->avail == 0) {
			D("-- ouch, cannot send");
		} else {
			pkt_copy(&targ->pkt, p, targ->g->pkt_size);
			clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
			bcopy(&sent, p+42, sizeof(sent));
			bcopy(&ts, p+46, sizeof(ts));
			sent++;
			ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
			ring->avail--;
		}
	    }
		/* should use a parameter to decide how often to send */
		if (poll(fds, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d", targ->me);
			continue;
		}
		/* see what we got back */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (ring->avail > 0) {
				uint32_t seq;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);

				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				bcopy(p+46, &ts, sizeof(ts));
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (0) D("seq %d/%d delta %d.%09d", seq, sent,
					(int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->avail--;
				ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d",
				count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}
Example 21
/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;
	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(fds[0].fd, NIOCRXSYNC, NULL);
#else
		if (poll(fds, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d", targ->me);
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = txring->avail;
		/* see what we got back */
		for (i = targ->qfirst; i < targ->qlast; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (rxring->avail > 0) {
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->avail--;
				rxring->cur = NETMAP_RING_NEXT(rxring, cur);
				rx++;
				if (txavail == 0)
					continue;
				dst = NETMAP_BUF(txring,
				    txring->slot[txcur].buf_idx);
				/* copy... */
				pkt_copy(src, dst, slot->len);
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = NETMAP_RING_NEXT(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->cur = txcur;
		txring->avail = txavail;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(fds[0].fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}
Example 22
unsigned int
peak_netmap_forward(struct peak_netmap *u_packet, const char *ifname)
{
	/* assume the user is sane enough to not pass NULL */
	struct _peak_netmap *packet = NETPKT_FROM_USER(u_packet);
	struct netmap_ring *source = packet->ring;
	struct netmap_ring *drain;
	struct my_ring *me;
	unsigned int i, di;

	if (!source) {
		/* packet is empty */
		return (0);
	}

	i = peak_netmap_find(ifname);
	if (i >= NETMAP_COUNT()) {
		/* not found means drop */
		peak_netmap_drop(packet);
		return (0);
	}

	me = self->me[i];

	for (di = me->begin; di < me->end; ++di) {
		struct netmap_slot *rs, *ts;
		uint32_t pkt;

		drain = NETMAP_TXRING(me->nifp, di);
		if (!drain->avail) {
			continue;
		}

		i = drain->cur;

		rs = &source->slot[packet->i];
		ts = &drain->slot[i];

		pkt = ts->buf_idx;
		ts->buf_idx = rs->buf_idx;
		rs->buf_idx = pkt;

		ts->len = rs->len;

		/* report the buffer change */
		ts->flags |= NS_BUF_CHANGED;
		rs->flags |= NS_BUF_CHANGED;

		--source->avail;
		--drain->avail;

		source->cur = NETMAP_RING_NEXT(source, packet->i);
		drain->cur = NETMAP_RING_NEXT(drain, i);

		NETPKT_PUT(packet);

		return (0);
	}

	/* could not release packet, try again */
	return (1);
}