int
pcap_inject(pcap_t *p, const void *buf, size_t size)
{
    struct my_ring *me = p;
    u_int si;

    ND("sending %d bytes", (int)size);
    /* scan all rings */
    for (si = me->begin; si < me->end; si++) {
        struct netmap_ring *ring = NETMAP_TXRING(me->nifp, si);

        ND("ring has %d pkts", ring->avail);
        if (ring->avail == 0)
            continue;
        u_int i = ring->cur;
        u_int idx = ring->slot[i].buf_idx;
        if (idx < 2) {
            D("%s bogus TX index %d at offset %d",
                me->nifp->ni_name, idx, i);
            sleep(2);
        }
        u_char *dst = (u_char *)NETMAP_BUF(ring, idx);
        ring->slot[i].len = size;
        pkt_copy(buf, dst, size);
        ring->cur = NETMAP_RING_NEXT(ring, i);
        ring->avail--;
        // if (ring->avail == 0)
            ioctl(me->fd, NIOCTXSYNC, NULL);
        return size;
    }
    errno = ENOBUFS;
    return -1;
}
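/*
 * pkt_copy() itself is not shown in this collection. A minimal sketch
 * of the netmap fast copy these call sites rely on (assumed definition,
 * modeled on the helper shipped with the netmap examples): copy in
 * 64-byte chunks, which may write past 'l' up to the next multiple of
 * 64 and is therefore only safe for netmap buffers; fall back to
 * memcpy() for large sizes where chunking buys nothing.
 */
static inline void
pkt_copy(const void *_src, void *_dst, int l)
{
    const uint64_t *src = _src;
    uint64_t *dst = _dst;

    if (unlikely(l >= 1024)) {
        memcpy(dst, src, l);
        return;
    }
    for (; likely(l > 0); l -= 64) {    /* one cache line per pass */
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
    }
}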
/*
 * create and enqueue a batch of packets on a ring.
 * On the last one set NS_REPORT to tell the driver to generate
 * an interrupt when done.
 */
static int
send_packets(struct netmap_ring *ring, struct pkt *pkt,
        int size, u_int count, int options)
{
    u_int sent, cur = ring->cur;

    if (ring->avail < count)
        count = ring->avail;

#if 0
    if (options & (OPT_COPY | OPT_PREFETCH)) {
        for (sent = 0; sent < count; sent++) {
            struct netmap_slot *slot = &ring->slot[cur];
            char *p = NETMAP_BUF(ring, slot->buf_idx);

            prefetch(p);
            cur = NETMAP_RING_NEXT(ring, cur);
        }
        cur = ring->cur;
    }
#endif
    for (sent = 0; sent < count; sent++) {
        struct netmap_slot *slot = &ring->slot[cur];
        char *p = NETMAP_BUF(ring, slot->buf_idx);

        if (options & OPT_COPY)
            pkt_copy(pkt, p, size);
        else if (options & OPT_MEMCPY)
            memcpy(p, pkt, size);
        else if (options & OPT_PREFETCH)
            prefetch(p);

        slot->len = size;
        if (sent == count - 1)
            slot->flags |= NS_REPORT;
        cur = NETMAP_RING_NEXT(ring, cur);
    }
    ring->avail -= sent;
    ring->cur = cur;

    return (sent);
}
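/*
 * Assumed usage sketch (not part of the original sources): a sender
 * loop typically polls for room, queues a batch with send_packets()
 * and flushes it with NIOCTXSYNC. 'fd' is the netmap file descriptor
 * the ring was mapped from; 'todo' and OPT_COPY are illustrative.
 */
static void
sender_sketch(int fd, struct netmap_ring *ring, struct pkt *pkt, int size)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT };
    u_int todo = 1000;                  /* packets left to send */

    while (todo > 0) {
        if (poll(&pfd, 1, 1000) <= 0)   /* wait for free slots */
            continue;
        todo -= send_packets(ring, pkt, size, todo, OPT_COPY);
        ioctl(fd, NIOCTXSYNC, NULL);    /* hand the batch to the NIC */
    }
}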
static ssize_t
netmap_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t i;
    uint32_t idx;
    uint8_t *dst;

    if (unlikely(!ring)) {
        /* Drop. */
        return size;
    }

    if (unlikely(size > ring->nr_buf_size)) {
        RD(5, "[netmap_receive] drop packet of size %d > %d\n",
           (int)size, ring->nr_buf_size);
        return size;
    }

    if (nm_ring_empty(ring)) {
        /* No available slots in the netmap TX ring. */
        netmap_write_poll(s, true);
        return 0;
    }

    i = ring->cur;
    idx = ring->slot[i].buf_idx;
    dst = (uint8_t *)NETMAP_BUF(ring, idx);

    ring->slot[i].len = size;
    ring->slot[i].flags = 0;
    pkt_copy(buf, dst, size);
    ring->cur = ring->head = nm_ring_next(ring, i);
    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return size;
}
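/*
 * For context: when netmap_receive() returns 0 above, QEMU queues the
 * packet and waits for the backend to become writable again. A sketch
 * of the companion POLLOUT callback (assumed, modeled on the QEMU
 * netmap backend): clear the write-poll request and retry the queue.
 */
static void
netmap_writable(void *opaque)
{
    NetmapState *s = opaque;

    netmap_write_poll(s, false);        /* stop watching for POLLOUT */
    qemu_flush_queued_packets(&s->nc);  /* retry the queued packets */
}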
/*
 * move up to 'limit' pkts from rxring to txring swapping buffers.
 */
static int
process_rings(struct netmap_ring *rxring, struct netmap_ring *txring,
        u_int limit, const char *msg)
{
    u_int j, k, m = 0;

    /* print a warning if any of the ring flags is set (e.g. NM_REINIT) */
    if (rxring->flags || txring->flags)
        D("%s rxflags %x txflags %x",
            msg, rxring->flags, txring->flags);
    j = rxring->cur; /* RX */
    k = txring->cur; /* TX */
    m = nm_ring_space(rxring);
    if (m < limit)
        limit = m;
    m = nm_ring_space(txring);
    if (m < limit)
        limit = m;
    m = limit;
    while (limit-- > 0) {
        struct netmap_slot *rs = &rxring->slot[j];
        struct netmap_slot *ts = &txring->slot[k];
#ifdef NO_SWAP
        char *rxbuf = NETMAP_BUF(rxring, rs->buf_idx);
        char *txbuf = NETMAP_BUF(txring, ts->buf_idx);
#else
        uint32_t pkt;
#endif

        /* swap packets */
        if (ts->buf_idx < 2 || rs->buf_idx < 2) {
            D("wrong index rx[%d] = %d -> tx[%d] = %d",
                j, rs->buf_idx, k, ts->buf_idx);
            sleep(2);
        }
#ifndef NO_SWAP
        pkt = ts->buf_idx;
        ts->buf_idx = rs->buf_idx;
        rs->buf_idx = pkt;
#endif
        /* copy the packet length. */
        if (rs->len < 14 || rs->len > 2048)
            D("wrong len %d rx[%d] -> tx[%d]", rs->len, j, k);
        else if (verbose > 1)
            D("%s send len %d rx[%d] -> tx[%d]", msg, rs->len, j, k);
        ts->len = rs->len;
#ifdef NO_SWAP
        pkt_copy(rxbuf, txbuf, ts->len);
#else
        /* report the buffer change. */
        ts->flags |= NS_BUF_CHANGED;
        rs->flags |= NS_BUF_CHANGED;
#endif /* NO_SWAP */
        j = nm_ring_next(rxring, j);
        k = nm_ring_next(txring, k);
    }
    rxring->head = rxring->cur = j;
    txring->head = txring->cur = k;
    if (verbose && m > 0)
        D("%s sent %d packets to %p", msg, m, txring);

    return (m);
}
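/*
 * Assumed surrounding loop (modeled on the netmap bridge example):
 * after poll() reports traffic, walk the RX rings of the source port
 * and the TX rings of the destination port in parallel, letting
 * process_rings() swap buffers between each usable pair. The
 * 'struct my_port' fields are illustrative.
 */
static int
move(struct my_port *src, struct my_port *dst, u_int limit)
{
    struct netmap_ring *txring, *rxring;
    u_int m = 0, si = src->first_rx_ring, di = dst->first_tx_ring;

    while (si <= src->last_rx_ring && di <= dst->last_tx_ring) {
        rxring = NETMAP_RXRING(src->nifp, si);
        txring = NETMAP_TXRING(dst->nifp, di);
        if (nm_ring_empty(rxring)) {    /* nothing to read here */
            si++;
            continue;
        }
        if (nm_ring_empty(txring)) {    /* no room on this output */
            di++;
            continue;
        }
        m += process_rings(rxring, txring, limit, "fwd");
    }
    return (m);
}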
int
pcap_findalldevs(pcap_if_t **alldevsp, char *errbuf)
{
    pcap_if_t *top = NULL;
#ifndef linux
    struct ifaddrs *i_head, *i;
    pcap_if_t *cur;
    struct pcap_addr *tail = NULL;
    int l;

    D("listing all devs");
    *alldevsp = NULL;
    i_head = NULL;

    if (getifaddrs(&i_head)) {
        D("cannot get if addresses");
        return -1;
    }
    for (i = i_head; i; i = i->ifa_next) {
        //struct ifaddrs *ifa;
        struct pcap_addr *pca;
        //struct sockaddr *sa;

        D("got interface %s", i->ifa_name);
        if (!top || strcmp(top->name, i->ifa_name)) {
            /* new interface */
            l = sizeof(*top) + strlen(i->ifa_name) + 1;
            cur = calloc(1, l);
            if (cur == NULL) {
                D("no space for if descriptor");
                continue;
            }
            cur->name = (char *)(cur + 1);
            //cur->flags = i->ifa_flags;
            strcpy(cur->name, i->ifa_name);
            cur->description = NULL;
            cur->next = top;
            top = cur;
            tail = NULL;
        }
        /* now deal with addresses */
        if (i->ifa_addr == NULL)    /* some entries carry no address */
            continue;
        D("%s addr family %d len %d %s %s",
            top->name,
            i->ifa_addr->sa_family, i->ifa_addr->sa_len,
            i->ifa_netmask ? "Netmask" : "",
            i->ifa_broadaddr ? "Broadcast" : "");
        l = sizeof(struct pcap_addr) +
            i->ifa_addr->sa_len +
            (i->ifa_netmask ? i->ifa_netmask->sa_len : 0) +
            (i->ifa_broadaddr ? i->ifa_broadaddr->sa_len : 0);
        pca = calloc(1, l);
        if (pca == NULL) {
            D("no space for if addr");
            continue;
        }
#define SA_NEXT(x) ((struct sockaddr *)((char *)(x) + (x)->sa_len))
        pca->addr = (struct sockaddr *)(pca + 1);
        pkt_copy(i->ifa_addr, pca->addr, i->ifa_addr->sa_len);
        if (i->ifa_netmask) {
            pca->netmask = SA_NEXT(pca->addr);
            bcopy(i->ifa_netmask, pca->netmask, i->ifa_netmask->sa_len);
            if (i->ifa_broadaddr) {
                pca->broadaddr = SA_NEXT(pca->netmask);
                bcopy(i->ifa_broadaddr, pca->broadaddr,
                    i->ifa_broadaddr->sa_len);
            }
        }
        if (tail == NULL) {
            top->addresses = pca;
        } else {
            tail->next = pca;
        }
        tail = pca;
    }
    freeifaddrs(i_head);
#endif /* !linux */
    (void)errbuf;   /* UNUSED */
    *alldevsp = top;
    return 0;
}
static int
agent_send(struct vr_interface *vif, struct vr_packet *pkt,
        void *ifspecific)
{
    int len;
    struct agent_hdr *hdr;
    unsigned char *rewrite;
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);
    struct vr_packet *pkt_c;
    struct agent_send_params *params =
        (struct agent_send_params *)ifspecific;

    vr_preset(pkt);

    if (pkt_head_space(pkt) < AGENT_PKT_HEAD_SPACE) {
        len = pkt_len(pkt);

        if (agent_trap_may_truncate(params->trap_reason)) {
            len = MINIMUM(len, VR_AGENT_MIN_PACKET_LEN);
        }

        /*
         * note: this pkt_copy() is vrouter's packet-clone helper
         * (packet, offset, length), unrelated to the netmap byte
         * copier used elsewhere in this collection.
         */
        pkt_c = pkt_copy(pkt, 0, len);
        if (pkt_c) {
            vr_pfree(pkt, VP_DROP_DUPLICATED);
            pkt = pkt_c;
        }
    }

    hdr = (struct agent_hdr *)pkt_push(pkt, sizeof(struct agent_hdr));
    if (!hdr)
        goto drop;

    hdr->hdr_ifindex = htons(pkt->vp_if->vif_idx);
    hdr->hdr_vrf = htons(params->trap_vrf);
    hdr->hdr_cmd = htons(params->trap_reason);

    switch (params->trap_reason) {
    case AGENT_TRAP_FLOW_MISS:
    case AGENT_TRAP_ECMP_RESOLVE:
    case AGENT_TRAP_SOURCE_MISMATCH:
    case AGENT_TRAP_DIAG:
        if (params->trap_param)
            hdr->hdr_cmd_param = htonl(*(unsigned int *)(params->trap_param));
        break;

    default:
        hdr->hdr_cmd_param = 0;
        break;
    }

    rewrite = pkt_push(pkt, VR_ETHER_HLEN);
    if (!rewrite)
        goto drop;

    memcpy(rewrite, vif->vif_rewrite, VR_ETHER_HLEN);

    return vif->vif_tx(vif, pkt);

drop:
    stats->vis_oerrors++;
    vr_pfree(pkt, VP_DROP_PUSH);
    return 0;
}
/*
 * reply to ping requests
 */
static void *
ponger_body(void *data)
{
    struct targ *targ = (struct targ *) data;
    struct pollfd fds[1];
    struct netmap_if *nifp = targ->nifp;
    struct netmap_ring *txring, *rxring;
    int i, rx = 0, sent = 0, n = targ->g->npackets;

    fds[0].fd = targ->fd;
    fds[0].events = (POLLIN);
    if (targ->g->nthreads > 1) {
        D("can only reply ping with 1 thread");
        return NULL;
    }
    D("understood ponger %d but don't know how to do it", n);
    while (n == 0 || sent < n) {
        uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
        ioctl(fds[0].fd, NIOCRXSYNC, NULL);
#else
        if (poll(fds, 1, 1000) <= 0) {
            D("poll error/timeout on queue %d: %s", targ->me,
                strerror(errno));
            continue;
        }
#endif
        txring = NETMAP_TXRING(nifp, 0);
        txcur = txring->cur;
        txavail = nm_ring_space(txring);
        /* see what we got back */
        for (i = targ->qfirst; i < targ->qlast; i++) {
            rxring = NETMAP_RXRING(nifp, i);
            while (!nm_ring_empty(rxring)) {
                uint16_t *spkt, *dpkt;
                uint32_t cur = rxring->cur;
                struct netmap_slot *slot = &rxring->slot[cur];
                char *src, *dst;

                src = NETMAP_BUF(rxring, slot->buf_idx);
                //D("got pkt %p of size %d", src, slot->len);
                rxring->head = rxring->cur = nm_ring_next(rxring, cur);
                rx++;
                if (txavail == 0)
                    continue;
                dst = NETMAP_BUF(txring,
                    txring->slot[txcur].buf_idx);
                /* copy the payload, then swap src and dst mac */
                dpkt = (uint16_t *)dst;
                spkt = (uint16_t *)src;
                pkt_copy(src, dst, slot->len);
                dpkt[0] = spkt[3];
                dpkt[1] = spkt[4];
                dpkt[2] = spkt[5];
                dpkt[3] = spkt[0];
                dpkt[4] = spkt[1];
                dpkt[5] = spkt[2];
                txring->slot[txcur].len = slot->len;
                txcur = nm_ring_next(txring, txcur);
                txavail--;
                sent++;
            }
        }
        txring->head = txring->cur = txcur;
        targ->count = sent;
#ifdef BUSYWAIT
        ioctl(fds[0].fd, NIOCTXSYNC, NULL);
#endif
        //D("tx %d rx %d", sent, rx);
    }
    return NULL;
}
static void *
pinger_body(void *data)
{
    struct targ *targ = (struct targ *) data;
    struct pollfd fds[1];
    struct netmap_if *nifp = targ->nifp;
    int i, rx = 0, n = targ->g->npackets;
    void *frame;
    int size;

    frame = &targ->pkt;
    frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
    size = targ->g->pkt_size + targ->g->virt_header;

    fds[0].fd = targ->fd;
    fds[0].events = (POLLIN);
    static uint32_t sent;
    struct timespec ts, now, last_print;
    uint32_t count = 0, min = 1000000000, av = 0;

    if (targ->g->nthreads > 1) {
        D("can only ping with 1 thread");
        return NULL;
    }

    clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
    now = last_print;
    while (n == 0 || (int)sent < n) {
        struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
        struct netmap_slot *slot;
        char *p;

        for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
            slot = &ring->slot[ring->cur];
            slot->len = size;
            p = NETMAP_BUF(ring, slot->buf_idx);

            if (nm_ring_empty(ring)) {
                D("-- ouch, cannot send");
            } else {
                pkt_copy(frame, p, size);
                clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
                /* store seqno and timestamp in the payload */
                bcopy(&sent, p+42, sizeof(sent));
                bcopy(&ts, p+46, sizeof(ts));
                sent++;
                ring->head = ring->cur = nm_ring_next(ring, ring->cur);
            }
        }
        /* should use a parameter to decide how often to send */
        if (poll(fds, 1, 3000) <= 0) {
            D("poll error/timeout on queue %d: %s", targ->me,
                strerror(errno));
            continue;
        }
        /* see what we got back */
        for (i = targ->qfirst; i < targ->qlast; i++) {
            ring = NETMAP_RXRING(nifp, i);
            while (!nm_ring_empty(ring)) {
                uint32_t seq;

                slot = &ring->slot[ring->cur];
                p = NETMAP_BUF(ring, slot->buf_idx);

                clock_gettime(CLOCK_REALTIME_PRECISE, &now);
                bcopy(p+42, &seq, sizeof(seq));
                bcopy(p+46, &ts, sizeof(ts));
                ts.tv_sec = now.tv_sec - ts.tv_sec;
                ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
                if (ts.tv_nsec < 0) {
                    ts.tv_nsec += 1000000000;
                    ts.tv_sec--;
                }
                if (1) D("seq %d/%d delta %d.%09d", seq, sent,
                    (int)ts.tv_sec, (int)ts.tv_nsec);
                if (ts.tv_nsec < (int)min)
                    min = ts.tv_nsec;
                count++;
                av += ts.tv_nsec;
                ring->head = ring->cur = nm_ring_next(ring, ring->cur);
                rx++;
            }
        }
        //D("tx %d rx %d", sent, rx);
        //usleep(100000);
        ts.tv_sec = now.tv_sec - last_print.tv_sec;
        ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
        if (ts.tv_nsec < 0) {
            ts.tv_nsec += 1000000000;
            ts.tv_sec--;
        }
        if (ts.tv_sec >= 1) {
            D("count %d min %d av %d", count, min,
                count ? av / count : 0);
            count = 0;
            av = 0;
            min = 1000000000;
            last_print = now;
        }
    }
    return NULL;
}
/*
 * create and enqueue a batch of packets on a ring.
 * On the last one set NS_REPORT to tell the driver to generate
 * an interrupt when done.
 */
static int
send_packets(struct netmap_ring *ring, struct pkt *pkt, void *frame,
        int size, struct glob_arg *g, u_int count, int options,
        u_int nfrags)
{
    u_int n, sent, cur = ring->cur;
    int fcnt;

    n = nm_ring_space(ring);
    if (n < count)
        count = n;
    if (count < nfrags) {
        D("truncating packet, no room for frags %d %d",
            count, nfrags);
    }
#if 0
    if (options & (OPT_COPY | OPT_PREFETCH)) {
        for (sent = 0; sent < count; sent++) {
            struct netmap_slot *slot = &ring->slot[cur];
            char *p = NETMAP_BUF(ring, slot->buf_idx);

            prefetch(p);
            cur = nm_ring_next(ring, cur);
        }
        cur = ring->cur;
    }
#endif
    for (fcnt = nfrags, sent = 0; sent < count; sent++) {
        struct netmap_slot *slot = &ring->slot[cur];
        char *p = NETMAP_BUF(ring, slot->buf_idx);

        slot->flags = 0;
        if (options & OPT_INDIRECT) {
            slot->flags |= NS_INDIRECT;
            slot->ptr = (uint64_t)frame;
        } else if (options & OPT_COPY) {
            pkt_copy(frame, p, size);
            if (fcnt == 1)
                update_addresses(pkt, g);
        } else if (options & OPT_MEMCPY) {
            memcpy(p, frame, size);
            if (fcnt == 1)
                update_addresses(pkt, g);
        } else if (options & OPT_PREFETCH) {
            prefetch(p);
        }
        if (options & OPT_DUMP)
            dump_payload(p, size, ring, cur);
        slot->len = size;
        if (--fcnt > 0)
            slot->flags |= NS_MOREFRAG;
        else
            fcnt = nfrags;
        if (sent == count - 1) {
            slot->flags &= ~NS_MOREFRAG;
            slot->flags |= NS_REPORT;
        }
        cur = nm_ring_next(ring, cur);
    }
    ring->head = ring->cur = cur;

    return (sent);
}
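/*
 * Assumed caller fragment (modeled on a pkt-gen style sender loop, not
 * part of the original sources): when generating multi-fragment
 * packets, keep the batch a multiple of nfrags so no packet is left
 * half-queued on the ring, then flush with NIOCTXSYNC.
 */
static void
send_batch_sketch(int fd, struct netmap_ring *txring, struct pkt *pkt,
        void *frame, int size, struct glob_arg *g,
        u_int burst, int options, u_int nfrags)
{
    u_int limit = burst;

    if (nfrags > 1)
        limit = (limit / nfrags) * nfrags;  /* frag-aligned batch */
    send_packets(txring, pkt, frame, size, g, limit, options, nfrags);
    ioctl(fd, NIOCTXSYNC, NULL);            /* flush to the NIC */
}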
static ssize_t
netmap_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return iov_size(iov, iovcnt);
    }

    last = i = ring->cur;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. */
        netmap_write_poll(s, true);
        return 0;
    }

    for (j = 0; j < iovcnt; j++) {
        int iov_frag_size = iov[j].iov_len;
        int offset = 0;
        int nm_frag_size;

        /* Split each iovec fragment over more netmap slots, if necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(nm_ring_empty(ring))) {
                /* We run out of netmap slots while splitting the
                   iovec fragments. */
                netmap_write_poll(s, true);
                return 0;
            }

            idx = ring->slot[i].buf_idx;
            dst = (uint8_t *)NETMAP_BUF(ring, idx);

            ring->slot[i].len = nm_frag_size;
            ring->slot[i].flags = NS_MOREFRAG;
            pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);

            last = i;
            i = nm_ring_next(ring, i);

            offset += nm_frag_size;
            iov_frag_size -= nm_frag_size;
        }
    }
    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->cur and ring->head. */
    ring->cur = ring->head = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return iov_size(iov, iovcnt);
}