static void
deliverframe(struct virtif_user *viu)
{
	struct rte_mbuf *m, *m0;
	struct iovec iov[STACK_IOV];
	struct iovec *iovp, *iovp0;

	assert(viu->viu_nbufpkts > 0 && viu->viu_bufidx < MAX_PKT_BURST);

	/* consume the next mbuf chain from the received burst */
	m0 = viu->viu_m_pkts[viu->viu_bufidx];
	assert(m0 != NULL);
	viu->viu_bufidx++;
	viu->viu_nbufpkts--;

	/* use the on-stack iovec array unless the chain is too long */
	if (m0->pkt.nb_segs > STACK_IOV) {
		iovp = malloc(sizeof(*iovp) * m0->pkt.nb_segs);
		if (iovp == NULL) {
			/* no memory for iovecs: drop, but don't leak the mbuf */
			rte_pktmbuf_free(m0);
			return;
		}
	} else {
		iovp = iov;
	}
	iovp0 = iovp;

	/* map each mbuf segment to an iovec and hand the chain to the stack */
	for (m = m0; m; m = m->pkt.next, iovp++) {
		iovp->iov_base = rte_pktmbuf_mtod(m, void *);
		iovp->iov_len = rte_pktmbuf_data_len(m);
	}
	VIF_DELIVERPKT(viu->viu_virtifsc, iovp0, iovp - iovp0);

	rte_pktmbuf_free(m0);
	if (iovp0 != iov)
		free(iovp0);
}
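/*
 * For context, a minimal sketch of the poll loop that could feed
 * deliverframe().  IF_PORTID and rcvloop() are hypothetical names,
 * and the refill via rte_eth_rx_burst() (from <rte_ethdev.h>) is an
 * assumption about how viu_m_pkts/viu_nbufpkts/viu_bufidx get filled;
 * the actual driver's receive thread may differ in detail.
 */
static void
rcvloop(struct virtif_user *viu)
{

	while (!viu->viu_dying) {
		/* refill the burst buffer once all packets are consumed
		 * (busy-polls while the NIC is idle) */
		if (viu->viu_nbufpkts == 0) {
			viu->viu_nbufpkts = rte_eth_rx_burst(IF_PORTID, 0,
			    viu->viu_m_pkts, MAX_PKT_BURST);
			viu->viu_bufidx = 0;
		}
		/* deliver the burst to the stack one frame at a time */
		while (viu->viu_nbufpkts > 0)
			deliverframe(viu);
	}
}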
/*
 * Note: this thread is the only one pulling packets off of any
 * given netmap instance.
 */
static void *
receiver(void *arg)
{
	struct virtif_user *viu = arg;
	struct iovec iov;
	struct netmap_if *nifp = viu->nm_nifp;
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
	struct netmap_slot *slot;
	struct pollfd pfd;
	int prv;

	rumpuser_component_kthread();
	for (;;) {
		pfd.fd = viu->viu_fd;
		pfd.events = POLLIN;

		if (viu->viu_dying) {
			break;
		}

		/* wait until the rx ring holds at least one packet */
		prv = 0;
		while (nm_ring_empty(ring) && prv == 0) {
			DPRINTF(("receive pkt via netmap\n"));
			prv = poll(&pfd, 1, 1000);
			if (prv > 0 || (prv < 0 && errno != EAGAIN))
				break;
		}
		if (nm_ring_empty(ring)) {
			/* poll error: don't read a slot from an empty
			 * ring; go back and recheck viu_dying */
			continue;
		}
#if 0
		/* XXX: report non-transient errors */
		if (ring->avail == 0) {
			rv = errno;
			break;
		}
#endif
		slot = &ring->slot[ring->cur];
		DPRINTF(("got pkt of size %d\n", slot->len));
		iov.iov_base = NETMAP_BUF(ring, slot->buf_idx);
		iov.iov_len = slot->len;

		/* XXX: allow batch processing */
		rumpuser_component_schedule(NULL);
		VIF_DELIVERPKT(viu->viu_virtifsc, &iov, 1);
		rumpuser_component_unschedule();

		/* release the slot back to the kernel */
		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
	}
	rumpuser_component_kthread_release();

	return NULL;
}
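/*
 * For reference, a minimal sketch of how viu_fd and nm_nifp might be
 * set up before receiver() runs, assuming the classic /dev/netmap
 * NIOCREGIF-plus-mmap sequence (declarations in <net/netmap_user.h>;
 * also needs <sys/ioctl.h>, <sys/mman.h>, <fcntl.h>, <string.h>).
 * netmap_attach() and the nm_mem field are illustrative assumptions,
 * not part of the driver above.
 */
static int
netmap_attach(struct virtif_user *viu, const char *ifname)
{
	struct nmreq req;

	if ((viu->viu_fd = open("/dev/netmap", O_RDWR)) == -1)
		return -1;

	/* register the interface in netmap mode */
	memset(&req, 0, sizeof(req));
	req.nr_version = NETMAP_API;
	strncpy(req.nr_name, ifname, sizeof(req.nr_name)-1);
	if (ioctl(viu->viu_fd, NIOCREGIF, &req) == -1)
		return -1;

	/* map the shared rings and locate the netmap_if descriptor */
	viu->nm_mem = mmap(NULL, req.nr_memsize, PROT_READ|PROT_WRITE,
	    MAP_SHARED, viu->viu_fd, 0);
	if (viu->nm_mem == MAP_FAILED)
		return -1;
	viu->nm_nifp = NETMAP_IF(viu->nm_mem, req.nr_offset);

	return 0;
}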