/*
 * Decommission the interface: stop the receiver thread, reap it,
 * and release the netmap fd and the per-interface state.
 */
void
VIFHYPER_DESTROY(struct virtif_user *viu)
{
	void *cookie = rumpuser_component_unschedule();

	/*
	 * Tell the receiver loop to exit before joining it.  The
	 * receiver only leaves its loop when viu_dying is set, so
	 * joining without setting the flag can block forever.
	 * (Harmless if a separate "dying" hypercall already set it.)
	 */
	viu->viu_dying = 1;
	pthread_join(viu->viu_pt, NULL);
	close(viu->viu_fd);
	free(viu);
	rumpuser_component_schedule(cookie);
}
/*
 * Note: this thread is the only one pulling packets off of any
 * given netmap instance
 */
static void *
receiver(void *arg)
{
	struct virtif_user *viu = arg;
	struct iovec iov;
	struct netmap_if *nifp = viu->nm_nifp;
	/* this thread services RX ring 0 only */
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
	struct netmap_slot *slot;
	struct pollfd pfd;
	int prv;

	/* give this newly created host thread a rump kernel context */
	rumpuser_component_kthread();

	for (;;) {
		pfd.fd = viu->viu_fd;
		pfd.events = POLLIN;

		/* interface being torn down (see VIFHYPER_DESTROY) */
		if (viu->viu_dying) {
			break;
		}

		/*
		 * Wait (1s poll timeout at a time) until the kernel has
		 * put at least one packet on the RX ring.  EAGAIN from
		 * poll() is treated as transient and retried.
		 * NOTE(review): on a poll() error other than EAGAIN we
		 * still fall through and read ring->cur below even though
		 * the ring may be empty — presumably poll() on a netmap
		 * fd does not fail that way in practice; confirm.
		 */
		prv = 0;
		while (nm_ring_empty(ring) && prv == 0) {
			DPRINTF(("receive pkt via netmap\n"));
			prv = poll(&pfd, 1, 1000);
			if (prv > 0 || (prv < 0 && errno != EAGAIN))
				break;
		}
#if 0
		/* XXX: report non-transient errors */
		if (ring->avail == 0) {
			rv = errno;
			break;
		}
#endif

		/* hand the current slot's buffer to the stack, one pkt at a time */
		slot = &ring->slot[ring->cur];
		DPRINTF(("got pkt of size %d\n", slot->len));
		iov.iov_base = NETMAP_BUF(ring, slot->buf_idx);
		iov.iov_len = slot->len;

		/* XXX: allow batch processing */
		/* enter rump context only for the duration of the delivery */
		rumpuser_component_schedule(NULL);
		VIF_DELIVERPKT(viu->viu_virtifsc, &iov, 1);
		rumpuser_component_unschedule();

		/* return the slot to the kernel and advance */
		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
	}

	rumpuser_component_kthread_release();
	return NULL;
}
void VIFHYPER_SEND(struct virtif_user *viu, struct iovec *iov, size_t iovlen) { void *cookie = NULL; /* XXXgcc */ struct netmap_if *nifp = viu->nm_nifp; struct netmap_ring *ring = NETMAP_TXRING(nifp, 0); char *p; int retries; int unscheduled = 0; unsigned n; DPRINTF(("sending pkt via netmap len %d\n", (int)iovlen)); for (retries = 10; !(n = nm_ring_space(ring)) && retries > 0; retries--) { struct pollfd pfd; if (!unscheduled) { cookie = rumpuser_component_unschedule(); unscheduled = 1; } pfd.fd = viu->viu_fd; pfd.events = POLLOUT; DPRINTF(("cannot send on netmap, ring full\n")); (void)poll(&pfd, 1, 500 /* ms */); } if (n > 0) { int i, totlen = 0; struct netmap_slot *slot = &ring->slot[ring->cur]; #define MAX_BUF_SIZE 1900 p = NETMAP_BUF(ring, slot->buf_idx); for (i = 0; totlen < MAX_BUF_SIZE && i < iovlen; i++) { int n = iov[i].iov_len; if (totlen + n > MAX_BUF_SIZE) { n = MAX_BUF_SIZE - totlen; DPRINTF(("truncating long pkt")); } memcpy(p + totlen, iov[i].iov_base, n); totlen += n; } #undef MAX_BUF_SIZE slot->len = totlen; ring->head = ring->cur = nm_ring_next(ring, ring->cur); if (ioctl(viu->viu_fd, NIOCTXSYNC, NULL) < 0) perror("NIOCTXSYNC"); } if (unscheduled) rumpuser_component_schedule(cookie); }
/*
 * Forward an ioctl to the host ugen device.  The raw host ioctl
 * return value is stored in *ioctlrv; the hypercall itself returns
 * a rump-translated errno (0 on success).
 */
int
rumpcomp_ugenhc_ioctl(int fd, u_long cmd, void *data, int *ioctlrv)
{
	int error = 0;
	void *cookie = rumpuser_component_unschedule();

	*ioctlrv = ioctl(fd, cmd, data);
	if (*ioctlrv == -1)
		error = errno;

	rumpuser_component_schedule(cookie);
	return rumpuser_component_errtrans(error);
}
/*
 * Get mbuf off of interface, copy it into memory provided by the
 * TCP/IP stack. TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid
 * data copy.
 */
int
rumpcomp_virtif_recv(struct virtif_user *viu,
	void *data, size_t dlen, size_t *rcvp)
{
	void *cookie = rumpuser_component_unschedule();
	uint8_t *p = data;
	struct rte_mbuf *m, *m0;
	struct rte_pktmbuf *mp;
	int nb_rx, rv;

	/* block (with backoff) until one packet has been received */
	for (;;) {
		nb_rx = rte_eth_rx_burst(IF_PORTID, 0, &m, 1);

		if (nb_rx) {
			assert(nb_rx == 1);

			mp = &m->pkt;
			if (mp->pkt_len > dlen) {
				/* for now, just drop packets we can't handle */
				printf("warning: virtif recv packet too big "
				    "%d vs. %zu\n", mp->pkt_len, dlen);
				rte_pktmbuf_free(m);
				continue;
			}
			*rcvp = mp->pkt_len;
			/*
			 * Walk the mbuf segment chain, copying each
			 * segment into the caller's buffer; free the
			 * whole chain via its head afterwards.
			 */
			m0 = m;
			do {
				mp = &m->pkt;
				memcpy(p, mp->data, mp->data_len);
				p += mp->data_len;
			} while ((m = mp->next) != NULL);
			rte_pktmbuf_free(m0);
			rv = 0;
			break;
		} else {
			usleep(10000); /* XXX: don't 100% busyloop */
		}
	}

	rumpuser_component_schedule(cookie);
	return rv;
}
static void * receiver(void *arg) { struct virtif_user *viu = arg; /* step 1: this newly created host thread needs a rump kernel context */ rumpuser_component_kthread(); /* step 2: deliver packets until interface is decommissioned */ while (!viu->viu_dying) { /* we have cached frames. schedule + deliver */ if (viu->viu_nbufpkts > 0) { rumpuser_component_schedule(NULL); while (viu->viu_nbufpkts > 0) { deliverframe(viu); } rumpuser_component_unschedule(); } /* none cached. ok, try to get some */ if (viu->viu_nbufpkts == 0) { viu->viu_nbufpkts = rte_eth_rx_burst(IF_PORTID, 0, viu->viu_m_pkts, MAX_PKT_BURST); viu->viu_bufidx = 0; } if (viu->viu_nbufpkts == 0) { /* * For now, don't ultrabusyloop. * I don't have an overabundance of * spare cores in my vm. */ usleep(10000); } } return NULL; }
int VIFHYPER_CREATE(const char *devstr, struct virtif_sc *vif_sc, uint8_t *enaddr, struct virtif_user **viup) { struct virtif_user *viu = NULL; void *cookie; int rv; cookie = rumpuser_component_unschedule(); viu = malloc(sizeof(*viu)); if (viu == NULL) { rv = errno; goto out; } viu->viu_fd = opennetmap(devstr, viu, enaddr); if (viu->viu_fd == -1) { rv = errno; free(viu); goto out; } viu->viu_dying = 0; viu->viu_virtifsc = vif_sc; if ((rv = pthread_create(&viu->viu_pt, NULL, receiver, viu)) != 0) { printf("%s: pthread_create failed!\n", VIF_STRING(VIFHYPER_CREATE)); close(viu->viu_fd); free(viu); } out: rumpuser_component_schedule(cookie); *viup = viu; return rumpuser_component_errtrans(rv); }
/* * To send, we copy the data from the TCP/IP stack memory into DPDK * memory. TODO: share TCP/IP stack mbufs with DPDK mbufs to avoid * data copy. */ void rumpcomp_virtif_send(struct virtif_user *viu, struct iovec *iov, size_t iovlen) { void *cookie = rumpuser_component_unschedule(); struct rte_mbuf *m; void *dptr; unsigned i; m = rte_pktmbuf_alloc(mbpool); for (i = 0; i < iovlen; i++) { dptr = rte_pktmbuf_append(m, iov[i].iov_len); if (dptr == NULL) { /* log error somehow? */ rte_pktmbuf_free(m); goto out; } memcpy(dptr, iov[i].iov_base, iov[i].iov_len); } rte_eth_tx_burst(IF_PORTID, 0, &m, 1); out: rumpuser_component_schedule(cookie); }
/*
 * Interrupt delivery thread for one PCI interrupt line on a
 * Mach/Hurd host: registers a Mach receive port for the line,
 * raises its own scheduling priority, and (in irq_server) forwards
 * each interrupt notification to the rump-side handler.
 *
 * NOTE(review): the visible chunk ends inside this function, right
 * after the nested irq_server definition — presumably a message
 * server loop follows; confirm against the full file.
 */
static void *
intrthread(void *arg)
{
	struct irq *irq = arg;
	mach_port_t delivery_port;
	mach_port_t pset, psetcntl;
	int ret;
	int val;

	/* attach this host thread to the rump kernel */
	rumpuser_component_kthread();

	/* port on which interrupt notifications will be received */
	ret = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
	    &delivery_port);
	if (ret)
		err(ret, "mach_port_allocate");

	/* boost this thread to interrupt priority */
	ret = thread_get_assignment (mach_thread_self (), &pset);
	if (ret)
		err(ret, "thread_get_assignment");
	ret = host_processor_set_priv (master_host, pset, &psetcntl);
	if (ret)
		err(ret, "host_processor_set_priv");
	thread_max_priority (mach_thread_self (), psetcntl, 0);
	ret = thread_priority (mach_thread_self (), RUMP_IRQ_PRIO, 0);
	if (ret)
		err(ret, "thread_priority");

	ret = device_intr_register(master_device, irq->intrline,
	    0, 0x04000000, delivery_port, MACH_MSG_TYPE_MAKE_SEND);
	if (ret) {
		warn("device_intr_register");
		return 0;
	}
	device_intr_enable (master_device, irq->intrline, TRUE);

	/*
	 * Nested function (GNU C extension): demultiplexer invoked for
	 * each message arriving on delivery_port.  Returns 1 when the
	 * message was consumed, 0 otherwise.
	 */
	int irq_server (mach_msg_header_t *inp, mach_msg_header_t *outp) {
		mach_intr_notification_t *intr_header =
		    (mach_intr_notification_t *) inp;

		/* no reply message is sent for interrupt notifications */
		((mig_reply_header_t *) outp)->RetCode = MIG_NO_REPLY;
		if (inp->msgh_id != MACH_INTR_NOTIFY)
			return 0;

		/* It's an interrupt not for us. It shouldn't happen. */
		if (intr_header->line != irq->intrline) {
			printf ("We get interrupt %d, %d is expected",
			    intr_header->line, irq->intrline);
			return 1;
		}

		/*
		 * Clear the INTx-disable bit (0x400) in the PCI command
		 * register (offset 0x04) if the device set it.
		 */
		rumpcomp_pci_confread(0, irq->device, 0, 0x04, &val);
		if (val & 0x400) {
			printf("interrupt disabled!\n");
			val &= ~0x400;
			rumpcomp_pci_confwrite(0, irq->device, 0, 0x04, val);
		}

		/* run the registered handler under a rump context */
		rumpuser_component_schedule(NULL);
		irq->handler(irq->data);
		rumpuser_component_unschedule();

		/* If the irq has been disabled by the linux device,
		 * we don't need to reenable the real one. */
		device_intr_enable (master_device, irq->intrline, TRUE);
		return 1;
	}