/* * Note: this thread is the only one pulling packets off of any * given netmap instance */ static void * receiver(void *arg) { struct virtif_user *viu = arg; struct iovec iov; struct netmap_if *nifp = viu->nm_nifp; struct netmap_ring *ring = NETMAP_RXRING(nifp, 0); struct netmap_slot *slot; struct pollfd pfd; int prv; rumpuser_component_kthread(); for (;;) { pfd.fd = viu->viu_fd; pfd.events = POLLIN; if (viu->viu_dying) { break; } prv = 0; while (nm_ring_empty(ring) && prv == 0) { DPRINTF(("receive pkt via netmap\n")); prv = poll(&pfd, 1, 1000); if (prv > 0 || (prv < 0 && errno != EAGAIN)) break; } #if 0 /* XXX: report non-transient errors */ if (ring->avail == 0) { rv = errno; break; } #endif slot = &ring->slot[ring->cur]; DPRINTF(("got pkt of size %d\n", slot->len)); iov.iov_base = NETMAP_BUF(ring, slot->buf_idx); iov.iov_len = slot->len; /* XXX: allow batch processing */ rumpuser_component_schedule(NULL); VIF_DELIVERPKT(viu->viu_virtifsc, &iov, 1); rumpuser_component_unschedule(); ring->head = ring->cur = nm_ring_next(ring, ring->cur); } rumpuser_component_kthread_release(); return NULL; }
static void * receiver(void *arg) { struct virtif_user *viu = arg; /* step 1: this newly created host thread needs a rump kernel context */ rumpuser_component_kthread(); /* step 2: deliver packets until interface is decommissioned */ while (!viu->viu_dying) { /* we have cached frames. schedule + deliver */ if (viu->viu_nbufpkts > 0) { rumpuser_component_schedule(NULL); while (viu->viu_nbufpkts > 0) { deliverframe(viu); } rumpuser_component_unschedule(); } /* none cached. ok, try to get some */ if (viu->viu_nbufpkts == 0) { viu->viu_nbufpkts = rte_eth_rx_burst(IF_PORTID, 0, viu->viu_m_pkts, MAX_PKT_BURST); viu->viu_bufidx = 0; } if (viu->viu_nbufpkts == 0) { /* * For now, don't ultrabusyloop. * I don't have an overabundance of * spare cores in my vm. */ usleep(10000); } } return NULL; }
/*
 * Interrupt delivery thread for a single IRQ line on a GNU Mach host.
 * Allocates a Mach receive port, registers it for interrupt
 * notifications on irq->intrline, raises the thread's scheduling
 * priority, and defines the demux routine that runs the rump kernel
 * interrupt handler for each notification.
 *
 * NOTE(review): the function continues beyond this chunk -- irq_server
 * below is a GNU C nested function, and intrthread itself is presumably
 * completed by a message-receive loop after this view; confirm against
 * the rest of the file.
 */
static void *
intrthread(void *arg)
{
	struct irq *irq = arg;
	mach_port_t delivery_port;
	mach_port_t pset, psetcntl;
	int ret;
	int val;

	/* give this host thread a rump kernel context */
	rumpuser_component_kthread();

	/* port on which the kernel will deliver interrupt notifications */
	ret = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
	    &delivery_port);
	if (ret)
		err(ret, "mach_port_allocate");

	/*
	 * Boost this thread's priority so interrupts are serviced
	 * promptly; changing the max priority needs the privileged
	 * control port of our processor set.  Note the return values of
	 * thread_max_priority is not checked, unlike its siblings.
	 */
	ret = thread_get_assignment (mach_thread_self (), &pset);
	if (ret)
		err(ret, "thread_get_assignment");
	ret = host_processor_set_priv (master_host, pset, &psetcntl);
	if (ret)
		err(ret, "host_processor_set_priv");
	thread_max_priority (mach_thread_self (), psetcntl, 0);
	ret = thread_priority (mach_thread_self (), RUMP_IRQ_PRIO, 0);
	if (ret)
		err(ret, "thread_priority");

	/*
	 * Register for notifications on our interrupt line.
	 * NOTE(review): 0x04000000 is passed undocumented -- looks like
	 * a flavor/id argument; confirm against the device_intr_register
	 * interface definition.
	 */
	ret = device_intr_register(master_device, irq->intrline,
	    0, 0x04000000, delivery_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (ret) {
		/* non-fatal here: warn and give up on this line */
		warn("device_intr_register");
		return 0;
	}

	device_intr_enable (master_device, irq->intrline, TRUE);

	/*
	 * Demux routine (GNU C nested function, closes over irq):
	 * handles one incoming message; returns 1 if consumed, 0 if the
	 * message was not an interrupt notification.
	 */
	int irq_server (mach_msg_header_t *inp, mach_msg_header_t *outp) {
		mach_intr_notification_t *intr_header =
		    (mach_intr_notification_t *) inp;

		/* no MIG reply is sent for interrupt notifications */
		((mig_reply_header_t *) outp)->RetCode = MIG_NO_REPLY;
		if (inp->msgh_id != MACH_INTR_NOTIFY)
			return 0;

		/* It's an interrupt not for us. It shouldn't happen. */
		if (intr_header->line != irq->intrline) {
			printf ("We get interrupt %d, %d is expected",
			    intr_header->line, irq->intrline);
			return 1;
		}

		/*
		 * Config offset 0x04 is the PCI command register; bit
		 * 0x400 is presumably the Interrupt Disable bit -- clear
		 * it if set so the device keeps raising interrupts.
		 * TODO(review): confirm bit meaning against PCI spec.
		 */
		rumpcomp_pci_confread(0, irq->device, 0, 0x04, &val);
		if (val & 0x400) {
			printf("interrupt disabled!\n");
			val &= ~0x400;
			rumpcomp_pci_confwrite(0, irq->device, 0, 0x04, val);
		}

		/* run the rump kernel's interrupt handler in kernel context */
		rumpuser_component_schedule(NULL);
		irq->handler(irq->data);
		rumpuser_component_unschedule();

		/* If the irq has been disabled by the linux device,
		 * we don't need to reenable the real one. */
		device_intr_enable (master_device, irq->intrline, TRUE);
		return 1;
	}