static void tap_send(void *opaque) { TAPState *s = opaque; int size; while (qemu_can_send_packet(&s->nc)) { uint8_t *buf = s->buf; size = tap_read_packet(s->fd, s->buf, sizeof(s->buf)); if (size <= 0) { break; } if (s->host_vnet_hdr_len && !s->using_vnet_hdr) { buf += s->host_vnet_hdr_len; size -= s->host_vnet_hdr_len; } size = qemu_send_packet_async(&s->nc, buf, size, tap_send_completed); if (size == 0) { tap_read_poll(s, false); break; } else if (size < 0) { break; } } }
/* outside world -> VM */
static void
vnic_send(void *opaque)
{
	VNICState *vsp = opaque;
	int nbytes;

	for (;;) {
		/* Pull the next frame off the VNIC. */
		nbytes = vnic_read_packet(vsp, vsp->vns_buf,
		    sizeof (vsp->vns_buf));
		if (nbytes <= 0)
			break;

		nbytes = qemu_send_packet_async(&vsp->vns_nc, vsp->vns_buf,
		    nbytes, vnic_send_completed);
		/*
		 * A zero return means qemu queued the packet; stop polling
		 * for reads until the completion callback fires.
		 */
		if (nbytes == 0)
			vnic_read_poll(vsp, 0);

		if (nbytes <= 0 || !qemu_can_send_packet(&vsp->vns_nc))
			break;
	}
}
static void tap_send(void *opaque) { TAPState *s = opaque; int size; int packets = 0; while (true) { uint8_t *buf = s->buf; size = tap_read_packet(s->fd, s->buf, sizeof(s->buf)); if (size <= 0) { break; } if (s->host_vnet_hdr_len && !s->using_vnet_hdr) { buf += s->host_vnet_hdr_len; size -= s->host_vnet_hdr_len; } size = qemu_send_packet_async(&s->nc, buf, size, tap_send_completed); if (size == 0) { tap_read_poll(s, false); break; } else if (size < 0) { break; } /* * When the host keeps receiving more packets while tap_send() is * running we can hog the QEMU global mutex. Limit the number of * packets that are processed per tap_send() callback to prevent * stalling the guest. */ packets++; if (packets >= 50) { break; } } }
/* VM -> outside world */
static ssize_t
vnic_receive(VLANClientState *ncp, const uint8_t *buf, size_t size)
{
	VNICState *vsp = DO_UPCAST(VNICState, vns_nc, ncp);
	int resplen;

	/* Not an intercepted DHCP request: hand it to the device as-is. */
	if (!vsp->vns_ds.vnds_enabled || !is_dhcp_request(buf, size))
		return (vnic_write_packet(vsp, buf, size));

	/*
	 * Answer the guest's DHCP request ourselves. The packet is always
	 * reported as consumed, even when building or queueing the reply
	 * fails.
	 */
	resplen = create_dhcp_response(buf, size, &vsp->vns_ds);
	if (resplen != 0) {
		resplen = qemu_send_packet_async(&vsp->vns_nc,
		    vsp->vns_ds.vnds_buf, resplen, vnic_send_completed);
		/* Reply queued: pause reads until the send completes. */
		if (resplen == 0)
			vnic_read_poll(vsp, 0);
	}

	return (size);
}
/*
 * Tap fd became readable: forward packets to the peer (host -> guest).
 *
 * NOTE(review): the do/while reads one packet before checking
 * qemu_can_send_packet(), relying on qemu_send_packet_async() to queue
 * it if the peer is busy — confirm that ordering is intentional.
 */
static void tap_send(void *opaque)
{
    TAPState *s = opaque;
    int size;

    do {
        uint8_t *buf = s->buf;

        size = tap_read_packet(s->fd, s->buf, sizeof(s->buf));
        if (size <= 0) {
            /* Nothing to read (or read error); wait for the next poll. */
            break;
        }

        if (s->has_vnet_hdr && !s->using_vnet_hdr) {
            /* Guest does not consume vnet headers; skip past the host's. */
            buf += sizeof(struct virtio_net_hdr);
            size -= sizeof(struct virtio_net_hdr);
        }

        size = qemu_send_packet_async(&s->nc, buf, size, tap_send_completed);
        if (size == 0) {
            /*
             * Packet was queued; disable read polling until
             * tap_send_completed() re-enables it. size == 0 also
             * terminates the loop below.
             */
            tap_read_poll(s, 0);
        }
    } while (size > 0 && qemu_can_send_packet(&s->nc));
}
/*
 * VM -> outside world, scatter/gather variant. Packs the guest's iovecs
 * into the write frameio and hands them to the vnd device in one call.
 * DHCP requests embedded in the vector list are intercepted and answered
 * locally instead of being written out.
 *
 * Returns the number of bytes actually written by the device, or 0 when
 * the device would block (write polling is then enabled).
 */
static ssize_t
vnic_receive_iov(VLANClientState *ncp, const struct iovec *iov,
    int iovcnt)
{
	int ret, fvec, i;
	size_t total;
	VNICState *vsp = DO_UPCAST(VNICState, vns_nc, ncp);

	assert(iovcnt <= FRAMEIO_NVECS_MAX);

	/*
	 * Copy the iovcs to our write frameio. Also, check if any of these is
	 * valid dhcp and handle it immediately.
	 */
	for (i = 0, fvec = 0; i < iovcnt; i++, iov++) {
		if (vsp->vns_ds.vnds_enabled &&
		    is_dhcp_request(iov->iov_base, iov->iov_len)) {
			/*
			 * Basically drop the packet because we can't send a
			 * reply at this time. It's unfortunate, but we don't
			 * really have the proper infrastructure to do
			 * something else with this at this time.
			 */
			if (!vnic_can_send(vsp))
				continue;

			ret = create_dhcp_response(iov->iov_base,
			    iov->iov_len, &vsp->vns_ds);
			/* This failed, drop it and continue */
			if (ret == 0)
				continue;

			ret = qemu_send_packet_async(&vsp->vns_nc,
			    vsp->vns_ds.vnds_buf, ret, vnic_send_completed);
			/*
			 * qemu has told us that it can't receive any more data
			 * at this time for the guest (host->guest traffic) so
			 * turn off our read poll until we get that the send
			 * has completed.
			 */
			if (ret == 0)
				vnic_read_poll(vsp, 0);
			continue;
		}
		vsp->vns_wfio->fio_vecs[fvec].fv_buf = iov->iov_base;
		vsp->vns_wfio->fio_vecs[fvec].fv_buflen = iov->iov_len;
		fvec++;
	}

	vsp->vns_wfio->fio_nvecs = fvec;
	do {
		ret = vnd_frameio_write(vsp->vns_hdl, vsp->vns_wfio);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1 && errno == EAGAIN) {
		/* Device is full; retry once write polling says it drained. */
		vnic_write_poll(vsp, 1);
		return (0);
	} else if (ret == -1) {
		/*
		 * Any other write error is unexpected and unrecoverable.
		 * Previously this fell through and summed stale fv_actlen
		 * values, silently reporting a bogus byte count as success.
		 * Fail loudly instead, matching the sibling
		 * vnic_receive_iov() implementation.
		 */
		abort();
	}

	total = 0;
	for (i = 0; i < vsp->vns_wfio->fio_nvecs; i++) {
		/* An all-zero vector marks the end of what was written. */
		if (vsp->vns_wfio->fio_vecs[i].fv_actlen == 0 &&
		    vsp->vns_wfio->fio_vecs[i].fv_buflen == 0)
			break;
		total += vsp->vns_wfio->fio_vecs[i].fv_actlen;
	}
	return (total);
}
/*
 * VM -> outside world, scatter/gather variant. Intercepts and answers
 * DHCP requests locally; everything else is packed into the write
 * frameio and handed to the vnd device. Vectors beyond what frameio
 * supports are concatenated into the transmit buffer as the final
 * vector.
 *
 * Returns the total input length when a packet is consumed locally or
 * dropped, the byte count the device reports written, or 0 when the
 * device would block (write polling is then enabled).
 */
static ssize_t
vnic_receive_iov(VLANClientState *ncp, const struct iovec *iov,
    int iovcnt)
{
	int ret, i;
	size_t total, altsize;
	VNICState *vsp = DO_UPCAST(VNICState, vns_nc, ncp);

	/* Total input length; reported back on local-consume/drop paths. */
	for (total = 0, i = 0; i < iovcnt; i++) {
		total += (iov + i)->iov_len;
	}

	if (vsp->vns_ds.vnds_enabled && is_dhcp_requestv(iov, iovcnt)) {
		/*
		 * Basically drop the packet because we can't send a
		 * reply at this time. It's unfortunate, but we don't
		 * really have the proper infrastructure to do something
		 * else with this at this time.
		 */
		if (!vnic_can_send(vsp))
			return (total);

		ret = create_dhcp_responsev(iov, iovcnt, &vsp->vns_ds);
		/* This failed, drop it and continue */
		if (ret == 0)
			return (total);

		ret = qemu_send_packet_async(&vsp->vns_nc,
		    vsp->vns_ds.vnds_buf, ret, vnic_send_completed);
		/*
		 * qemu has told us that it can't receive any more data
		 * at this time for the guest (host->guest traffic) so
		 * turn off our read poll until we get that the send has
		 * completed.
		 */
		if (ret == 0)
			vnic_read_poll(vsp, 0);
		return (total);
	}

	/*
	 * Copy the iovcs to our write frameio. Be on the lookout for someone
	 * giving us more vectors than we support in frameio. In that case,
	 * let's go ahead and just simply concat the rest.
	 */
	for (i = 0; i < MIN(iovcnt, FRAMEIO_NVECS_MAX - 1); i++, iov++) {
		vsp->vns_wfio->fio_vecs[i].fv_buf = iov->iov_base;
		vsp->vns_wfio->fio_vecs[i].fv_buflen = iov->iov_len;
	}

	/* Concatenate any overflow vectors into the transmit buffer. */
	altsize = 0;
	for (i = MIN(iovcnt, FRAMEIO_NVECS_MAX - 1); i != iovcnt;
	    i++, iov++) {
		/*
		 * The packet is too large. We're going to silently drop it...
		 */
		if (altsize + iov->iov_len > VNIC_BUFSIZE)
			return (total);
		bcopy(iov->iov_base, vsp->vns_txbuf + altsize, iov->iov_len);
		altsize += iov->iov_len;
	}

	if (altsize != 0) {
		/* The concatenated remainder becomes the last vector. */
		vsp->vns_wfio->fio_vecs[FRAMEIO_NVECS_MAX-1].fv_buf =
		    vsp->vns_txbuf;
		vsp->vns_wfio->fio_vecs[FRAMEIO_NVECS_MAX-1].fv_buflen =
		    altsize;
	}

	vsp->vns_wfio->fio_nvecs = MIN(iovcnt, FRAMEIO_NVECS_MAX);
	vsp->vns_wfio->fio_nvpf = MIN(iovcnt, FRAMEIO_NVECS_MAX);
	do {
		ret = vnd_frameio_write(vsp->vns_hdl, vsp->vns_wfio);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1 && errno == EAGAIN) {
		/* Device is full; retry once write polling says it drained. */
		vnic_write_poll(vsp, 1);
		return (0);
	} else if (ret == -1) {
		/* Any other write error is unexpected and unrecoverable. */
		abort();
	}

	total = 0;
	for (i = 0; i < vsp->vns_wfio->fio_nvecs; i++) {
		/* An all-zero vector marks the end of what was written. */
		if (vsp->vns_wfio->fio_vecs[i].fv_actlen == 0 &&
		    vsp->vns_wfio->fio_vecs[i].fv_buflen == 0)
			break;
		total += vsp->vns_wfio->fio_vecs[i].fv_actlen;
	}
	return (total);
}