static ssize_t dump_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    DumpState *s = DO_UPCAST(DumpState, nc, nc);
    struct pcap_sf_pkthdr hdr;
    int64_t ts;
    int caplen;

    /* Early return in case of previous error. */
    if (s->fd < 0) {
        return size;
    }

    ts = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
    caplen = size > s->pcap_caplen ? s->pcap_caplen : size;

    hdr.ts.tv_sec = ts / 1000000 + s->start_ts;
    hdr.ts.tv_usec = ts % 1000000;
    hdr.caplen = caplen;
    hdr.len = size;
    if (write(s->fd, &hdr, sizeof(hdr)) != sizeof(hdr) ||
        write(s->fd, buf, caplen) != caplen) {
        qemu_log("-net dump write error - stop dump\n");
        close(s->fd);
        s->fd = -1;
    }

    return size;
}
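/*
 * For reference: a minimal sketch of the on-disk record header that
 * dump_receive() fills in above, assuming the classic libpcap savefile
 * record layout (32-bit seconds/microseconds, captured length, original
 * length). Field names follow the usage above; the exact definition in
 * net/dump.c may differ in spelling.
 */
struct pcap_sf_pkthdr {
    struct {
        int32_t tv_sec;
        int32_t tv_usec;
    } ts;
    uint32_t caplen;    /* bytes actually written to the file */
    uint32_t len;       /* original on-the-wire packet length */
};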
static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt)
{
    struct pcap_sf_pkthdr hdr;
    int64_t ts;
    int caplen;
    size_t size = iov_size(iov, cnt);
    struct iovec dumpiov[cnt + 1];

    /* Early return in case of previous error. */
    if (s->fd < 0) {
        return size;
    }

    ts = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
    caplen = size > s->pcap_caplen ? s->pcap_caplen : size;

    hdr.ts.tv_sec = ts / 1000000 + s->start_ts;
    hdr.ts.tv_usec = ts % 1000000;
    hdr.caplen = caplen;
    hdr.len = size;

    dumpiov[0].iov_base = &hdr;
    dumpiov[0].iov_len = sizeof(hdr);
    cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, 0, caplen);

    if (writev(s->fd, dumpiov, cnt + 1) != sizeof(hdr) + caplen) {
        error_report("network dump write error - stopping dump");
        close(s->fd);
        s->fd = -1;
    }

    return size;
}
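/*
 * Both receive paths only append records; the pcap global header is
 * written once when the dump file is opened. A minimal sketch of that
 * header, assuming the standard libpcap savefile layout (magic
 * 0xa1b2c3d4, format version 2.4, linktype 1 = Ethernet); field names
 * and comments are illustrative, not copied from net/dump.c.
 */
struct pcap_file_header {
    uint32_t magic;          /* 0xa1b2c3d4 in native byte order */
    uint16_t version_major;  /* 2 */
    uint16_t version_minor;  /* 4 */
    int32_t  thiszone;       /* GMT offset; usually 0 */
    uint32_t sigfigs;        /* timestamp accuracy; usually 0 */
    uint32_t snaplen;        /* per-packet capture limit (s->pcap_caplen) */
    uint32_t linktype;       /* 1 = DLT_EN10MB (Ethernet) */
};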
static void filter_buffer_setup_timer(NetFilterState *nf)
{
    FilterBufferState *s = FILTER_BUFFER(nf);

    if (s->interval) {
        timer_init_us(&s->release_timer, QEMU_CLOCK_VIRTUAL,
                      filter_buffer_release_timer, nf);
        /* Timer armed to fire in s->interval microseconds. */
        timer_mod(&s->release_timer,
                  qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + s->interval);
    }
}
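/*
 * Usage sketch (an assumption based on the documented filter-buffer
 * options, not on this file): s->interval comes from the object's
 * "interval" property, expressed in microseconds, e.g.
 *
 *   qemu-system-x86_64 ... \
 *       -netdev user,id=hn0 \
 *       -object filter-buffer,id=f0,netdev=hn0,queue=rx,interval=1000
 *
 * Note that the s->interval guard above means no release timer is
 * armed at all when the interval is zero.
 */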
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             sPAPRMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    target_long ret;

    trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    if ((check_bd(dev, buf, 4) < 0) || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        return H_RESOURCE;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
    } else {
        ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
    }
    if (ret) {
        return ret;
    }

    dev->rx_bufs++;

    /*
     * Give guest some more time to add additional RX buffers before we
     * flush the receive queue, so that e.g. fragmented IP packets can
     * be passed to the guest in one go later (instead of passing single
     * fragments if there is only one receive buffer available).
     */
    timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);

    return H_SUCCESS;
}
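/*
 * For context: the buffer descriptor validated by check_bd() and
 * VLAN_BD_LEN() above is a single 64-bit word. A sketch of the layout
 * as commonly described for the PAPR VIO l-lan interface (an assumption
 * for illustration, not copied from this file): a valid flag in the top
 * bit, a 24-bit length field, and the guest buffer address in the low
 * 32 bits.
 */
#define VLAN_BD_VALID     0x8000000000000000ULL          /* descriptor in use */
#define VLAN_BD_LEN(bd)   (((bd) & 0x00ffffff00000000ULL) >> 32)
#define VLAN_BD_ADDR(bd)  ((bd) & 0x00000000ffffffffULL)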
static void filter_buffer_release_timer(void *opaque)
{
    NetFilterState *nf = opaque;
    FilterBufferState *s = FILTER_BUFFER(nf);

    /*
     * Note: filter_buffer_flush() drops packets that can't be sent
     * TODO: We should leave them queued. But currently there's no way
     * for the next filter or receiver to notify us that it can receive
     * more packets.
     */
    filter_buffer_flush(nf);
    /* Timer rearmed to fire again in s->interval microseconds. */
    timer_mod(&s->release_timer,
              qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + s->interval);
}
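/*
 * For context, a minimal sketch of the flush helper invoked above,
 * assuming the filter queues incoming packets on an s->incoming_queue
 * NetQueue as net/filter-buffer.c does; treat this as an illustration
 * of the drop-on-failure behavior the note describes, not a verbatim
 * copy.
 */
static void filter_buffer_flush(NetFilterState *nf)
{
    FilterBufferState *s = FILTER_BUFFER(nf);

    if (!qemu_net_queue_flush(s->incoming_queue)) {
        /* Unable to empty the queue; drop the remaining packets. */
        qemu_net_queue_purge(s->incoming_queue, nf->netdev);
    }
}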