Example #1
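A virtio entropy (random) device notify handler: it drains the avail ring, filling each one-segment chain with bytes read from the host's random source (sc->vrsc_fd), then releases the chain and interrupts the guest if appropriate.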
static void
pci_vtrnd_notify(void *vsc, struct vqueue_info *vq)
{
	struct iovec iov;
	struct pci_vtrnd_softc *sc;
	int len;
	uint16_t idx;

	sc = vsc;

	if (sc->vrsc_fd < 0) {
		vq_endchains(vq, 0);
		return;
	}

	while (vq_has_descs(vq)) {
		vq_getchain(vq, &idx, &iov, 1, NULL);

		len = (int) read(sc->vrsc_fd, iov.iov_base, iov.iov_len);

		DPRINTF(("vtrnd: vtrnd_notify(): %d\r\n", len));

		/* Catastrophe if unable to read from /dev/random */
		assert(len > 0);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, (uint32_t)len);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
}
Example #2
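A virtio-net transmit path: the chain's first descriptor holds the virtio-net header, so the packet length (plen) excludes it while the transfer length (tlen) includes it; note the older vq_getchain()/vq_relchain() API without an explicit chain index.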
static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	int i, n;
	int plen, tlen;

	/*
	 * Obtain chain of descriptors.  The first one is
	 * really the header descriptor, so we need to sum
	 * up two lengths: packet length and transfer length.
	 */
	n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);
	plen = 0;
	tlen = iov[0].iov_len;
	for (i = 1; i < n; i++) {
		plen += iov[i].iov_len;
		tlen += iov[i].iov_len;
	}

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n));
	pci_vtnet_tap_tx(sc, &iov[1], n - 1, plen);

	/* chain is processed, release it and set tlen */
	vq_relchain(vq, tlen);
}
Example #3
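Sending a message on the virtio console control queue: the control structure, plus an optional payload, is copied into a single guest buffer and the chain is released with the total number of bytes written.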
static void
pci_vtcon_control_send(struct pci_vtcon_softc *sc,
    struct pci_vtcon_control *ctrl, const void *payload, size_t len)
{
	struct vqueue_info *vq;
	struct iovec iov;
	uint16_t idx;
	int n;

	vq = pci_vtcon_port_to_vq(&sc->vsc_control_port, true);

	if (!vq_has_descs(vq))
		return;

	n = vq_getchain(vq, &idx, &iov, 1, NULL);

	assert(n == 1);

	memcpy(iov.iov_base, ctrl, sizeof(struct pci_vtcon_control));
	if (payload != NULL && len > 0)
		memcpy(iov.iov_base + sizeof(struct pci_vtcon_control),
		     payload, len);

	vq_relchain(vq, idx, sizeof(struct pci_vtcon_control) + len);
	vq_endchains(vq, 1);
}
Example #4
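The virtio console socket backend's receive handler: data is read from the connection into guest buffers; when the port is not ready or no descriptors are available, input is drained into a dummy buffer, and vq_retchain() puts an unused chain back on EOF or EWOULDBLOCK.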
static void
pci_vtcon_sock_rx(int fd __unused, enum ev_type t __unused, void *arg)
{
	struct pci_vtcon_port *port;
	struct pci_vtcon_sock *sock = (struct pci_vtcon_sock *)arg;
	struct vqueue_info *vq;
	struct iovec iov;
	static char dummybuf[2048];
	int len, n;
	uint16_t idx;

	port = sock->vss_port;
	vq = pci_vtcon_port_to_vq(port, true);

	if (!sock->vss_open || !port->vsp_rx_ready) {
		len = read(sock->vss_conn_fd, dummybuf, sizeof(dummybuf));
		if (len == 0)
			goto close;

		return;
	}

	if (!vq_has_descs(vq)) {
		len = read(sock->vss_conn_fd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		if (len == 0)
			goto close;

		return;
	}

	do {
		n = vq_getchain(vq, &idx, &iov, 1, NULL);
		len = readv(sock->vss_conn_fd, &iov, n);

		if (len == 0 || (len < 0 && errno == EWOULDBLOCK)) {
			vq_retchain(vq);
			vq_endchains(vq, 0);
			if (len == 0)
				goto close;

			return;
		}

		vq_relchain(vq, idx, len);
	} while (vq_has_descs(vq));

	vq_endchains(vq, 1);
	return;		/* don't fall through and close the socket */

close:
	mevent_delete_close(sock->vss_conn_evp);
	sock->vss_conn_fd = -1;
	sock->vss_open = false;
}
Example #5
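A kernel-side receive path (apparently the FreeBSD BERI virtio backend, note the extra beri_mem_offset argument to vq_getchain()): the guest segments after the header are wrapped in a struct uio, converted to an mbuf with m_uiotombuf(), and injected into the network stack.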
static void
vtbe_proc_rx(struct vtbe_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[DESC_COUNT];
	struct ifnet *ifp;
	struct uio uio;
	struct mbuf *m;
	int iolen;
	int i;
	int n;

	ifp = sc->ifp;

	n = vq_getchain(sc->beri_mem_offset, vq, iov,
		DESC_COUNT, NULL);

	KASSERT(n >= 1 && n <= DESC_COUNT,
		("wrong n %d", n));

	iolen = 0;
	for (i = 1; i < n; i++) {
		iolen += iov[i].iov_len;
	}

	uio.uio_resid = iolen;
	uio.uio_iov = &iov[1];
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = (n - 1);
	uio.uio_rw = UIO_WRITE;

	if ((m = m_uiotombuf(&uio, M_NOWAIT, 0, ETHER_ALIGN,
	    M_PKTHDR)) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		goto done;
	}

	m->m_pkthdr.rcvif = ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	CURVNET_SET(ifp->if_vnet);
	VTBE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	VTBE_LOCK(sc);
	CURVNET_RESTORE();

done:
	vq_relchain(vq, iov, n, iolen + sc->hdrsize);
}
Example #6
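A virtio block completion callback from xhyve: the errno is mapped to a virtio status byte, and the chain is returned with a used length of 1 (the status byte); the commented-out locking relates to the deadlock noted in the FIXME.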
/* xhyve: FIXME
 *
 * pci_vtblk_done seems to deadlock when called from pci_vtblk_proc?
 */
static void
pci_vtblk_done_locked(struct blockif_req *br, int err)
{
	struct pci_vtblk_ioreq *io = br->br_param;
	struct pci_vtblk_softc *sc = io->io_sc;

	/* convert errno into a virtio block error return */
	if (err == EOPNOTSUPP || err == ENOSYS)
		*io->io_status = VTBLK_S_UNSUPP;
	else if (err != 0)
		*io->io_status = VTBLK_S_IOERR;
	else
		*io->io_status = VTBLK_S_OK;

	/*
	 * Return the descriptor back to the host.
	 * We wrote 1 byte (our status) to host.
	 */
	//pthread_mutex_lock(&sc->vsc_mtx);
	vq_relchain(&sc->vbsc_vq, io->io_idx, 1);
	vq_endchains(&sc->vbsc_vq, 0);
	//pthread_mutex_unlock(&sc->vsc_mtx);
}
Example #7
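The virtio console transmit notify handler: each one-segment chain is handed to the port's output callback, then released with a used length of zero.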
static void
pci_vtcon_notify_tx(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtcon_softc *sc;
	struct pci_vtcon_port *port;
	struct iovec iov[1];
	uint16_t idx, n;
	uint16_t flags[8];

	sc = vsc;
	port = pci_vtcon_vq_to_port(sc, vq);

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, 1, flags);
		if (port != NULL)
			port->vsp_cb(port, port->vsp_arg, iov, 1);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, 0);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
}
Example #8
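The virtio-net tap receive path (merged-rx-buffers variant): packets are read with readv() directly into guest buffers after rx_iov_trim() skips the receive header, which is then cleared except for the buffer count.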
static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	vq_startchains(vq);
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = readv(sc->vsc_tapfd, riov, n);

		if (len < 0 && errno == EWOULDBLOCK) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
Example #9
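An older single-descriptor variant of the same tap receive path: each chain is assumed to be exactly one descriptor, and the packet is read directly after the fixed-size struct virtio_net_rxhdr.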
static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct vqueue_info *vq;
	struct virtio_net_rxhdr *vrx;
	uint8_t *buf;
	int len;
	struct iovec iov;

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	vq_startchains(vq);
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain, which should have just
		 * one descriptor in it.
		 * ??? allow guests to use multiple descs?
		 */
		assert(vq_getchain(vq, &iov, 1, NULL) == 1);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov.iov_base;
		buf = (uint8_t *)(vrx + 1);

		len = read(sc->vsc_tapfd, buf,
			   iov.iov_len - sizeof(struct virtio_net_rxhdr));

		if (len < 0 && errno == EWOULDBLOCK) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers, which is always 1 without TSO
		 * support.
		 */
		memset(vrx, 0, sizeof(struct virtio_net_rxhdr));
		vrx->vrh_bufs = 1;

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, len + sizeof(struct virtio_net_rxhdr));
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
Example #10
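A virtio block request handler: the first descriptor is the read-only request header and the last is the one-byte status, with the data segments in between; the request type selects preadv()/pwritev(), an identify-string copy, or fsync().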
static void
pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
{
	struct virtio_blk_hdr *vbh;
	uint8_t *status;
	int i, n;
	int err;
	int iolen;
	int writeop, type;
	off_t offset;
	struct iovec iov[VTBLK_MAXSEGS + 2];
	uint16_t flags[VTBLK_MAXSEGS + 2];

	n = vq_getchain(vq, iov, VTBLK_MAXSEGS + 2, flags);

	/*
	 * The first descriptor will be the read-only fixed header,
	 * and the last is for status (hence +2 above and below).
	 * The remaining iov's are the actual data I/O vectors.
	 *
	 * XXX - note - this fails on crash dump, which does a
	 * VIRTIO_BLK_T_FLUSH with a zero transfer length
	 */
	assert(n >= 2 && n <= VTBLK_MAXSEGS + 2);

	assert((flags[0] & VRING_DESC_F_WRITE) == 0);
	assert(iov[0].iov_len == sizeof(struct virtio_blk_hdr));
	vbh = iov[0].iov_base;

	status = iov[--n].iov_base;
	assert(iov[n].iov_len == 1);
	assert(flags[n] & VRING_DESC_F_WRITE);

	/*
	 * XXX
	 * The guest should not be setting the BARRIER flag because
	 * we don't advertise the capability.
	 */
	type = vbh->vbh_type & ~VBH_FLAG_BARRIER;
	writeop = (type == VBH_OP_WRITE);

	offset = vbh->vbh_sector * DEV_BSIZE;

	iolen = 0;
	for (i = 1; i < n; i++) {
		/*
		 * - write op implies read-only descriptor,
		 * - read/ident op implies write-only descriptor,
		 * therefore test the inverse of the descriptor bit
		 * to the op.
		 */
		assert(((flags[i] & VRING_DESC_F_WRITE) == 0) == writeop);
		iolen += iov[i].iov_len;
	}

	DPRINTF(("virtio-block: %s op, %d bytes, %d segs, offset %ld\n\r", 
		 writeop ? "write" : "read/ident", iolen, i - 1, offset));

	switch (type) {
	case VBH_OP_WRITE:
		err = pwritev(sc->vbsc_fd, iov + 1, i - 1, offset);
		break;
	case VBH_OP_READ:
		err = preadv(sc->vbsc_fd, iov + 1, i - 1, offset);
		break;
	case VBH_OP_IDENT:
		/* Assume a single buffer */
		strlcpy(iov[1].iov_base, sc->vbsc_ident,
		    MIN(iov[1].iov_len, sizeof(sc->vbsc_ident)));
		err = 0;
		break;
	case VBH_OP_FLUSH:
	case VBH_OP_FLUSH_OUT:
		err = fsync(sc->vbsc_fd);
		break;
	default:
		err = -ENOSYS;
		break;
	}

	/* convert errno into a virtio block error return */
	if (err < 0) {
		if (err == -ENOSYS)
			*status = VTBLK_S_UNSUPP;
		else
			*status = VTBLK_S_IOERR;
	} else
		*status = VTBLK_S_OK;

	/*
	 * Return the descriptor back to the host.
	 * We wrote 1 byte (our status) to host.
	 */
	vq_relchain(vq, 1);
}
Example #11
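The transmit start routine of the same BERI backend: each mbuf from the interface send queue is copied (via m_mbuftouio()) into a guest buffer after a struct virtio_net_hdr_mrg_rxbuf, and a vring interrupt is raised once packets have been enqueued.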
static void
vtbe_txstart_locked(struct vtbe_softc *sc)
{
	struct virtio_net_hdr_mrg_rxbuf *vnh;
	struct iovec iov[DESC_COUNT];
	struct vqueue_info *vq;
	struct iovec *riov;
	struct ifnet *ifp;
	struct mbuf *m;
	struct uio uio;
	int enqueued;
	int iolen;
	int error;
	int *addr;
	int reg;
	int len;
	int n;

	VTBE_ASSERT_LOCKED(sc);

	/* RX queue */
	vq = &sc->vs_queues[0];
	if (!vq_has_descs(vq)) {
		return;
	}

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		return;
	}

	enqueued = 0;

	if (!vq_ring_ready(vq))
		return;

	vq->vq_save_used = be16toh(vq->vq_used->idx);

	for (;;) {
		if (!vq_has_descs(vq)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

		n = vq_getchain(sc->beri_mem_offset, vq, iov,
			DESC_COUNT, NULL);

		KASSERT(n >= 1 && n <= DESC_COUNT,
			("wrong descriptors num %d", n));

		addr = iov[0].iov_base;
		len = iov[0].iov_len;

		vnh = iov[0].iov_base;
		memset(vnh, 0, sc->hdrsize);
		vnh->num_buffers = htobe16(1);

		iov[0].iov_len -= sc->hdrsize;
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base +
					sc->hdrsize);
		riov = &iov[0];

		uio.uio_resid = iov[0].iov_len;
		uio.uio_iov = riov;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;

		error = m_mbuftouio(&uio, m, 0);
		if (error)
			panic("m_mbuftouio failed\n");

		iolen = (len - iov[0].iov_len - sc->hdrsize);
		vq_relchain(vq, iov, 0, iolen + sc->hdrsize);
		paddr_unmap((void *)addr, len);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		BPF_MTAP(ifp, m);
		m_freem(m);

		++enqueued;
	}

	if (enqueued != 0) {
		reg = htobe32(VIRTIO_MMIO_INT_VRING);
		WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);

		PIO_SET(sc->pio_send, Q_INTR, 1);
	}
}
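Across these examples the same skeleton recurs: check vq_has_descs(), pull chains with vq_getchain(), do the device-specific work, return each chain with vq_relchain(), and finish with vq_endchains(). Below is a minimal sketch of that pattern, assuming the newer bhyve userspace API (the five-argument vq_getchain() that returns a chain index, as in Examples #1, #3, #4 and #7); process_chain() is a hypothetical placeholder for the device-specific work, not part of any real API.

#include <sys/uio.h>
#include <stdint.h>

#include "virtio.h"	/* bhyve's struct vqueue_info and vq_* helpers */

/* Hypothetical device hook: consume n segments, return the used byte count. */
static uint32_t process_chain(void *vsc, struct iovec *iov, int n);

static void
generic_vq_notify(void *vsc, struct vqueue_info *vq)
{
	struct iovec iov[8];
	uint32_t iolen;
	uint16_t idx;
	int n;

	while (vq_has_descs(vq)) {
		/* Collect the next descriptor chain into iov[]. */
		n = vq_getchain(vq, &idx, iov, 8, NULL);

		/* Device-specific handling of the n segments. */
		iolen = process_chain(vsc, iov, n);

		/* Return the chain to the used ring with its byte count. */
		vq_relchain(vq, idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate an interrupt if appropriate. */
}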