Example #1
static void
pci_vtnet_tap_rx(struct pci_vtnet_softc *sc)
{
	struct vqueue_info *vq;
	struct virtio_net_rxhdr *vrx;
	uint8_t *buf;
	int len, n;
	struct iovec iov;

	/*
	 * Should never be called without a valid tap fd
	 */
	assert(sc->vsc_tapfd != -1);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	vq_startchains(vq);
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf));
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain, which should have just
		 * one descriptor in it.
		 * ??? allow guests to use multiple descs?
		 */
		n = vq_getchain(vq, &iov, 1, NULL);
		assert(n == 1);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov.iov_base;
		buf = (uint8_t *)(vrx + 1);

		len = read(sc->vsc_tapfd, buf,
			   iov.iov_len - sizeof(struct virtio_net_rxhdr));

		if (len < 0 && errno == EWOULDBLOCK) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers, which is always 1 without TSO
		 * support.
		 */
		memset(vrx, 0, sizeof(struct virtio_net_rxhdr));
		vrx->vrh_bufs = 1;

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, len + sizeof(struct virtio_net_rxhdr));
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
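The tap path above assumes a non-blocking tap descriptor and a scratch buffer dummybuf for packets that have to be dropped; neither the buffer nor the code that dispatches readability of the tap fd to pci_vtnet_tap_rx() is shown. Below is a minimal, hedged sketch of that wiring, assuming bhyve's mevent loop and the driver's own types (struct pci_vtnet_softc, enum ev_type). The callback name, the rx_mtx/rx_in_progress fields and the dummybuf size are illustrative assumptions, not part of the example.

#include <pthread.h>
#include <stdint.h>

static uint8_t dummybuf[2048];	/* scratch space for dropped packets (size assumed) */

static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	/* Serialize rx processing with other users of the rx side. */
	pthread_mutex_lock(&sc->rx_mtx);
	sc->rx_in_progress = 1;
	pci_vtnet_tap_rx(sc);
	sc->rx_in_progress = 0;
	pthread_mutex_unlock(&sc->rx_mtx);
}

/*
 * Registered once the tap fd is open and set non-blocking, e.g.:
 *
 *	sc->vsc_mevp = mevent_add(sc->vsc_tapfd, EVF_READ,
 *	    pci_vtnet_rx_callback, sc);
 */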

Example #2
static void
pci_vtnet_netmap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid netmap descriptor
	 */
	assert(sc->vsc_nmd != NULL);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = pci_vtnet_netmap_readv(sc->vsc_nmd, riov, n);

		if (len == 0) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}
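The netmap path relies on rx_iov_trim() to reserve room for the virtio-net header at the front of the guest's descriptor chain and to return an iovec describing only the payload area that pci_vtnet_netmap_readv() then fills. That helper is not shown above; the following is a minimal sketch of what it is assumed to do, under the assumption that the guest's first segment is at least vhdrlen bytes long (behaviour inferred from how the example uses it, not taken from the example itself).

#include <assert.h>
#include <stdint.h>
#include <sys/uio.h>

static struct iovec *
rx_iov_trim(struct iovec *iov, int *niov, int tlen)
{
	struct iovec *riov;

	/* Assume the first guest segment covers at least the header. */
	assert(iov[0].iov_len >= (size_t)tlen);

	iov[0].iov_len -= tlen;
	if (iov[0].iov_len == 0) {
		/* The header consumed the whole first segment; skip it. */
		assert(*niov > 1);
		*niov -= 1;
		riov = &iov[1];
	} else {
		/* Step past the header within the first segment. */
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen);
		riov = &iov[0];
	}

	return (riov);
}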