Example #1
int
vq_getchain(uint32_t offs, struct vqueue_info *vq,
	struct iovec *iov, int n_iov, uint16_t *flags)
{
	volatile struct vring_desc *vdir, *vindir, *vp;
	int idx, ndesc, n_indir;
	int head, next;
	int i;

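	/* Count the avail ring entries that have not been consumed yet. */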
	idx = vq->vq_last_avail;
	ndesc = (be16toh(vq->vq_avail->idx) - idx);
	if (ndesc == 0)
		return (0);

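	/* Fetch the index of the first descriptor in the next available chain. */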
	head = be16toh(vq->vq_avail->ring[idx & (vq->vq_qsize - 1)]);
	next = head;

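	/* Walk the chain, recording each buffer into iov[]; i counts entries. */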
	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = be16toh(vdir->next)) {
		vdir = &vq->vq_desc[next];
		if ((be16toh(vdir->flags) & VRING_DESC_F_INDIRECT) == 0) {
			_vq_record(offs, i, vdir, iov, n_iov, flags);
			i++;
		} else {
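			/* Indirect chain: addr points to a table of n_indir 16-byte descriptors. */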
			n_indir = be32toh(vdir->len) / 16;
			vindir = paddr_map(offs, be64toh(vdir->addr),
					be32toh(vdir->len));
			next = 0;
			for (;;) {
				vp = &vindir[next];
				_vq_record(offs, i, vp, iov, n_iov, flags);
				i++;
				if ((be16toh(vp->flags) &
					VRING_DESC_F_NEXT) == 0)
					break;
				next = be16toh(vp->next);
			}
			paddr_unmap((void *)vindir, be32toh(vdir->len));
		}

		if ((be16toh(vdir->flags) & VRING_DESC_F_NEXT) == 0)
			return (i);
	}

	return (i);
}
Example #2
void
vq_relchain(struct vqueue_info *vq, struct iovec *iov, int n, uint32_t iolen)
{
	volatile struct vring_used_elem *vue;
	volatile struct vring_used *vu;
	uint16_t head, uidx, mask;
	int i;

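	/* Consume the head of the next available chain from the avail ring. */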
	mask = vq->vq_qsize - 1;
	head = be16toh(vq->vq_avail->ring[vq->vq_last_avail++ & mask]);

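	/* Record the chain id and byte count in the used ring, then publish the new index. */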
	vu = vq->vq_used;
	uidx = be16toh(vu->idx);
	vue = &vu->ring[uidx++ & mask];
	vue->id = htobe32(head);
	vue->len = htobe32(iolen);
	vu->idx = htobe16(uidx);

	/* Unmap every chain buffer except iov[0], which the caller releases. */
	for (i = 1; i < n; i++) {
		paddr_unmap((void *)iov[i].iov_base, iov[i].iov_len);
	}
}
Example #3
static void
vtbe_txstart_locked(struct vtbe_softc *sc)
{
	struct virtio_net_hdr_mrg_rxbuf *vnh;
	struct iovec iov[DESC_COUNT];
	struct vqueue_info *vq;
	struct iovec *riov;
	struct ifnet *ifp;
	struct mbuf *m;
	struct uio uio;
	int enqueued;
	int iolen;
	int error;
	int *addr;
	int reg;
	int len;
	int n;

	VTBE_ASSERT_LOCKED(sc);

	/* Queue 0 is the front-end's RX queue; the backend transmits into it. */
	vq = &sc->vs_queues[0];
	if (!vq_has_descs(vq)) {
		return;
	}

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		return;
	}

	enqueued = 0;

	if (!vq_ring_ready(vq))
		return;

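	/* Remember the current used index before filling the ring. */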
	vq->vq_save_used = be16toh(vq->vq_used->idx);

	for (;;) {
		if (!vq_has_descs(vq)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			break;
		}

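		/* Get the next available descriptor chain for this frame. */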
		n = vq_getchain(sc->beri_mem_offset, vq, iov,
			DESC_COUNT, NULL);

		KASSERT(n >= 1 && n <= DESC_COUNT,
			("wrong number of descriptors: %d", n));

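		/* Save the original mapping so it can be unmapped after the copy. */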
		addr = iov[0].iov_base;
		len = iov[0].iov_len;

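		/* Build the virtio-net header at the start of the buffer. */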
		vnh = iov[0].iov_base;
		memset(vnh, 0, sc->hdrsize);
		vnh->num_buffers = htobe16(1);

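		/* Skip past the header; the frame payload follows it. */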
		iov[0].iov_len -= sc->hdrsize;
		iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base +
					sc->hdrsize);
		riov = &iov[0];

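		/* Describe the remaining buffer space with a uio for the mbuf copy. */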
		uio.uio_resid = iov[0].iov_len;
		uio.uio_iov = riov;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_rw = UIO_READ;

		error = m_mbuftouio(&uio, m, 0);
		if (error)
			panic("m_mbuftouio failed\n");

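		/* iolen is the number of payload bytes actually copied. */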
		iolen = (len - iov[0].iov_len - sc->hdrsize);
		vq_relchain(vq, iov, 0, iolen + sc->hdrsize);
		paddr_unmap((void *)addr, len);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		BPF_MTAP(ifp, m);
		m_freem(m);

		++enqueued;
	}

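	/* Raise a used-ring interrupt to notify the other side. */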
	if (enqueued != 0) {
		reg = htobe32(VIRTIO_MMIO_INT_VRING);
		WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);

		PIO_SET(sc->pio_send, Q_INTR, 1);
	}
}