Example #1
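/*
 * RX path: the netswitch delivers an mbuf to the guest by copying it
 * into the next available descriptor chain of the RX virtqueue.
 */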
static int virtio_net_switch2port_xfer(struct vmm_netport *p,
				       struct vmm_mbuf *mb)
{
	u16 head = 0;
	u32 iov_cnt = 0, total_len = 0, pkt_len = 0;
	struct virtio_net_dev *ndev = p->priv;
	struct virtio_queue *vq = &ndev->vqs[VIRTIO_NET_RX_QUEUE];
	struct virtio_iovec *iov = ndev->rx_iov;
	struct virtio_device *dev = ndev->vdev;

	pkt_len = min(VIRTIO_NET_MTU, mb->m_pktlen);

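	/* Fetch the next guest-posted RX descriptor chain, if any. */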
	if (virtio_queue_available(vq)) {
		head = virtio_queue_get_iovec(vq, iov, &iov_cnt, &total_len);
	}

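	/*
	 * Header and data are expected in separate descriptors: zero the
	 * virtio_net_hdr in iov[0] (no offloads are reported) and copy
	 * the packet payload into iov[1].
	 */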
	if (iov_cnt > 1) {
		virtio_iovec_fill_zeros(dev, &iov[0], 1);
		virtio_buf_to_iovec_write(dev, &iov[1], 1,
						M_BUFADDR(mb), pkt_len);
		virtio_queue_set_used_elem(vq, head, iov[0].len + pkt_len);

		if (virtio_queue_should_signal(vq)) {
			dev->tra->notify(dev, VIRTIO_NET_RX_QUEUE);
		}
	}

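	/* The mbuf is consumed either way; with no usable descriptor
	 * chain (iov_cnt <= 1) the packet is silently dropped. */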
	m_freem(mb);

	return VMM_OK;
}
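A brief sketch of how this callback would be wired up at device creation. The switch2port_xfer field name is an assumption suggested by the function's name and signature, not verified against the Xvisor headers; only port->priv is confirmed by the p->priv access above.

/* Hypothetical probe-time wiring, for illustration only. */
ndev->port->priv = ndev;	/* recovered via p->priv above */
ndev->port->switch2port_xfer = virtio_net_switch2port_xfer;	/* assumed hook */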
Example #2
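/*
 * TX path: drain up to "budget" descriptor chains from the TX
 * virtqueue, copy each packet into a freshly allocated mbuf, and
 * forward it to the netswitch.
 */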
static void virtio_net_tx_lazy(struct vmm_netport *port, void *arg, int budget)
{
	u16 head = 0;
	u32 iov_cnt = 0, pkt_len = 0, total_len = 0;
	struct virtio_net_dev *ndev = arg;
	struct virtio_device *dev = ndev->vdev;
	struct virtio_queue *vq = &ndev->vqs[VIRTIO_NET_TX_QUEUE];
	struct virtio_iovec *iov = ndev->tx_iov;
	struct vmm_mbuf *mb;

	while ((budget > 0) && virtio_queue_available(vq)) {
		head = virtio_queue_get_iovec(vq, iov, &iov_cnt, &total_len);

		/* iov[0] is offload info */
		pkt_len = total_len - iov[0].len;

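		/* Drop oversized packets; the descriptor chain is still
		 * returned to the guest below so it can be reused. */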
		if (pkt_len <= VIRTIO_NET_MTU) {
			MGETHDR(mb, 0, 0);
			MEXTMALLOC(mb, pkt_len, M_WAIT);
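			/* Copy everything after the offload header out of
			 * guest memory into the mbuf's external buffer. */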
			virtio_iovec_to_buf_read(dev, &iov[1], iov_cnt - 1,
						 M_BUFADDR(mb), pkt_len);
			mb->m_len = mb->m_pktlen = pkt_len;
			vmm_port2switch_xfer_mbuf(ndev->port, mb);
		}

		virtio_queue_set_used_elem(vq, head, total_len);

		budget--;
	}

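	/* Signal the guest once per batch rather than once per packet. */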
	if (virtio_queue_should_signal(vq)) {
		dev->tra->notify(dev, VIRTIO_NET_TX_QUEUE);
	}
}
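For context, a minimal sketch of how such a lazy handler would be driven: the guest's TX kick is not serviced inline but re-queued with a packet budget. The scheduling helper vmm_port2switch_xfer_lazy and the budget value are assumptions inferred from the callback's (port, arg, budget) signature, not verified Xvisor API.

/* Hypothetical TX-kick handler, for illustration only. */
static void virtio_net_notify_tx(struct virtio_device *dev,
				 struct virtio_net_dev *ndev)
{
	/* Defer the copy-out so one guest cannot hog the switch;
	 * vmm_port2switch_xfer_lazy() and the budget are assumed. */
	vmm_port2switch_xfer_lazy(ndev->port, virtio_net_tx_lazy,
				  ndev, 16 /* hypothetical budget */);
}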
Example #3
/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
	struct mbuf *m0, int flags)
{
	paddr_t lastaddr = 0;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
		if (m->m_len == 0)
			continue;
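		/*
		 * Fast path: cluster and pool-backed mbufs already record
		 * their backing physical address, so the segment can be
		 * built without a pmap lookup.
		 */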
#ifdef POOL_VTOPHYS
		/* XXX Could be better about coalescing. */
		/* XXX Doesn't check boundaries. */
		switch (m->m_flags & (M_EXT|M_CLUSTER)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			lastaddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
 have_addr:
			if (first == 0 && ++seg >= map->_dm_segcnt) {
				error = EFBIG;
				continue;
			}
			map->dm_segs[seg].ds_addr =
			    rumpcomp_pci_virt_to_mach((void *)lastaddr);
			map->dm_segs[seg].ds_len = m->m_len;
			lastaddr += m->m_len;
			continue;

		case 0:
			lastaddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			goto have_addr;

		default:
			break;
		}
#endif
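		/* Slow path: let the generic loader do the per-page
		 * virtual-to-physical translation. */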
		error = _bus_dmamap_load_buffer(t, map, m->m_data,
		    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
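For context, a rough sketch of how a NetBSD driver's transmit path typically consumes this function: load the mbuf chain, sync the map, then hand the resulting physical segments to the device. The driver-side names (xx_encap, xx_post_tx_segment) are hypothetical; only the bus_dma(9) calls and the dm_segs/dm_nsegs/dm_mapsize fields seen above are real API.

/* Hypothetical transmit helper, for illustration only. */
static int
xx_encap(bus_dma_tag_t dmat, bus_dmamap_t txmap, struct mbuf *m0)
{
	int error, i;

	error = bus_dmamap_load_mbuf(dmat, txmap, m0, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);	/* e.g. EFBIG: chain too fragmented */

	/* Make CPU writes to the packet visible to the device. */
	bus_dmamap_sync(dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the physical segments to the (hypothetical) hardware. */
	for (i = 0; i < txmap->dm_nsegs; i++)
		xx_post_tx_segment(txmap->dm_segs[i].ds_addr,
		    txmap->dm_segs[i].ds_len);

	return (0);
}

On transmit completion the driver would typically sync the map again with BUS_DMASYNC_POSTWRITE and then call bus_dmamap_unload() before reusing it.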