/*
 * Helper for sbappendaddrchain: prepend a struct sockaddr* to
 * an mbuf chain.
 */
static inline struct mbuf *
m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
		   const struct sockaddr *asa)
{
	struct mbuf *m;
	const int salen = asa->sa_len;

	KASSERT(solocked(sb->sb_so));

	/* only the first in each chain need be a pkthdr */
	MGETHDR(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
#ifdef notyet
	if (salen > MHLEN) {
		MEXTMALLOC(m, salen, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
#else
	KASSERT(salen <= MHLEN);
#endif
	m->m_len = salen;
	memcpy(mtod(m, void *), asa, salen);
	m->m_next = m0;
	m->m_pkthdr.len = salen + m0->m_pkthdr.len;

	return m;
}
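For context, a minimal sketch of how a caller such as sbappendaddrchain() might use this helper; example_append_one() is hypothetical and the record-linking step is elided.

/*
 * Hypothetical caller sketch (an assumption, not the real sbappendaddrchain):
 * prepend the address to one packet and drop it on allocation failure.
 */
static int
example_append_one(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0)
{
	struct mbuf *m;

	m = m_prepend_sockaddr(sb, m0, asa);
	if (m == NULL) {
		m_freem(m0);		/* allocation failed; drop the packet */
		return 0;
	}
	/* ... link 'm' into the sockbuf as a new record ... */
	return 1;
}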
Example #2
static void
virtif_worker(void *arg)
{
	struct ifnet *ifp = arg;
	struct virtif_sc *sc = ifp->if_softc;
	struct mbuf *m;
	size_t plen = ETHER_MAX_LEN_JUMBO+1;
	ssize_t n;
	int error;

	for (;;) {
		m = m_gethdr(M_WAIT, MT_DATA);
		MEXTMALLOC(m, plen, M_WAIT);

		n = rumpuser_read(sc->sc_tapfd, mtod(m, void *), plen, &error);
		KASSERT(n < ETHER_MAX_LEN_JUMBO);
		if (n <= 0) {
			m_freem(m);
			break;
		}
		m->m_len = m->m_pkthdr.len = n;
		m->m_pkthdr.rcvif = ifp;
		ether_input(ifp, m);
	}

	panic("virtif_workin is a lazy boy %d\n", error);
}
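The allocation pattern in the loop above recurs throughout these examples; here is a minimal sketch of it as a standalone helper (hypothetical, assuming the NetBSD mbuf API used above).

/*
 * Hypothetical helper: allocate a packet-header mbuf with 'plen' bytes of
 * external storage.  With M_WAIT both steps sleep rather than fail, which
 * is why the loop above performs no NULL checks.
 */
static struct mbuf *
example_alloc_rx_mbuf(size_t plen)
{
	struct mbuf *m;

	m = m_gethdr(M_WAIT, MT_DATA);
	MEXTMALLOC(m, plen, M_WAIT);
	return m;
}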
Example #3
static void virtio_net_tx_lazy(struct vmm_netport *port, void *arg, int budget)
{
	u16 head = 0;
	u32 iov_cnt = 0, pkt_len = 0, total_len = 0;
	struct virtio_net_dev *ndev = arg;
	struct virtio_device *dev = ndev->vdev;
	struct virtio_queue *vq = &ndev->vqs[VIRTIO_NET_TX_QUEUE];
	struct virtio_iovec *iov = ndev->tx_iov;
	struct vmm_mbuf *mb;

	while ((budget > 0) && virtio_queue_available(vq)) {
		head = virtio_queue_get_iovec(vq, iov, &iov_cnt, &total_len);

		/* iov[0] is offload info */
		pkt_len = total_len - iov[0].len;

		if (pkt_len <= VIRTIO_NET_MTU) {
			MGETHDR(mb, 0, 0);
			MEXTMALLOC(mb, pkt_len, M_WAIT);
			virtio_iovec_to_buf_read(dev, 
						 &iov[1], iov_cnt - 1,
						 M_BUFADDR(mb), pkt_len);
			mb->m_len = mb->m_pktlen = pkt_len;
			vmm_port2switch_xfer_mbuf(ndev->port, mb);
		}

		virtio_queue_set_used_elem(vq, head, total_len);

		budget--;
	}

	if (virtio_queue_should_signal(vq)) {
		dev->tra->notify(dev, VIRTIO_NET_TX_QUEUE);
	}
}
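The function above bounds its work with a budget so one burst of transmit descriptors cannot monopolize the handler. A self-contained illustration of that pattern, with hypothetical names and no Xvisor APIs:

/*
 * Hypothetical sketch of the "budget" pattern: process at most 'budget'
 * items per call and leave the rest for the next invocation.
 */
struct example_queue {
	int pending;			/* items waiting to be processed */
};

static int
example_tx_poll(struct example_queue *q, int budget)
{
	int done = 0;

	while (budget > 0 && q->pending > 0) {
		q->pending--;		/* process one item */
		done++;
		budget--;
	}
	return done;			/* caller may reschedule if work remains */
}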
Example #4
/**
 * uIP doesn't provide a mechanism to create a raw IP packet, so
 * we trigger the sending of an ECHO_REQUEST by sending ourselves an
 * ECHO_REPLY message with an all-zeroes destination IP address.
 *
 * A global completion variable is used to signal reception of the
 * actual ECHO_REPLY.
 */
int vmm_netstack_send_icmp_echo(u8 *ripaddr, u16 size, u16 seqno, 
			      struct vmm_icmp_echo_reply *reply)
{
	struct vmm_mbuf *mbuf;
	struct uip_icmp_echo_request *echo_req;
	u16 all_zeroes_addr[] = {0, 0}; 
	u8 *tmp;
	u64 timeout = (u64)20000000000;
	u16 ethsize;

	/* Create a mbuf */
	MGETHDR(mbuf, 0, 0);
	ethsize = UIP_ICMP_LLH_LEN + UIP_ICMP_ECHO_DLEN;
	MEXTMALLOC(mbuf, ethsize, 0);
	mbuf->m_len = mbuf->m_pktlen = ethsize;
	/* Skip the src & dst mac addresses as they will be filled by 
	 * uip_netport_loopback_send */
	tmp = mtod(mbuf, u8 *) + 12;
	/* IPv4 ethertype */
	*tmp++ = 0x08;
	*tmp++ = 0x00;
	/* Fill in the echo_request structure embedded in the ICMP payload */
	echo_req = (struct uip_icmp_echo_request *)(tmp + UIP_ICMP_IPH_LEN);
	uip_ipaddr_copy(echo_req->ripaddr, ripaddr);
	echo_req->len = size;
	echo_req->seqno = seqno;
	/* Fill in the IP header */
	uip_create_ip_pkt(tmp, all_zeroes_addr, (ethsize - UIP_LLH_LEN));
	/* Fill in the ICMP header last, as the icmpchksum is calculated
	 * over the entire ICMP message */
	uip_create_icmp_pkt(tmp, ICMP_ECHO_REPLY, 
			    (ethsize - UIP_LLH_LEN - UIP_IPH_LEN), 0);

	/* Update pointer to store uip_ping_reply */
	uip_ping_reply = reply;

	/* Send the mbuf to self to trigger ICMP_ECHO */
	uip_netport_loopback_send(mbuf);
	/* Wait for the reply until timeout */
	vmm_completion_wait_timeout(&uip_ping_done, &timeout);
	/* The callback has copied the reply data before completing, so we
	 * can safely set the pointer to NULL to prevent unwanted callbacks */
	uip_ping_reply = NULL;
	if(timeout == (u64)0) 
		return VMM_EFAIL;
	return VMM_OK;
}
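A minimal caller sketch for the function above; example_ping_once() and the payload size are hypothetical.

/* Hypothetical caller: send one echo request and wait for the reply. */
static int
example_ping_once(u8 *ripaddr)
{
	struct vmm_icmp_echo_reply reply;

	if (vmm_netstack_send_icmp_echo(ripaddr, 56, 1, &reply) != VMM_OK) {
		return VMM_EFAIL;	/* no reply before the timeout */
	}
	/* 'reply' now holds the data copied in by the receive callback */
	return VMM_OK;
}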
Example #5
/*
 * XXX In a perfect world, we wouldn't pass around socket control
 * XXX arguments in mbufs, and this could go away.
 */
int
sockargs(struct mbuf **mp, const void *bf, size_t buflen, int type)
{
	struct sockaddr	*sa;
	struct mbuf	*m;
	int		error;

	/*
	 * We can't allow socket names > UCHAR_MAX in length, since that
	 * will overflow sa_len.  Control data more than a page size in
	 * length is just too much.
	 */
	if (buflen > (type == MT_SONAME ? UCHAR_MAX : PAGE_SIZE))
		return EINVAL;

	/* Allocate an mbuf to hold the arguments. */
	m = m_get(M_WAIT, type);
	/* can't claim; we don't know who to assign it to. */
	if (buflen > MLEN) {
		/*
		 * Won't fit into a regular mbuf, so we allocate just
		 * enough external storage to hold the argument.
		 */
		MEXTMALLOC(m, buflen, M_WAITOK);
	}
	m->m_len = buflen;
	error = copyin(bf, mtod(m, void *), buflen);
	if (error) {
		(void)m_free(m);
		return error;
	}
	ktrkuser(mbuftypes[type], mtod(m, void *), buflen);
	*mp = m;
	if (type == MT_SONAME) {
		sa = mtod(m, struct sockaddr *);
#if BYTE_ORDER != BIG_ENDIAN
		/*
		 * 4.3BSD compat thing - need to stay, since bind(2),
		 * connect(2), sendto(2) were not versioned for COMPAT_43.
		 */
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		sa->sa_len = buflen;
	}

	return 0;
}
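A minimal sketch of how a system-call handler might use sockargs() for a user-supplied name; example_get_sockname() is hypothetical.

/*
 * Hypothetical wrapper: copy in a user-supplied sockaddr, as bind(2)-style
 * handlers do.  Lengths above UCHAR_MAX are rejected because they cannot
 * be represented in sa_len.
 */
static int
example_get_sockname(struct mbuf **nam, const void *name, size_t namelen)
{
	return sockargs(nam, name, namelen, MT_SONAME);
}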
Example #6
/**
 *  Prefetching of an ARP mapping is done by sending ourselves a broadcast
 *  ARP message with ARP_HINT as the opcode.
 */
void vmm_netstack_prefetch_arp_mapping(u8 *ipaddr)
{
	struct vmm_mbuf *mbuf;
	int size;
	u64 timeout = (u64)5000000000;

	/* No need to prefetch our own mapping */
	if(!memcmp(ipaddr, uip_hostaddr, 4)) {
		return;
	}

	/* Create a mbuf */
	MGETHDR(mbuf, 0, 0);
	size = sizeof(struct arp_hdr);
	MEXTMALLOC(mbuf, size, 0);
	mbuf->m_len = mbuf->m_pktlen = size;
	/* Create an ARP HINT packet in the buffer */
	uip_create_broadcast_eth_arp_pkt(mtod(mbuf, u8 *), ipaddr, ARP_HINT);
	/* Send the mbuf to self to trigger ARP prefetch */
	uip_netport_loopback_send(mbuf);

	/* Block till arp prefetch is done */
	vmm_completion_wait_timeout(&uip_arp_prefetch_done, &timeout);
}
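A minimal caller sketch for the ARP prefetch above, combining it with the echo helper from Example #4; example_ping_with_prefetch() and the payload size are hypothetical.

/*
 * Hypothetical usage: warm the ARP cache before sending a ping so the
 * first echo request is not delayed by ARP resolution.
 */
static int
example_ping_with_prefetch(u8 *ripaddr, struct vmm_icmp_echo_reply *reply)
{
	vmm_netstack_prefetch_arp_mapping(ripaddr);
	return vmm_netstack_send_icmp_echo(ripaddr, 56, 1, reply);
}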
/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
	struct mbuf *control)
{
	struct mbuf	*m, *n, *nlast;
	int		space, len;

	KASSERT(solocked(sb->sb_so));

	space = asa->sa_len;

	if (m0 != NULL) {
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("sbappendaddr");
		space += m0->m_pkthdr.len;
#ifdef MBUFTRACE
		m_claimm(m0, sb->sb_mowner);
#endif
	}
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		MCLAIM(n, sb->sb_mowner);
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
	/*
	 * XXX avoid 'comparison always true' warning which isn't easily
	 * avoided.
	 */
	len = asa->sa_len;
	if (len > MLEN) {
		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
	m->m_len = asa->sa_len;
	memcpy(mtod(m, void *), asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");
	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}
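A minimal sketch of a datagram delivery path built on sbappendaddr(); example_deliver() is hypothetical and error accounting is elided.

/*
 * Hypothetical receive path: queue a datagram with its source address and
 * wake any readers; drop it if the socket buffer has no space.  The caller
 * is assumed to hold the socket lock, as sbappendaddr() requires.
 */
static void
example_deliver(struct socket *so, const struct sockaddr *from,
    struct mbuf *m)
{
	if (sbappendaddr(&so->so_rcv, from, m, NULL) == 0) {
		m_freem(m);		/* no sockbuf space: drop */
		return;
	}
	sorwakeup(so);			/* notify readers */
}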