Code example #1
File: if_tap.c  Project: ryo/netbsd-src
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
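
For context, tap_dev_write() above is the kernel half of a write(2) on /dev/tapN: the "one write, one packet" rule means each write must hand in one complete Ethernet frame. Below is a minimal userland sketch of that, assuming a tap0 interface already exists with the usual /dev/tap0 device node; the frame contents (broadcast destination, experimental EtherType 0x88b5, zero payload) are purely illustrative.

/*
 * Hypothetical userland counterpart to tap_dev_write(): inject one
 * Ethernet frame per write(2).  Assumes tap0 is already created and
 * configured and that /dev/tap0 exists.
 */
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	uint8_t frame[64];	/* dst(6) + src(6) + type(2) + padding */
	int fd;

	fd = open("/dev/tap0", O_RDWR);
	if (fd == -1)
		err(1, "open /dev/tap0");

	memset(frame, 0, sizeof(frame));
	memset(frame, 0xff, 6);		/* broadcast destination */
	frame[12] = 0x88;		/* EtherType 0x88b5 (local experimental) */
	frame[13] = 0xb5;

	/* One write, one packet: the whole frame goes in a single call. */
	if (write(fd, frame, sizeof(frame)) == -1)
		err(1, "write");

	close(fd);
	return 0;
}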
Code example #2
File: qe.c  Project: ryo/netbsd-src
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	uint8_t *bp;

	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_set_rcvif(m, ifp);
	m->m_pkthdr.len = totlen;
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), bp + boff, len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
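
The hand-rolled MGETHDR/MGET/MCLGET loop above is the traditional pattern for copying a contiguous receive buffer into an mbuf chain. The kernel also offers m_devget(9) for the same job; the sketch below is illustrative only and the wrapper name rxbuf_to_mbuf() is made up. Note that the exact m_devget() signature differs across NetBSD versions: older trees take a trailing copy callback (NULL selects a plain memcpy, as shown here), while newer ones drop that argument.

/*
 * Sketch: flat RX buffer -> mbuf chain via m_devget(9) instead of an
 * explicit allocation loop.  The classic 4.4BSD/NetBSD signature is
 * shown; check your tree, since newer NetBSD omits the final
 * copy-callback argument.
 */
#include <sys/param.h>
#include <sys/mbuf.h>

#include <net/if.h>

static struct mbuf *
rxbuf_to_mbuf(struct ifnet *ifp, uint8_t *bp, int totlen)
{
	struct mbuf *m;

	/*
	 * Copies totlen bytes from bp, grabbing clusters as needed,
	 * and records ifp as the receiving interface.
	 */
	m = m_devget((char *)bp, totlen, 0, ifp, NULL);
	return m;	/* NULL on mbuf shortage; caller counts the drop */
}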
Code example #3
File: netconfig.c  Project: huikang/buildrump.sh
int
rump_netconfig_auto_ipv6(const char *ifname)
{
	struct ifnet *ifp;
	int ifindex;
	struct socket *rsso = NULL;
	int rv = 0;
	int hoplimit = 255;
	struct mbuf *m_nam = NULL,
		    *m_outbuf = NULL;
	struct sockaddr_in6 *sin6;
	char *buf;
	struct nd_router_solicit rs;
	struct nd_opt_hdr opt;

	ifp = ifunit(ifname);
	if (ifp == NULL) {
		rv = ENXIO;
		goto out;
	}
	if (ifp->if_sadl->sdl_type != IFT_ETHER) {
		rv = EINVAL;
		goto out;
	}

	rv = socreate(PF_INET6, &rsso, SOCK_RAW, IPPROTO_ICMPV6, curlwp, NULL);
	if (rv != 0)
		goto out;
	ifindex = ifp->if_index;
	rv = so_setsockopt(curlwp, rsso, IPPROTO_IPV6, IPV6_MULTICAST_IF,
			&ifindex, sizeof ifindex);
	if (rv != 0)
		goto out;
	rv = so_setsockopt(curlwp, rsso, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
			&hoplimit, sizeof hoplimit);
	if (rv != 0)
		goto out;

	m_nam = m_get(M_WAIT, MT_SONAME);
	sin6 = mtod(m_nam, struct sockaddr_in6 *);
	sin6->sin6_len = m_nam->m_len = sizeof (*sin6);
	sin6->sin6_family = AF_INET6;
	netconfig_inet_pton6("ff02::2", &sin6->sin6_addr);

#define rslen (sizeof rs + sizeof opt + ETHER_ADDR_LEN)
	CTASSERT(rslen <= MCLBYTES);
	m_outbuf = m_gethdr(M_WAIT, MT_DATA);
	m_clget(m_outbuf, M_WAIT);
	m_outbuf->m_pkthdr.len = m_outbuf->m_len = rslen;


#if __NetBSD_Prereq__(7,99,31)
	m_set_rcvif(m_outbuf, NULL);
#else
	m_outbuf->m_pkthdr.rcvif = NULL;
#endif

#undef rslen
	buf = mtod(m_outbuf, char *);
	memset(&rs, 0, sizeof rs);
	rs.nd_rs_type = ND_ROUTER_SOLICIT;
	memset(&opt, 0, sizeof opt);
	opt.nd_opt_type = ND_OPT_SOURCE_LINKADDR;
	opt.nd_opt_len = 1; /* units of 8 octets */
	memcpy(buf, &rs, sizeof rs);
	buf += sizeof rs;
	memcpy(buf, &opt, sizeof opt);
	buf += sizeof opt;
	memcpy(buf, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);

	ip6_accept_rtadv = 1;
	rv = rump_netconfig_ifup(ifname);
	if (rv != 0)
		goto out;
#if __NetBSD_Prereq__(7,99,12)
	rv = (*rsso->so_send)(rsso, (struct sockaddr *)sin6, NULL, m_outbuf,
			NULL, 0, curlwp);
#else
	rv = (*rsso->so_send)(rsso, m_nam, NULL, m_outbuf, NULL, 0, curlwp);
#endif
	if (rv == 0)
		/* *(so_send)() takes ownership of m_outbuf on success */
		m_outbuf = NULL;
	else
		goto out;

	rv = 0;
out:
	if (m_nam)
		m_freem(m_nam);
	if (m_outbuf)
		m_freem(m_outbuf);
	if (rsso)
		soclose(rsso);
	return rv;
}
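
One possible way to drive this from a rump client, sketched under explicit assumptions: the rump kernel already has a network interface attached (the name "shmif0" is a placeholder), and the prototype is declared by hand because the header that exports rump_netconfig_auto_ipv6() depends on how the netconfig component is packaged in a given buildrump.sh setup.

/*
 * Caller sketch.  "shmif0" and the hand-written prototype are
 * placeholders; adjust them to the interface and headers of your setup.
 */
#include <err.h>
#include <stdio.h>
#include <string.h>

#include <rump/rump.h>

int rump_netconfig_auto_ipv6(const char *);

int
main(void)
{
	int rv;

	if (rump_init() != 0)		/* bootstrap the rump kernel */
		errx(1, "rump_init failed");

	/* Brings the interface up and sends a router solicitation. */
	rv = rump_netconfig_auto_ipv6("shmif0");
	if (rv != 0)
		fprintf(stderr, "auto_ipv6: %s\n", strerror(rv));

	return rv != 0;
}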
Code example #4
File: if_shmem.c  Project: ryo/netbsd-src
static void
shmif_rcv(void *arg)
{
	struct ifnet *ifp = arg;
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem;
	struct mbuf *m = NULL;
	struct ether_header *eth;
	uint32_t nextpkt;
	bool wrap, passup;
	int error;
	const int align
	    = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);

 reup:
	mutex_enter(&sc->sc_mtx);
	while ((ifp->if_flags & IFF_RUNNING) == 0 && !sc->sc_dying)
		cv_wait(&sc->sc_cv, &sc->sc_mtx);
	mutex_exit(&sc->sc_mtx);

	busmem = sc->sc_busmem;

	while (ifp->if_flags & IFF_RUNNING) {
		struct shmif_pkthdr sp;

		if (m == NULL) {
			m = m_gethdr(M_WAIT, MT_DATA);
			MCLGET(m, M_WAIT);
			m->m_data += align;
		}

		DPRINTF(("waiting %d/%" PRIu64 "\n",
		    sc->sc_nextpacket, sc->sc_devgen));
		KASSERT(m->m_flags & M_EXT);

		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		KASSERT(busmem->shm_gen >= sc->sc_devgen);

		/* need more data? */
		if (sc->sc_devgen == busmem->shm_gen && 
		    shmif_nextpktoff(busmem, busmem->shm_last)
		     == sc->sc_nextpacket) {
			shmif_unlockbus(busmem);
			error = rumpcomp_shmif_watchwait(sc->sc_kq);
			if (__predict_false(error))
				printf("shmif_rcv: wait failed %d\n", error);
			membar_consumer();
			continue;
		}

		if (stillvalid_p(sc)) {
			nextpkt = sc->sc_nextpacket;
		} else {
			KASSERT(busmem->shm_gen > 0);
			nextpkt = busmem->shm_first;
			if (busmem->shm_first > busmem->shm_last)
				sc->sc_devgen = busmem->shm_gen - 1;
			else
				sc->sc_devgen = busmem->shm_gen;
			DPRINTF(("dev %p overrun, new data: %d/%" PRIu64 "\n",
			    sc, nextpkt, sc->sc_devgen));
		}

		/*
		 * If our read pointer is ahead the bus last write, our
		 * generation must be one behind.
		 */
		KASSERT(!(nextpkt > busmem->shm_last
		    && sc->sc_devgen == busmem->shm_gen));

		wrap = false;
		nextpkt = shmif_busread(busmem, &sp,
		    nextpkt, sizeof(sp), &wrap);
		KASSERT(sp.sp_len <= ETHERMTU + ETHER_HDR_LEN);
		nextpkt = shmif_busread(busmem, mtod(m, void *),
		    nextpkt, sp.sp_len, &wrap);

		DPRINTF(("shmif_rcv: read packet of length %d at %d\n",
		    sp.sp_len, nextpkt));

		sc->sc_nextpacket = nextpkt;
		shmif_unlockbus(sc->sc_busmem);

		if (wrap) {
			sc->sc_devgen++;
			DPRINTF(("dev %p generation now %" PRIu64 "\n",
			    sc, sc->sc_devgen));
		}

		/*
		 * Ignore packets too short to possibly be valid.
		 * This is hit at least for the first frame on a new bus.
		 */
		if (__predict_false(sp.sp_len < ETHER_HDR_LEN)) {
			DPRINTF(("shmif read packet len %d < ETHER_HDR_LEN\n",
			    sp.sp_len));
			continue;
		}

		m->m_len = m->m_pkthdr.len = sp.sp_len;
		m_set_rcvif(m, ifp);

		/*
		 * Test if we want to pass the packet upwards
		 */
		eth = mtod(m, struct ether_header *);
		if (sp.sp_sender == sc->sc_uuid) {
			passup = false;
		} else if (memcmp(eth->ether_dhost, CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN) == 0) {
			passup = true;
		} else if (ETHER_IS_MULTICAST(eth->ether_dhost)) {
			passup = true;
		} else if (ifp->if_flags & IFF_PROMISC) {
			m->m_flags |= M_PROMISC;
			passup = true;
		} else {
			passup = false;
		}

		if (passup) {
			int bound;
			ifp->if_ipackets++;
			KERNEL_LOCK(1, NULL);
			/* Prevent LWP migrations between CPUs for psref(9) */
			bound = curlwp_bind();
			bpf_mtap(ifp, m);
			if_input(ifp, m);
			curlwp_bindx(bound);
			KERNEL_UNLOCK_ONE(NULL);
			m = NULL;
		}
		/* else: reuse mbuf for a future packet */
	}
	m_freem(m);
	m = NULL;

	if (!sc->sc_dying)
		goto reup;

	kthread_exit(0);
}
Code example #5
File: if_loop.c  Project: ryo/netbsd-src
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	int s, isr = -1;
	int csum_flags;
	int error = 0;
	size_t pktlen;

	MCLAIM(m, ifp->if_mowner);

	KERNEL_LOCK(1, NULL);

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
	if (ifp->if_flags & IFF_LOOPBACK)
		bpf_mtap_af(ifp, dst->sa_family, m);
	m_set_rcvif(m, ifp);

	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		error = (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
		goto out;
	}

	pktlen = m->m_pkthdr.len;
	ifp->if_opackets++;
	ifp->if_obytes += pktlen;

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}
		*(mtod(m, uint32_t *)) = dst->sa_family;

		error = if_transmit_lock(ifp, m);
		goto out;
	}
#endif /* ALTQ */

	m_tag_delete_nonpersistent(m);

#ifdef MPLS
	if (rt != NULL && rt_gettag(rt) != NULL &&
	    rt_gettag(rt)->sa_family == AF_MPLS &&
	    (m->m_flags & (M_MCAST | M_BCAST)) == 0) {
		union mpls_shim msh;
		msh.s_addr = MPLS_GETSADDR(rt);
		if (msh.shim.label != MPLS_LABEL_IMPLNULL) {
			ifq = &mplsintrq;
			isr = NETISR_MPLS;
		}
	}
	if (isr != NETISR_MPLS)
#endif
	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0);
		if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0);
		if (csum_flags != 0 &&
		    IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip6_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		m->m_flags |= M_LOOP;
		pktq = ip6_pktq;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
	        ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		error = EAFNOSUPPORT;
		goto out;
	}

	s = splnet();
	if (__predict_true(pktq)) {
		error = 0;

		if (__predict_true(pktq_enqueue(pktq, m, 0))) {
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;
		} else {
			m_freem(m);
			error = ENOBUFS;
		}
		splx(s);
		goto out;
	}
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		error = ENOBUFS;
		goto out;
	}
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	splx(s);
out:
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}