Example 1
/*
 * Helper for sbappendaddrchain: prepend a struct sockaddr * to
 * an mbuf chain.
 */
static inline struct mbuf *
m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
		   const struct sockaddr *asa)
{
	struct mbuf *m;
	const int salen = asa->sa_len;

	KASSERT(solocked(sb->sb_so));

	/* only the first in each chain need be a pkthdr */
	MGETHDR(m, M_DONTWAIT, MT_SONAME);
	if (m == NULL)
		return NULL;
	MCLAIM(m, sb->sb_mowner);
#ifdef notyet
	if (salen > MHLEN) {
		MEXTMALLOC(m, salen, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}
#else
	KASSERT(salen <= MHLEN);
#endif
	m->m_len = salen;
	memcpy(mtod(m, void *), asa, salen);
	m->m_next = m0;
	m->m_pkthdr.len = salen + m0->m_pkthdr.len;

	return m;
}
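
The pkthdr length bookkeeping above is the key invariant: the new head becomes the chain's packet header and must absorb the old chain total. A minimal userland sketch of the same pattern, with an invented buf structure and prepend_addr helper standing in for the mbuf machinery:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	struct buf *next;
	size_t len;		/* bytes valid in data[] */
	size_t pkthdr_len;	/* total chain length; head node only */
	char data[64];
};

/* Prepend a new head carrying 'addr' in front of chain 'b0'. */
static struct buf *
prepend_addr(struct buf *b0, const void *addr, size_t addrlen)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b == NULL)
		return NULL;
	memcpy(b->data, addr, addrlen);
	b->len = addrlen;
	b->next = b0;
	/* The new head takes over the packet-header role. */
	b->pkthdr_len = addrlen + b0->pkthdr_len;
	return b;
}

int
main(void)
{
	struct buf payload = { .len = 5, .pkthdr_len = 5 };
	struct buf *chain;

	memcpy(payload.data, "hello", 5);
	chain = prepend_addr(&payload, "ADDR", 4);
	if (chain == NULL)
		return 1;
	printf("total length: %zu\n", chain->pkthdr_len);	/* prints 9 */
	free(chain);
	return 0;
}
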
Example 2
/*
 * Get peer socket name.
 */
int
do_sys_getpeername(int fd, struct mbuf **nam)
{
	struct socket	*so;
	struct mbuf	*m;
	int		error;

	if ((error = fd_getsock(fd, &so)) != 0)
		return error;

	m = m_getclr(M_WAIT, MT_SONAME);
	MCLAIM(m, so->so_mowner);

	solock(so);
	if ((so->so_state & SS_ISCONNECTED) == 0)
		error = ENOTCONN;
	else {
		*nam = m;
		error = (*so->so_proto->pr_usrreqs->pr_peeraddr)(so, m);
	}
	sounlock(so);
	if (error != 0)
		m_free(m);
	fd_putfile(fd);
	return error;
}
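
From userland this path is reached through getpeername(2); the ENOTCONN case above is exactly what an unconnected socket produces. A minimal sketch, assuming fd is a connected IPv4 TCP socket (print_peer is an invented helper):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

/* Print the address of the peer connected to 'fd'. */
static int
print_peer(int fd)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);

	/* Fails with ENOTCONN when SS_ISCONNECTED is not set. */
	if (getpeername(fd, (struct sockaddr *)&sin, &len) == -1) {
		perror("getpeername");
		return -1;
	}
	printf("peer: %s:%u\n", inet_ntoa(sin.sin_addr),
	    ntohs(sin.sin_port));
	return 0;
}
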
Example 3
/*
 * IP input routine.  Checksum and byte-swap the header.  If fragmented,
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia;
	struct ifaddr *ifa;
	int hlen = 0, len;
	int downmatch;
	int checkif;
	int srcrt = 0;

	MCLAIM(m, &ip_rx_mowner);
	KASSERT((m->m_flags & M_PKTHDR) != 0);

	/*
	 * If no IP addresses have been set yet but the interfaces
	 * are receiving, can't do anything with incoming packets yet.
	 */
	if (TAILQ_FIRST(&in_ifaddrhead) == NULL)
		goto bad;
	IP_STATINC(IP_STAT_TOTAL);
	/*
	 * If the IP header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire
	 * base IP header is in the first mbuf of the chain.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
				  (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			IP_STATINC(IP_STAT_TOOSMALL);
			return;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
Example 4
int
do_sys_connect(struct lwp *l, int fd, struct mbuf *nam)
{
	struct socket	*so;
	int		error;
	int		interrupted = 0;

	if ((error = fd_getsock(fd, &so)) != 0) {
		m_freem(nam);
		return (error);
	}
	solock(so);
	MCLAIM(nam, so->so_mowner);
	if ((so->so_state & SS_ISCONNECTING) != 0) {
		error = EALREADY;
		goto out;
	}

	error = soconnect(so, nam, l);
	if (error)
		goto bad;
	if ((so->so_state & (SS_NBIO|SS_ISCONNECTING)) ==
	    (SS_NBIO|SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto out;
	}
	while ((so->so_state & SS_ISCONNECTING) != 0 && so->so_error == 0) {
		error = sowait(so, true, 0);
		if (__predict_false((so->so_state & SS_ISABORTING) != 0)) {
			error = EPIPE;
			interrupted = 1;
			break;
		}
		if (error) {
			if (error == EINTR || error == ERESTART)
				interrupted = 1;
			break;
		}
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
 bad:
	if (!interrupted)
		so->so_state &= ~SS_ISCONNECTING;
	if (error == ERESTART)
		error = EINTR;
 out:
	sounlock(so);
	fd_putfile(fd);
	m_freem(nam);
	return error;
}
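
The EINPROGRESS branch above is what a non-blocking caller observes. A common userland counterpart, sketched with standard calls only (connect_nb is an invented name): wait for writability, then harvest the deferred error the same way the kernel harvests so_error.

#include <sys/socket.h>
#include <errno.h>
#include <poll.h>

static int
connect_nb(int fd, const struct sockaddr *sa, socklen_t salen)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int err;
	socklen_t len = sizeof(err);

	if (connect(fd, sa, salen) == 0)
		return 0;			/* connected immediately */
	if (errno != EINPROGRESS)
		return -1;			/* hard failure */
	if (poll(&pfd, 1, -1) == -1)		/* wait for completion */
		return -1;
	/* Fetch the deferred result (the kernel's so_error). */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == -1)
		return -1;
	if (err != 0) {
		errno = err;
		return -1;
	}
	return 0;
}
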
Example 5
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	struct mbuf	*m, *mlast, *n;
	int		space;

	KASSERT(solocked(sb->sb_so));

	space = 0;
	if (control == NULL)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		MCLAIM(m, sb->sb_mowner);
		if (m->m_next == NULL)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next) {
		MCLAIM(m, sb->sb_mowner);
		space += m->m_len;
	}
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb, "sbappendcontrol");
	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	return (1);
}
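
Control chains like the one this function links in are built from sendmsg(2) ancillary data. A sketch of the sending side, passing a descriptor over a unix(4) socket (send_fd is an invented helper):

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd(int sock, int fd_to_pass)
{
	char byte = 0;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {				/* ensures cmsghdr alignment */
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof(int))];
	} cmsgbuf;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&cmsgbuf, 0, sizeof(cmsgbuf));
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	/* The control buffer becomes the MT_CONTROL mbuf chain. */
	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}
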
Example 6
int
do_sys_bind(struct lwp *l, int fd, struct mbuf *nam)
{
	struct socket	*so;
	int		error;

	if ((error = fd_getsock(fd, &so)) != 0) {
		m_freem(nam);
		return (error);
	}
	MCLAIM(nam, so->so_mowner);
	error = sobind(so, nam, l);
	m_freem(nam);
	fd_putfile(fd);
	return error;
}
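
The nam mbuf freed on both paths here carries the sockaddr from bind(2). The userland half, as a minimal sketch (bind_udp is an invented helper):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
bind_udp(int fd, unsigned short port)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(port);
	/* The kernel copies this sockaddr into an MT_SONAME mbuf. */
	return bind(fd, (const struct sockaddr *)&sin, sizeof(sin));
}
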
Example 7
/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(struct ze_softc *sc, int i)
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned.  Offset the
	 * data pointer by 2 bytes so that the IP header following the
	 * 14-byte Ethernet header will be longword aligned.
	 */
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return (0);
}
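
The 2-byte offset compensates for the 14-byte Ethernet header so that the IP header lands on a longword boundary. A quick userland check of the arithmetic (the constants mirror the usual ETHER_HDR_LEN and ETHER_ALIGN values):

#include <stdio.h>

#define ETHER_HDR_LEN	14	/* dst MAC + src MAC + ethertype */
#define ETHER_ALIGN	2	/* pad so the IP header is 4-byte aligned */

int
main(void)
{
	/* Pretend the cluster starts at a page-aligned address. */
	unsigned long base = 0x1000;

	printf("without pad: 0x%lx (mod 4 = %lu)\n",
	    base + ETHER_HDR_LEN, (base + ETHER_HDR_LEN) % 4);
	printf("with pad:    0x%lx (mod 4 = %lu)\n",
	    base + ETHER_ALIGN + ETHER_HDR_LEN,
	    (base + ETHER_ALIGN + ETHER_HDR_LEN) % 4);
	return 0;
}
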
Example 8
/*
 * Get local socket name.
 */
int
do_sys_getsockname(int fd, struct mbuf **nam)
{
	struct socket	*so;
	struct mbuf	*m;
	int		error;

	if ((error = fd_getsock(fd, &so)) != 0)
		return error;

	m = m_getclr(M_WAIT, MT_SONAME);
	MCLAIM(m, so->so_mowner);

	*nam = m;
	solock(so);
	error = (*so->so_proto->pr_usrreqs->pr_sockaddr)(so, m);
	sounlock(so);
	if (error != 0)
		m_free(m);
	fd_putfile(fd);
	return error;
}
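
A typical userland use of this path is recovering the ephemeral port the kernel chose after a bind to port 0; a minimal sketch (ephemeral_port is an invented helper):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Return the bound port of 'fd' in host byte order, or -1. */
static int
ephemeral_port(int fd)
{
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);

	if (getsockname(fd, (struct sockaddr *)&sin, &len) == -1)
		return -1;
	return ntohs(sin.sin_port);
}
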
Example 9
/*
 * Internal version of mount system call for diskless setup.
 * Separate function because we used to call it twice
 * (once for root and once for swap).
 */
static int
nfs_mount_diskless(struct nfs_dlmount *ndmntp, const char *mntname, struct mount **mpp, struct vnode **vpp, struct lwp *l)
	/* mntname:	 mount point name */
{
	struct mount *mp;
	struct mbuf *m;
	int error;

	vfs_rootmountalloc(MOUNT_NFS, mntname, &mp);

	mp->mnt_op = &nfs_vfsops;

	/*
	 * Historical practice expects NFS root file systems to
	 * be initially mounted r/w.
	 */
	mp->mnt_flag &= ~MNT_RDONLY;

	/* Get mbuf for server sockaddr. */
	m = m_get(M_WAIT, MT_SONAME);
	if (m == NULL)
		panic("nfs_mountroot: mget soname for %s", mntname);
	MCLAIM(m, &nfs_mowner);
	m->m_len = ndmntp->ndm_args.addr->sa_len;
	memcpy(mtod(m, void *), ndmntp->ndm_args.addr, m->m_len);

	error = mountnfs(&ndmntp->ndm_args, mp, m, mntname,
			 ndmntp->ndm_args.hostname, vpp, l);
	if (error) {
		vfs_unbusy(mp, false, NULL);
		vfs_destroy(mp);
		printf("nfs_mountroot: mount %s failed: %d\n",
		       mntname, error);
	} else
		*mpp = mp;

	return (error);
}
Example 10
/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
	struct mbuf *control)
{
	struct mbuf	*m, *n, *nlast;
	int		space, len;

	KASSERT(solocked(sb->sb_so));

	space = asa->sa_len;

	if (m0 != NULL) {
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("sbappendaddr");
		space += m0->m_pkthdr.len;
#ifdef MBUFTRACE
		m_claimm(m0, sb->sb_mowner);
#endif
	}
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		MCLAIM(n, sb->sb_mowner);
		if (n->m_next == NULL)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	MCLAIM(m, sb->sb_mowner);
	/*
	 * XXX avoid 'comparison always true' warning which isn't easily
	 * avoided.
	 */
	len = asa->sa_len;
	if (len > MLEN) {
		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
	m->m_len = asa->sa_len;
	memcpy(mtod(m, void *), asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");
	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}
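
The record assembled here (address mbuf first, then optional control, then data) is what recvfrom(2) later peels apart. A sketch of the receiving side (recv_one is an invented helper):

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

static ssize_t
recv_one(int fd, void *buf, size_t buflen)
{
	struct sockaddr_in from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;

	/* 'from' is filled from the MT_SONAME mbuf at the record head. */
	n = recvfrom(fd, buf, buflen, 0,
	    (struct sockaddr *)&from, &fromlen);
	if (n >= 0)
		printf("%zd bytes from port %u\n", n, ntohs(from.sin_port));
	return n;
}
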
Example 11
static
#ifndef GPROF
inline
#endif
int
tcp_build_datapkt(struct tcpcb *tp, struct socket *so, int off,
    long len, int hdrlen, struct mbuf **mp)
{
	struct mbuf *m, *m0;
	uint64_t *tcps;

	tcps = TCP_STAT_GETREF();
	if (tp->t_force && len == 1)
		tcps[TCP_STAT_SNDPROBE]++;
	else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
		tcps[TCP_STAT_SNDREXMITPACK]++;
		tcps[TCP_STAT_SNDREXMITBYTE] += len;
	} else {
		tcps[TCP_STAT_SNDPACK]++;
		tcps[TCP_STAT_SNDBYTE] += len;
	}
	TCP_STAT_PUTREF();
#ifdef notyet
	if ((m = m_copypack(so->so_snd.sb_mb, off,
	    (int)len, max_linkhdr + hdrlen)) == 0)
		return (ENOBUFS);
	/*
	 * m_copypack left space for our hdr; use it.
	 */
	m->m_len += hdrlen;
	m->m_data -= hdrlen;
#else
	MGETHDR(m, M_DONTWAIT, MT_HEADER);
	if (__predict_false(m == NULL))
		return (ENOBUFS);
	MCLAIM(m, &tcp_tx_mowner);

	/*
	 * XXX Because other code assumes headers will fit in
	 * XXX one header mbuf.
	 *
	 * (This code should almost *never* be run.)
	 */
	if (__predict_false((max_linkhdr + hdrlen) > MHLEN)) {
		TCP_OUTPUT_COUNTER_INCR(&tcp_output_bigheader);
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
	}

	m->m_data += max_linkhdr;
	m->m_len = hdrlen;

	/*
	 * To avoid traversing the whole sb_mb chain for correct
	 * data to send, remember last sent mbuf, its offset and
	 * the sent size.  When called the next time, see if the
	 * data to send is directly following the previous transfer.
	 * This is important for large TCP windows.
	 */
	if (off == 0 || tp->t_lastm == NULL ||
	    (tp->t_lastoff + tp->t_lastlen) != off) {
		TCP_OUTPUT_COUNTER_INCR(&tcp_output_predict_miss);
		/*
		 * Either a new packet or a retransmit.
		 * Start from the beginning.
		 */
		tp->t_lastm = so->so_snd.sb_mb;
		tp->t_inoff = off;
	} else {
		TCP_OUTPUT_COUNTER_INCR(&tcp_output_predict_hit);
		tp->t_inoff += tp->t_lastlen;
	}

	/* Traverse forward to next packet */
	while (tp->t_inoff > 0) {
		if (tp->t_lastm == NULL)
			panic("tp->t_lastm == NULL");
		if (tp->t_inoff < tp->t_lastm->m_len)
			break;
		tp->t_inoff -= tp->t_lastm->m_len;
		tp->t_lastm = tp->t_lastm->m_next;
	}

	tp->t_lastoff = off;
	tp->t_lastlen = len;
	m0 = tp->t_lastm;
	off = tp->t_inoff;

	if (len <= M_TRAILINGSPACE(m)) {
		m_copydata(m0, off, (int) len, mtod(m, char *) + hdrlen);
		m->m_len += len;
		TCP_OUTPUT_COUNTER_INCR(&tcp_output_copysmall);
	} else {
Example 12
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt)
{
	int s, isr;
	struct ifqueue *ifq = NULL;

	MCLAIM(m, ifp->if_mowner);
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
#if NBPFILTER > 0
	if (ifp->if_bpf && (ifp->if_flags & IFF_LOOPBACK))
		bpf_mtap_af(ifp->if_bpf, dst->sa_family, m);
#endif
	m->m_pkthdr.rcvif = ifp;

	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
	}

	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		struct altq_pktattr pktattr;
		int error;

		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*(mtod(m, uint32_t *)) = dst->sa_family;

		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
		(*ifp->if_start)(ifp);
		splx(s);
		return (error);
	}
#endif /* ALTQ */

	m_tag_delete_nonpersistent(m);

	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		ifq = &ipintrq;
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		m->m_flags |= M_LOOP;
		ifq = &ip6intrq;
		isr = NETISR_IPV6;
		break;
#endif
#ifdef ISO
	case AF_ISO:
		ifq = &clnlintrq;
		isr = NETISR_ISO;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		ifq = &ipxintrq;
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
	        ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	splx(s);
	return (0);
}
Example 13
/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));


	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
				ADCTL_SIZE1_SHIFT) |
				(nexttx == (AE_NTXDESC - 1) ?
				    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}
Example 14
/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	struct iee_tbd *tbd;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueuing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into an mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    device_xname(sc->sc_dev));
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    	    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    device_xname(sc->sc_dev));
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			tbd = SC_TBD(sc, sc->sc_next_tbd + n);
			tbd->tbd_tb_addr =
			    IEE_SWAPA32(sc->sc_tx_map[t]->dm_segs[n].ds_addr);
			tbd->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			tbd->tbd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_tbd_off +
			    sc->sc_tbd_sz * (sc->sc_next_tbd + n + 1)));
		}
		SC_TBD(sc, sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
		    sc->sc_tbd_off + sc->sc_next_tbd * sc->sc_tbd_sz,
		    sc->sc_tbd_sz * sc->sc_tx_map[t]->dm_nsegs,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
		/* Pass packet to bpf if someone listens. */
		bpf_mtap(ifp, sc->sc_tx_mbuf[t]);
	}
Example 15
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
         struct rtentry *rt)
{
    pktqueue_t *pktq = NULL;
    struct ifqueue *ifq = NULL;
    int s, isr = -1;
    int csum_flags;
    size_t pktlen;

    MCLAIM(m, ifp->if_mowner);
    KASSERT(KERNEL_LOCKED_P());

    if ((m->m_flags & M_PKTHDR) == 0)
        panic("looutput: no header mbuf");
    if (ifp->if_flags & IFF_LOOPBACK)
        bpf_mtap_af(ifp, dst->sa_family, m);
    m->m_pkthdr.rcvif = ifp;

    if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
        m_freem(m);
        return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
                rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
    }

    pktlen = m->m_pkthdr.len;
    ifp->if_opackets++;
    ifp->if_obytes += pktlen;

#ifdef ALTQ
    /*
     * ALTQ on the loopback interface is just for debugging.  It's
     * used only for loopback interfaces, not for a simplex interface.
     */
    if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
            ifp->if_start == lostart) {
        struct altq_pktattr pktattr;
        int error;

        /*
         * If the queueing discipline needs packet classification,
         * do it before prepending the link headers.
         */
        IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

        M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
        if (m == NULL)
            return (ENOBUFS);
        *(mtod(m, uint32_t *)) = dst->sa_family;

        s = splnet();
        IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
        (*ifp->if_start)(ifp);
        splx(s);
        return (error);
    }
#endif /* ALTQ */

    m_tag_delete_nonpersistent(m);

#ifdef MPLS
    if (rt != NULL && rt_gettag(rt) != NULL &&
            rt_gettag(rt)->sa_family == AF_MPLS &&
            (m->m_flags & (M_MCAST | M_BCAST)) == 0) {
        union mpls_shim msh;
        msh.s_addr = MPLS_GETSADDR(rt);
        if (msh.shim.label != MPLS_LABEL_IMPLNULL) {
            ifq = &mplsintrq;
            isr = NETISR_MPLS;
        }
    }
    if (isr != NETISR_MPLS)
#endif
        switch (dst->sa_family) {

#ifdef INET
        case AF_INET:
            csum_flags = m->m_pkthdr.csum_flags;
            KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0);
            if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
                ip_undefer_csum(m, 0, csum_flags);
            }
            m->m_pkthdr.csum_flags = 0;
            pktq = ip_pktq;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            csum_flags = m->m_pkthdr.csum_flags;
            KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0);
            if (csum_flags != 0 &&
                    IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
                ip6_undefer_csum(m, 0, csum_flags);
            }
            m->m_pkthdr.csum_flags = 0;
            m->m_flags |= M_LOOP;
            pktq = ip6_pktq;
            break;
#endif
#ifdef IPX
        case AF_IPX:
            ifq = &ipxintrq;
            isr = NETISR_IPX;
            break;
#endif
#ifdef NETATALK
        case AF_APPLETALK:
            ifq = &atintrq2;
            isr = NETISR_ATALK;
            break;
#endif
        default:
            printf("%s: can't handle af%d\n", ifp->if_xname,
                   dst->sa_family);
            m_freem(m);
            return (EAFNOSUPPORT);
        }

    s = splnet();
    if (__predict_true(pktq)) {
        int error = 0;

        if (__predict_true(pktq_enqueue(pktq, m, 0))) {
            ifp->if_ipackets++;
            ifp->if_ibytes += pktlen;
        } else {
            m_freem(m);
            error = ENOBUFS;
        }
        splx(s);
        return error;
    }
    if (IF_QFULL(ifq)) {
        IF_DROP(ifq);
        m_freem(m);
        splx(s);
        return (ENOBUFS);
    }
    IF_ENQUEUE(ifq, m);
    schednetisr(isr);
    ifp->if_ipackets++;
    ifp->if_ibytes += m->m_pkthdr.len;
    splx(s);
    return (0);
}
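
A simple way to exercise this path is a UDP round trip over loopback: the send side lands in looutput(), which enqueues the packet on ip_pktq for local input instead of touching hardware. A minimal sketch (loopback_roundtrip is an invented helper):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

static int
loopback_roundtrip(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin;
	socklen_t len = sizeof(sin);
	char buf[16];

	if (fd == -1)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = 0;	/* let the kernel pick a port */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1 ||
	    getsockname(fd, (struct sockaddr *)&sin, &len) == -1)
		goto fail;
	/* Send to our own address; delivery goes through looutput(). */
	if (sendto(fd, "ping", 4, 0, (struct sockaddr *)&sin,
	    sizeof(sin)) != 4)
		goto fail;
	if (recv(fd, buf, sizeof(buf), 0) != 4)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}
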
Example 16
static int
do_sys_sendmsg_so(struct lwp *l, int s, struct socket *so, file_t *fp,
    struct msghdr *mp, int flags, register_t *retsize)
{

	struct iovec	aiov[UIO_SMALLIOV], *iov = aiov, *tiov, *ktriov = NULL;
	struct mbuf	*to, *control;
	struct uio	auio;
	size_t		len, iovsz;
	int		i, error;

	ktrkuser("msghdr", mp, sizeof *mp);

	/* If the caller passed us stuff in mbufs, we must free them. */
	to = (mp->msg_flags & MSG_NAMEMBUF) ? mp->msg_name : NULL;
	control = (mp->msg_flags & MSG_CONTROLMBUF) ? mp->msg_control : NULL;
	iovsz = mp->msg_iovlen * sizeof(struct iovec);

	if (mp->msg_flags & MSG_IOVUSRSPACE) {
		if ((unsigned int)mp->msg_iovlen > UIO_SMALLIOV) {
			if ((unsigned int)mp->msg_iovlen > IOV_MAX) {
				error = EMSGSIZE;
				goto bad;
			}
			iov = kmem_alloc(iovsz, KM_SLEEP);
		}
		if (mp->msg_iovlen != 0) {
			error = copyin(mp->msg_iov, iov, iovsz);
			if (error)
				goto bad;
		}
		mp->msg_iov = iov;
	}

	auio.uio_iov = mp->msg_iov;
	auio.uio_iovcnt = mp->msg_iovlen;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;			/* XXX */
	auio.uio_resid = 0;
	KASSERT(l == curlwp);
	auio.uio_vmspace = l->l_proc->p_vmspace;

	for (i = 0, tiov = mp->msg_iov; i < mp->msg_iovlen; i++, tiov++) {
		/*
		 * Writes return ssize_t because -1 is returned on error.
		 * Therefore, we must restrict the length to SSIZE_MAX to
		 * avoid garbage return values.
		 */
		auio.uio_resid += tiov->iov_len;
		if (tiov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
			error = EINVAL;
			goto bad;
		}
	}

	if (mp->msg_name && to == NULL) {
		error = sockargs(&to, mp->msg_name, mp->msg_namelen,
		    MT_SONAME);
		if (error)
			goto bad;
	}

	if (mp->msg_control) {
		if (mp->msg_controllen < CMSG_ALIGN(sizeof(struct cmsghdr))) {
			error = EINVAL;
			goto bad;
		}
		if (control == NULL) {
			error = sockargs(&control, mp->msg_control,
			    mp->msg_controllen, MT_CONTROL);
			if (error)
				goto bad;
		}
	}

	if (ktrpoint(KTR_GENIO) && iovsz > 0) {
		ktriov = kmem_alloc(iovsz, KM_SLEEP);
		memcpy(ktriov, auio.uio_iov, iovsz);
	}

	if (mp->msg_name)
		MCLAIM(to, so->so_mowner);
	if (mp->msg_control)
		MCLAIM(control, so->so_mowner);

	len = auio.uio_resid;
	error = (*so->so_send)(so, to, &auio, NULL, control, flags, l);
	/* Protocol is responsible for freeing 'control' */
	control = NULL;

	if (error) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE && (fp->f_flag & FNOSIGPIPE) == 0 &&
		    (flags & MSG_NOSIGNAL) == 0) {
			mutex_enter(proc_lock);
			psignal(l->l_proc, SIGPIPE);
			mutex_exit(proc_lock);
		}
	}
	if (error == 0)
		*retsize = len - auio.uio_resid;

bad:
	if (ktriov != NULL) {
		ktrgeniov(s, UIO_WRITE, ktriov, *retsize, error);
		kmem_free(ktriov, iovsz);
	}

	if (iov != aiov)
		kmem_free(iov, iovsz);
	if (to)
		m_freem(to);
	if (control)
		m_freem(control);

	return error;
}
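
The iovec copy-in and SSIZE_MAX accounting above correspond to a plain sendmsg(2) gather from userland; a minimal sketch (send_two is an invented helper):

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static ssize_t
send_two(int fd, const struct sockaddr *to, socklen_t tolen)
{
	struct iovec iov[2] = {
		{ .iov_base = (void *)"hello, ", .iov_len = 7 },
		{ .iov_base = (void *)"world",   .iov_len = 5 },
	};
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = (void *)to;	/* becomes the MT_SONAME mbuf */
	msg.msg_namelen = tolen;
	msg.msg_iov = iov;		/* lengths summed into uio_resid */
	msg.msg_iovlen = 2;
	return sendmsg(fd, &msg, 0);
}
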
Example 17
/*
 * IP output.  The packet in mbuf chain m contains a skeletal IP
 * header (with len, off, ttl, proto, tos, src, dst).
 * The mbuf chain containing the packet will be freed.
 * The mbuf opt, if present, will not be freed.
 */
int
ip_output(struct mbuf *m0, ...)
{
	struct rtentry *rt;
	struct ip *ip;
	struct ifnet *ifp;
	struct mbuf *m = m0;
	int hlen = sizeof (struct ip);
	int len, error = 0;
	struct route iproute;
	const struct sockaddr_in *dst;
	struct in_ifaddr *ia;
	struct ifaddr *xifa;
	struct mbuf *opt;
	struct route *ro;
	int flags, sw_csum;
	u_long mtu;
	struct ip_moptions *imo;
	struct socket *so;
	va_list ap;
	struct secpolicy *sp = NULL;
	bool natt_frag = false;
	bool __unused done = false;
	union {
		struct sockaddr		dst;
		struct sockaddr_in	dst4;
	} u;
	struct sockaddr *rdst = &u.dst;	/* real IP destination, as opposed
					 * to the nexthop
					 */

	len = 0;
	va_start(ap, m0);
	opt = va_arg(ap, struct mbuf *);
	ro = va_arg(ap, struct route *);
	flags = va_arg(ap, int);
	imo = va_arg(ap, struct ip_moptions *);
	so = va_arg(ap, struct socket *);
	va_end(ap);

	MCLAIM(m, &ip_tx_mowner);

	KASSERT((m->m_flags & M_PKTHDR) != 0);
	KASSERT((m->m_pkthdr.csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0);
	KASSERT((m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) !=
	    (M_CSUM_TCPv4|M_CSUM_UDPv4));

	if (opt) {
		m = ip_insertoptions(m, opt, &len);
		if (len >= sizeof(struct ip))
			hlen = len;
	}
	ip = mtod(m, struct ip *);

	/*
	 * Fill in IP header.
	 */
	if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) {
		ip->ip_v = IPVERSION;
		ip->ip_off = htons(0);
		/* ip->ip_id filled in after we find out source ia */
		ip->ip_hl = hlen >> 2;
		IP_STATINC(IP_STAT_LOCALOUT);
	} else {
Example 18
/*
 * How frame reception is done:
 * Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
 * Each RBD points to the data area of an mbuf cluster. The RFDs are linked
 * together in a circular list.  sc->sc_rx_done is the count of RFDs already
 * processed, i.e. the number of the RFD to check first for a new frame at
 * the next RX interrupt.  Upon successful reception of a frame, the mbuf
 * cluster is handed to the upper protocol layers, a new mbuf cluster is
 * allocated, and the RFD / RBD are reinitialized accordingly.
 *
 * When an RFD list overrun occurs, the whole RFD and RBD lists are
 * reinitialized and frame reception is started again.
 */
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n, col;
	uint16_t status, count, cmd;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return 1;
	}
	IEE_SCBSYNC(sc, BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB(sc)->scb_status;
	scb_cmd = SC_SCB(sc)->scb_cmd;
	for (;;) {
		rfd = SC_RFD(sc, sc->sc_rx_done);
		IEE_RFDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = rfd->rfd_status;
		if ((status & IEE_RFD_C) == 0) {
			IEE_RFDSYNC(sc, sc->sc_rx_done, BUS_DMASYNC_PREREAD);
			break;
		}
		rfd->rfd_status = 0;
		IEE_RFDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* At least one packet was received. */
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
		IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SC_RBD(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		IEE_RBDSYNC(sc, (sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		rbd = SC_RBD(sc, sc->sc_rx_done);
		IEE_RBDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		count = rbd->rbd_count;
		if ((status & IEE_RFD_OK) == 0
		    || (count & IEE_RBD_EOF) == 0
		    || (count & IEE_RBD_F) == 0){
			/* Receive error, skip frame and reuse buffer. */
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			IEE_RBDSYNC(sc, sc->sc_rx_done,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rfd_count=0x%.4x\n",
			    device_xname(sc->sc_dev),
			    ++sc->sc_rx_err, status, count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    device_xname(sc->sc_dev));
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    device_xname(sc->sc_dev));
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES - 2;
		new_mbuf->m_data += 2;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, rx_map,
		    new_mbuf, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    rx_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bpf_mtap(ifp, rx_mbuf);
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = IEE_SWAPA32(rx_map->dm_segs[0].ds_addr);
		IEE_RBDSYNC(sc, sc->sc_rx_done,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			rfd = SC_RFD(sc, n);
			rbd = SC_RBD(sc, n);
			rfd->rfd_cmd = IEE_RFD_SF;
			rfd->rfd_link_addr =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rfd_off
			    + sc->sc_rfd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_next_rbd =
			    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off
			    + sc->sc_rbd_sz * ((n + 1) % IEE_NRFD)));
			rbd->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			rbd->rbd_rb_addr =
			    IEE_SWAPA32(sc->sc_rx_map[n]->dm_segs[0].ds_addr);
		}
		SC_RFD(sc, 0)->rfd_rbd_addr =
		    IEE_SWAPA32(IEE_PHYS_SHMEM(sc->sc_rbd_off));
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, sc->sc_rfd_off,
		    sc->sc_rfd_sz * IEE_NRFD + sc->sc_rbd_sz * IEE_NRFD,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    device_xname(sc->sc_dev));
	}

	if (sc->sc_next_cb != 0) {
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = SC_CB(sc, sc->sc_next_cb - 1)->cb_status;
		IEE_CBSYNC(sc, sc->sc_next_cb - 1,
		    BUS_DMASYNC_PREREAD);
		if ((status & IEE_CB_C) != 0) {
			/* CMD list finished */
			ifp->if_timer = 0;
			if (sc->sc_next_tbd != 0) {
				/* A TX CMD list finished, cleanup */
				for (n = 0 ; n < sc->sc_next_cb ; n++) {
					m_freem(sc->sc_tx_mbuf[n]);
					sc->sc_tx_mbuf[n] = NULL;
					bus_dmamap_unload(sc->sc_dmat,
					    sc->sc_tx_map[n]);
					IEE_CBSYNC(sc, n,
				    	    BUS_DMASYNC_POSTREAD|
					    BUS_DMASYNC_POSTWRITE);
					status = SC_CB(sc, n)->cb_status;
					IEE_CBSYNC(sc, n,
				    	    BUS_DMASYNC_PREREAD);
					if ((status & IEE_CB_COL) != 0 &&
					    (status & IEE_CB_MAXCOL) == 0)
						col = 16;
					else
						col = status
						    & IEE_CB_MAXCOL;
					sc->sc_tx_col += col;
					if ((status & IEE_CB_OK) != 0) {
						ifp->if_opackets++;
						ifp->if_collisions += col;
					}
				}
				sc->sc_next_tbd = 0;
				ifp->if_flags &= ~IFF_OACTIVE;
			}
			for (n = 0 ; n < sc->sc_next_cb; n++) {
				/*
				 * Check if a CMD failed, but ignore TX errors.
				 */
				IEE_CBSYNC(sc, n,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				cmd = SC_CB(sc, n)->cb_cmd;
				status = SC_CB(sc, n)->cb_status;
				IEE_CBSYNC(sc, n, BUS_DMASYNC_PREREAD);
				if ((cmd & IEE_CB_CMD) != IEE_CB_CMD_TR &&
				    (status & IEE_CB_OK) == 0)
					printf("%s: iee_intr: scb_status=0x%x "
					    "scb_cmd=0x%x failed command %d: "
					    "cb_status[%d]=0x%.4x "
					    "cb_cmd[%d]=0x%.4x\n",
					    device_xname(sc->sc_dev),
					    scb_status, scb_cmd,
					    ++sc->sc_cmd_err,
					    n, status, n, cmd);
			}
			sc->sc_next_cb = 0;
			if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
				iee_cb_setup(sc, IEE_CB_CMD_MCS |
				    IEE_CB_S | IEE_CB_EL | IEE_CB_I);
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			} else
				/* Try to get deferred packets going. */
				iee_start(ifp);
		}
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP32(SC_SCB(sc)->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_crc_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP32(SC_SCB(sc)->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", device_xname(sc->sc_dev),
		    sc->sc_align_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP32(SC_SCB(sc)->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_resource_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP32(SC_SCB(sc)->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_overrun_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP32(SC_SCB(sc)->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP32(SC_SCB(sc)->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n",
		    device_xname(sc->sc_dev), sc->sc_short_fr_err);
	}
	IEE_SCBSYNC(sc, BUS_DMASYNC_PREREAD);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return 1;
}