Example #1
/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend a VLAN encapsulation header.
 *
 * Cons up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_ether(caddr_t arg, struct mbuf *m, u_int direction)
{
#if NVLAN > 0
	struct m_hdr mh;
	struct ether_vlan_header evh;

	if ((m->m_flags & M_VLANTAG) == 0)
#endif
	{
		bpf_mtap(arg, m, direction);
		return;
	}

#if NVLAN > 0
	bcopy(mtod(m, char *), &evh, ETHER_HDR_LEN);
	evh.evl_proto = evh.evl_encap_proto;
	evh.evl_encap_proto = htons(ETHERTYPE_VLAN);
	evh.evl_tag = htons(m->m_pkthdr.ether_vtag);
	m->m_len -= ETHER_HDR_LEN;
	m->m_data += ETHER_HDR_LEN;

	mh.mh_flags = 0;
	mh.mh_next = m;
	mh.mh_len = sizeof(evh);
	mh.mh_data = (caddr_t)&evh;

	bpf_mtap(arg, (struct mbuf *) &mh, direction);
	m->m_flags |= mh.mh_flags & M_FILDROP;

	m->m_len += ETHER_HDR_LEN;
	m->m_data -= ETHER_HDR_LEN;
#endif
}
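For orientation, a hedged sketch of the call site this helper is written for: an Ethernet driver's receive path holding a VLAN-tagged mbuf and wanting BPF listeners to see the frame with its 802.1Q header restored. The surrounding names are illustrative, not taken from a specific driver:

#if NBPFILTER > 0
	/* Illustrative caller; m carries the tag in m_pkthdr.ether_vtag. */
	if (ifp->if_bpf)
		bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif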
Example #2
static int
tap_dev_close(struct tap_softc *sc)
{
	struct ifnet *ifp;
	int s;

	s = splnet();
	/* Let tap_start handle packets again */
	ifp = &sc->sc_ec.ec_if;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Purge output queue */
	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			ifp->if_opackets++;
			bpf_mtap(ifp, m);
			m_freem(m);
		}
	}
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}
	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);

	return (0);
}
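A note on the two bpf_mtap() call styles seen across these examples: the OpenBSD-derived code passes the BPF descriptor plus a direction and guards the call itself, while the NetBSD-derived code, as in the example above, calls bpf_mtap(ifp, m) unconditionally and relies on the wrapper to check for listeners. A hedged side-by-side sketch:

	/* OpenBSD style: the caller checks for a listener and supplies a direction. */
#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

	/* NetBSD style: always called; the listener check happens inside the
	 * bpf_mtap() wrapper, so no guard is needed at the call site. */
	bpf_mtap(ifp, m);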
Example #3
void
smsc_start(struct ifnet *ifp)
{
	struct smsc_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	/* Don't send anything if there is no link or controller is busy. */
	if ((sc->sc_flags & SMSC_FLAG_LINK) == 0 ||
		(ifp->if_flags & IFF_OACTIVE) != 0) {
		return;
	}

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (smsc_encap(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}
	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	ifp->if_flags |= IFF_OACTIVE;
}
Example #4
void
imxenet_recv(struct imxenet_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
	    0, sc->rbdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
	    0, sc->rxdma.dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (!(sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_EMPTY))
	{
		struct mbuf *m;
		m = imxenet_newbuf();

		if (m == NULL) {
			ifp->if_ierrors++;
			goto done;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = sc->rx_desc_base[sc->cur_rx].data_length;
		m_adj(m, ETHER_ALIGN);

		memcpy(mtod(m, char *), sc->rx_buffer_base[sc->cur_rx].data,
		    sc->rx_desc_base[sc->cur_rx].data_length);

		sc->rx_desc_base[sc->cur_rx].status |= ENET_RXD_EMPTY;
		sc->rx_desc_base[sc->cur_rx].data_length = 0;

		bus_dmamap_sync(sc->rbdma.dma_tag, sc->rbdma.dma_map,
		    ENET_MAX_PKT_SIZE * sc->cur_rx, ENET_MAX_PKT_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
		    sizeof(struct imxenet_buf_desc) * sc->cur_rx,
		    sizeof(struct imxenet_buf_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->rx_desc_base[sc->cur_rx].status & ENET_RXD_WRAP)
			sc->cur_rx = 0;
		else
			sc->cur_rx++;

		/* push the packet up */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

done:
	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);
}
Example #5
void
pairstart(struct ifnet *ifp)
{
	struct pair_softc	*sc = (struct pair_softc *)ifp->if_softc;
	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*pairedifp;
	struct mbuf		*m;

	pairedifp = if_get(sc->sc_pairedif);

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		ifp->if_opackets++;
		if (pairedifp != NULL) {
			if (m->m_flags & M_PKTHDR)
				m_resethdr(m);
			ml_enqueue(&ml, m);
		} else
			m_freem(m);
	}

	if (pairedifp != NULL) {
		if_input(pairedifp, &ml);
		if_put(pairedifp);
	}
}
Example #6
/*
 * This is the function where we SEND packets.
 *
 * There is no 'receive' equivalent.  A typical driver will get
 * interrupts from the hardware, and from there will inject new packets
 * into the network stack.
 *
 * Once handled, a packet must be freed.  A real driver might not be able
 * to fit all the pending packets into the hardware, and is allowed to
 * return before having sent all the packets.  It should then use the
 * if_flags flag IFF_OACTIVE to notify the upper layer.
 *
 * There are also other flags one should check, such as IFF_PAUSE.
 *
 * It is our duty to make packets available to BPF listeners.
 *
 * You should be aware that this function is called by the Ethernet layer
 * at splnet().
 *
 * When the device is opened, we have to pass the packet(s) to the
 * userland.  For that we stay in OACTIVE mode while the userland gets
 * the packets, and we send a signal to the processes waiting to read.
 *
 * wakeup(sc) is the counterpart to the tsleep call in
 * tap_dev_read, while selnotify() is used for kevent(2) and
 * poll(2) (which includes select(2)) listeners.
 */
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for(;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			ifp->if_opackets++;
			bpf_mtap(ifp, m0);

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}
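The comment above pairs wakeup(sc) with a tsleep(9) call in tap_dev_read. That function is not reproduced here, so the following is only a minimal sketch, assuming the reader blocks on the softc until tap_start() signals that the send queue is non-empty:

	/* Sketch of the reader side: sleep until tap_start() wakes us. */
	while (IFQ_IS_EMPTY(&ifp->if_snd)) {
		error = tsleep(sc, PSOCK | PCATCH, "tap", 0);
		if (error)
			return error;
	}
	IFQ_DEQUEUE(&ifp->if_snd, m);
	/* ...then copy the chain out with uiomove() and m_freem() it... */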
Example #7
void
smsc_start(struct ifnet *ifp)
{
    struct smsc_softc	*sc = ifp->if_softc;
    struct mbuf		*m_head = NULL;

    /* Don't send anything if there is no link or controller is busy. */
    if ((sc->sc_flags & SMSC_FLAG_LINK) == 0) {
        return;
    }

    if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING)
        return;

    IFQ_POLL(&ifp->if_snd, m_head);
    if (m_head == NULL)
        return;

    if (smsc_encap(sc, m_head, 0)) {
        ifp->if_flags |= IFF_OACTIVE;
        return;
    }
    IFQ_DEQUEUE(&ifp->if_snd, m_head);

    bpf_mtap(ifp, m_head);

    ifp->if_flags |= IFF_OACTIVE;

    /*
     * Set a timeout in case the chip goes out to lunch.
     */
    ifp->if_timer = 5;
}
Example #8
/*
 * Pass a packet to the higher levels.
 */
static inline void
be_read(struct be_softc *sc, int idx, int len)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
#ifdef BEDEBUG
		if (sc->sc_debug)
			printf("%s: invalid packet size %d; dropping\n",
			    ifp->if_xname, len);
#endif
		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = be_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	bpf_mtap(ifp, m);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}
Example #9
void
mace_read(struct mc_softc *sc, caddr_t pkt, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef MCDEBUG
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
#endif
		ifp->if_ierrors++;
		return;
	}

	m = mace_get(sc, pkt, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	/* Pass the packet to any BPF listeners. */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* Pass the packet up. */
	ether_input_mbuf(ifp, m);
}
Example #10
/*
 * Pass a packet to the higher levels.
 */
void
elread(struct el_softc *sc, int len)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHER_MAX_LEN) {
		printf("%s: invalid packet size %d; dropping\n",
		    device_xname(sc->sc_dev), len);
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = elget(sc, len);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	bpf_mtap(ifp, m);

	if_percpuq_enqueue(ifp->if_percpuq, m);
}
Example #11
/*
 * Encapsulate a packet of type family for the local net.
 */
void
mc_start(struct ifnet *ifp)
{
	struct mc_softc	*sc = ifp->if_softc;
	struct mbuf	*m;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (1) {
		if (ifp->if_flags & IFF_OACTIVE)
			return;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		/*
		 * If bpf is listening on this interface, let it
		 * see the packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		ifp->if_flags |= IFF_OACTIVE;
		maceput(sc, m);

		ifp->if_opackets++;		/* # of pkts */
	}
}
Example #12
static void
virtif_start(struct ifnet *ifp)
{
	struct virtif_sc *sc = ifp->if_softc;
	struct mbuf *m, *m0;
	struct iovec io[LB_SH];
	int i;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (!m0) {
			break;
		}

		m = m0;
		for (i = 0; i < LB_SH && m; ) {
			if (m->m_len) {
				io[i].iov_base = mtod(m, void *);
				io[i].iov_len = m->m_len;
				i++;
			}
			m = m->m_next;
		}
		if (i == LB_SH && m)
			panic("lazy bum");
		bpf_mtap(ifp, m0);

		VIFHYPER_SEND(sc->sc_viu, io, i);

		m_freem(m0);
		ifp->if_opackets++;
	}

	/* Send queue drained; let the stack hand us packets again. */
	ifp->if_flags &= ~IFF_OACTIVE;
}
Example #13
/*
 * sonic_read -- pull packet off interface and forward to
 * appropriate protocol handler
 */
static inline int 
sonic_read(struct sn_softc *sc, void *pkt, int len)
{
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;

#ifdef SNDEBUG
	{
		struct ether_header *et = pkt;	/* header for the debug printout */

		printf("%s: rcvd %p len=%d type=0x%x from %s",
		    device_xname(sc->sc_dev), et, len, htons(et->ether_type),
		    ether_sprintf(et->ether_shost));
		printf(" (to %s)\n", ether_sprintf(et->ether_dhost));
	}
#endif /* SNDEBUG */

	if (len < (ETHER_MIN_LEN - ETHER_CRC_LEN) ||
	    len > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
		printf("%s: invalid packet length %d bytes\n",
		    device_xname(sc->sc_dev), len);
		return 0;
	}

	m = sonic_get(sc, pkt, len);
	if (m == NULL)
		return 0;
	/* Pass the packet to any BPF listeners. */
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	return 1;
}
Example #14
/*
 * Pass a packet to the higher levels.
 */
inline void
qe_read(struct qe_softc *sc, int idx, int len)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	bpf_mtap(ifp, m);
	/* Pass the packet up. */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
Example #15
void
imxenet_start(struct ifnet *ifp)
{
	struct imxenet_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (imxenet_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

		ifp->if_opackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		m_freem(m_head);
	}
}
Example #16
/* Async. stream output */
static void
fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
{
	struct mbuf *m;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	int i = 0;

	xfer = NULL;
	xferq = fwe->fd.fc->atq;
	while (xferq->queued < xferq->maxq) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		xfer = fw_xfer_alloc();
		if (xfer == NULL) {
			return;
		}
#if __FreeBSD_version >= 500000
		BPF_MTAP(ifp, m);
#else
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);
#endif

		xfer->send.off = 0;
		xfer->spd = 2;
		xfer->fc = fwe->fd.fc;
		xfer->retry_req = fw_asybusy;
		xfer->sc = (caddr_t)fwe;
		xfer->act.hand = fwe_output_callback;

		/* keep ip packet alignment for alpha */
		M_PREPEND(m, ALIGN_PAD, M_DONTWAIT);
		fp = (struct fw_pkt *)&xfer->dst; /* XXX */
		xfer->dst = *((int32_t *)&fwe->pkt_hdr);
		fp->mode.stream.len = htons(m->m_pkthdr.len);
		xfer->send.buf = (caddr_t) fp;
		xfer->mbuf = m;
		xfer->send.len = m->m_pkthdr.len + HDR_LEN;

		i++;
		if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
			/* error */
			ifp->if_oerrors ++;
			/* XXX set error code */
			fwe_output_callback(xfer);
		} else {
			ifp->if_opackets ++;
		}
	}
#if 0
	if (i > 1)
		printf("%d queued\n", i);
#endif
	if (xfer != NULL)
		xferq->start(xfer->fc);
}
Example #17
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
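The "one write, one packet" rule above means the userland side injects exactly one Ethernet frame per write(2) on the tap device node. A self-contained sketch, with the device path and the minimal error handling as assumptions:

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

/* Illustrative userland counterpart of tap_dev_write(): one write() per frame. */
static int
inject_frame(const void *frame, size_t len)
{
	int fd = open("/dev/tap0", O_RDWR);	/* device node name assumed */
	ssize_t n;

	if (fd == -1)
		return -1;
	n = write(fd, frame, len);	/* frame starts with an Ethernet header */
	close(fd);
	return (n == (ssize_t)len) ? 0 : -1;
}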
Example #18
/*
 * Encapsulate a packet of type family for the local net.
 */
static void
snstart(struct ifnet *ifp)
{
	struct sn_softc	*sc = ifp->if_softc;
	struct mbuf	*m;
	int		mtd_next;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

outloop:
	/* Check for room in the xmit buffer. */
	if ((mtd_next = (sc->mtd_free + 1)) == NTDA)
		mtd_next = 0;

	if (mtd_next == sc->mtd_hw) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == 0)
		return;

	/* We need the header for m_pkthdr.len. */
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("%s: snstart: no header mbuf", device_xname(sc->sc_dev));

	/*
	 * If bpf is listening on this interface, let it
	 * see the packet before we commit it to the wire.
	 */
	bpf_mtap(ifp, m);

	/*
	 * If there is nothing in the o/p queue, and there is room in
	 * the Tx ring, then send the packet directly.  Otherwise append
	 * it to the o/p queue.
	 */
	if ((sonicput(sc, m, mtd_next)) == 0) {
		IF_PREPEND(&ifp->if_snd, m);
		return;
	}

	sc->mtd_prev = sc->mtd_free;
	sc->mtd_free = mtd_next;

	ifp->if_opackets++;		/* # of pkts */

	/* Jump back for possibly more punishment. */
	goto outloop;
}
Example #19
/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
bestart(struct ifnet *ifp)
{
	struct be_softc *sc = (struct be_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = be_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
				    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, BE_CRI_CTRL,
				  BE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}
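The two numbered assumptions in the header comment describe the caller's obligations rather than checks bestart() makes itself. A hedged sketch of a conforming caller; in practice these are the if_start hook and the transmit-complete interrupt path:

	s = splnet();
	if ((ifp->if_flags & IFF_OACTIVE) == 0)
		bestart(ifp);
	splx(s);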
Example #20
/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend the address family.
 *
 * Cons up a minimal dummy header to pacify bpf.  We allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_af(caddr_t arg, u_int32_t af, struct mbuf *m, u_int direction)
{
    struct m_hdr mh;

    mh.mh_flags = 0;
    mh.mh_next = m;
    mh.mh_len = 4;
    mh.mh_data = (caddr_t)&af;

    bpf_mtap(arg, (struct mbuf *) &mh, direction);
    m->m_flags |= mh.mh_flags & M_FILDROP;
}
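A hedged example of a call site for this helper: a tunnel or point-to-point driver whose frames carry no link-layer header, so the address family is prepended for the benefit of BPF listeners. The guard mirrors the pattern used elsewhere in these examples:

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_af(ifp->if_bpf, AF_INET, m, BPF_DIRECTION_OUT);
#endif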
Example #21
void
dme_receive(struct dme_softc *sc, struct ifnet *ifp)
{
    uint8_t ready = 0x01;

    DPRINTF(("inside dme_receive\n"));

    while (ready == 0x01) {
        /* Packet received, retrieve it */

        /* Read without address increment to get the ready byte without moving past it. */
        bus_space_write_1(sc->sc_iot, sc->sc_ioh,
                          sc->dme_io, DM9000_MRCMDX);
        /* Dummy ready */
        ready = bus_space_read_1(sc->sc_iot, sc->sc_ioh, sc->dme_data);
        ready = bus_space_read_1(sc->sc_iot, sc->sc_ioh, sc->dme_data);
        ready &= 0x03;	/* we only want bits 1:0 */
        if (ready == 0x01) {
            uint8_t		rx_status;
            struct mbuf	*m;

            /* Read with address increment. */
            bus_space_write_1(sc->sc_iot, sc->sc_ioh,
                              sc->dme_io, DM9000_MRCMD);

            rx_status = sc->sc_pkt_read(sc, ifp, &m);

            if (rx_status & (DM9000_RSR_CE | DM9000_RSR_PLE)) {
                /* Error while receiving the packet,
                 * discard it and keep track of counters
                 */
                ifp->if_ierrors++;
                RX_DPRINTF(("dme_receive: "
                            "Error reciving packet\n"));
            } else if (rx_status & DM9000_RSR_LCS) {
                ifp->if_collisions++;
            } else {
                if (ifp->if_bpf)
                    bpf_mtap(ifp, m);
                ifp->if_ipackets++;
                (*ifp->if_input)(ifp, m);
            }

        } else if (ready != 0x00) {
            /* Should this be logged somehow? */
            printf("%s: Resetting chip\n",
                   device_xname(sc->sc_dev));
            dme_reset(sc);
        }
    }
}
Example #22
/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Cons up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
void
bpf_mtap_hdr(caddr_t arg, caddr_t data, u_int dlen, struct mbuf *m,
             u_int direction)
{
    struct m_hdr mh;

    mh.mh_flags = 0;
    mh.mh_next = m;
    mh.mh_len = dlen;
    mh.mh_data = data;

    bpf_mtap(arg, (struct mbuf *) &mh, direction);
    m->m_flags |= mh.mh_flags & M_FILDROP;
}
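A hedged example of using this helper from a driver that strips a device-specific header off the mbuf before input but still wants BPF to capture it; hdr is a hypothetical linear header the driver keeps on the stack:

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_hdr(ifp->if_bpf, (caddr_t)&hdr, sizeof(hdr), m,
		    BPF_DIRECTION_IN);
#endif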
Example #23
static int
epe_intr(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t ndq = 0, irq, *cur;

	irq = EPE_READ(IntStsC);
begin:
	cur = (uint32_t *)(EPE_READ(RXStsQCurAdd) -
		sc->ctrlpage_dsaddr + (char*)sc->ctrlpage);
	CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(uint32_t),
		RX_QLEN * 4 * sizeof(uint32_t), 
		BUS_DMASYNC_PREREAD);
	while (sc->RXStsQ_cur != cur) {
		if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) == 
			(RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) {
			uint32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff;
			uint32_t fl = sc->RXStsQ_cur[1] & 0xffff;
			struct mbuf *m;

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_unload(sc->sc_dmat, 
					sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len = 
					sc->rxq[bi].m->m_len = fl;
				bpf_mtap(ifp, sc->rxq[bi].m);
                                (*ifp->if_input)(ifp, sc->rxq[bi].m);
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat, 
					sc->rxq[bi].m_dmamap, 
					m->m_ext.ext_buf, MCLBYTES,
					NULL, BUS_DMA_NOWAIT);
				sc->RXDQ[bi * 2] = 
					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr;
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			} 
		} else {
Example #24
void
dme_prepare(struct dme_softc *sc, struct ifnet *ifp)
{
    struct mbuf *bufChain;
    uint16_t length;

    TX_DPRINTF(("dme_prepare: Entering\n"));

    if (sc->txready)
        panic("dme_prepare: Someone called us with txready set\n");

    IFQ_DEQUEUE(&ifp->if_snd, bufChain);
    if (bufChain == NULL) {
        TX_DPRINTF(("dme_prepare: Nothing to transmit\n"));
        ifp->if_flags &= ~IFF_OACTIVE; /* Clear OACTIVE bit */
        return; /* Nothing to transmit */
    }

    /* Element has now been removed from the queue, so we better send it */

    if (ifp->if_bpf)
        bpf_mtap(ifp, bufChain);

    /* Setup the DM9000 to accept the writes, and then write each buf in
       the chain. */

    TX_DATA_DPRINTF(("dme_prepare: Writing data: "));
    bus_space_write_1(sc->sc_iot, sc->sc_ioh, sc->dme_io, DM9000_MWCMD);
    length = sc->sc_pkt_write(sc, bufChain);
    TX_DATA_DPRINTF(("\n"));

    if (length % sc->sc_data_width != 0) {
        panic("dme_prepare: length is not compatible with IO_MODE");
    }

    sc->txready_length = length;
    sc->txready = 1;

    TX_DPRINTF(("dme_prepare: txbusy: %d\ndme_prepare: "
                "txready: %d, txready_length: %d\n",
                sc->txbusy, sc->txready, sc->txready_length));

    m_freem(bufChain);

    TX_DPRINTF(("dme_prepare: Leaving\n"));
}
Example #25
/*
 * Setup output on interface.
 * Get another datagram to send off of the interface queue,
 * and map it to the interface before starting the output.
 * Must be called from ipl >= our interrupt level.
 */
void
destart(struct ifnet *ifp)
{
	struct de_softc *sc = ifp->if_softc;
	struct de_cdata *dc;
	struct de_ring *rp;
	struct mbuf *m;
	int nxmit, len;

	/*
	 * the following test is necessary, since
	 * the code is not reentrant and we have
	 * multiple transmission buffers.
	 */
	if (sc->sc_if.if_flags & IFF_OACTIVE)
		return;
	dc = sc->sc_dedata;
	for (nxmit = sc->sc_nxmit; nxmit < NXMT; nxmit++) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

		rp = &dc->dc_xrent[sc->sc_xfree];
		if (rp->r_flags & XFLG_OWN)
			panic("deuna xmit in progress");
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		len = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[sc->sc_xfree], m);
		rp->r_slen = len;
		rp->r_tdrerr = 0;
		rp->r_flags = XFLG_STP|XFLG_ENP|XFLG_OWN;

		sc->sc_xfree++;
		if (sc->sc_xfree == NXMT)
			sc->sc_xfree = 0;
	}
	if (sc->sc_nxmit != nxmit) {
		sc->sc_nxmit = nxmit;
		if (ifp->if_flags & IFF_RUNNING)
			DE_WLOW(PCSR0_INTE|CMD_PDMD);
	}
}
Example #26
/*
 * Ethernet interface receiver interface.
 * If input error just drop packet.
 * Otherwise purge input buffered data path and examine 
 * packet to determine type.  If can't determine length
 * from type, then have to drop packet.	 Otherwise decapsulate
 * packet based on type and pass to type specific higher-level
 * input routine.
 */
void
derecv(struct de_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct de_ring *rp;
	struct de_cdata *dc;
	struct mbuf *m;
	int len;

	dc = sc->sc_dedata;
	rp = &dc->dc_rrent[sc->sc_rindex];
	while ((rp->r_flags & RFLG_OWN) == 0) {
		sc->sc_if.if_ipackets++;
		len = (rp->r_lenerr&RERR_MLEN) - ETHER_CRC_LEN;
		/* check for errors */
		if ((rp->r_flags & (RFLG_ERRS|RFLG_FRAM|RFLG_OFLO|RFLG_CRC)) ||
		    (rp->r_lenerr & (RERR_BUFL|RERR_UBTO))) {
			sc->sc_if.if_ierrors++;
			goto next;
		}
		m = if_ubaget(&sc->sc_ifuba, &sc->sc_ifr[sc->sc_rindex],
		    ifp, len);
		if (m == 0) {
			sc->sc_if.if_ierrors++;
			goto next;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		(*ifp->if_input)(ifp, m);

		/* hang the receive buffer again */
next:		rp->r_lenerr = 0;
		rp->r_flags = RFLG_OWN;

		/* check next receive buffer */
		sc->sc_rindex++;
		if (sc->sc_rindex == NRCV)
			sc->sc_rindex = 0;
		rp = &dc->dc_rrent[sc->sc_rindex];
	}
}
Example #27
int
wanpipe_generic_input(struct ifnet *ifp, struct mbuf *m)
{
	sdla_t		*card;
#if NBPFILTER > 0
#endif /* NBPFILTER > 0 */

	if ((card = wanpipe_generic_getcard(ifp)) == NULL) {
		return (-EINVAL);
	}
	m->m_pkthdr.rcvif = ifp;
#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */
	ifp->if_ipackets ++;
	sppp_input(ifp, m);
	return (0);
}
Example #28
void
kue_start(struct ifnet *ifp)
{
	struct kue_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	DPRINTFN(10,("%s: %s: enter\n", sc->kue_dev.dv_xname,__func__));

	if (sc->kue_dying)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	IFQ_POLL(&ifp->if_snd, m_head);
	if (m_head == NULL)
		return;

	if (kue_send(sc, m_head, 0)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 6;
}
Example #29
void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].td_status & TSEC_TX_TO1) == 0) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (tsec_encap(sc, m, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
Example #30
void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}